1/*
2 * Copyright (c) 2012 - 2017 Intel Corporation.  All rights reserved.
3 * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses.  You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 *     Redistribution and use in source and binary forms, with or
12 *     without modification, are permitted provided that the following
13 *     conditions are met:
14 *
15 *      - Redistributions of source code must retain the above
16 *        copyright notice, this list of conditions and the following
17 *        disclaimer.
18 *
19 *      - Redistributions in binary form must reproduce the above
20 *        copyright notice, this list of conditions and the following
21 *        disclaimer in the documentation and/or other materials
22 *        provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34/*
35 * This file contains all of the code that is specific to the
36 * InfiniPath 7322 chip
37 */
38
39#include <linux/interrupt.h>
40#include <linux/pci.h>
41#include <linux/delay.h>
42#include <linux/io.h>
43#include <linux/jiffies.h>
44#include <linux/module.h>
45#include <rdma/ib_verbs.h>
46#include <rdma/ib_smi.h>
47#ifdef CONFIG_INFINIBAND_QIB_DCA
48#include <linux/dca.h>
49#endif
50
51#include "qib.h"
52#include "qib_7322_regs.h"
53#include "qib_qsfp.h"
54
55#include "qib_mad.h"
56#include "qib_verbs.h"
57
58#undef pr_fmt
59#define pr_fmt(fmt) QIB_DRV_NAME " " fmt
60
61static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
62static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
63static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
64static irqreturn_t qib_7322intr(int irq, void *data);
65static irqreturn_t qib_7322bufavail(int irq, void *data);
66static irqreturn_t sdma_intr(int irq, void *data);
67static irqreturn_t sdma_idle_intr(int irq, void *data);
68static irqreturn_t sdma_progress_intr(int irq, void *data);
69static irqreturn_t sdma_cleanup_intr(int irq, void *data);
70static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
71				  struct qib_ctxtdata *rcd);
72static u8 qib_7322_phys_portstate(u64);
73static u32 qib_7322_iblink_state(u64);
74static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
75				   u16 linitcmd);
76static void force_h1(struct qib_pportdata *);
77static void adj_tx_serdes(struct qib_pportdata *);
78static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
79static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
80
81static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
82static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
83static void serdes_7322_los_enable(struct qib_pportdata *, int);
84static int serdes_7322_init_old(struct qib_pportdata *);
85static int serdes_7322_init_new(struct qib_pportdata *);
86static void dump_sdma_7322_state(struct qib_pportdata *);
87
88#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
89
90/* LE2 serdes values for different cases */
91#define LE2_DEFAULT 5
92#define LE2_5m 4
93#define LE2_QME 0
94
95/* Below is special-purpose, so only really works for the IB SerDes blocks. */
96#define IBSD(hw_pidx) (hw_pidx + 2)
97
98/* these are variables for documentation and experimentation purposes */
99static const unsigned rcv_int_timeout = 375;
100static const unsigned rcv_int_count = 16;
101static const unsigned sdma_idle_cnt = 64;
102
103/* Time to stop altering Rx Equalization parameters, after link up. */
104#define RXEQ_DISABLE_MSECS 2500
105
106/*
107 * Number of VLs we are configured to use (to allow for more
108 * credits per vl, etc.)
109 */
110ushort qib_num_cfg_vls = 2;
111module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
112MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
113
114static ushort qib_chase = 1;
115module_param_named(chase, qib_chase, ushort, S_IRUGO);
116MODULE_PARM_DESC(chase, "Enable state chase handling");
117
118static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
119module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
120MODULE_PARM_DESC(long_attenuation,
121		 "attenuation cutoff (dB) for long copper cable setup");
122
123static ushort qib_singleport;
124module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
125MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
126
127static ushort qib_krcvq01_no_msi;
128module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
129MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
130
131/*
132 * Receive header queue sizes
133 */
134static unsigned qib_rcvhdrcnt;
135module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
136MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
137
138static unsigned qib_rcvhdrsize;
139module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
140MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
141
142static unsigned qib_rcvhdrentsize;
143module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
144MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
145
146#define MAX_ATTEN_LEN 64 /* plenty for any real system */
147/* for read back, default index is ~5m copper cable */
148static char txselect_list[MAX_ATTEN_LEN] = "10";
149static struct kparam_string kp_txselect = {
150	.string = txselect_list,
151	.maxlen = MAX_ATTEN_LEN
152};
153static int  setup_txselect(const char *, const struct kernel_param *);
154module_param_call(txselect, setup_txselect, param_get_string,
155		  &kp_txselect, S_IWUSR | S_IRUGO);
156MODULE_PARM_DESC(txselect,
157		 "Tx serdes indices (for no QSFP or invalid QSFP data)");
158
159#define BOARD_QME7342 5
160#define BOARD_QMH7342 6
161#define BOARD_QMH7360 9
162#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
163		    BOARD_QMH7342)
164#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
165		    BOARD_QME7342)
166
167#define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))
168
169#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
170
171#define MASK_ACROSS(lsb, msb) \
172	(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
173
174#define SYM_RMASK(regname, fldname) ((u64)              \
175	QIB_7322_##regname##_##fldname##_RMASK)
176
177#define SYM_MASK(regname, fldname) ((u64)               \
178	QIB_7322_##regname##_##fldname##_RMASK <<       \
179	 QIB_7322_##regname##_##fldname##_LSB)
180
181#define SYM_FIELD(value, regname, fldname) ((u64)	\
182	(((value) >> SYM_LSB(regname, fldname)) &	\
183	 SYM_RMASK(regname, fldname)))
184
185/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
186#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
187	(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
188
189#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
190#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
191#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
192#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
193#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
194/* Below because most, but not all, fields of IntMask have that full suffix */
195#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
196
197
198#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
199
200/*
201 * the size bits give us 2^N, in KB units.  0 marks as invalid,
202 * and 7 is reserved.  We currently use only 2KB and 4KB
203 */
204#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
205#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
206#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
207#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
208
209#define SendIBSLIDAssignMask \
210	QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
211#define SendIBSLMCMask \
212	QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
213
214#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
215#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
216#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
217#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
218#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
219#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
220
221#define _QIB_GPIO_SDA_NUM 1
222#define _QIB_GPIO_SCL_NUM 0
223#define QIB_EEPROM_WEN_NUM 14
224#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
225
226/* HW counter clock is at 4nsec */
227#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
228
229/* full speed IB port 1 only */
230#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
231#define PORT_SPD_CAP_SHIFT 3
232
233/* full speed featuremask, both ports */
234#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
235
236/*
237 * This file contains almost all the chip-specific register information and
238 * access functions for the FAKED QLogic InfiniPath 7322 PCI-Express chip.
239 */
240
241/* Use defines to tie machine-generated names to lower-case names */
242#define kr_contextcnt KREG_IDX(ContextCnt)
243#define kr_control KREG_IDX(Control)
244#define kr_counterregbase KREG_IDX(CntrRegBase)
245#define kr_errclear KREG_IDX(ErrClear)
246#define kr_errmask KREG_IDX(ErrMask)
247#define kr_errstatus KREG_IDX(ErrStatus)
248#define kr_extctrl KREG_IDX(EXTCtrl)
249#define kr_extstatus KREG_IDX(EXTStatus)
250#define kr_gpio_clear KREG_IDX(GPIOClear)
251#define kr_gpio_mask KREG_IDX(GPIOMask)
252#define kr_gpio_out KREG_IDX(GPIOOut)
253#define kr_gpio_status KREG_IDX(GPIOStatus)
254#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
255#define kr_debugportval KREG_IDX(DebugPortValueReg)
256#define kr_fmask KREG_IDX(feature_mask)
257#define kr_act_fmask KREG_IDX(active_feature_mask)
258#define kr_hwerrclear KREG_IDX(HwErrClear)
259#define kr_hwerrmask KREG_IDX(HwErrMask)
260#define kr_hwerrstatus KREG_IDX(HwErrStatus)
261#define kr_intclear KREG_IDX(IntClear)
262#define kr_intmask KREG_IDX(IntMask)
263#define kr_intredirect KREG_IDX(IntRedirect0)
264#define kr_intstatus KREG_IDX(IntStatus)
265#define kr_pagealign KREG_IDX(PageAlign)
266#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
267#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
268#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
269#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
270#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
271#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
272#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
273#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
274#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
275#define kr_revision KREG_IDX(Revision)
276#define kr_scratch KREG_IDX(Scratch)
277#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
278#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
279#define kr_sendctrl KREG_IDX(SendCtrl)
280#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
281#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
282#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
283#define kr_sendpiobufbase KREG_IDX(SendBufBase)
284#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
285#define kr_sendpiosize KREG_IDX(SendBufSize)
286#define kr_sendregbase KREG_IDX(SendRegBase)
287#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
288#define kr_userregbase KREG_IDX(UserRegBase)
289#define kr_intgranted KREG_IDX(Int_Granted)
290#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
291#define kr_intblocked KREG_IDX(IntBlocked)
292#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
293
294/*
295 * per-port kernel registers.  Access only with qib_read_kreg_port()
296 * or qib_write_kreg_port()
297 */
298#define krp_errclear KREG_IBPORT_IDX(ErrClear)
299#define krp_errmask KREG_IBPORT_IDX(ErrMask)
300#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
301#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
302#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
303#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
304#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
305#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
306#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
307#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
308#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
309#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
310#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
311#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
312#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
313#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
314#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
315#define krp_psstart KREG_IBPORT_IDX(PSStart)
316#define krp_psstat KREG_IBPORT_IDX(PSStat)
317#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
318#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
319#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
320#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
321#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
322#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
323#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
324#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
325#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
326#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
327#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
328#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
329#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
330#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
331#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
332#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
333#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
334#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
335#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
336#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
337#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
338#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
339#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
340#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
341#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
342#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
343#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
344#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
345#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
346#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
347#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
348
349/*
350 * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
351 * or qib_write_kreg_ctxt()
352 */
353#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
354#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
355
356/*
357 * TID Flow table, per context.  Reduces
358 * number of hdrq updates to one per flow (or on errors).
359 * context 0 and 1 share same memory, but have distinct
360 * addresses.  Since for now, we never use expected sends
361 * on kernel contexts, we don't worry about that (we initialize
362 * those entries for ctxt 0/1 on driver load twice, for example).
363 */
364#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
365#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
366
367/* these are the error bits in the tid flows, and are W1C */
368#define TIDFLOW_ERRBITS  ( \
369	(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
370	SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
371	(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
372	SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
373
374/* Most (not all) Counters are per-IBport.
375 * Requires LBIntCnt is at offset 0 in the group
376 */
377#define CREG_IDX(regname) \
378((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
379
380#define crp_badformat CREG_IDX(RxVersionErrCnt)
381#define crp_err_rlen CREG_IDX(RxLenErrCnt)
382#define crp_erricrc CREG_IDX(RxICRCErrCnt)
383#define crp_errlink CREG_IDX(RxLinkMalformCnt)
384#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
385#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
386#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
387#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
388#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
389#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
390#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
391#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
392#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
393#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
394#define crp_pktrcv CREG_IDX(RxDataPktCnt)
395#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
396#define crp_pktsend CREG_IDX(TxDataPktCnt)
397#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
398#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
399#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
400#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
401#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
402#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
403#define crp_rcvebp CREG_IDX(RxEBPCnt)
404#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
405#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
406#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
407#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
408#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
409#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
410#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
411#define crp_sendstall CREG_IDX(TxFlowStallCnt)
412#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
413#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
414#define crp_txlenerr CREG_IDX(TxLenErrCnt)
415#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
416#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
417#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
418#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
419#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
420#define crp_wordrcv CREG_IDX(RxDwordCnt)
421#define crp_wordsend CREG_IDX(TxDwordCnt)
422#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
423
424/* these are the (few) counters that are not port-specific */
425#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
426			QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
427#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
428#define cr_lbint CREG_DEVIDX(LBIntCnt)
429#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
430#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
431#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
432#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
433#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
434
435/* no chip register for # of IB ports supported, so define */
436#define NUM_IB_PORTS 2
437
438/* 1 VL15 buffer per hardware IB port, no register for this, so define */
439#define NUM_VL15_BUFS NUM_IB_PORTS
440
441/*
442 * context 0 and 1 are special, and there is no chip register that
443 * defines this value, so we have to define it here.
444 * These are all allocated to either 0 or 1 for single port
445 * hardware configuration, otherwise each gets half
446 */
447#define KCTXT0_EGRCNT 2048
448
449/* values for vl and port fields in PBC, 7322-specific */
450#define PBC_PORT_SEL_LSB 26
451#define PBC_PORT_SEL_RMASK 1
452#define PBC_VL_NUM_LSB 27
453#define PBC_VL_NUM_RMASK 7
454#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
455#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
456
457static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
458	[IB_RATE_2_5_GBPS] = 16,
459	[IB_RATE_5_GBPS] = 8,
460	[IB_RATE_10_GBPS] = 4,
461	[IB_RATE_20_GBPS] = 2,
462	[IB_RATE_30_GBPS] = 2,
463	[IB_RATE_40_GBPS] = 1
464};
465
466static const char * const qib_sdma_state_names[] = {
467	[qib_sdma_state_s00_hw_down]          = "s00_HwDown",
468	[qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
469	[qib_sdma_state_s20_idle]             = "s20_Idle",
470	[qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
471	[qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
472	[qib_sdma_state_s50_hw_halt_wait]     = "s50_HwHaltWait",
473	[qib_sdma_state_s99_running]          = "s99_Running",
474};
475
476#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
477#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
478
479/* link training states, from IBC */
480#define IB_7322_LT_STATE_DISABLED        0x00
481#define IB_7322_LT_STATE_LINKUP          0x01
482#define IB_7322_LT_STATE_POLLACTIVE      0x02
483#define IB_7322_LT_STATE_POLLQUIET       0x03
484#define IB_7322_LT_STATE_SLEEPDELAY      0x04
485#define IB_7322_LT_STATE_SLEEPQUIET      0x05
486#define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
487#define IB_7322_LT_STATE_CFGRCVFCFG      0x09
488#define IB_7322_LT_STATE_CFGWAITRMT      0x0a
489#define IB_7322_LT_STATE_CFGIDLE         0x0b
490#define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
491#define IB_7322_LT_STATE_TXREVLANES      0x0d
492#define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
493#define IB_7322_LT_STATE_RECOVERIDLE     0x0f
494#define IB_7322_LT_STATE_CFGENH          0x10
495#define IB_7322_LT_STATE_CFGTEST         0x11
496#define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
497#define IB_7322_LT_STATE_CFGWAITENH      0x13
498
499/* link state machine states from IBC */
500#define IB_7322_L_STATE_DOWN             0x0
501#define IB_7322_L_STATE_INIT             0x1
502#define IB_7322_L_STATE_ARM              0x2
503#define IB_7322_L_STATE_ACTIVE           0x3
504#define IB_7322_L_STATE_ACT_DEFER        0x4
505
506static const u8 qib_7322_physportstate[0x20] = {
507	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
508	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
509	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
510	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
511	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
512	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
513	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
514	[IB_7322_LT_STATE_CFGRCVFCFG] =
515		IB_PHYSPORTSTATE_CFG_TRAIN,
516	[IB_7322_LT_STATE_CFGWAITRMT] =
517		IB_PHYSPORTSTATE_CFG_TRAIN,
518	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
519	[IB_7322_LT_STATE_RECOVERRETRAIN] =
520		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
521	[IB_7322_LT_STATE_RECOVERWAITRMT] =
522		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
523	[IB_7322_LT_STATE_RECOVERIDLE] =
524		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
525	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
526	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
527	[IB_7322_LT_STATE_CFGWAITRMTTEST] =
528		IB_PHYSPORTSTATE_CFG_TRAIN,
529	[IB_7322_LT_STATE_CFGWAITENH] =
530		IB_PHYSPORTSTATE_CFG_WAIT_ENH,
531	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
532	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
533	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
534	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
535};
536
537#ifdef CONFIG_INFINIBAND_QIB_DCA
538struct qib_irq_notify {
539	int rcv;
540	void *arg;
541	struct irq_affinity_notify notify;
542};
543#endif
544
545struct qib_chip_specific {
546	u64 __iomem *cregbase;
547	u64 *cntrs;
548	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
549	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
550	u64 main_int_mask;      /* clear bits which have dedicated handlers */
551	u64 int_enable_mask;  /* for per port interrupts in single port mode */
552	u64 errormask;
553	u64 hwerrmask;
554	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
555	u64 gpio_mask; /* shadow the gpio mask register */
556	u64 extctrl; /* shadow the gpio output enable, etc... */
557	u32 ncntrs;
558	u32 nportcntrs;
559	u32 cntrnamelen;
560	u32 portcntrnamelen;
561	u32 numctxts;
562	u32 rcvegrcnt;
563	u32 updthresh; /* current AvailUpdThld */
564	u32 updthresh_dflt; /* default AvailUpdThld */
565	u32 r1;
566	u32 num_msix_entries;
567	u32 sdmabufcnt;
568	u32 lastbuf_for_pio;
569	u32 stay_in_freeze;
570	u32 recovery_ports_initted;
571#ifdef CONFIG_INFINIBAND_QIB_DCA
572	u32 dca_ctrl;
573	int rhdr_cpu[18];
574	int sdma_cpu[2];
575	u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
576#endif
577	struct qib_msix_entry *msix_entries;
578	unsigned long *sendchkenable;
579	unsigned long *sendgrhchk;
580	unsigned long *sendibchk;
581	u32 rcvavail_timeout[18];
582	char emsgbuf[128]; /* for device error interrupt msg buffer */
583};
584
585/* Table of entries in "human readable" form Tx Emphasis. */
586struct txdds_ent {
587	u8 amp;
588	u8 pre;
589	u8 main;
590	u8 post;
591};
592
593struct vendor_txdds_ent {
594	u8 oui[QSFP_VOUI_LEN];
595	u8 *partnum;
596	struct txdds_ent sdr;
597	struct txdds_ent ddr;
598	struct txdds_ent qdr;
599};
600
601static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
602
603#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
604#define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
605#define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
606#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
607
608#define H1_FORCE_VAL 8
609#define H1_FORCE_QME 1 /*  may be overridden via setup_txselect() */
610#define H1_FORCE_QMH 7 /*  may be overridden via setup_txselect() */
611
612/* The static and dynamic registers are paired, and the pairs indexed by spd */
613#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
614	+ ((spd) * 2))
615
616#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
617#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
618#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
619#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
620#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
621
622struct qib_chippport_specific {
623	u64 __iomem *kpregbase;
624	u64 __iomem *cpregbase;
625	u64 *portcntrs;
626	struct qib_pportdata *ppd;
627	wait_queue_head_t autoneg_wait;
628	struct delayed_work autoneg_work;
629	struct delayed_work ipg_work;
630	struct timer_list chase_timer;
631	/*
632	 * these 5 fields are used to establish deltas for IB symbol
633	 * errors and linkrecovery errors.  They can be reported on
634	 * some chips during link negotiation prior to INIT, and with
635	 * DDR when faking DDR negotiations with non-IBTA switches.
636	 * The chip counters are adjusted at driver unload if there is
637	 * a non-zero delta.
638	 */
639	u64 ibdeltainprog;
640	u64 ibsymdelta;
641	u64 ibsymsnap;
642	u64 iblnkerrdelta;
643	u64 iblnkerrsnap;
644	u64 iblnkdownsnap;
645	u64 iblnkdowndelta;
646	u64 ibmalfdelta;
647	u64 ibmalfsnap;
648	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
649	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
650	unsigned long qdr_dfe_time;
651	unsigned long chase_end;
652	u32 autoneg_tries;
653	u32 recovery_init;
654	u32 qdr_dfe_on;
655	u32 qdr_reforce;
656	/*
657	 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
658	 * entry zero is unused, to simplify indexing
659	 */
660	u8 h1_val;
661	u8 no_eep;  /* txselect table index to use if no qsfp info */
662	u8 ipg_tries;
663	u8 ibmalfusesnap;
664	struct qib_qsfp_data qsfp_data;
665	char epmsgbuf[192]; /* for port error interrupt msg buffer */
666	char sdmamsgbuf[192]; /* for per-port sdma error messages */
667};
668
669static struct {
670	const char *name;
671	irq_handler_t handler;
672	int lsb;
673	int port; /* 0 if not port-specific, else port # */
674	int dca;
675} irq_table[] = {
676	{ "", qib_7322intr, -1, 0, 0 },
677	{ " (buf avail)", qib_7322bufavail,
678		SYM_LSB(IntStatus, SendBufAvail), 0, 0},
679	{ " (sdma 0)", sdma_intr,
680		SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
681	{ " (sdma 1)", sdma_intr,
682		SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
683	{ " (sdmaI 0)", sdma_idle_intr,
684		SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1},
685	{ " (sdmaI 1)", sdma_idle_intr,
686		SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1},
687	{ " (sdmaP 0)", sdma_progress_intr,
688		SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
689	{ " (sdmaP 1)", sdma_progress_intr,
690		SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
691	{ " (sdmaC 0)", sdma_cleanup_intr,
692		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
693	{ " (sdmaC 1)", sdma_cleanup_intr,
694		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 , 0},
695};
696
697#ifdef CONFIG_INFINIBAND_QIB_DCA
698
699static const struct dca_reg_map {
700	int     shadow_inx;
701	int     lsb;
702	u64     mask;
703	u16     regno;
704} dca_rcvhdr_reg_map[] = {
705	{ 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
706	   ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
707	{ 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
708	   ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
709	{ 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
710	   ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
711	{ 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
712	   ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
713	{ 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
714	   ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
715	{ 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
716	   ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
717	{ 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
718	   ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
719	{ 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
720	   ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
721	{ 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
722	   ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
723	{ 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
724	   ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
725	{ 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
726	   ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
727	{ 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
728	   ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
729	{ 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
730	   ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
731	{ 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
732	   ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
733	{ 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
734	   ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
735	{ 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
736	   ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
737	{ 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
738	   ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
739	{ 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
740	   ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
741};
742#endif
743
744/* ibcctrl bits */
745#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
746/* cycle through TS1/TS2 till OK */
747#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
748/* wait for TS1, then go on */
749#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
750#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
751
752#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
753#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
754#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
755
756#define BLOB_7322_IBCHG 0x101
757
758static inline void qib_write_kreg(const struct qib_devdata *dd,
759				  const u32 regno, u64 value);
760static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
761static void write_7322_initregs(struct qib_devdata *);
762static void write_7322_init_portregs(struct qib_pportdata *);
763static void setup_7322_link_recovery(struct qib_pportdata *, u32);
764static void check_7322_rxe_status(struct qib_pportdata *);
765static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
766#ifdef CONFIG_INFINIBAND_QIB_DCA
767static void qib_setup_dca(struct qib_devdata *dd);
768static void setup_dca_notifier(struct qib_devdata *dd, int msixnum);
769static void reset_dca_notifier(struct qib_devdata *dd, int msixnum);
770#endif
771
772/**
773 * qib_read_ureg32 - read 32-bit virtualized per-context register
774 * @dd: device
775 * @regno: register number
776 * @ctxt: context number
777 *
778 * Return the contents of a register that is virtualized to be per context.
779 * Returns -1 on errors (not distinguishable from valid contents at
780 * runtime; we may add a separate error variable at some point).
781 */
782static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
783				  enum qib_ureg regno, int ctxt)
784{
785	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
786		return 0;
787	return readl(regno + (u64 __iomem *)(
788		(dd->ureg_align * ctxt) + (dd->userbase ?
789		 (char __iomem *)dd->userbase :
790		 (char __iomem *)dd->kregbase + dd->uregbase)));
791}
792
793/**
794 * qib_read_ureg - read virtualized per-context register
795 * @dd: device
796 * @regno: register number
797 * @ctxt: context number
798 *
799 * Return the contents of a register that is virtualized to be per context.
800 * Returns -1 on errors (not distinguishable from valid contents at
801 * runtime; we may add a separate error variable at some point).
802 */
803static inline u64 qib_read_ureg(const struct qib_devdata *dd,
804				enum qib_ureg regno, int ctxt)
805{
806
807	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
808		return 0;
809	return readq(regno + (u64 __iomem *)(
810		(dd->ureg_align * ctxt) + (dd->userbase ?
811		 (char __iomem *)dd->userbase :
812		 (char __iomem *)dd->kregbase + dd->uregbase)));
813}
814
815/**
816 * qib_write_ureg - write virtualized per-context register
817 * @dd: device
818 * @regno: register number
819 * @value: value
820 * @ctxt: context
821 *
822 * Write the contents of a register that is virtualized to be per context.
823 */
824static inline void qib_write_ureg(const struct qib_devdata *dd,
825				  enum qib_ureg regno, u64 value, int ctxt)
826{
827	u64 __iomem *ubase;
828
829	if (dd->userbase)
830		ubase = (u64 __iomem *)
831			((char __iomem *) dd->userbase +
832			 dd->ureg_align * ctxt);
833	else
834		ubase = (u64 __iomem *)
835			(dd->uregbase +
836			 (char __iomem *) dd->kregbase +
837			 dd->ureg_align * ctxt);
838
839	if (dd->kregbase && (dd->flags & QIB_PRESENT))
840		writeq(value, &ubase[regno]);
841}
842
843static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
844				  const u32 regno)
845{
846	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
847		return -1;
848	return readl((u32 __iomem *) &dd->kregbase[regno]);
849}
850
851static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
852				  const u32 regno)
853{
854	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
855		return -1;
856	return readq(&dd->kregbase[regno]);
857}
858
859static inline void qib_write_kreg(const struct qib_devdata *dd,
860				  const u32 regno, u64 value)
861{
862	if (dd->kregbase && (dd->flags & QIB_PRESENT))
863		writeq(value, &dd->kregbase[regno]);
864}
865
866/*
867 * not many sanity checks for the port-specific kernel register routines,
868 * since they are only used when it's known to be safe.
869*/
870static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
871				     const u16 regno)
872{
873	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
874		return 0ULL;
875	return readq(&ppd->cpspec->kpregbase[regno]);
876}
877
878static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
879				       const u16 regno, u64 value)
880{
881	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
882	    (ppd->dd->flags & QIB_PRESENT))
883		writeq(value, &ppd->cpspec->kpregbase[regno]);
884}
885
886/**
887 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
888 * @dd: the qlogic_ib device
889 * @regno: the register number to write
890 * @ctxt: the context containing the register
891 * @value: the value to write
892 */
893static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
894				       const u16 regno, unsigned ctxt,
895				       u64 value)
896{
897	qib_write_kreg(dd, regno + ctxt, value);
898}
899
900static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
901{
902	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
903		return 0;
904	return readq(&dd->cspec->cregbase[regno]);
905
906
907}
908
909static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
910{
911	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
912		return 0;
913	return readl(&dd->cspec->cregbase[regno]);
914
915
916}
917
918static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
919					u16 regno, u64 value)
920{
921	if (ppd->cpspec && ppd->cpspec->cpregbase &&
922	    (ppd->dd->flags & QIB_PRESENT))
923		writeq(value, &ppd->cpspec->cpregbase[regno]);
924}
925
926static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
927				      u16 regno)
928{
929	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
930	    !(ppd->dd->flags & QIB_PRESENT))
931		return 0;
932	return readq(&ppd->cpspec->cpregbase[regno]);
933}
934
935static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
936					u16 regno)
937{
938	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
939	    !(ppd->dd->flags & QIB_PRESENT))
940		return 0;
941	return readl(&ppd->cpspec->cpregbase[regno]);
942}
943
944/* bits in Control register */
945#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
946#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
947
948/* bits in general interrupt regs */
949#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
950#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
951#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
952#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
953#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
954#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
955#define QIB_I_C_ERROR INT_MASK(Err)
956
957#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
958#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
959#define QIB_I_GPIO INT_MASK(AssertGPIO)
960#define QIB_I_P_SDMAINT(pidx) \
961	(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
962	 INT_MASK_P(SDmaProgress, pidx) | \
963	 INT_MASK_PM(SDmaCleanupDone, pidx))
964
965/* Interrupt bits that are "per port" */
966#define QIB_I_P_BITSEXTANT(pidx) \
967	(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
968	INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
969	INT_MASK_P(SDmaProgress, pidx) | \
970	INT_MASK_PM(SDmaCleanupDone, pidx))
971
972/* Interrupt bits that are common to a device */
973/* currently unused: QIB_I_SPIOSENT */
974#define QIB_I_C_BITSEXTANT \
975	(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
976	QIB_I_SPIOSENT | \
977	QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
978
979#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
980	QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
981
982/*
983 * Error bits that are "per port".
984 */
985#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
986#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
987#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
988#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
989#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
990#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
991#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
992#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
993#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
994#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
995#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
996#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
997#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
998#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
999#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
1000#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
1001#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
1002#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
1003#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
1004#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
1005#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
1006#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
1007#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
1008#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
1009#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
1010#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
1011#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
1012#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
1013
1014#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
1015#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
1016#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
1017#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
1018#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
1019#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
1020#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
1021#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
1022#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
1023#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
1024#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
1025
1026/* Error bits that are common to a device */
1027#define QIB_E_RESET ERR_MASK(ResetNegated)
1028#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
1029#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
1030
1031
1032/*
1033 * Per chip (rather than per-port) errors.  Most either do
1034 * nothing but trigger a print (because they self-recover, or
1035 * always occur in tandem with other errors that handle the
1036 * issue), or because they indicate errors with no recovery,
1037 * but we want to know that they happened.
1038 */
1039#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
1040#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
1041#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
1042#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
1043#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
1044#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
1045#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
1046#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
1047
1048/* SDMA chip errors (not per port)
1049 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
1050 * the SDMAHALT error immediately, so we just print the dup error via the
1051 * E_AUTO mechanism.  This is true of most of the per-port fatal errors
1052 * as well, but since this is port-independent, by definition, it's
1053 * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
1054 * packet send errors, and so are handled in the same manner as other
1055 * per-packet errors.
1056 */
1057#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
1058#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
1059#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
1060
1061/*
1062 * Below functionally equivalent to legacy QLOGIC_IB_E_PKTERRS
1063 * it is used to print "common" packet errors.
1064 */
1065#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
1066	QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
1067	QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
1068	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1069	QIB_E_P_REBP)
1070
1071/* Error Bits that Packet-related (Receive, per-port) */
1072#define QIB_E_P_RPKTERRS (\
1073	QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
1074	QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
1075	QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
1076	QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
1077	QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
1078	QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
1079
1080/*
1081 * Error bits that are Send-related (per port)
1082 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
1083 * All of these potentially need to have a buffer disarmed
1084 */
1085#define QIB_E_P_SPKTERRS (\
1086	QIB_E_P_SUNEXP_PKTNUM |\
1087	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1088	QIB_E_P_SMAXPKTLEN |\
1089	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1090	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
1091	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
1092
1093#define QIB_E_SPKTERRS ( \
1094		QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
1095		ERR_MASK_N(SendUnsupportedVLErr) |			\
1096		QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
1097
1098#define QIB_E_P_SDMAERRS ( \
1099	QIB_E_P_SDMAHALT | \
1100	QIB_E_P_SDMADESCADDRMISALIGN | \
1101	QIB_E_P_SDMAUNEXPDATA | \
1102	QIB_E_P_SDMAMISSINGDW | \
1103	QIB_E_P_SDMADWEN | \
1104	QIB_E_P_SDMARPYTAG | \
1105	QIB_E_P_SDMA1STDESC | \
1106	QIB_E_P_SDMABASE | \
1107	QIB_E_P_SDMATAILOUTOFBOUND | \
1108	QIB_E_P_SDMAOUTOFBOUND | \
1109	QIB_E_P_SDMAGENMISMATCH)
1110
1111/*
1112 * This sets some bits more than once, but makes it more obvious which
1113 * bits are not handled under other categories, and the repeat definition
1114 * is not a problem.
1115 */
1116#define QIB_E_P_BITSEXTANT ( \
1117	QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1118	QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1119	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1120	QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1121	)
1122
1123/*
1124 * These are errors that can occur when the link
1125 * changes state while a packet is being sent or received.  This doesn't
1126 * cover things like EBP or VCRC that can be the result of a sending
1127 * having the link change state, so we receive a "known bad" packet.
1128 * All of these are "per port", so renamed:
1129 */
1130#define QIB_E_P_LINK_PKTERRS (\
1131	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1132	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1133	QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1134	QIB_E_P_RUNEXPCHAR)
1135
1136/*
1137 * This sets some bits more than once, but makes it more obvious which
1138 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
1139 * and the repeat definition is not a problem.
1140 */
1141#define QIB_E_C_BITSEXTANT (\
1142	QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1143	QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1144	QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1145
1146/* Likewise Neuter E_SPKT_ERRS_IGNORE */
1147#define E_SPKT_ERRS_IGNORE 0
1148
1149#define QIB_EXTS_MEMBIST_DISABLED \
1150	SYM_MASK(EXTStatus, MemBISTDisabled)
1151#define QIB_EXTS_MEMBIST_ENDTEST \
1152	SYM_MASK(EXTStatus, MemBISTEndTest)
1153
1154#define QIB_E_SPIOARMLAUNCH \
1155	ERR_MASK(SendArmLaunchErr)
1156
1157#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1158#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1159
1160/*
1161 * IBTA_1_2 is set when multiple speeds are enabled (normal),
1162 * and also if forced QDR (only QDR enabled).  It's enabled for the
1163 * forced QDR case so that scrambling will be enabled by the TS3
1164 * exchange, when supported by both sides of the link.
1165 */
1166#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1167#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1168#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1169#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1170#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1171#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1172	SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1173#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
1174
1175#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1176#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1177
1178#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1179#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1180#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1181
1182#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1183#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1184#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1185	SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1186#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1187	SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1188#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1189
1190#define IBA7322_REDIRECT_VEC_PER_REG 12
1191
1192#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1193#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1194#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1195#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1196#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1197
1198#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
1199
1200#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1201	.msg = #fldname , .sz = sizeof(#fldname) }
1202#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1203	fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
1204static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1205	HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1206	HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1207	HWE_AUTO(PCIESerdesPClkNotDetect),
1208	HWE_AUTO(PowerOnBISTFailed),
1209	HWE_AUTO(TempsenseTholdReached),
1210	HWE_AUTO(MemoryErr),
1211	HWE_AUTO(PCIeBusParityErr),
1212	HWE_AUTO(PcieCplTimeout),
1213	HWE_AUTO(PciePoisonedTLP),
1214	HWE_AUTO_P(SDmaMemReadErr, 1),
1215	HWE_AUTO_P(SDmaMemReadErr, 0),
1216	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
1217	HWE_AUTO_P(IBCBusToSPCParityErr, 1),
1218	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
1219	HWE_AUTO(statusValidNoEop),
1220	HWE_AUTO(LATriggered),
1221	{ .mask = 0, .sz = 0 }
1222};
1223
1224#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1225	.msg = #fldname, .sz = sizeof(#fldname) }
1226#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1227	.msg = #fldname, .sz = sizeof(#fldname) }
1228static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1229	E_AUTO(RcvEgrFullErr),
1230	E_AUTO(RcvHdrFullErr),
1231	E_AUTO(ResetNegated),
1232	E_AUTO(HardwareErr),
1233	E_AUTO(InvalidAddrErr),
1234	E_AUTO(SDmaVL15Err),
1235	E_AUTO(SBufVL15MisUseErr),
1236	E_AUTO(InvalidEEPCmd),
1237	E_AUTO(RcvContextShareErr),
1238	E_AUTO(SendVLMismatchErr),
1239	E_AUTO(SendArmLaunchErr),
1240	E_AUTO(SendSpecialTriggerErr),
1241	E_AUTO(SDmaWrongPortErr),
1242	E_AUTO(SDmaBufMaskDuplicateErr),
1243	{ .mask = 0, .sz = 0 }
1244};
1245
1246static const struct  qib_hwerror_msgs qib_7322p_error_msgs[] = {
1247	E_P_AUTO(IBStatusChanged),
1248	E_P_AUTO(SHeadersErr),
1249	E_P_AUTO(VL15BufMisuseErr),
1250	/*
1251	 * SDmaHaltErr is not really an error, make it clearer;
1252	 */
1253	{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
1254		.sz = 11},
1255	E_P_AUTO(SDmaDescAddrMisalignErr),
1256	E_P_AUTO(SDmaUnexpDataErr),
1257	E_P_AUTO(SDmaMissingDwErr),
1258	E_P_AUTO(SDmaDwEnErr),
1259	E_P_AUTO(SDmaRpyTagErr),
1260	E_P_AUTO(SDma1stDescErr),
1261	E_P_AUTO(SDmaBaseErr),
1262	E_P_AUTO(SDmaTailOutOfBoundErr),
1263	E_P_AUTO(SDmaOutOfBoundErr),
1264	E_P_AUTO(SDmaGenMismatchErr),
1265	E_P_AUTO(SendBufMisuseErr),
1266	E_P_AUTO(SendUnsupportedVLErr),
1267	E_P_AUTO(SendUnexpectedPktNumErr),
1268	E_P_AUTO(SendDroppedDataPktErr),
1269	E_P_AUTO(SendDroppedSmpPktErr),
1270	E_P_AUTO(SendPktLenErr),
1271	E_P_AUTO(SendUnderRunErr),
1272	E_P_AUTO(SendMaxPktLenErr),
1273	E_P_AUTO(SendMinPktLenErr),
1274	E_P_AUTO(RcvIBLostLinkErr),
1275	E_P_AUTO(RcvHdrErr),
1276	E_P_AUTO(RcvHdrLenErr),
1277	E_P_AUTO(RcvBadTidErr),
1278	E_P_AUTO(RcvBadVersionErr),
1279	E_P_AUTO(RcvIBFlowErr),
1280	E_P_AUTO(RcvEBPErr),
1281	E_P_AUTO(RcvUnsupportedVLErr),
1282	E_P_AUTO(RcvUnexpectedCharErr),
1283	E_P_AUTO(RcvShortPktLenErr),
1284	E_P_AUTO(RcvLongPktLenErr),
1285	E_P_AUTO(RcvMaxPktLenErr),
1286	E_P_AUTO(RcvMinPktLenErr),
1287	E_P_AUTO(RcvICRCErr),
1288	E_P_AUTO(RcvVCRCErr),
1289	E_P_AUTO(RcvFormatErr),
1290	{ .mask = 0, .sz = 0 }
1291};
1292
1293/*
1294 * Below generates "auto-message" for interrupts not specific to any port or
1295 * context
1296 */
1297#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1298	.msg = #fldname, .sz = sizeof(#fldname) }
1299/* Below generates "auto-message" for interrupts specific to a port */
1300#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1301	SYM_LSB(IntMask, fldname##Mask##_0), \
1302	SYM_LSB(IntMask, fldname##Mask##_1)), \
1303	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1304/* For some reason, the SerDesTrimDone bits are reversed */
1305#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1306	SYM_LSB(IntMask, fldname##Mask##_1), \
1307	SYM_LSB(IntMask, fldname##Mask##_0)), \
1308	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1309/*
1310 * Below generates "auto-message" for interrupts specific to a context,
1311 * with ctxt-number appended
1312 */
1313#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1314	SYM_LSB(IntMask, fldname##0IntMask), \
1315	SYM_LSB(IntMask, fldname##17IntMask)), \
1316	.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
1317
1318#define TXSYMPTOM_AUTO_P(fldname) \
1319	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1320	.msg = #fldname, .sz = sizeof(#fldname) }
1321static const struct  qib_hwerror_msgs hdrchk_msgs[] = {
1322	TXSYMPTOM_AUTO_P(NonKeyPacket),
1323	TXSYMPTOM_AUTO_P(GRHFail),
1324	TXSYMPTOM_AUTO_P(PkeyFail),
1325	TXSYMPTOM_AUTO_P(QPFail),
1326	TXSYMPTOM_AUTO_P(SLIDFail),
1327	TXSYMPTOM_AUTO_P(RawIPV6),
1328	TXSYMPTOM_AUTO_P(PacketTooSmall),
1329	{ .mask = 0, .sz = 0 }
1330};
1331
1332#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1333
1334/*
1335 * Called when we might have an error that is specific to a particular
1336 * PIO buffer, and may need to cancel that buffer, so it can be re-used,
1337 * because we don't need to force the update of pioavail
1338 */
1339static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1340{
1341	struct qib_devdata *dd = ppd->dd;
1342	u32 i;
1343	int any;
1344	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1345	u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1346	unsigned long sbuf[4];
1347
1348	/*
1349	 * It's possible that sendbuffererror could have bits set; might
1350	 * have already done this as a result of hardware error handling.
1351	 */
1352	any = 0;
1353	for (i = 0; i < regcnt; ++i) {
1354		sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1355		if (sbuf[i]) {
1356			any = 1;
1357			qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1358		}
1359	}
1360
1361	if (any)
1362		qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1363}
1364
1365/* No txe_recover yet, if ever */
1366
1367/* No decode__errors yet */
1368static void err_decode(char *msg, size_t len, u64 errs,
1369		       const struct qib_hwerror_msgs *msp)
1370{
1371	u64 these, lmask;
1372	int took, multi, n = 0;
1373
1374	while (errs && msp && msp->mask) {
1375		multi = (msp->mask & (msp->mask - 1));
1376		while (errs & msp->mask) {
1377			these = (errs & msp->mask);
1378			lmask = (these & (these - 1)) ^ these;
1379			if (len) {
1380				if (n++) {
1381					/* separate the strings */
1382					*msg++ = ',';
1383					len--;
1384				}
1385				/* msp->sz counts the nul */
1386				took = min_t(size_t, msp->sz - (size_t)1, len);
1387				memcpy(msg,  msp->msg, took);
1388				len -= took;
1389				msg += took;
1390				if (len)
1391					*msg = '\0';
1392			}
1393			errs &= ~lmask;
1394			if (len && multi) {
1395				/* More than one bit this mask */
1396				int idx = -1;
1397
1398				while (lmask & msp->mask) {
1399					++idx;
1400					lmask >>= 1;
1401				}
1402				took = scnprintf(msg, len, "_%d", idx);
1403				len -= took;
1404				msg += took;
1405			}
1406		}
1407		++msp;
1408	}
1409	/* If some bits are left, show in hex. */
1410	if (len && errs)
1411		snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1412			(unsigned long long) errs);
1413}
1414
1415/* only called if r1 set */
1416static void flush_fifo(struct qib_pportdata *ppd)
1417{
1418	struct qib_devdata *dd = ppd->dd;
1419	u32 __iomem *piobuf;
1420	u32 bufn;
1421	u32 *hdr;
1422	u64 pbc;
1423	const unsigned hdrwords = 7;
1424	static struct ib_header ibhdr = {
1425		.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1426		.lrh[1] = IB_LID_PERMISSIVE,
1427		.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1428		.lrh[3] = IB_LID_PERMISSIVE,
1429		.u.oth.bth[0] = cpu_to_be32(
1430			(IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1431		.u.oth.bth[1] = cpu_to_be32(0),
1432		.u.oth.bth[2] = cpu_to_be32(0),
1433		.u.oth.u.ud.deth[0] = cpu_to_be32(0),
1434		.u.oth.u.ud.deth[1] = cpu_to_be32(0),
1435	};
1436
1437	/*
1438	 * Send a dummy VL15 packet to flush the launch FIFO.
1439	 * This will not actually be sent since the TxeBypassIbc bit is set.
1440	 */
1441	pbc = PBC_7322_VL15_SEND |
1442		(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1443		(hdrwords + SIZE_OF_CRC);
1444	piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1445	if (!piobuf)
1446		return;
1447	writeq(pbc, piobuf);
1448	hdr = (u32 *) &ibhdr;
1449	if (dd->flags & QIB_PIO_FLUSH_WC) {
1450		qib_flush_wc();
1451		qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1452		qib_flush_wc();
1453		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1454		qib_flush_wc();
1455	} else
1456		qib_pio_copy(piobuf + 2, hdr, hdrwords);
1457	qib_sendbuf_done(dd, bufn);
1458}
1459
1460/*
1461 * This is called with interrupts disabled and sdma_lock held.
1462 */
1463static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1464{
1465	struct qib_devdata *dd = ppd->dd;
1466	u64 set_sendctrl = 0;
1467	u64 clr_sendctrl = 0;
1468
1469	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1470		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1471	else
1472		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1473
1474	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1475		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1476	else
1477		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1478
1479	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1480		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1481	else
1482		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1483
1484	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1485		set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1486				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1487				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1488	else
1489		clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1490				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1491				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1492
1493	spin_lock(&dd->sendctrl_lock);
1494
1495	/* If we are draining everything, block sends first */
1496	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1497		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1498		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1499		qib_write_kreg(dd, kr_scratch, 0);
1500	}
1501
1502	ppd->p_sendctrl |= set_sendctrl;
1503	ppd->p_sendctrl &= ~clr_sendctrl;
1504
1505	if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1506		qib_write_kreg_port(ppd, krp_sendctrl,
1507				    ppd->p_sendctrl |
1508				    SYM_MASK(SendCtrl_0, SDmaCleanup));
1509	else
1510		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1511	qib_write_kreg(dd, kr_scratch, 0);
1512
1513	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1514		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1515		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1516		qib_write_kreg(dd, kr_scratch, 0);
1517	}
1518
1519	spin_unlock(&dd->sendctrl_lock);
1520
1521	if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1522		flush_fifo(ppd);
1523}
1524
1525static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1526{
1527	__qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1528}
1529
1530static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1531{
1532	/*
1533	 * Set SendDmaLenGen and clear and set
1534	 * the MSB of the generation count to enable generation checking
1535	 * and load the internal generation counter.
1536	 */
1537	qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1538	qib_write_kreg_port(ppd, krp_senddmalengen,
1539			    ppd->sdma_descq_cnt |
1540			    (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1541}
1542
1543/*
1544 * Must be called with sdma_lock held, or before init finished.
1545 */
1546static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1547{
1548	/* Commit writes to memory and advance the tail on the chip */
1549	wmb();
1550	ppd->sdma_descq_tail = tail;
1551	qib_write_kreg_port(ppd, krp_senddmatail, tail);
1552}
1553
1554/*
1555 * This is called with interrupts disabled and sdma_lock held.
1556 */
1557static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1558{
1559	/*
1560	 * Drain all FIFOs.
1561	 * The hardware doesn't require this but we do it so that verbs
1562	 * and user applications don't wait for link active to send stale
1563	 * data.
1564	 */
1565	sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1566
1567	qib_sdma_7322_setlengen(ppd);
1568	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1569	ppd->sdma_head_dma[0] = 0;
1570	qib_7322_sdma_sendctrl(ppd,
1571		ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1572}
1573
1574#define DISABLES_SDMA ( \
1575	QIB_E_P_SDMAHALT | \
1576	QIB_E_P_SDMADESCADDRMISALIGN | \
1577	QIB_E_P_SDMAMISSINGDW | \
1578	QIB_E_P_SDMADWEN | \
1579	QIB_E_P_SDMARPYTAG | \
1580	QIB_E_P_SDMA1STDESC | \
1581	QIB_E_P_SDMABASE | \
1582	QIB_E_P_SDMATAILOUTOFBOUND | \
1583	QIB_E_P_SDMAOUTOFBOUND | \
1584	QIB_E_P_SDMAGENMISMATCH)
1585
1586static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1587{
1588	unsigned long flags;
1589	struct qib_devdata *dd = ppd->dd;
1590
1591	errs &= QIB_E_P_SDMAERRS;
1592	err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
1593		   errs, qib_7322p_error_msgs);
1594
1595	if (errs & QIB_E_P_SDMAUNEXPDATA)
1596		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1597			    ppd->port);
1598
1599	spin_lock_irqsave(&ppd->sdma_lock, flags);
1600
1601	if (errs != QIB_E_P_SDMAHALT) {
1602		/* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
1603		qib_dev_porterr(dd, ppd->port,
1604			"SDMA %s 0x%016llx %s\n",
1605			qib_sdma_state_names[ppd->sdma_state.current_state],
1606			errs, ppd->cpspec->sdmamsgbuf);
1607		dump_sdma_7322_state(ppd);
1608	}
1609
1610	switch (ppd->sdma_state.current_state) {
1611	case qib_sdma_state_s00_hw_down:
1612		break;
1613
1614	case qib_sdma_state_s10_hw_start_up_wait:
1615		if (errs & QIB_E_P_SDMAHALT)
1616			__qib_sdma_process_event(ppd,
1617				qib_sdma_event_e20_hw_started);
1618		break;
1619
1620	case qib_sdma_state_s20_idle:
1621		break;
1622
1623	case qib_sdma_state_s30_sw_clean_up_wait:
1624		break;
1625
1626	case qib_sdma_state_s40_hw_clean_up_wait:
1627		if (errs & QIB_E_P_SDMAHALT)
1628			__qib_sdma_process_event(ppd,
1629				qib_sdma_event_e50_hw_cleaned);
1630		break;
1631
1632	case qib_sdma_state_s50_hw_halt_wait:
1633		if (errs & QIB_E_P_SDMAHALT)
1634			__qib_sdma_process_event(ppd,
1635				qib_sdma_event_e60_hw_halted);
1636		break;
1637
1638	case qib_sdma_state_s99_running:
1639		__qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1640		__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1641		break;
1642	}
1643
1644	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1645}
1646
1647/*
1648 * handle per-device errors (not per-port errors)
1649 */
1650static noinline void handle_7322_errors(struct qib_devdata *dd)
1651{
1652	char *msg;
1653	u64 iserr = 0;
1654	u64 errs;
1655	u64 mask;
1656
1657	qib_stats.sps_errints++;
1658	errs = qib_read_kreg64(dd, kr_errstatus);
1659	if (!errs) {
1660		qib_devinfo(dd->pcidev,
1661			"device error interrupt, but no error bits set!\n");
1662		goto done;
1663	}
1664
1665	/* don't report errors that are masked */
1666	errs &= dd->cspec->errormask;
1667	msg = dd->cspec->emsgbuf;
1668
1669	/* do these first, they are most important */
1670	if (errs & QIB_E_HARDWARE) {
1671		*msg = '\0';
1672		qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1673	}
1674
1675	if (errs & QIB_E_SPKTERRS) {
1676		qib_disarm_7322_senderrbufs(dd->pport);
1677		qib_stats.sps_txerrs++;
1678	} else if (errs & QIB_E_INVALIDADDR)
1679		qib_stats.sps_txerrs++;
1680	else if (errs & QIB_E_ARMLAUNCH) {
1681		qib_stats.sps_txerrs++;
1682		qib_disarm_7322_senderrbufs(dd->pport);
1683	}
1684	qib_write_kreg(dd, kr_errclear, errs);
1685
1686	/*
1687	 * The ones we mask off are handled specially below
1688	 * or above.  Also mask SDMADISABLED by default as it
1689	 * is too chatty.
1690	 */
1691	mask = QIB_E_HARDWARE;
1692	*msg = '\0';
1693
1694	err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
1695		   qib_7322error_msgs);
1696
1697	/*
1698	 * Getting reset is a tragedy for all ports. Mark the device
1699	 * _and_ the ports as "offline" in way meaningful to each.
1700	 */
1701	if (errs & QIB_E_RESET) {
1702		int pidx;
1703
1704		qib_dev_err(dd,
1705			"Got reset, requires re-init (unload and reload driver)\n");
1706		dd->flags &= ~QIB_INITTED;  /* needs re-init */
1707		/* mark as having had error */
1708		*dd->devstatusp |= QIB_STATUS_HWERROR;
1709		for (pidx = 0; pidx < dd->num_pports; ++pidx)
1710			if (dd->pport[pidx].link_speed_supported)
1711				*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1712	}
1713
1714	if (*msg && iserr)
1715		qib_dev_err(dd, "%s error\n", msg);
1716
1717	/*
1718	 * If there were hdrq or egrfull errors, wake up any processes
1719	 * waiting in poll.  We used to try to check which contexts had
1720	 * the overflow, but given the cost of that and the chip reads
1721	 * to support it, it's better to just wake everybody up if we
1722	 * get an overflow; waiters can poll again if it's not them.
1723	 */
1724	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1725		qib_handle_urcv(dd, ~0U);
1726		if (errs & ERR_MASK(RcvEgrFullErr))
1727			qib_stats.sps_buffull++;
1728		else
1729			qib_stats.sps_hdrfull++;
1730	}
1731
1732done:
1733	return;
1734}
1735
1736static void qib_error_tasklet(struct tasklet_struct *t)
1737{
1738	struct qib_devdata *dd = from_tasklet(dd, t, error_tasklet);
1739
1740	handle_7322_errors(dd);
1741	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1742}
1743
1744static void reenable_chase(struct timer_list *t)
1745{
1746	struct qib_chippport_specific *cp = from_timer(cp, t, chase_timer);
1747	struct qib_pportdata *ppd = cp->ppd;
1748
1749	ppd->cpspec->chase_timer.expires = 0;
1750	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1751		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1752}
1753
1754static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
1755		u8 ibclt)
1756{
1757	ppd->cpspec->chase_end = 0;
1758
1759	if (!qib_chase)
1760		return;
1761
1762	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1763		QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1764	ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1765	add_timer(&ppd->cpspec->chase_timer);
1766}
1767
1768static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1769{
1770	u8 ibclt;
1771	unsigned long tnow;
1772
1773	ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1774
1775	/*
1776	 * Detect and handle the state chase issue, where we can
1777	 * get stuck if we are unlucky on timing on both sides of
1778	 * the link.   If we are, we disable, set a timer, and
1779	 * then re-enable.
1780	 */
1781	switch (ibclt) {
1782	case IB_7322_LT_STATE_CFGRCVFCFG:
1783	case IB_7322_LT_STATE_CFGWAITRMT:
1784	case IB_7322_LT_STATE_TXREVLANES:
1785	case IB_7322_LT_STATE_CFGENH:
1786		tnow = jiffies;
1787		if (ppd->cpspec->chase_end &&
1788		     time_after(tnow, ppd->cpspec->chase_end))
1789			disable_chase(ppd, tnow, ibclt);
1790		else if (!ppd->cpspec->chase_end)
1791			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1792		break;
1793	default:
1794		ppd->cpspec->chase_end = 0;
1795		break;
1796	}
1797
1798	if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1799	      ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1800	     ibclt == IB_7322_LT_STATE_LINKUP) &&
1801	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1802		force_h1(ppd);
1803		ppd->cpspec->qdr_reforce = 1;
1804		if (!ppd->dd->cspec->r1)
1805			serdes_7322_los_enable(ppd, 0);
1806	} else if (ppd->cpspec->qdr_reforce &&
1807		(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1808		 (ibclt == IB_7322_LT_STATE_CFGENH ||
1809		ibclt == IB_7322_LT_STATE_CFGIDLE ||
1810		ibclt == IB_7322_LT_STATE_LINKUP))
1811		force_h1(ppd);
1812
1813	if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1814	    ppd->link_speed_enabled == QIB_IB_QDR &&
1815	    (ibclt == IB_7322_LT_STATE_CFGTEST ||
1816	     ibclt == IB_7322_LT_STATE_CFGENH ||
1817	     (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1818	      ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1819		adj_tx_serdes(ppd);
1820
1821	if (ibclt != IB_7322_LT_STATE_LINKUP) {
1822		u8 ltstate = qib_7322_phys_portstate(ibcst);
1823		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1824					  LinkTrainingState);
1825		if (!ppd->dd->cspec->r1 &&
1826		    pibclt == IB_7322_LT_STATE_LINKUP &&
1827		    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1828		    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1829		    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1830		    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1831			/* If the link went down (but no into recovery,
1832			 * turn LOS back on */
1833			serdes_7322_los_enable(ppd, 1);
1834		if (!ppd->cpspec->qdr_dfe_on &&
1835		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1836			ppd->cpspec->qdr_dfe_on = 1;
1837			ppd->cpspec->qdr_dfe_time = 0;
1838			/* On link down, reenable QDR adaptation */
1839			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1840					    ppd->dd->cspec->r1 ?
1841					    QDR_STATIC_ADAPT_DOWN_R1 :
1842					    QDR_STATIC_ADAPT_DOWN);
1843			pr_info(
1844				"IB%u:%u re-enabled QDR adaptation ibclt %x\n",
1845				ppd->dd->unit, ppd->port, ibclt);
1846		}
1847	}
1848}
1849
1850static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1851
1852/*
1853 * This is per-pport error handling.
1854 * will likely get it's own MSIx interrupt (one for each port,
1855 * although just a single handler).
1856 */
1857static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1858{
1859	char *msg;
1860	u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1861	struct qib_devdata *dd = ppd->dd;
1862
1863	/* do this as soon as possible */
1864	fmask = qib_read_kreg64(dd, kr_act_fmask);
1865	if (!fmask)
1866		check_7322_rxe_status(ppd);
1867
1868	errs = qib_read_kreg_port(ppd, krp_errstatus);
1869	if (!errs)
1870		qib_devinfo(dd->pcidev,
1871			 "Port%d error interrupt, but no error bits set!\n",
1872			 ppd->port);
1873	if (!fmask)
1874		errs &= ~QIB_E_P_IBSTATUSCHANGED;
1875	if (!errs)
1876		goto done;
1877
1878	msg = ppd->cpspec->epmsgbuf;
1879	*msg = '\0';
1880
1881	if (errs & ~QIB_E_P_BITSEXTANT) {
1882		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1883			   errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1884		if (!*msg)
1885			snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
1886				 "no others");
1887		qib_dev_porterr(dd, ppd->port,
1888			"error interrupt with unknown errors 0x%016Lx set (and %s)\n",
1889			(errs & ~QIB_E_P_BITSEXTANT), msg);
1890		*msg = '\0';
1891	}
1892
1893	if (errs & QIB_E_P_SHDR) {
1894		u64 symptom;
1895
1896		/* determine cause, then write to clear */
1897		symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1898		qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1899		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
1900			   hdrchk_msgs);
1901		*msg = '\0';
1902		/* senderrbuf cleared in SPKTERRS below */
1903	}
1904
1905	if (errs & QIB_E_P_SPKTERRS) {
1906		if ((errs & QIB_E_P_LINK_PKTERRS) &&
1907		    !(ppd->lflags & QIBL_LINKACTIVE)) {
1908			/*
1909			 * This can happen when trying to bring the link
1910			 * up, but the IB link changes state at the "wrong"
1911			 * time. The IB logic then complains that the packet
1912			 * isn't valid.  We don't want to confuse people, so
1913			 * we just don't print them, except at debug
1914			 */
1915			err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1916				   (errs & QIB_E_P_LINK_PKTERRS),
1917				   qib_7322p_error_msgs);
1918			*msg = '\0';
1919			ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1920		}
1921		qib_disarm_7322_senderrbufs(ppd);
1922	} else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1923		   !(ppd->lflags & QIBL_LINKACTIVE)) {
1924		/*
1925		 * This can happen when SMA is trying to bring the link
1926		 * up, but the IB link changes state at the "wrong" time.
1927		 * The IB logic then complains that the packet isn't
1928		 * valid.  We don't want to confuse people, so we just
1929		 * don't print them, except at debug
1930		 */
1931		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
1932			   qib_7322p_error_msgs);
1933		ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1934		*msg = '\0';
1935	}
1936
1937	qib_write_kreg_port(ppd, krp_errclear, errs);
1938
1939	errs &= ~ignore_this_time;
1940	if (!errs)
1941		goto done;
1942
1943	if (errs & QIB_E_P_RPKTERRS)
1944		qib_stats.sps_rcverrs++;
1945	if (errs & QIB_E_P_SPKTERRS)
1946		qib_stats.sps_txerrs++;
1947
1948	iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1949
1950	if (errs & QIB_E_P_SDMAERRS)
1951		sdma_7322_p_errors(ppd, errs);
1952
1953	if (errs & QIB_E_P_IBSTATUSCHANGED) {
1954		u64 ibcs;
1955		u8 ltstate;
1956
1957		ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1958		ltstate = qib_7322_phys_portstate(ibcs);
1959
1960		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1961			handle_serdes_issues(ppd, ibcs);
1962		if (!(ppd->cpspec->ibcctrl_a &
1963		      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1964			/*
1965			 * We got our interrupt, so init code should be
1966			 * happy and not try alternatives. Now squelch
1967			 * other "chatter" from link-negotiation (pre Init)
1968			 */
1969			ppd->cpspec->ibcctrl_a |=
1970				SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1971			qib_write_kreg_port(ppd, krp_ibcctrl_a,
1972					    ppd->cpspec->ibcctrl_a);
1973		}
1974
1975		/* Update our picture of width and speed from chip */
1976		ppd->link_width_active =
1977			(ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1978			    IB_WIDTH_4X : IB_WIDTH_1X;
1979		ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1980			LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1981			  SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1982				   QIB_IB_DDR : QIB_IB_SDR;
1983
1984		if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1985		    IB_PHYSPORTSTATE_DISABLED)
1986			qib_set_ib_7322_lstate(ppd, 0,
1987			       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1988		else
1989			/*
1990			 * Since going into a recovery state causes the link
1991			 * state to go down and since recovery is transitory,
1992			 * it is better if we "miss" ever seeing the link
1993			 * training state go into recovery (i.e., ignore this
1994			 * transition for link state special handling purposes)
1995			 * without updating lastibcstat.
1996			 */
1997			if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1998			    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1999			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
2000			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
2001				qib_handle_e_ibstatuschanged(ppd, ibcs);
2002	}
2003	if (*msg && iserr)
2004		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
2005
2006	if (ppd->state_wanted & ppd->lflags)
2007		wake_up_interruptible(&ppd->state_wait);
2008done:
2009	return;
2010}
2011
2012/* enable/disable chip from delivering interrupts */
2013static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
2014{
2015	if (enable) {
2016		if (dd->flags & QIB_BADINTR)
2017			return;
2018		qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
2019		/* cause any pending enabled interrupts to be re-delivered */
2020		qib_write_kreg(dd, kr_intclear, 0ULL);
2021		if (dd->cspec->num_msix_entries) {
2022			/* and same for MSIx */
2023			u64 val = qib_read_kreg64(dd, kr_intgranted);
2024
2025			if (val)
2026				qib_write_kreg(dd, kr_intgranted, val);
2027		}
2028	} else
2029		qib_write_kreg(dd, kr_intmask, 0ULL);
2030}
2031
2032/*
2033 * Try to cleanup as much as possible for anything that might have gone
2034 * wrong while in freeze mode, such as pio buffers being written by user
2035 * processes (causing armlaunch), send errors due to going into freeze mode,
2036 * etc., and try to avoid causing extra interrupts while doing so.
2037 * Forcibly update the in-memory pioavail register copies after cleanup
2038 * because the chip won't do it while in freeze mode (the register values
2039 * themselves are kept correct).
2040 * Make sure that we don't lose any important interrupts by using the chip
2041 * feature that says that writing 0 to a bit in *clear that is set in
2042 * *status will cause an interrupt to be generated again (if allowed by
2043 * the *mask value).
2044 * This is in chip-specific code because of all of the register accesses,
2045 * even though the details are similar on most chips.
2046 */
2047static void qib_7322_clear_freeze(struct qib_devdata *dd)
2048{
2049	int pidx;
2050
2051	/* disable error interrupts, to avoid confusion */
2052	qib_write_kreg(dd, kr_errmask, 0ULL);
2053
2054	for (pidx = 0; pidx < dd->num_pports; ++pidx)
2055		if (dd->pport[pidx].link_speed_supported)
2056			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2057					    0ULL);
2058
2059	/* also disable interrupts; errormask is sometimes overwritten */
2060	qib_7322_set_intr_state(dd, 0);
2061
2062	/* clear the freeze, and be sure chip saw it */
2063	qib_write_kreg(dd, kr_control, dd->control);
2064	qib_read_kreg32(dd, kr_scratch);
2065
2066	/*
2067	 * Force new interrupt if any hwerr, error or interrupt bits are
2068	 * still set, and clear "safe" send packet errors related to freeze
2069	 * and cancelling sends.  Re-enable error interrupts before possible
2070	 * force of re-interrupt on pending interrupts.
2071	 */
2072	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2073	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
2074	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2075	/* We need to purge per-port errs and reset mask, too */
2076	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2077		if (!dd->pport[pidx].link_speed_supported)
2078			continue;
2079		qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
2080		qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
2081	}
2082	qib_7322_set_intr_state(dd, 1);
2083}
2084
2085/* no error handling to speak of */
2086/**
2087 * qib_7322_handle_hwerrors - display hardware errors.
2088 * @dd: the qlogic_ib device
2089 * @msg: the output buffer
2090 * @msgl: the size of the output buffer
2091 *
2092 * Use same msg buffer as regular errors to avoid excessive stack
2093 * use.  Most hardware errors are catastrophic, but for right now,
2094 * we'll print them and continue.  We reuse the same message buffer as
2095 * qib_handle_errors() to avoid excessive stack usage.
2096 */
2097static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2098				     size_t msgl)
2099{
2100	u64 hwerrs;
2101	u32 ctrl;
2102	int isfatal = 0;
2103
2104	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2105	if (!hwerrs)
2106		goto bail;
2107	if (hwerrs == ~0ULL) {
2108		qib_dev_err(dd,
2109			"Read of hardware error status failed (all bits set); ignoring\n");
2110		goto bail;
2111	}
2112	qib_stats.sps_hwerrs++;
2113
2114	/* Always clear the error status register, except BIST fail */
2115	qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2116		       ~HWE_MASK(PowerOnBISTFailed));
2117
2118	hwerrs &= dd->cspec->hwerrmask;
2119
2120	/* no EEPROM logging, yet */
2121
2122	if (hwerrs)
2123		qib_devinfo(dd->pcidev,
2124			"Hardware error: hwerr=0x%llx (cleared)\n",
2125			(unsigned long long) hwerrs);
2126
2127	ctrl = qib_read_kreg32(dd, kr_control);
2128	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2129		/*
2130		 * No recovery yet...
2131		 */
2132		if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2133		    dd->cspec->stay_in_freeze) {
2134			/*
2135			 * If any set that we aren't ignoring only make the
2136			 * complaint once, in case it's stuck or recurring,
2137			 * and we get here multiple times
2138			 * Force link down, so switch knows, and
2139			 * LEDs are turned off.
2140			 */
2141			if (dd->flags & QIB_INITTED)
2142				isfatal = 1;
2143		} else
2144			qib_7322_clear_freeze(dd);
2145	}
2146
2147	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2148		isfatal = 1;
2149		strlcpy(msg,
2150			"[Memory BIST test failed, InfiniPath hardware unusable]",
2151			msgl);
2152		/* ignore from now on, so disable until driver reloaded */
2153		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2154		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2155	}
2156
2157	err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2158
2159	/* Ignore esoteric PLL failures et al. */
2160
2161	qib_dev_err(dd, "%s hardware error\n", msg);
2162
2163	if (hwerrs &
2164		   (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
2165		    SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
2166		int pidx = 0;
2167		int err;
2168		unsigned long flags;
2169		struct qib_pportdata *ppd = dd->pport;
2170
2171		for (; pidx < dd->num_pports; ++pidx, ppd++) {
2172			err = 0;
2173			if (pidx == 0 && (hwerrs &
2174				SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
2175				err++;
2176			if (pidx == 1 && (hwerrs &
2177				SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
2178				err++;
2179			if (err) {
2180				spin_lock_irqsave(&ppd->sdma_lock, flags);
2181				dump_sdma_7322_state(ppd);
2182				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
2183			}
2184		}
2185	}
2186
2187	if (isfatal && !dd->diag_client) {
2188		qib_dev_err(dd,
2189			"Fatal Hardware Error, no longer usable, SN %.16s\n",
2190			dd->serial);
2191		/*
2192		 * for /sys status file and user programs to print; if no
2193		 * trailing brace is copied, we'll know it was truncated.
2194		 */
2195		if (dd->freezemsg)
2196			snprintf(dd->freezemsg, dd->freezelen,
2197				 "{%s}", msg);
2198		qib_disable_after_error(dd);
2199	}
2200bail:;
2201}
2202
2203/**
2204 * qib_7322_init_hwerrors - enable hardware errors
2205 * @dd: the qlogic_ib device
2206 *
2207 * now that we have finished initializing everything that might reasonably
2208 * cause a hardware error, and cleared those errors bits as they occur,
2209 * we can enable hardware errors in the mask (potentially enabling
2210 * freeze mode), and enable hardware errors as errors (along with
2211 * everything else) in errormask
2212 */
2213static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2214{
2215	int pidx;
2216	u64 extsval;
2217
2218	extsval = qib_read_kreg64(dd, kr_extstatus);
2219	if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2220			 QIB_EXTS_MEMBIST_ENDTEST)))
2221		qib_dev_err(dd, "MemBIST did not complete!\n");
2222
2223	/* never clear BIST failure, so reported on each driver load */
2224	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2225	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2226
2227	/* clear all */
2228	qib_write_kreg(dd, kr_errclear, ~0ULL);
2229	/* enable errors that are masked, at least this first time. */
2230	qib_write_kreg(dd, kr_errmask, ~0ULL);
2231	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2232	for (pidx = 0; pidx < dd->num_pports; ++pidx)
2233		if (dd->pport[pidx].link_speed_supported)
2234			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2235					    ~0ULL);
2236}
2237
2238/*
2239 * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
2240 * on chips that are count-based, rather than trigger-based.  There is no
2241 * reference counting, but that's also fine, given the intended use.
2242 * Only chip-specific because it's all register accesses
2243 */
2244static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2245{
2246	if (enable) {
2247		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2248		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2249	} else
2250		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2251	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2252}
2253
2254/*
2255 * Formerly took parameter <which> in pre-shifted,
2256 * pre-merged form with LinkCmd and LinkInitCmd
2257 * together, and assuming the zero was NOP.
2258 */
2259static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2260				   u16 linitcmd)
2261{
2262	u64 mod_wd;
2263	struct qib_devdata *dd = ppd->dd;
2264	unsigned long flags;
2265
2266	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2267		/*
2268		 * If we are told to disable, note that so link-recovery
2269		 * code does not attempt to bring us back up.
2270		 * Also reset everything that we can, so we start
2271		 * completely clean when re-enabled (before we
2272		 * actually issue the disable to the IBC)
2273		 */
2274		qib_7322_mini_pcs_reset(ppd);
2275		spin_lock_irqsave(&ppd->lflags_lock, flags);
2276		ppd->lflags |= QIBL_IB_LINK_DISABLED;
2277		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2278	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2279		/*
2280		 * Any other linkinitcmd will lead to LINKDOWN and then
2281		 * to INIT (if all is well), so clear flag to let
2282		 * link-recovery code attempt to bring us back up.
2283		 */
2284		spin_lock_irqsave(&ppd->lflags_lock, flags);
2285		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2286		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2287		/*
2288		 * Clear status change interrupt reduction so the
2289		 * new state is seen.
2290		 */
2291		ppd->cpspec->ibcctrl_a &=
2292			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2293	}
2294
2295	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2296		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2297
2298	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2299			    mod_wd);
2300	/* write to chip to prevent back-to-back writes of ibc reg */
2301	qib_write_kreg(dd, kr_scratch, 0);
2302
2303}
2304
2305/*
2306 * The total RCV buffer memory is 64KB, used for both ports, and is
2307 * in units of 64 bytes (same as IB flow control credit unit).
2308 * The consumedVL unit in the same registers are in 32 byte units!
2309 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2310 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2311 * in krp_rxcreditvl15, rather than 10.
2312 */
2313#define RCV_BUF_UNITSZ 64
2314#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
2315
2316static void set_vls(struct qib_pportdata *ppd)
2317{
2318	int i, numvls, totcred, cred_vl, vl0extra;
2319	struct qib_devdata *dd = ppd->dd;
2320	u64 val;
2321
2322	numvls = qib_num_vls(ppd->vls_operational);
2323
2324	/*
2325	 * Set up per-VL credits. Below is kluge based on these assumptions:
2326	 * 1) port is disabled at the time early_init is called.
2327	 * 2) give VL15 17 credits, for two max-plausible packets.
2328	 * 3) Give VL0-N the rest, with any rounding excess used for VL0
2329	 */
2330	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
2331	totcred = NUM_RCV_BUF_UNITS(dd);
2332	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2333	totcred -= cred_vl;
2334	qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2335	cred_vl = totcred / numvls;
2336	vl0extra = totcred - cred_vl * numvls;
2337	qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2338	for (i = 1; i < numvls; i++)
2339		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2340	for (; i < 8; i++) /* no buffer space for other VLs */
2341		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2342
2343	/* Notify IBC that credits need to be recalculated */
2344	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2345	val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2346	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2347	qib_write_kreg(dd, kr_scratch, 0ULL);
2348	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2349	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2350
2351	for (i = 0; i < numvls; i++)
2352		val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2353	val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2354
2355	/* Change the number of operational VLs */
2356	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2357				~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2358		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2359	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2360	qib_write_kreg(dd, kr_scratch, 0ULL);
2361}
2362
2363/*
2364 * The code that deals with actual SerDes is in serdes_7322_init().
2365 * Compared to the code for iba7220, it is minimal.
2366 */
2367static int serdes_7322_init(struct qib_pportdata *ppd);
2368
2369/**
2370 * qib_7322_bringup_serdes - bring up the serdes
2371 * @ppd: physical port on the qlogic_ib device
2372 */
2373static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2374{
2375	struct qib_devdata *dd = ppd->dd;
2376	u64 val, guid, ibc;
2377	unsigned long flags;
2378
2379	/*
2380	 * SerDes model not in Pd, but still need to
2381	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2382	 * eventually.
2383	 */
2384	/* Put IBC in reset, sends disabled (should be in reset already) */
2385	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2386	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2387	qib_write_kreg(dd, kr_scratch, 0ULL);
2388
2389	/* ensure previous Tx parameters are not still forced */
2390	qib_write_kreg_port(ppd, krp_tx_deemph_override,
2391		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
2392		reset_tx_deemphasis_override));
2393
2394	if (qib_compat_ddr_negotiate) {
2395		ppd->cpspec->ibdeltainprog = 1;
2396		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2397						crp_ibsymbolerr);
2398		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2399						crp_iblinkerrrecov);
2400	}
2401
2402	/* flowcontrolwatermark is in units of KBytes */
2403	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2404	/*
2405	 * Flow control is sent this often, even if no changes in
2406	 * buffer space occur.  Units are 128ns for this chip.
2407	 * Set to 3usec.
2408	 */
2409	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2410	/* max error tolerance */
2411	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2412	/* IB credit flow control. */
2413	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2414	/*
2415	 * set initial max size pkt IBC will send, including ICRC; it's the
2416	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2417	 */
2418	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2419		SYM_LSB(IBCCtrlA_0, MaxPktLen);
2420	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2421
2422	/*
2423	 * Reset the PCS interface to the serdes (and also ibc, which is still
2424	 * in reset from above).  Writes new value of ibcctrl_a as last step.
2425	 */
2426	qib_7322_mini_pcs_reset(ppd);
2427
2428	if (!ppd->cpspec->ibcctrl_b) {
2429		unsigned lse = ppd->link_speed_enabled;
2430
2431		/*
2432		 * Not on re-init after reset, establish shadow
2433		 * and force initial config.
2434		 */
2435		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2436							     krp_ibcctrl_b);
2437		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2438				IBA7322_IBC_SPEED_DDR |
2439				IBA7322_IBC_SPEED_SDR |
2440				IBA7322_IBC_WIDTH_AUTONEG |
2441				SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2442		if (lse & (lse - 1)) /* Muliple speeds enabled */
2443			ppd->cpspec->ibcctrl_b |=
2444				(lse << IBA7322_IBC_SPEED_LSB) |
2445				IBA7322_IBC_IBTA_1_2_MASK |
2446				IBA7322_IBC_MAX_SPEED_MASK;
2447		else
2448			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2449				IBA7322_IBC_SPEED_QDR |
2450				 IBA7322_IBC_IBTA_1_2_MASK :
2451				(lse == QIB_IB_DDR) ?
2452					IBA7322_IBC_SPEED_DDR :
2453					IBA7322_IBC_SPEED_SDR;
2454		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2455		    (IB_WIDTH_1X | IB_WIDTH_4X))
2456			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2457		else
2458			ppd->cpspec->ibcctrl_b |=
2459				ppd->link_width_enabled == IB_WIDTH_4X ?
2460				IBA7322_IBC_WIDTH_4X_ONLY :
2461				IBA7322_IBC_WIDTH_1X_ONLY;
2462
2463		/* always enable these on driver reload, not sticky */
2464		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2465			IBA7322_IBC_HRTBT_MASK);
2466	}
2467	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2468
2469	/* setup so we have more time at CFGTEST to change H1 */
2470	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2471	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2472	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2473	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2474
2475	serdes_7322_init(ppd);
2476
2477	guid = be64_to_cpu(ppd->guid);
2478	if (!guid) {
2479		if (dd->base_guid)
2480			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2481		ppd->guid = cpu_to_be64(guid);
2482	}
2483
2484	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2485	/* write to chip to prevent back-to-back writes of ibc reg */
2486	qib_write_kreg(dd, kr_scratch, 0);
2487
2488	/* Enable port */
2489	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2490	set_vls(ppd);
2491
2492	/* initially come up DISABLED, without sending anything. */
2493	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2494					QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2495	qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2496	qib_write_kreg(dd, kr_scratch, 0ULL);
2497	/* clear the linkinit cmds */
2498	ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2499
2500	/* be paranoid against later code motion, etc. */
2501	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2502	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2503	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2504	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2505
2506	/* Also enable IBSTATUSCHG interrupt.  */
2507	val = qib_read_kreg_port(ppd, krp_errmask);
2508	qib_write_kreg_port(ppd, krp_errmask,
2509		val | ERR_MASK_N(IBStatusChanged));
2510
2511	/* Always zero until we start messing with SerDes for real */
2512	return 0;
2513}
2514
2515/**
2516 * qib_7322_quiet_serdes - set serdes to txidle
2517 * @dd: the qlogic_ib device
2518 * Called when driver is being unloaded
2519 */
2520static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2521{
2522	u64 val;
2523	unsigned long flags;
2524
2525	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2526
2527	spin_lock_irqsave(&ppd->lflags_lock, flags);
2528	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2529	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2530	wake_up(&ppd->cpspec->autoneg_wait);
2531	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2532	if (ppd->dd->cspec->r1)
2533		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2534
2535	ppd->cpspec->chase_end = 0;
2536	if (ppd->cpspec->chase_timer.function) /* if initted */
2537		del_timer_sync(&ppd->cpspec->chase_timer);
2538
2539	/*
2540	 * Despite the name, actually disables IBC as well. Do it when
2541	 * we are as sure as possible that no more packets can be
2542	 * received, following the down and the PCS reset.
2543	 * The actual disabling happens in qib_7322_mini_pci_reset(),
2544	 * along with the PCS being reset.
2545	 */
2546	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2547	qib_7322_mini_pcs_reset(ppd);
2548
2549	/*
2550	 * Update the adjusted counters so the adjustment persists
2551	 * across driver reload.
2552	 */
2553	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2554	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2555		struct qib_devdata *dd = ppd->dd;
2556		u64 diagc;
2557
2558		/* enable counter writes */
2559		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2560		qib_write_kreg(dd, kr_hwdiagctrl,
2561			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2562
2563		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2564			val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2565			if (ppd->cpspec->ibdeltainprog)
2566				val -= val - ppd->cpspec->ibsymsnap;
2567			val -= ppd->cpspec->ibsymdelta;
2568			write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2569		}
2570		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2571			val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2572			if (ppd->cpspec->ibdeltainprog)
2573				val -= val - ppd->cpspec->iblnkerrsnap;
2574			val -= ppd->cpspec->iblnkerrdelta;
2575			write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2576		}
2577		if (ppd->cpspec->iblnkdowndelta) {
2578			val = read_7322_creg32_port(ppd, crp_iblinkdown);
2579			val += ppd->cpspec->iblnkdowndelta;
2580			write_7322_creg_port(ppd, crp_iblinkdown, val);
2581		}
2582		/*
2583		 * No need to save ibmalfdelta since IB perfcounters
2584		 * are cleared on driver reload.
2585		 */
2586
2587		/* and disable counter writes */
2588		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2589	}
2590}
2591
2592/**
2593 * qib_setup_7322_setextled - set the state of the two external LEDs
2594 * @ppd: physical port on the qlogic_ib device
2595 * @on: whether the link is up or not
2596 *
2597 * The exact combo of LEDs if on is true is determined by looking
2598 * at the ibcstatus.
2599 *
2600 * These LEDs indicate the physical and logical state of IB link.
2601 * For this chip (at least with recommended board pinouts), LED1
2602 * is Yellow (logical state) and LED2 is Green (physical state),
2603 *
2604 * Note:  We try to match the Mellanox HCA LED behavior as best
2605 * we can.  Green indicates physical link state is OK (something is
2606 * plugged in, and we can train).
2607 * Amber indicates the link is logically up (ACTIVE).
2608 * Mellanox further blinks the amber LED to indicate data packet
2609 * activity, but we have no hardware support for that, so it would
2610 * require waking up every 10-20 msecs and checking the counters
2611 * on the chip, and then turning the LED off if appropriate.  That's
2612 * visible overhead, so not something we will do.
2613 */
2614static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2615{
2616	struct qib_devdata *dd = ppd->dd;
2617	u64 extctl, ledblink = 0, val;
2618	unsigned long flags;
2619	int yel, grn;
2620
2621	/*
2622	 * The diags use the LED to indicate diag info, so we leave
2623	 * the external LED alone when the diags are running.
2624	 */
2625	if (dd->diag_client)
2626		return;
2627
2628	/* Allow override of LED display for, e.g. Locating system in rack */
2629	if (ppd->led_override) {
2630		grn = (ppd->led_override & QIB_LED_PHYS);
2631		yel = (ppd->led_override & QIB_LED_LOG);
2632	} else if (on) {
2633		val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2634		grn = qib_7322_phys_portstate(val) ==
2635			IB_PHYSPORTSTATE_LINKUP;
2636		yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2637	} else {
2638		grn = 0;
2639		yel = 0;
2640	}
2641
2642	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2643	extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2644		~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2645	if (grn) {
2646		extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2647		/*
2648		 * Counts are in chip clock (4ns) periods.
2649		 * This is 1/16 sec (66.6ms) on,
2650		 * 3/16 sec (187.5 ms) off, with packets rcvd.
2651		 */
2652		ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2653			((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
2654	}
2655	if (yel)
2656		extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2657	dd->cspec->extctrl = extctl;
2658	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2659	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2660
2661	if (ledblink) /* blink the LED on packet receive */
2662		qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2663}
2664
2665#ifdef CONFIG_INFINIBAND_QIB_DCA
2666
2667static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
2668{
2669	switch (event) {
2670	case DCA_PROVIDER_ADD:
2671		if (dd->flags & QIB_DCA_ENABLED)
2672			break;
2673		if (!dca_add_requester(&dd->pcidev->dev)) {
2674			qib_devinfo(dd->pcidev, "DCA enabled\n");
2675			dd->flags |= QIB_DCA_ENABLED;
2676			qib_setup_dca(dd);
2677		}
2678		break;
2679	case DCA_PROVIDER_REMOVE:
2680		if (dd->flags & QIB_DCA_ENABLED) {
2681			dca_remove_requester(&dd->pcidev->dev);
2682			dd->flags &= ~QIB_DCA_ENABLED;
2683			dd->cspec->dca_ctrl = 0;
2684			qib_write_kreg(dd, KREG_IDX(DCACtrlA),
2685				dd->cspec->dca_ctrl);
2686		}
2687		break;
2688	}
2689	return 0;
2690}
2691
2692static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
2693{
2694	struct qib_devdata *dd = rcd->dd;
2695	struct qib_chip_specific *cspec = dd->cspec;
2696
2697	if (!(dd->flags & QIB_DCA_ENABLED))
2698		return;
2699	if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2700		const struct dca_reg_map *rmp;
2701
2702		cspec->rhdr_cpu[rcd->ctxt] = cpu;
2703		rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
2704		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2705		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2706			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2707		qib_devinfo(dd->pcidev,
2708			"Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
2709			(long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2710		qib_write_kreg(dd, rmp->regno,
2711			       cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2712		cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2713		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2714	}
2715}
2716
2717static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
2718{
2719	struct qib_devdata *dd = ppd->dd;
2720	struct qib_chip_specific *cspec = dd->cspec;
2721	unsigned pidx = ppd->port - 1;
2722
2723	if (!(dd->flags & QIB_DCA_ENABLED))
2724		return;
2725	if (cspec->sdma_cpu[pidx] != cpu) {
2726		cspec->sdma_cpu[pidx] = cpu;
2727		cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2728			SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2729			SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2730		cspec->dca_rcvhdr_ctrl[4] |=
2731			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2732				(ppd->hw_pidx ?
2733					SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2734					SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2735		qib_devinfo(dd->pcidev,
2736			"sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
2737			(long long) cspec->dca_rcvhdr_ctrl[4]);
2738		qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2739			       cspec->dca_rcvhdr_ctrl[4]);
2740		cspec->dca_ctrl |= ppd->hw_pidx ?
2741			SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2742			SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2743		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2744	}
2745}
2746
2747static void qib_setup_dca(struct qib_devdata *dd)
2748{
2749	struct qib_chip_specific *cspec = dd->cspec;
2750	int i;
2751
2752	for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2753		cspec->rhdr_cpu[i] = -1;
2754	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2755		cspec->sdma_cpu[i] = -1;
2756	cspec->dca_rcvhdr_ctrl[0] =
2757		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2758		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2759		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2760		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2761	cspec->dca_rcvhdr_ctrl[1] =
2762		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2763		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2764		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2765		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2766	cspec->dca_rcvhdr_ctrl[2] =
2767		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2768		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2769		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2770		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2771	cspec->dca_rcvhdr_ctrl[3] =
2772		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2773		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2774		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2775		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2776	cspec->dca_rcvhdr_ctrl[4] =
2777		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2778		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2779	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2780		qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2781			       cspec->dca_rcvhdr_ctrl[i]);
2782	for (i = 0; i < cspec->num_msix_entries; i++)
2783		setup_dca_notifier(dd, i);
2784}
2785
2786static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2787			     const cpumask_t *mask)
2788{
2789	struct qib_irq_notify *n =
2790		container_of(notify, struct qib_irq_notify, notify);
2791	int cpu = cpumask_first(mask);
2792
2793	if (n->rcv) {
2794		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2795
2796		qib_update_rhdrq_dca(rcd, cpu);
2797	} else {
2798		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2799
2800		qib_update_sdma_dca(ppd, cpu);
2801	}
2802}
2803
2804static void qib_irq_notifier_release(struct kref *ref)
2805{
2806	struct qib_irq_notify *n =
2807		container_of(ref, struct qib_irq_notify, notify.kref);
2808	struct qib_devdata *dd;
2809
2810	if (n->rcv) {
2811		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2812
2813		dd = rcd->dd;
2814	} else {
2815		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2816
2817		dd = ppd->dd;
2818	}
2819	qib_devinfo(dd->pcidev,
2820		"release on HCA notify 0x%p n 0x%p\n", ref, n);
2821	kfree(n);
2822}
2823#endif
2824
2825static void qib_7322_free_irq(struct qib_devdata *dd)
2826{
2827	u64 intgranted;
2828	int i;
2829
2830	dd->cspec->main_int_mask = ~0ULL;
2831
2832	for (i = 0; i < dd->cspec->num_msix_entries; i++) {
2833		/* only free IRQs that were allocated */
2834		if (dd->cspec->msix_entries[i].arg) {
2835#ifdef CONFIG_INFINIBAND_QIB_DCA
2836			reset_dca_notifier(dd, i);
2837#endif
2838			irq_set_affinity_hint(pci_irq_vector(dd->pcidev, i),
2839					      NULL);
2840			free_cpumask_var(dd->cspec->msix_entries[i].mask);
2841			pci_free_irq(dd->pcidev, i,
2842				     dd->cspec->msix_entries[i].arg);
2843		}
2844	}
2845
2846	/* If num_msix_entries was 0, disable the INTx IRQ */
2847	if (!dd->cspec->num_msix_entries)
2848		pci_free_irq(dd->pcidev, 0, dd);
2849	else
2850		dd->cspec->num_msix_entries = 0;
2851
2852	pci_free_irq_vectors(dd->pcidev);
2853
2854	/* make sure no MSIx interrupts are left pending */
2855	intgranted = qib_read_kreg64(dd, kr_intgranted);
2856	if (intgranted)
2857		qib_write_kreg(dd, kr_intgranted, intgranted);
2858}
2859
2860static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2861{
2862	int i;
2863
2864#ifdef CONFIG_INFINIBAND_QIB_DCA
2865	if (dd->flags & QIB_DCA_ENABLED) {
2866		dca_remove_requester(&dd->pcidev->dev);
2867		dd->flags &= ~QIB_DCA_ENABLED;
2868		dd->cspec->dca_ctrl = 0;
2869		qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2870	}
2871#endif
2872
2873	qib_7322_free_irq(dd);
2874	kfree(dd->cspec->cntrs);
2875	kfree(dd->cspec->sendchkenable);
2876	kfree(dd->cspec->sendgrhchk);
2877	kfree(dd->cspec->sendibchk);
2878	kfree(dd->cspec->msix_entries);
2879	for (i = 0; i < dd->num_pports; i++) {
2880		unsigned long flags;
2881		u32 mask = QSFP_GPIO_MOD_PRS_N |
2882			(QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2883
2884		kfree(dd->pport[i].cpspec->portcntrs);
2885		if (dd->flags & QIB_HAS_QSFP) {
2886			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2887			dd->cspec->gpio_mask &= ~mask;
2888			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2889			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2890		}
2891	}
2892}
2893
2894/* handle SDMA interrupts */
2895static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2896{
2897	struct qib_pportdata *ppd0 = &dd->pport[0];
2898	struct qib_pportdata *ppd1 = &dd->pport[1];
2899	u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2900		INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2901	u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2902		INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2903
2904	if (intr0)
2905		qib_sdma_intr(ppd0);
2906	if (intr1)
2907		qib_sdma_intr(ppd1);
2908
2909	if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2910		qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2911	if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2912		qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2913}
2914
2915/*
2916 * Set or clear the Send buffer available interrupt enable bit.
2917 */
2918static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2919{
2920	unsigned long flags;
2921
2922	spin_lock_irqsave(&dd->sendctrl_lock, flags);
2923	if (needint)
2924		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2925	else
2926		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2927	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2928	qib_write_kreg(dd, kr_scratch, 0ULL);
2929	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2930}
2931
2932/*
2933 * Somehow got an interrupt with reserved bits set in interrupt status.
2934 * Print a message so we know it happened, then clear them.
2935 * keep mainline interrupt handler cache-friendly
2936 */
2937static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2938{
2939	u64 kills;
2940	char msg[128];
2941
2942	kills = istat & ~QIB_I_BITSEXTANT;
2943	qib_dev_err(dd,
2944		"Clearing reserved interrupt(s) 0x%016llx: %s\n",
2945		(unsigned long long) kills, msg);
2946	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2947}
2948
2949/* keep mainline interrupt handler cache-friendly */
2950static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2951{
2952	u32 gpiostatus;
2953	int handled = 0;
2954	int pidx;
2955
2956	/*
2957	 * Boards for this chip currently don't use GPIO interrupts,
2958	 * so clear by writing GPIOstatus to GPIOclear, and complain
2959	 * to developer.  To avoid endless repeats, clear
2960	 * the bits in the mask, since there is some kind of
2961	 * programming error or chip problem.
2962	 */
2963	gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2964	/*
2965	 * In theory, writing GPIOstatus to GPIOclear could
2966	 * have a bad side-effect on some diagnostic that wanted
2967	 * to poll for a status-change, but the various shadows
2968	 * make that problematic at best. Diags will just suppress
2969	 * all GPIO interrupts during such tests.
2970	 */
2971	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2972	/*
2973	 * Check for QSFP MOD_PRS changes
2974	 * only works for single port if IB1 != pidx1
2975	 */
2976	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2977	     ++pidx) {
2978		struct qib_pportdata *ppd;
2979		struct qib_qsfp_data *qd;
2980		u32 mask;
2981
2982		if (!dd->pport[pidx].link_speed_supported)
2983			continue;
2984		mask = QSFP_GPIO_MOD_PRS_N;
2985		ppd = dd->pport + pidx;
2986		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2987		if (gpiostatus & dd->cspec->gpio_mask & mask) {
2988			u64 pins;
2989
2990			qd = &ppd->cpspec->qsfp_data;
2991			gpiostatus &= ~mask;
2992			pins = qib_read_kreg64(dd, kr_extstatus);
2993			pins >>= SYM_LSB(EXTStatus, GPIOIn);
2994			if (!(pins & mask)) {
2995				++handled;
2996				qd->t_insert = jiffies;
2997				queue_work(ib_wq, &qd->work);
2998			}
2999		}
3000	}
3001
3002	if (gpiostatus && !handled) {
3003		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
3004		u32 gpio_irq = mask & gpiostatus;
3005
3006		/*
3007		 * Clear any troublemakers, and update chip from shadow
3008		 */
3009		dd->cspec->gpio_mask &= ~gpio_irq;
3010		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
3011	}
3012}
3013
3014/*
3015 * Handle errors and unusual events first, separate function
3016 * to improve cache hits for fast path interrupt handling.
3017 */
3018static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
3019{
3020	if (istat & ~QIB_I_BITSEXTANT)
3021		unknown_7322_ibits(dd, istat);
3022	if (istat & QIB_I_GPIO)
3023		unknown_7322_gpio_intr(dd);
3024	if (istat & QIB_I_C_ERROR) {
3025		qib_write_kreg(dd, kr_errmask, 0ULL);
3026		tasklet_schedule(&dd->error_tasklet);
3027	}
3028	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
3029		handle_7322_p_errors(dd->rcd[0]->ppd);
3030	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
3031		handle_7322_p_errors(dd->rcd[1]->ppd);
3032}
3033
3034/*
3035 * Dynamically adjust the rcv int timeout for a context based on incoming
3036 * packet rate.
3037 */
3038static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
3039{
3040	struct qib_devdata *dd = rcd->dd;
3041	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
3042
3043	/*
3044	 * Dynamically adjust idle timeout on chip
3045	 * based on number of packets processed.
3046	 */
3047	if (npkts < rcv_int_count && timeout > 2)
3048		timeout >>= 1;
3049	else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
3050		timeout = min(timeout << 1, rcv_int_timeout);
3051	else
3052		return;
3053
3054	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
3055	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
3056}
3057
3058/*
3059 * This is the main interrupt handler.
3060 * It will normally only be used for low frequency interrupts but may
3061 * have to handle all interrupts if INTx is enabled or fewer than normal
3062 * MSIx interrupts were allocated.
3063 * This routine should ignore the interrupt bits for any of the
3064 * dedicated MSIx handlers.
3065 */
3066static irqreturn_t qib_7322intr(int irq, void *data)
3067{
3068	struct qib_devdata *dd = data;
3069	irqreturn_t ret;
3070	u64 istat;
3071	u64 ctxtrbits;
3072	u64 rmask;
3073	unsigned i;
3074	u32 npkts;
3075
3076	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
3077		/*
3078		 * This return value is not great, but we do not want the
3079		 * interrupt core code to remove our interrupt handler
3080		 * because we don't appear to be handling an interrupt
3081		 * during a chip reset.
3082		 */
3083		ret = IRQ_HANDLED;
3084		goto bail;
3085	}
3086
3087	istat = qib_read_kreg64(dd, kr_intstatus);
3088
3089	if (unlikely(istat == ~0ULL)) {
3090		qib_bad_intrstatus(dd);
3091		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
3092		/* don't know if it was our interrupt or not */
3093		ret = IRQ_NONE;
3094		goto bail;
3095	}
3096
3097	istat &= dd->cspec->main_int_mask;
3098	if (unlikely(!istat)) {
3099		/* already handled, or shared and not us */
3100		ret = IRQ_NONE;
3101		goto bail;
3102	}
3103
3104	this_cpu_inc(*dd->int_counter);
3105
3106	/* handle "errors" of various kinds first, device ahead of port */
3107	if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
3108			      QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
3109			      INT_MASK_P(Err, 1))))
3110		unlikely_7322_intr(dd, istat);
3111
3112	/*
3113	 * Clear the interrupt bits we found set, relatively early, so we
3114	 * "know" know the chip will have seen this by the time we process
3115	 * the queue, and will re-interrupt if necessary.  The processor
3116	 * itself won't take the interrupt again until we return.
3117	 */
3118	qib_write_kreg(dd, kr_intclear, istat);
3119
3120	/*
3121	 * Handle kernel receive queues before checking for pio buffers
3122	 * available since receives can overflow; piobuf waiters can afford
3123	 * a few extra cycles, since they were waiting anyway.
3124	 */
3125	ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
3126	if (ctxtrbits) {
3127		rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
3128			(1ULL << QIB_I_RCVURG_LSB);
3129		for (i = 0; i < dd->first_user_ctxt; i++) {
3130			if (ctxtrbits & rmask) {
3131				ctxtrbits &= ~rmask;
3132				if (dd->rcd[i])
3133					qib_kreceive(dd->rcd[i], NULL, &npkts);
3134			}
3135			rmask <<= 1;
3136		}
3137		if (ctxtrbits) {
3138			ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
3139				(ctxtrbits >> QIB_I_RCVURG_LSB);
3140			qib_handle_urcv(dd, ctxtrbits);
3141		}
3142	}
3143
3144	if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
3145		sdma_7322_intr(dd, istat);
3146
3147	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
3148		qib_ib_piobufavail(dd);
3149
3150	ret = IRQ_HANDLED;
3151bail:
3152	return ret;
3153}
3154
3155/*
3156 * Dedicated receive packet available interrupt handler.
3157 */
3158static irqreturn_t qib_7322pintr(int irq, void *data)
3159{
3160	struct qib_ctxtdata *rcd = data;
3161	struct qib_devdata *dd = rcd->dd;
3162	u32 npkts;
3163
3164	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3165		/*
3166		 * This return value is not great, but we do not want the
3167		 * interrupt core code to remove our interrupt handler
3168		 * because we don't appear to be handling an interrupt
3169		 * during a chip reset.
3170		 */
3171		return IRQ_HANDLED;
3172
3173	this_cpu_inc(*dd->int_counter);
3174
3175	/* Clear the interrupt bit we expect to be set. */
3176	qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3177		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
3178
3179	qib_kreceive(rcd, NULL, &npkts);
3180
3181	return IRQ_HANDLED;
3182}
3183
3184/*
3185 * Dedicated Send buffer available interrupt handler.
3186 */
3187static irqreturn_t qib_7322bufavail(int irq, void *data)
3188{
3189	struct qib_devdata *dd = data;
3190
3191	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3192		/*
3193		 * This return value is not great, but we do not want the
3194		 * interrupt core code to remove our interrupt handler
3195		 * because we don't appear to be handling an interrupt
3196		 * during a chip reset.
3197		 */
3198		return IRQ_HANDLED;
3199
3200	this_cpu_inc(*dd->int_counter);
3201
3202	/* Clear the interrupt bit we expect to be set. */
3203	qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
3204
3205	/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
3206	if (dd->flags & QIB_INITTED)
3207		qib_ib_piobufavail(dd);
3208	else
3209		qib_wantpiobuf_7322_intr(dd, 0);
3210
3211	return IRQ_HANDLED;
3212}
3213
3214/*
3215 * Dedicated Send DMA interrupt handler.
3216 */
3217static irqreturn_t sdma_intr(int irq, void *data)
3218{
3219	struct qib_pportdata *ppd = data;
3220	struct qib_devdata *dd = ppd->dd;
3221
3222	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3223		/*
3224		 * This return value is not great, but we do not want the
3225		 * interrupt core code to remove our interrupt handler
3226		 * because we don't appear to be handling an interrupt
3227		 * during a chip reset.
3228		 */
3229		return IRQ_HANDLED;
3230
3231	this_cpu_inc(*dd->int_counter);
3232
3233	/* Clear the interrupt bit we expect to be set. */
3234	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3235		       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
3236	qib_sdma_intr(ppd);
3237
3238	return IRQ_HANDLED;
3239}
3240
3241/*
3242 * Dedicated Send DMA idle interrupt handler.
3243 */
3244static irqreturn_t sdma_idle_intr(int irq, void *data)
3245{
3246	struct qib_pportdata *ppd = data;
3247	struct qib_devdata *dd = ppd->dd;
3248
3249	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3250		/*
3251		 * This return value is not great, but we do not want the
3252		 * interrupt core code to remove our interrupt handler
3253		 * because we don't appear to be handling an interrupt
3254		 * during a chip reset.
3255		 */
3256		return IRQ_HANDLED;
3257
3258	this_cpu_inc(*dd->int_counter);
3259
3260	/* Clear the interrupt bit we expect to be set. */
3261	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3262		       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3263	qib_sdma_intr(ppd);
3264
3265	return IRQ_HANDLED;
3266}
3267
3268/*
3269 * Dedicated Send DMA progress interrupt handler.
3270 */
3271static irqreturn_t sdma_progress_intr(int irq, void *data)
3272{
3273	struct qib_pportdata *ppd = data;
3274	struct qib_devdata *dd = ppd->dd;
3275
3276	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3277		/*
3278		 * This return value is not great, but we do not want the
3279		 * interrupt core code to remove our interrupt handler
3280		 * because we don't appear to be handling an interrupt
3281		 * during a chip reset.
3282		 */
3283		return IRQ_HANDLED;
3284
3285	this_cpu_inc(*dd->int_counter);
3286
3287	/* Clear the interrupt bit we expect to be set. */
3288	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3289		       INT_MASK_P(SDmaProgress, 1) :
3290		       INT_MASK_P(SDmaProgress, 0));
3291	qib_sdma_intr(ppd);
3292
3293	return IRQ_HANDLED;
3294}
3295
3296/*
3297 * Dedicated Send DMA cleanup interrupt handler.
3298 */
3299static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3300{
3301	struct qib_pportdata *ppd = data;
3302	struct qib_devdata *dd = ppd->dd;
3303
3304	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3305		/*
3306		 * This return value is not great, but we do not want the
3307		 * interrupt core code to remove our interrupt handler
3308		 * because we don't appear to be handling an interrupt
3309		 * during a chip reset.
3310		 */
3311		return IRQ_HANDLED;
3312
3313	this_cpu_inc(*dd->int_counter);
3314
3315	/* Clear the interrupt bit we expect to be set. */
3316	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3317		       INT_MASK_PM(SDmaCleanupDone, 1) :
3318		       INT_MASK_PM(SDmaCleanupDone, 0));
3319	qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3320
3321	return IRQ_HANDLED;
3322}
3323
3324#ifdef CONFIG_INFINIBAND_QIB_DCA
3325
3326static void reset_dca_notifier(struct qib_devdata *dd, int msixnum)
3327{
3328	if (!dd->cspec->msix_entries[msixnum].dca)
3329		return;
3330
3331	qib_devinfo(dd->pcidev, "Disabling notifier on HCA %d irq %d\n",
3332		    dd->unit, pci_irq_vector(dd->pcidev, msixnum));
3333	irq_set_affinity_notifier(pci_irq_vector(dd->pcidev, msixnum), NULL);
3334	dd->cspec->msix_entries[msixnum].notifier = NULL;
3335}
3336
3337static void setup_dca_notifier(struct qib_devdata *dd, int msixnum)
3338{
3339	struct qib_msix_entry *m = &dd->cspec->msix_entries[msixnum];
3340	struct qib_irq_notify *n;
3341
3342	if (!m->dca)
3343		return;
3344	n = kzalloc(sizeof(*n), GFP_KERNEL);
3345	if (n) {
3346		int ret;
3347
3348		m->notifier = n;
3349		n->notify.irq = pci_irq_vector(dd->pcidev, msixnum);
3350		n->notify.notify = qib_irq_notifier_notify;
3351		n->notify.release = qib_irq_notifier_release;
3352		n->arg = m->arg;
3353		n->rcv = m->rcv;
3354		qib_devinfo(dd->pcidev,
3355			"set notifier irq %d rcv %d notify %p\n",
3356			n->notify.irq, n->rcv, &n->notify);
3357		ret = irq_set_affinity_notifier(
3358				n->notify.irq,
3359				&n->notify);
3360		if (ret) {
3361			m->notifier = NULL;
3362			kfree(n);
3363		}
3364	}
3365}
3366
3367#endif
3368
3369/*
3370 * Set up our chip-specific interrupt handler.
3371 * The interrupt type has already been setup, so
3372 * we just need to do the registration and error checking.
3373 * If we are using MSIx interrupts, we may fall back to
3374 * INTx later, if the interrupt handler doesn't get called
3375 * within 1/2 second (see verify_interrupt()).
3376 */
3377static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3378{
3379	int ret, i, msixnum;
3380	u64 redirect[6];
3381	u64 mask;
3382	const struct cpumask *local_mask;
3383	int firstcpu, secondcpu = 0, currrcvcpu = 0;
3384
3385	if (!dd->num_pports)
3386		return;
3387
3388	if (clearpend) {
3389		/*
3390		 * if not switching interrupt types, be sure interrupts are
3391		 * disabled, and then clear anything pending at this point,
3392		 * because we are starting clean.
3393		 */
3394		qib_7322_set_intr_state(dd, 0);
3395
3396		/* clear the reset error, init error/hwerror mask */
3397		qib_7322_init_hwerrors(dd);
3398
3399		/* clear any interrupt bits that might be set */
3400		qib_write_kreg(dd, kr_intclear, ~0ULL);
3401
3402		/* make sure no pending MSIx intr, and clear diag reg */
3403		qib_write_kreg(dd, kr_intgranted, ~0ULL);
3404		qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3405	}
3406
3407	if (!dd->cspec->num_msix_entries) {
3408		/* Try to get INTx interrupt */
3409try_intx:
3410		ret = pci_request_irq(dd->pcidev, 0, qib_7322intr, NULL, dd,
3411				      QIB_DRV_NAME);
3412		if (ret) {
3413			qib_dev_err(
3414				dd,
3415				"Couldn't setup INTx interrupt (irq=%d): %d\n",
3416				pci_irq_vector(dd->pcidev, 0), ret);
3417			return;
3418		}
3419		dd->cspec->main_int_mask = ~0ULL;
3420		return;
3421	}
3422
3423	/* Try to get MSIx interrupts */
3424	memset(redirect, 0, sizeof(redirect));
3425	mask = ~0ULL;
3426	msixnum = 0;
3427	local_mask = cpumask_of_pcibus(dd->pcidev->bus);
3428	firstcpu = cpumask_first(local_mask);
3429	if (firstcpu >= nr_cpu_ids ||
3430			cpumask_weight(local_mask) == num_online_cpus()) {
3431		local_mask = topology_core_cpumask(0);
3432		firstcpu = cpumask_first(local_mask);
3433	}
3434	if (firstcpu < nr_cpu_ids) {
3435		secondcpu = cpumask_next(firstcpu, local_mask);
3436		if (secondcpu >= nr_cpu_ids)
3437			secondcpu = firstcpu;
3438		currrcvcpu = secondcpu;
3439	}
3440	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3441		irq_handler_t handler;
3442		void *arg;
3443		int lsb, reg, sh;
3444#ifdef CONFIG_INFINIBAND_QIB_DCA
3445		int dca = 0;
3446#endif
3447		if (i < ARRAY_SIZE(irq_table)) {
3448			if (irq_table[i].port) {
3449				/* skip if for a non-configured port */
3450				if (irq_table[i].port > dd->num_pports)
3451					continue;
3452				arg = dd->pport + irq_table[i].port - 1;
3453			} else
3454				arg = dd;
3455#ifdef CONFIG_INFINIBAND_QIB_DCA
3456			dca = irq_table[i].dca;
3457#endif
3458			lsb = irq_table[i].lsb;
3459			handler = irq_table[i].handler;
3460			ret = pci_request_irq(dd->pcidev, msixnum, handler,
3461					      NULL, arg, QIB_DRV_NAME "%d%s",
3462					      dd->unit,
3463					      irq_table[i].name);
3464		} else {
3465			unsigned ctxt;
3466
3467			ctxt = i - ARRAY_SIZE(irq_table);
3468			/* per krcvq context receive interrupt */
3469			arg = dd->rcd[ctxt];
3470			if (!arg)
3471				continue;
3472			if (qib_krcvq01_no_msi && ctxt < 2)
3473				continue;
3474#ifdef CONFIG_INFINIBAND_QIB_DCA
3475			dca = 1;
3476#endif
3477			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3478			handler = qib_7322pintr;
3479			ret = pci_request_irq(dd->pcidev, msixnum, handler,
3480					      NULL, arg,
3481					      QIB_DRV_NAME "%d (kctx)",
3482					      dd->unit);
3483		}
3484
3485		if (ret) {
3486			/*
3487			 * Shouldn't happen since the enable said we could
3488			 * have as many as we are trying to setup here.
3489			 */
3490			qib_dev_err(dd,
3491				    "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
3492				    msixnum,
3493				    pci_irq_vector(dd->pcidev, msixnum),
3494				    ret);
3495			qib_7322_free_irq(dd);
3496			pci_alloc_irq_vectors(dd->pcidev, 1, 1,
3497					      PCI_IRQ_LEGACY);
3498			goto try_intx;
3499		}
3500		dd->cspec->msix_entries[msixnum].arg = arg;
3501#ifdef CONFIG_INFINIBAND_QIB_DCA
3502		dd->cspec->msix_entries[msixnum].dca = dca;
3503		dd->cspec->msix_entries[msixnum].rcv =
3504			handler == qib_7322pintr;
3505#endif
3506		if (lsb >= 0) {
3507			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3508			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3509				SYM_LSB(IntRedirect0, vec1);
3510			mask &= ~(1ULL << lsb);
3511			redirect[reg] |= ((u64) msixnum) << sh;
3512		}
3513		qib_read_kreg64(dd, 2 * msixnum + 1 +
3514				(QIB_7322_MsixTable_OFFS / sizeof(u64)));
3515		if (firstcpu < nr_cpu_ids &&
3516			zalloc_cpumask_var(
3517				&dd->cspec->msix_entries[msixnum].mask,
3518				GFP_KERNEL)) {
3519			if (handler == qib_7322pintr) {
3520				cpumask_set_cpu(currrcvcpu,
3521					dd->cspec->msix_entries[msixnum].mask);
3522				currrcvcpu = cpumask_next(currrcvcpu,
3523					local_mask);
3524				if (currrcvcpu >= nr_cpu_ids)
3525					currrcvcpu = secondcpu;
3526			} else {
3527				cpumask_set_cpu(firstcpu,
3528					dd->cspec->msix_entries[msixnum].mask);
3529			}
3530			irq_set_affinity_hint(
3531				pci_irq_vector(dd->pcidev, msixnum),
3532				dd->cspec->msix_entries[msixnum].mask);
3533		}
3534		msixnum++;
3535	}
3536	/* Initialize the vector mapping */
3537	for (i = 0; i < ARRAY_SIZE(redirect); i++)
3538		qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3539	dd->cspec->main_int_mask = mask;
3540	tasklet_setup(&dd->error_tasklet, qib_error_tasklet);
3541}
3542
3543/**
3544 * qib_7322_boardname - fill in the board name and note features
3545 * @dd: the qlogic_ib device
3546 *
3547 * info will be based on the board revision register
3548 */
3549static unsigned qib_7322_boardname(struct qib_devdata *dd)
3550{
3551	/* Will need enumeration of board-types here */
3552	u32 boardid;
3553	unsigned int features = DUAL_PORT_CAP;
3554
3555	boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3556
3557	switch (boardid) {
3558	case 0:
3559		dd->boardname = "InfiniPath_QLE7342_Emulation";
3560		break;
3561	case 1:
3562		dd->boardname = "InfiniPath_QLE7340";
3563		dd->flags |= QIB_HAS_QSFP;
3564		features = PORT_SPD_CAP;
3565		break;
3566	case 2:
3567		dd->boardname = "InfiniPath_QLE7342";
3568		dd->flags |= QIB_HAS_QSFP;
3569		break;
3570	case 3:
3571		dd->boardname = "InfiniPath_QMI7342";
3572		break;
3573	case 4:
3574		dd->boardname = "InfiniPath_Unsupported7342";
3575		qib_dev_err(dd, "Unsupported version of QMH7342\n");
3576		features = 0;
3577		break;
3578	case BOARD_QMH7342:
3579		dd->boardname = "InfiniPath_QMH7342";
3580		features = 0x24;
3581		break;
3582	case BOARD_QME7342:
3583		dd->boardname = "InfiniPath_QME7342";
3584		break;
3585	case 8:
3586		dd->boardname = "InfiniPath_QME7362";
3587		dd->flags |= QIB_HAS_QSFP;
3588		break;
3589	case BOARD_QMH7360:
3590		dd->boardname = "Intel IB QDR 1P FLR-QSFP Adptr";
3591		dd->flags |= QIB_HAS_QSFP;
3592		break;
3593	case 15:
3594		dd->boardname = "InfiniPath_QLE7342_TEST";
3595		dd->flags |= QIB_HAS_QSFP;
3596		break;
3597	default:
3598		dd->boardname = "InfiniPath_QLE73xy_UNKNOWN";
3599		qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3600		break;
3601	}
3602	dd->board_atten = 1; /* index into txdds_Xdr */
3603
3604	snprintf(dd->boardversion, sizeof(dd->boardversion),
3605		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3606		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3607		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
3608		 dd->majrev, dd->minrev,
3609		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
3610
3611	if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3612		qib_devinfo(dd->pcidev,
3613			    "IB%u: Forced to single port mode by module parameter\n",
3614			    dd->unit);
3615		features &= PORT_SPD_CAP;
3616	}
3617
3618	return features;
3619}
3620
3621/*
3622 * This routine sleeps, so it can only be called from user context, not
3623 * from interrupt context.
3624 */
3625static int qib_do_7322_reset(struct qib_devdata *dd)
3626{
3627	u64 val;
3628	u64 *msix_vecsave = NULL;
3629	int i, msix_entries, ret = 1;
3630	u16 cmdval;
3631	u8 int_line, clinesz;
3632	unsigned long flags;
3633
3634	/* Use dev_err so it shows up in logs, etc. */
3635	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3636
3637	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3638
3639	msix_entries = dd->cspec->num_msix_entries;
3640
3641	/* no interrupts till re-initted */
3642	qib_7322_set_intr_state(dd, 0);
3643
3644	qib_7322_free_irq(dd);
3645
3646	if (msix_entries) {
3647		/* can be up to 512 bytes, too big for stack */
3648		msix_vecsave = kmalloc_array(2 * dd->cspec->num_msix_entries,
3649					     sizeof(u64),
3650					     GFP_KERNEL);
3651	}
3652
3653	/*
3654	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3655	 * info that is set up by the BIOS, so we have to save and restore
3656	 * it ourselves.   There is some risk something could change it,
3657	 * after we save it, but since we have disabled the MSIx, it
3658	 * shouldn't be touched...
3659	 */
3660	for (i = 0; i < msix_entries; i++) {
3661		u64 vecaddr, vecdata;
3662
3663		vecaddr = qib_read_kreg64(dd, 2 * i +
3664				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3665		vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3666				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3667		if (msix_vecsave) {
3668			msix_vecsave[2 * i] = vecaddr;
3669			/* save it without the masked bit set */
3670			msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3671		}
3672	}
3673
3674	dd->pport->cpspec->ibdeltainprog = 0;
3675	dd->pport->cpspec->ibsymdelta = 0;
3676	dd->pport->cpspec->iblnkerrdelta = 0;
3677	dd->pport->cpspec->ibmalfdelta = 0;
3678	/* so we check interrupts work again */
3679	dd->z_int_counter = qib_int_counter(dd);
3680
3681	/*
3682	 * Keep chip from being accessed until we are ready.  Use
3683	 * writeq() directly, to allow the write even though QIB_PRESENT
3684	 * isn't set.
3685	 */
3686	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3687	dd->flags |= QIB_DOING_RESET;
3688	val = dd->control | QLOGIC_IB_C_RESET;
3689	writeq(val, &dd->kregbase[kr_control]);
3690
3691	for (i = 1; i <= 5; i++) {
3692		/*
3693		 * Allow MBIST, etc. to complete; longer on each retry.
3694		 * We sometimes get machine checks from bus timeout if no
3695		 * response, so for now, make it *really* long.
3696		 */
3697		msleep(1000 + (1 + i) * 3000);
3698
3699		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3700
3701		/*
3702		 * Use readq directly, so we don't need to mark it as PRESENT
3703		 * until we get a successful indication that all is well.
3704		 */
3705		val = readq(&dd->kregbase[kr_revision]);
3706		if (val == dd->revision)
3707			break;
3708		if (i == 5) {
3709			qib_dev_err(dd,
3710				"Failed to initialize after reset, unusable\n");
3711			ret = 0;
3712			goto  bail;
3713		}
3714	}
3715
3716	dd->flags |= QIB_PRESENT; /* it's back */
3717
3718	if (msix_entries) {
3719		/* restore the MSIx vector address and data if saved above */
3720		for (i = 0; i < msix_entries; i++) {
3721			if (!msix_vecsave || !msix_vecsave[2 * i])
3722				continue;
3723			qib_write_kreg(dd, 2 * i +
3724				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3725				msix_vecsave[2 * i]);
3726			qib_write_kreg(dd, 1 + 2 * i +
3727				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3728				msix_vecsave[1 + 2 * i]);
3729		}
3730	}
3731
3732	/* initialize the remaining registers.  */
3733	for (i = 0; i < dd->num_pports; ++i)
3734		write_7322_init_portregs(&dd->pport[i]);
3735	write_7322_initregs(dd);
3736
3737	if (qib_pcie_params(dd, dd->lbus_width, &msix_entries))
3738		qib_dev_err(dd,
3739			"Reset failed to setup PCIe or interrupts; continuing anyway\n");
3740
3741	dd->cspec->num_msix_entries = msix_entries;
3742	qib_setup_7322_interrupt(dd, 1);
3743
3744	for (i = 0; i < dd->num_pports; ++i) {
3745		struct qib_pportdata *ppd = &dd->pport[i];
3746
3747		spin_lock_irqsave(&ppd->lflags_lock, flags);
3748		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3749		ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3750		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3751	}
3752
3753bail:
3754	dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3755	kfree(msix_vecsave);
3756	return ret;
3757}
3758
3759/**
3760 * qib_7322_put_tid - write a TID to the chip
3761 * @dd: the qlogic_ib device
3762 * @tidptr: pointer to the expected TID (in chip) to update
3763 * @tidtype: 0 for eager, 1 for expected
3764 * @pa: physical address of in memory buffer; tidinvalid if freeing
3765 */
3766static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3767			     u32 type, unsigned long pa)
3768{
3769	if (!(dd->flags & QIB_PRESENT))
3770		return;
3771	if (pa != dd->tidinvalid) {
3772		u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3773
3774		/* paranoia checks */
3775		if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3776			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3777				    pa);
3778			return;
3779		}
3780		if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3781			qib_dev_err(dd,
3782				"Physical page address 0x%lx larger than supported\n",
3783				pa);
3784			return;
3785		}
3786
3787		if (type == RCVHQ_RCV_TYPE_EAGER)
3788			chippa |= dd->tidtemplate;
3789		else /* for now, always full 4KB page */
3790			chippa |= IBA7322_TID_SZ_4K;
3791		pa = chippa;
3792	}
3793	writeq(pa, tidptr);
3794}
3795
3796/**
3797 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3798 * @dd: the qlogic_ib device
3799 * @ctxt: the ctxt
3800 *
3801 * clear all TID entries for a ctxt, expected and eager.
3802 * Used from qib_close().
3803 */
3804static void qib_7322_clear_tids(struct qib_devdata *dd,
3805				struct qib_ctxtdata *rcd)
3806{
3807	u64 __iomem *tidbase;
3808	unsigned long tidinv;
3809	u32 ctxt;
3810	int i;
3811
3812	if (!dd->kregbase || !rcd)
3813		return;
3814
3815	ctxt = rcd->ctxt;
3816
3817	tidinv = dd->tidinvalid;
3818	tidbase = (u64 __iomem *)
3819		((char __iomem *) dd->kregbase +
3820		 dd->rcvtidbase +
3821		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3822
3823	for (i = 0; i < dd->rcvtidcnt; i++)
3824		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3825				 tidinv);
3826
3827	tidbase = (u64 __iomem *)
3828		((char __iomem *) dd->kregbase +
3829		 dd->rcvegrbase +
3830		 rcd->rcvegr_tid_base * sizeof(*tidbase));
3831
3832	for (i = 0; i < rcd->rcvegrcnt; i++)
3833		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3834				 tidinv);
3835}
3836
3837/**
3838 * qib_7322_tidtemplate - setup constants for TID updates
3839 * @dd: the qlogic_ib device
3840 *
3841 * We setup stuff that we use a lot, to avoid calculating each time
3842 */
3843static void qib_7322_tidtemplate(struct qib_devdata *dd)
3844{
3845	/*
3846	 * For now, we always allocate 4KB buffers (at init) so we can
3847	 * receive max size packets.  We may want a module parameter to
3848	 * specify 2KB or 4KB and/or make it per port instead of per device
3849	 * for those who want to reduce memory footprint.  Note that the
3850	 * rcvhdrentsize size must be large enough to hold the largest
3851	 * IB header (currently 96 bytes) that we expect to handle (plus of
3852	 * course the 2 dwords of RHF).
3853	 */
3854	if (dd->rcvegrbufsize == 2048)
3855		dd->tidtemplate = IBA7322_TID_SZ_2K;
3856	else if (dd->rcvegrbufsize == 4096)
3857		dd->tidtemplate = IBA7322_TID_SZ_4K;
3858	dd->tidinvalid = 0;
3859}
3860
3861/**
3862 * qib_init_7322_get_base_info - set chip-specific flags for user code
3863 * @rcd: the qlogic_ib ctxt
3864 * @kbase: qib_base_info pointer
3865 *
3866 * We set the PCIE flag because the lower bandwidth on PCIe vs
3867 * HyperTransport can affect some user packet algorithims.
3868 */
3869
3870static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3871				  struct qib_base_info *kinfo)
3872{
3873	kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3874		QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3875		QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3876	if (rcd->dd->cspec->r1)
3877		kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3878	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3879		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3880
3881	return 0;
3882}
3883
3884static struct qib_message_header *
3885qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3886{
3887	u32 offset = qib_hdrget_offset(rhf_addr);
3888
3889	return (struct qib_message_header *)
3890		(rhf_addr - dd->rhf_offset + offset);
3891}
3892
3893/*
3894 * Configure number of contexts.
3895 */
3896static void qib_7322_config_ctxts(struct qib_devdata *dd)
3897{
3898	unsigned long flags;
3899	u32 nchipctxts;
3900
3901	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3902	dd->cspec->numctxts = nchipctxts;
3903	if (qib_n_krcv_queues > 1 && dd->num_pports) {
3904		dd->first_user_ctxt = NUM_IB_PORTS +
3905			(qib_n_krcv_queues - 1) * dd->num_pports;
3906		if (dd->first_user_ctxt > nchipctxts)
3907			dd->first_user_ctxt = nchipctxts;
3908		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3909	} else {
3910		dd->first_user_ctxt = NUM_IB_PORTS;
3911		dd->n_krcv_queues = 1;
3912	}
3913
3914	if (!qib_cfgctxts) {
3915		int nctxts = dd->first_user_ctxt + num_online_cpus();
3916
3917		if (nctxts <= 6)
3918			dd->ctxtcnt = 6;
3919		else if (nctxts <= 10)
3920			dd->ctxtcnt = 10;
3921		else if (nctxts <= nchipctxts)
3922			dd->ctxtcnt = nchipctxts;
3923	} else if (qib_cfgctxts < dd->num_pports)
3924		dd->ctxtcnt = dd->num_pports;
3925	else if (qib_cfgctxts <= nchipctxts)
3926		dd->ctxtcnt = qib_cfgctxts;
3927	if (!dd->ctxtcnt) /* none of the above, set to max */
3928		dd->ctxtcnt = nchipctxts;
3929
3930	/*
3931	 * Chip can be configured for 6, 10, or 18 ctxts, and choice
3932	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3933	 * Lock to be paranoid about later motion, etc.
3934	 */
3935	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3936	if (dd->ctxtcnt > 10)
3937		dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3938	else if (dd->ctxtcnt > 6)
3939		dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3940	/* else configure for default 6 receive ctxts */
3941
3942	/* The XRC opcode is 5. */
3943	dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3944
3945	/*
3946	 * RcvCtrl *must* be written here so that the
3947	 * chip understands how to change rcvegrcnt below.
3948	 */
3949	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3950	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3951
3952	/* kr_rcvegrcnt changes based on the number of contexts enabled */
3953	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3954	if (qib_rcvhdrcnt)
3955		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3956	else
3957		dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
3958				    dd->num_pports > 1 ? 1024U : 2048U);
3959}
3960
3961static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3962{
3963
3964	int lsb, ret = 0;
3965	u64 maskr; /* right-justified mask */
3966
3967	switch (which) {
3968
3969	case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
3970		ret = ppd->link_width_enabled;
3971		goto done;
3972
3973	case QIB_IB_CFG_LWID: /* Get currently active Link-width */
3974		ret = ppd->link_width_active;
3975		goto done;
3976
3977	case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
3978		ret = ppd->link_speed_enabled;
3979		goto done;
3980
3981	case QIB_IB_CFG_SPD: /* Get current Link spd */
3982		ret = ppd->link_speed_active;
3983		goto done;
3984
3985	case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
3986		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3987		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3988		break;
3989
3990	case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
3991		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3992		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3993		break;
3994
3995	case QIB_IB_CFG_LINKLATENCY:
3996		ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
3997			SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
3998		goto done;
3999
4000	case QIB_IB_CFG_OP_VLS:
4001		ret = ppd->vls_operational;
4002		goto done;
4003
4004	case QIB_IB_CFG_VL_HIGH_CAP:
4005		ret = 16;
4006		goto done;
4007
4008	case QIB_IB_CFG_VL_LOW_CAP:
4009		ret = 16;
4010		goto done;
4011
4012	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4013		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4014				OverrunThreshold);
4015		goto done;
4016
4017	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4018		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4019				PhyerrThreshold);
4020		goto done;
4021
4022	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4023		/* will only take effect when the link state changes */
4024		ret = (ppd->cpspec->ibcctrl_a &
4025		       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
4026			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
4027		goto done;
4028
4029	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
4030		lsb = IBA7322_IBC_HRTBT_LSB;
4031		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4032		break;
4033
4034	case QIB_IB_CFG_PMA_TICKS:
4035		/*
4036		 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
4037		 * Since the clock is always 250MHz, the value is 3, 1 or 0.
4038		 */
4039		if (ppd->link_speed_active == QIB_IB_QDR)
4040			ret = 3;
4041		else if (ppd->link_speed_active == QIB_IB_DDR)
4042			ret = 1;
4043		else
4044			ret = 0;
4045		goto done;
4046
4047	default:
4048		ret = -EINVAL;
4049		goto done;
4050	}
4051	ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
4052done:
4053	return ret;
4054}
4055
4056/*
4057 * Below again cribbed liberally from older version. Do not lean
4058 * heavily on it.
4059 */
4060#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
4061#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
4062	| (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
4063
4064static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
4065{
4066	struct qib_devdata *dd = ppd->dd;
4067	u64 maskr; /* right-justified mask */
4068	int lsb, ret = 0;
4069	u16 lcmd, licmd;
4070	unsigned long flags;
4071
4072	switch (which) {
4073	case QIB_IB_CFG_LIDLMC:
4074		/*
4075		 * Set LID and LMC. Combined to avoid possible hazard
4076		 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
4077		 */
4078		lsb = IBA7322_IBC_DLIDLMC_SHIFT;
4079		maskr = IBA7322_IBC_DLIDLMC_MASK;
4080		/*
4081		 * For header-checking, the SLID in the packet will
4082		 * be masked with SendIBSLMCMask, and compared
4083		 * with SendIBSLIDAssignMask. Make sure we do not
4084		 * set any bits not covered by the mask, or we get
4085		 * false-positives.
4086		 */
4087		qib_write_kreg_port(ppd, krp_sendslid,
4088				    val & (val >> 16) & SendIBSLIDAssignMask);
4089		qib_write_kreg_port(ppd, krp_sendslidmask,
4090				    (val >> 16) & SendIBSLMCMask);
4091		break;
4092
4093	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
4094		ppd->link_width_enabled = val;
4095		/* convert IB value to chip register value */
4096		if (val == IB_WIDTH_1X)
4097			val = 0;
4098		else if (val == IB_WIDTH_4X)
4099			val = 1;
4100		else
4101			val = 3;
4102		maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
4103		lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
4104		break;
4105
4106	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
4107		/*
4108		 * As with width, only write the actual register if the
4109		 * link is currently down, otherwise takes effect on next
4110		 * link change.  Since setting is being explicitly requested
4111		 * (via MAD or sysfs), clear autoneg failure status if speed
4112		 * autoneg is enabled.
4113		 */
4114		ppd->link_speed_enabled = val;
4115		val <<= IBA7322_IBC_SPEED_LSB;
4116		maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
4117			IBA7322_IBC_MAX_SPEED_MASK;
4118		if (val & (val - 1)) {
4119			/* Muliple speeds enabled */
4120			val |= IBA7322_IBC_IBTA_1_2_MASK |
4121				IBA7322_IBC_MAX_SPEED_MASK;
4122			spin_lock_irqsave(&ppd->lflags_lock, flags);
4123			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
4124			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4125		} else if (val & IBA7322_IBC_SPEED_QDR)
4126			val |= IBA7322_IBC_IBTA_1_2_MASK;
4127		/* IBTA 1.2 mode + min/max + speed bits are contiguous */
4128		lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
4129		break;
4130
4131	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
4132		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4133		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4134		break;
4135
4136	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
4137		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4138		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4139		break;
4140
4141	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4142		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4143				  OverrunThreshold);
4144		if (maskr != val) {
4145			ppd->cpspec->ibcctrl_a &=
4146				~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
4147			ppd->cpspec->ibcctrl_a |= (u64) val <<
4148				SYM_LSB(IBCCtrlA_0, OverrunThreshold);
4149			qib_write_kreg_port(ppd, krp_ibcctrl_a,
4150					    ppd->cpspec->ibcctrl_a);
4151			qib_write_kreg(dd, kr_scratch, 0ULL);
4152		}
4153		goto bail;
4154
4155	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4156		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4157				  PhyerrThreshold);
4158		if (maskr != val) {
4159			ppd->cpspec->ibcctrl_a &=
4160				~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
4161			ppd->cpspec->ibcctrl_a |= (u64) val <<
4162				SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
4163			qib_write_kreg_port(ppd, krp_ibcctrl_a,
4164					    ppd->cpspec->ibcctrl_a);
4165			qib_write_kreg(dd, kr_scratch, 0ULL);
4166		}
4167		goto bail;
4168
4169	case QIB_IB_CFG_PKEYS: /* update pkeys */
4170		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
4171			((u64) ppd->pkeys[2] << 32) |
4172			((u64) ppd->pkeys[3] << 48);
4173		qib_write_kreg_port(ppd, krp_partitionkey, maskr);
4174		goto bail;
4175
4176	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4177		/* will only take effect when the link state changes */
4178		if (val == IB_LINKINITCMD_POLL)
4179			ppd->cpspec->ibcctrl_a &=
4180				~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4181		else /* SLEEP */
4182			ppd->cpspec->ibcctrl_a |=
4183				SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4184		qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4185		qib_write_kreg(dd, kr_scratch, 0ULL);
4186		goto bail;
4187
4188	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
4189		/*
4190		 * Update our housekeeping variables, and set IBC max
4191		 * size, same as init code; max IBC is max we allow in
4192		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
4193		 * Set even if it's unchanged, print debug message only
4194		 * on changes.
4195		 */
4196		val = (ppd->ibmaxlen >> 2) + 1;
4197		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
4198		ppd->cpspec->ibcctrl_a |= (u64)val <<
4199			SYM_LSB(IBCCtrlA_0, MaxPktLen);
4200		qib_write_kreg_port(ppd, krp_ibcctrl_a,
4201				    ppd->cpspec->ibcctrl_a);
4202		qib_write_kreg(dd, kr_scratch, 0ULL);
4203		goto bail;
4204
4205	case QIB_IB_CFG_LSTATE: /* set the IB link state */
4206		switch (val & 0xffff0000) {
4207		case IB_LINKCMD_DOWN:
4208			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
4209			ppd->cpspec->ibmalfusesnap = 1;
4210			ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
4211				crp_errlink);
4212			if (!ppd->cpspec->ibdeltainprog &&
4213			    qib_compat_ddr_negotiate) {
4214				ppd->cpspec->ibdeltainprog = 1;
4215				ppd->cpspec->ibsymsnap =
4216					read_7322_creg32_port(ppd,
4217							      crp_ibsymbolerr);
4218				ppd->cpspec->iblnkerrsnap =
4219					read_7322_creg32_port(ppd,
4220						      crp_iblinkerrrecov);
4221			}
4222			break;
4223
4224		case IB_LINKCMD_ARMED:
4225			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
4226			if (ppd->cpspec->ibmalfusesnap) {
4227				ppd->cpspec->ibmalfusesnap = 0;
4228				ppd->cpspec->ibmalfdelta +=
4229					read_7322_creg32_port(ppd,
4230							      crp_errlink) -
4231					ppd->cpspec->ibmalfsnap;
4232			}
4233			break;
4234
4235		case IB_LINKCMD_ACTIVE:
4236			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
4237			break;
4238
4239		default:
4240			ret = -EINVAL;
4241			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
4242			goto bail;
4243		}
4244		switch (val & 0xffff) {
4245		case IB_LINKINITCMD_NOP:
4246			licmd = 0;
4247			break;
4248
4249		case IB_LINKINITCMD_POLL:
4250			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
4251			break;
4252
4253		case IB_LINKINITCMD_SLEEP:
4254			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
4255			break;
4256
4257		case IB_LINKINITCMD_DISABLE:
4258			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
4259			ppd->cpspec->chase_end = 0;
4260			/*
4261			 * stop state chase counter and timer, if running.
4262			 * wait forpending timer, but don't clear .data (ppd)!
4263			 */
4264			if (ppd->cpspec->chase_timer.expires) {
4265				del_timer_sync(&ppd->cpspec->chase_timer);
4266				ppd->cpspec->chase_timer.expires = 0;
4267			}
4268			break;
4269
4270		default:
4271			ret = -EINVAL;
4272			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
4273				    val & 0xffff);
4274			goto bail;
4275		}
4276		qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4277		goto bail;
4278
4279	case QIB_IB_CFG_OP_VLS:
4280		if (ppd->vls_operational != val) {
4281			ppd->vls_operational = val;
4282			set_vls(ppd);
4283		}
4284		goto bail;
4285
4286	case QIB_IB_CFG_VL_HIGH_LIMIT:
4287		qib_write_kreg_port(ppd, krp_highprio_limit, val);
4288		goto bail;
4289
4290	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
4291		if (val > 3) {
4292			ret = -EINVAL;
4293			goto bail;
4294		}
4295		lsb = IBA7322_IBC_HRTBT_LSB;
4296		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4297		break;
4298
4299	case QIB_IB_CFG_PORT:
4300		/* val is the port number of the switch we are connected to. */
4301		if (ppd->dd->cspec->r1) {
4302			cancel_delayed_work(&ppd->cpspec->ipg_work);
4303			ppd->cpspec->ipg_tries = 0;
4304		}
4305		goto bail;
4306
4307	default:
4308		ret = -EINVAL;
4309		goto bail;
4310	}
4311	ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4312	ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4313	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4314	qib_write_kreg(dd, kr_scratch, 0);
4315bail:
4316	return ret;
4317}
4318
4319static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4320{
4321	int ret = 0;
4322	u64 val, ctrlb;
4323
4324	/* only IBC loopback, may add serdes and xgxs loopbacks later */
4325	if (!strncmp(what, "ibc", 3)) {
4326		ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4327						       Loopback);
4328		val = 0; /* disable heart beat, so link will come up */
4329		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4330			 ppd->dd->unit, ppd->port);
4331	} else if (!strncmp(what, "off", 3)) {
4332		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4333							Loopback);
4334		/* enable heart beat again */
4335		val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4336		qib_devinfo(ppd->dd->pcidev,
4337			"Disabling IB%u:%u IBC loopback (normal)\n",
4338			ppd->dd->unit, ppd->port);
4339	} else
4340		ret = -EINVAL;
4341	if (!ret) {
4342		qib_write_kreg_port(ppd, krp_ibcctrl_a,
4343				    ppd->cpspec->ibcctrl_a);
4344		ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4345					     << IBA7322_IBC_HRTBT_LSB);
4346		ppd->cpspec->ibcctrl_b = ctrlb | val;
4347		qib_write_kreg_port(ppd, krp_ibcctrl_b,
4348				    ppd->cpspec->ibcctrl_b);
4349		qib_write_kreg(ppd->dd, kr_scratch, 0);
4350	}
4351	return ret;
4352}
4353
4354static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4355			   struct ib_vl_weight_elem *vl)
4356{
4357	unsigned i;
4358
4359	for (i = 0; i < 16; i++, regno++, vl++) {
4360		u32 val = qib_read_kreg_port(ppd, regno);
4361
4362		vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4363			SYM_RMASK(LowPriority0_0, VirtualLane);
4364		vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4365			SYM_RMASK(LowPriority0_0, Weight);
4366	}
4367}
4368
4369static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4370			   struct ib_vl_weight_elem *vl)
4371{
4372	unsigned i;
4373
4374	for (i = 0; i < 16; i++, regno++, vl++) {
4375		u64 val;
4376
4377		val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4378			SYM_LSB(LowPriority0_0, VirtualLane)) |
4379		      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4380			SYM_LSB(LowPriority0_0, Weight));
4381		qib_write_kreg_port(ppd, regno, val);
4382	}
4383	if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4384		struct qib_devdata *dd = ppd->dd;
4385		unsigned long flags;
4386
4387		spin_lock_irqsave(&dd->sendctrl_lock, flags);
4388		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4389		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4390		qib_write_kreg(dd, kr_scratch, 0);
4391		spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4392	}
4393}
4394
4395static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4396{
4397	switch (which) {
4398	case QIB_IB_TBL_VL_HIGH_ARB:
4399		get_vl_weights(ppd, krp_highprio_0, t);
4400		break;
4401
4402	case QIB_IB_TBL_VL_LOW_ARB:
4403		get_vl_weights(ppd, krp_lowprio_0, t);
4404		break;
4405
4406	default:
4407		return -EINVAL;
4408	}
4409	return 0;
4410}
4411
4412static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4413{
4414	switch (which) {
4415	case QIB_IB_TBL_VL_HIGH_ARB:
4416		set_vl_weights(ppd, krp_highprio_0, t);
4417		break;
4418
4419	case QIB_IB_TBL_VL_LOW_ARB:
4420		set_vl_weights(ppd, krp_lowprio_0, t);
4421		break;
4422
4423	default:
4424		return -EINVAL;
4425	}
4426	return 0;
4427}
4428
4429static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4430				    u32 updegr, u32 egrhd, u32 npkts)
4431{
4432	/*
4433	 * Need to write timeout register before updating rcvhdrhead to ensure
4434	 * that the timer is enabled on reception of a packet.
4435	 */
4436	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4437		adjust_rcv_timeout(rcd, npkts);
4438	if (updegr)
4439		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4440	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4441	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4442}
4443
4444static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4445{
4446	u32 head, tail;
4447
4448	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4449	if (rcd->rcvhdrtail_kvaddr)
4450		tail = qib_get_rcvhdrtail(rcd);
4451	else
4452		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4453	return head == tail;
4454}
4455
4456#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4457	QIB_RCVCTRL_CTXT_DIS | \
4458	QIB_RCVCTRL_TIDFLOW_ENB | \
4459	QIB_RCVCTRL_TIDFLOW_DIS | \
4460	QIB_RCVCTRL_TAILUPD_ENB | \
4461	QIB_RCVCTRL_TAILUPD_DIS | \
4462	QIB_RCVCTRL_INTRAVAIL_ENB | \
4463	QIB_RCVCTRL_INTRAVAIL_DIS | \
4464	QIB_RCVCTRL_BP_ENB | \
4465	QIB_RCVCTRL_BP_DIS)
4466
4467#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4468	QIB_RCVCTRL_CTXT_DIS | \
4469	QIB_RCVCTRL_PKEY_DIS | \
4470	QIB_RCVCTRL_PKEY_ENB)
4471
4472/*
4473 * Modify the RCVCTRL register in chip-specific way. This
4474 * is a function because bit positions and (future) register
4475 * location is chip-specifc, but the needed operations are
4476 * generic. <op> is a bit-mask because we often want to
4477 * do multiple modifications.
4478 */
4479static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4480			     int ctxt)
4481{
4482	struct qib_devdata *dd = ppd->dd;
4483	struct qib_ctxtdata *rcd;
4484	u64 mask, val;
4485	unsigned long flags;
4486
4487	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4488
4489	if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4490		dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4491	if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4492		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4493	if (op & QIB_RCVCTRL_TAILUPD_ENB)
4494		dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4495	if (op & QIB_RCVCTRL_TAILUPD_DIS)
4496		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4497	if (op & QIB_RCVCTRL_PKEY_ENB)
4498		ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4499	if (op & QIB_RCVCTRL_PKEY_DIS)
4500		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4501	if (ctxt < 0) {
4502		mask = (1ULL << dd->ctxtcnt) - 1;
4503		rcd = NULL;
4504	} else {
4505		mask = (1ULL << ctxt);
4506		rcd = dd->rcd[ctxt];
4507	}
4508	if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4509		ppd->p_rcvctrl |=
4510			(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4511		if (!(dd->flags & QIB_NODMA_RTAIL)) {
4512			op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4513			dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4514		}
4515		/* Write these registers before the context is enabled. */
4516		qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4517				    rcd->rcvhdrqtailaddr_phys);
4518		qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4519				    rcd->rcvhdrq_phys);
4520		rcd->seq_cnt = 1;
4521	}
4522	if (op & QIB_RCVCTRL_CTXT_DIS)
4523		ppd->p_rcvctrl &=
4524			~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4525	if (op & QIB_RCVCTRL_BP_ENB)
4526		dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4527	if (op & QIB_RCVCTRL_BP_DIS)
4528		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4529	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4530		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4531	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4532		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4533	/*
4534	 * Decide which registers to write depending on the ops enabled.
4535	 * Special case is "flush" (no bits set at all)
4536	 * which needs to write both.
4537	 */
4538	if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4539		qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4540	if (op == 0 || (op & RCVCTRL_PORT_MODS))
4541		qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4542	if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4543		/*
4544		 * Init the context registers also; if we were
4545		 * disabled, tail and head should both be zero
4546		 * already from the enable, but since we don't
4547		 * know, we have to do it explicitly.
4548		 */
4549		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4550		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4551
4552		/* be sure enabling write seen; hd/tl should be 0 */
4553		(void) qib_read_kreg32(dd, kr_scratch);
4554		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4555		dd->rcd[ctxt]->head = val;
4556		/* If kctxt, interrupt on next receive. */
4557		if (ctxt < dd->first_user_ctxt)
4558			val |= dd->rhdrhead_intr_off;
4559		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4560	} else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4561		dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4562		/* arm rcv interrupt */
4563		val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4564		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4565	}
4566	if (op & QIB_RCVCTRL_CTXT_DIS) {
4567		unsigned f;
4568
4569		/* Now that the context is disabled, clear these registers. */
4570		if (ctxt >= 0) {
4571			qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4572			qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4573			for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4574				qib_write_ureg(dd, ur_rcvflowtable + f,
4575					       TIDFLOW_ERRBITS, ctxt);
4576		} else {
4577			unsigned i;
4578
4579			for (i = 0; i < dd->cfgctxts; i++) {
4580				qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4581						    i, 0);
4582				qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4583				for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4584					qib_write_ureg(dd, ur_rcvflowtable + f,
4585						       TIDFLOW_ERRBITS, i);
4586			}
4587		}
4588	}
4589	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4590}
4591
4592/*
4593 * Modify the SENDCTRL register in chip-specific way. This
4594 * is a function where there are multiple such registers with
4595 * slightly different layouts.
4596 * The chip doesn't allow back-to-back sendctrl writes, so write
4597 * the scratch register after writing sendctrl.
4598 *
4599 * Which register is written depends on the operation.
4600 * Most operate on the common register, while
4601 * SEND_ENB and SEND_DIS operate on the per-port ones.
4602 * SEND_ENB is included in common because it can change SPCL_TRIG
4603 */
4604#define SENDCTRL_COMMON_MODS (\
4605	QIB_SENDCTRL_CLEAR | \
4606	QIB_SENDCTRL_AVAIL_DIS | \
4607	QIB_SENDCTRL_AVAIL_ENB | \
4608	QIB_SENDCTRL_AVAIL_BLIP | \
4609	QIB_SENDCTRL_DISARM | \
4610	QIB_SENDCTRL_DISARM_ALL | \
4611	QIB_SENDCTRL_SEND_ENB)
4612
4613#define SENDCTRL_PORT_MODS (\
4614	QIB_SENDCTRL_CLEAR | \
4615	QIB_SENDCTRL_SEND_ENB | \
4616	QIB_SENDCTRL_SEND_DIS | \
4617	QIB_SENDCTRL_FLUSH)
4618
4619static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4620{
4621	struct qib_devdata *dd = ppd->dd;
4622	u64 tmp_dd_sendctrl;
4623	unsigned long flags;
4624
4625	spin_lock_irqsave(&dd->sendctrl_lock, flags);
4626
4627	/* First the dd ones that are "sticky", saved in shadow */
4628	if (op & QIB_SENDCTRL_CLEAR)
4629		dd->sendctrl = 0;
4630	if (op & QIB_SENDCTRL_AVAIL_DIS)
4631		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4632	else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4633		dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4634		if (dd->flags & QIB_USE_SPCL_TRIG)
4635			dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4636	}
4637
4638	/* Then the ppd ones that are "sticky", saved in shadow */
4639	if (op & QIB_SENDCTRL_SEND_DIS)
4640		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4641	else if (op & QIB_SENDCTRL_SEND_ENB)
4642		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4643
4644	if (op & QIB_SENDCTRL_DISARM_ALL) {
4645		u32 i, last;
4646
4647		tmp_dd_sendctrl = dd->sendctrl;
4648		last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4649		/*
4650		 * Disarm any buffers that are not yet launched,
4651		 * disabling updates until done.
4652		 */
4653		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4654		for (i = 0; i < last; i++) {
4655			qib_write_kreg(dd, kr_sendctrl,
4656				       tmp_dd_sendctrl |
4657				       SYM_MASK(SendCtrl, Disarm) | i);
4658			qib_write_kreg(dd, kr_scratch, 0);
4659		}
4660	}
4661
4662	if (op & QIB_SENDCTRL_FLUSH) {
4663		u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4664
4665		/*
4666		 * Now drain all the fifos.  The Abort bit should never be
4667		 * needed, so for now, at least, we don't use it.
4668		 */
4669		tmp_ppd_sendctrl |=
4670			SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4671			SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4672			SYM_MASK(SendCtrl_0, TxeBypassIbc);
4673		qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4674		qib_write_kreg(dd, kr_scratch, 0);
4675	}
4676
4677	tmp_dd_sendctrl = dd->sendctrl;
4678
4679	if (op & QIB_SENDCTRL_DISARM)
4680		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4681			((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4682			 SYM_LSB(SendCtrl, DisarmSendBuf));
4683	if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4684	    (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4685		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4686
4687	if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4688		qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4689		qib_write_kreg(dd, kr_scratch, 0);
4690	}
4691
4692	if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4693		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4694		qib_write_kreg(dd, kr_scratch, 0);
4695	}
4696
4697	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4698		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4699		qib_write_kreg(dd, kr_scratch, 0);
4700	}
4701
4702	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4703
4704	if (op & QIB_SENDCTRL_FLUSH) {
4705		u32 v;
4706		/*
4707		 * ensure writes have hit chip, then do a few
4708		 * more reads, to allow DMA of pioavail registers
4709		 * to occur, so in-memory copy is in sync with
4710		 * the chip.  Not always safe to sleep.
4711		 */
4712		v = qib_read_kreg32(dd, kr_scratch);
4713		qib_write_kreg(dd, kr_scratch, v);
4714		v = qib_read_kreg32(dd, kr_scratch);
4715		qib_write_kreg(dd, kr_scratch, v);
4716		qib_read_kreg32(dd, kr_scratch);
4717	}
4718}
4719
4720#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4721#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4722#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4723
4724/**
4725 * qib_portcntr_7322 - read a per-port chip counter
4726 * @ppd: the qlogic_ib pport
4727 * @creg: the counter to read (not a chip offset)
4728 */
4729static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4730{
4731	struct qib_devdata *dd = ppd->dd;
4732	u64 ret = 0ULL;
4733	u16 creg;
4734	/* 0xffff for unimplemented or synthesized counters */
4735	static const u32 xlator[] = {
4736		[QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4737		[QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4738		[QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4739		[QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4740		[QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4741		[QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4742		[QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4743		[QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4744		[QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4745		[QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4746		[QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4747		[QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4748		[QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed  for 7322 */
4749		[QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4750		[QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4751		[QIBPORTCNTR_ERRICRC] = crp_erricrc,
4752		[QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4753		[QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4754		[QIBPORTCNTR_BADFORMAT] = crp_badformat,
4755		[QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4756		[QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4757		[QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4758		[QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4759		[QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4760		[QIBPORTCNTR_ERRLINK] = crp_errlink,
4761		[QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4762		[QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4763		[QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4764		[QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4765		[QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4766		/*
4767		 * the next 3 aren't really counters, but were implemented
4768		 * as counters in older chips, so still get accessed as
4769		 * though they were counters from this code.
4770		 */
4771		[QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4772		[QIBPORTCNTR_PSSTART] = krp_psstart,
4773		[QIBPORTCNTR_PSSTAT] = krp_psstat,
4774		/* pseudo-counter, summed for all ports */
4775		[QIBPORTCNTR_KHDROVFL] = 0xffff,
4776	};
4777
4778	if (reg >= ARRAY_SIZE(xlator)) {
4779		qib_devinfo(ppd->dd->pcidev,
4780			 "Unimplemented portcounter %u\n", reg);
4781		goto done;
4782	}
4783	creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4784
4785	/* handle non-counters and special cases first */
4786	if (reg == QIBPORTCNTR_KHDROVFL) {
4787		int i;
4788
4789		/* sum over all kernel contexts (skip if mini_init) */
4790		for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4791			struct qib_ctxtdata *rcd = dd->rcd[i];
4792
4793			if (!rcd || rcd->ppd != ppd)
4794				continue;
4795			ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4796		}
4797		goto done;
4798	} else if (reg == QIBPORTCNTR_RXDROPPKT) {
4799		/*
4800		 * Used as part of the synthesis of port_rcv_errors
4801		 * in the verbs code for IBTA counters.  Not needed for 7322,
4802		 * because all the errors are already counted by other cntrs.
4803		 */
4804		goto done;
4805	} else if (reg == QIBPORTCNTR_PSINTERVAL ||
4806		   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4807		/* were counters in older chips, now per-port kernel regs */
4808		ret = qib_read_kreg_port(ppd, creg);
4809		goto done;
4810	}
4811
4812	/*
4813	 * Only fast increment counters are 64 bits; use 32 bit reads to
4814	 * avoid two independent reads when on Opteron.
4815	 */
4816	if (xlator[reg] & _PORT_64BIT_FLAG)
4817		ret = read_7322_creg_port(ppd, creg);
4818	else
4819		ret = read_7322_creg32_port(ppd, creg);
4820	if (creg == crp_ibsymbolerr) {
4821		if (ppd->cpspec->ibdeltainprog)
4822			ret -= ret - ppd->cpspec->ibsymsnap;
4823		ret -= ppd->cpspec->ibsymdelta;
4824	} else if (creg == crp_iblinkerrrecov) {
4825		if (ppd->cpspec->ibdeltainprog)
4826			ret -= ret - ppd->cpspec->iblnkerrsnap;
4827		ret -= ppd->cpspec->iblnkerrdelta;
4828	} else if (creg == crp_errlink)
4829		ret -= ppd->cpspec->ibmalfdelta;
4830	else if (creg == crp_iblinkdown)
4831		ret += ppd->cpspec->iblnkdowndelta;
4832done:
4833	return ret;
4834}
4835
4836/*
4837 * Device counter names (not port-specific), one line per stat,
4838 * single string.  Used by utilities like ipathstats to print the stats
4839 * in a way which works for different versions of drivers, without changing
4840 * the utility.  Names need to be 12 chars or less (w/o newline), for proper
4841 * display by utility.
4842 * Non-error counters are first.
4843 * Start of "error" conters is indicated by a leading "E " on the first
4844 * "error" counter, and doesn't count in label length.
4845 * The EgrOvfl list needs to be last so we truncate them at the configured
4846 * context count for the device.
4847 * cntr7322indices contains the corresponding register indices.
4848 */
4849static const char cntr7322names[] =
4850	"Interrupts\n"
4851	"HostBusStall\n"
4852	"E RxTIDFull\n"
4853	"RxTIDInvalid\n"
4854	"RxTIDFloDrop\n" /* 7322 only */
4855	"Ctxt0EgrOvfl\n"
4856	"Ctxt1EgrOvfl\n"
4857	"Ctxt2EgrOvfl\n"
4858	"Ctxt3EgrOvfl\n"
4859	"Ctxt4EgrOvfl\n"
4860	"Ctxt5EgrOvfl\n"
4861	"Ctxt6EgrOvfl\n"
4862	"Ctxt7EgrOvfl\n"
4863	"Ctxt8EgrOvfl\n"
4864	"Ctxt9EgrOvfl\n"
4865	"Ctx10EgrOvfl\n"
4866	"Ctx11EgrOvfl\n"
4867	"Ctx12EgrOvfl\n"
4868	"Ctx13EgrOvfl\n"
4869	"Ctx14EgrOvfl\n"
4870	"Ctx15EgrOvfl\n"
4871	"Ctx16EgrOvfl\n"
4872	"Ctx17EgrOvfl\n"
4873	;
4874
4875static const u32 cntr7322indices[] = {
4876	cr_lbint | _PORT_64BIT_FLAG,
4877	cr_lbstall | _PORT_64BIT_FLAG,
4878	cr_tidfull,
4879	cr_tidinvalid,
4880	cr_rxtidflowdrop,
4881	cr_base_egrovfl + 0,
4882	cr_base_egrovfl + 1,
4883	cr_base_egrovfl + 2,
4884	cr_base_egrovfl + 3,
4885	cr_base_egrovfl + 4,
4886	cr_base_egrovfl + 5,
4887	cr_base_egrovfl + 6,
4888	cr_base_egrovfl + 7,
4889	cr_base_egrovfl + 8,
4890	cr_base_egrovfl + 9,
4891	cr_base_egrovfl + 10,
4892	cr_base_egrovfl + 11,
4893	cr_base_egrovfl + 12,
4894	cr_base_egrovfl + 13,
4895	cr_base_egrovfl + 14,
4896	cr_base_egrovfl + 15,
4897	cr_base_egrovfl + 16,
4898	cr_base_egrovfl + 17,
4899};
4900
4901/*
4902 * same as cntr7322names and cntr7322indices, but for port-specific counters.
4903 * portcntr7322indices is somewhat complicated by some registers needing
4904 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4905 */
4906static const char portcntr7322names[] =
4907	"TxPkt\n"
4908	"TxFlowPkt\n"
4909	"TxWords\n"
4910	"RxPkt\n"
4911	"RxFlowPkt\n"
4912	"RxWords\n"
4913	"TxFlowStall\n"
4914	"TxDmaDesc\n"  /* 7220 and 7322-only */
4915	"E RxDlidFltr\n"  /* 7220 and 7322-only */
4916	"IBStatusChng\n"
4917	"IBLinkDown\n"
4918	"IBLnkRecov\n"
4919	"IBRxLinkErr\n"
4920	"IBSymbolErr\n"
4921	"RxLLIErr\n"
4922	"RxBadFormat\n"
4923	"RxBadLen\n"
4924	"RxBufOvrfl\n"
4925	"RxEBP\n"
4926	"RxFlowCtlErr\n"
4927	"RxICRCerr\n"
4928	"RxLPCRCerr\n"
4929	"RxVCRCerr\n"
4930	"RxInvalLen\n"
4931	"RxInvalPKey\n"
4932	"RxPktDropped\n"
4933	"TxBadLength\n"
4934	"TxDropped\n"
4935	"TxInvalLen\n"
4936	"TxUnderrun\n"
4937	"TxUnsupVL\n"
4938	"RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4939	"RxVL15Drop\n"
4940	"RxVlErr\n"
4941	"XcessBufOvfl\n"
4942	"RxQPBadCtxt\n" /* 7322-only from here down */
4943	"TXBadHeader\n"
4944	;
4945
4946static const u32 portcntr7322indices[] = {
4947	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4948	crp_pktsendflow,
4949	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4950	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4951	crp_pktrcvflowctrl,
4952	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4953	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4954	crp_txsdmadesc | _PORT_64BIT_FLAG,
4955	crp_rxdlidfltr,
4956	crp_ibstatuschange,
4957	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4958	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4959	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4960	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4961	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4962	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4963	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4964	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4965	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4966	crp_rcvflowctrlviol,
4967	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
4968	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
4969	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
4970	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
4971	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
4972	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
4973	crp_txminmaxlenerr,
4974	crp_txdroppedpkt,
4975	crp_txlenerr,
4976	crp_txunderrun,
4977	crp_txunsupvl,
4978	QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
4979	QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
4980	QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
4981	QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
4982	crp_rxqpinvalidctxt,
4983	crp_txhdrerr,
4984};
4985
4986/* do all the setup to make the counter reads efficient later */
4987static void init_7322_cntrnames(struct qib_devdata *dd)
4988{
4989	int i, j = 0;
4990	char *s;
4991
4992	for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
4993	     i++) {
4994		/* we always have at least one counter before the egrovfl */
4995		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
4996			j = 1;
4997		s = strchr(s + 1, '\n');
4998		if (s && j)
4999			j++;
5000	}
5001	dd->cspec->ncntrs = i;
5002	if (!s)
5003		/* full list; size is without terminating null */
5004		dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
5005	else
5006		dd->cspec->cntrnamelen = 1 + s - cntr7322names;
5007	dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
5008					 GFP_KERNEL);
5009
5010	for (i = 0, s = (char *)portcntr7322names; s; i++)
5011		s = strchr(s + 1, '\n');
5012	dd->cspec->nportcntrs = i - 1;
5013	dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
5014	for (i = 0; i < dd->num_pports; ++i) {
5015		dd->pport[i].cpspec->portcntrs =
5016			kmalloc_array(dd->cspec->nportcntrs, sizeof(u64),
5017				      GFP_KERNEL);
5018	}
5019}
5020
5021static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
5022			      u64 **cntrp)
5023{
5024	u32 ret;
5025
5026	if (namep) {
5027		ret = dd->cspec->cntrnamelen;
5028		if (pos >= ret)
5029			ret = 0; /* final read after getting everything */
5030		else
5031			*namep = (char *) cntr7322names;
5032	} else {
5033		u64 *cntr = dd->cspec->cntrs;
5034		int i;
5035
5036		ret = dd->cspec->ncntrs * sizeof(u64);
5037		if (!cntr || pos >= ret) {
5038			/* everything read, or couldn't get memory */
5039			ret = 0;
5040			goto done;
5041		}
5042		*cntrp = cntr;
5043		for (i = 0; i < dd->cspec->ncntrs; i++)
5044			if (cntr7322indices[i] & _PORT_64BIT_FLAG)
5045				*cntr++ = read_7322_creg(dd,
5046							 cntr7322indices[i] &
5047							 _PORT_CNTR_IDXMASK);
5048			else
5049				*cntr++ = read_7322_creg32(dd,
5050							   cntr7322indices[i]);
5051	}
5052done:
5053	return ret;
5054}
5055
5056static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
5057				  char **namep, u64 **cntrp)
5058{
5059	u32 ret;
5060
5061	if (namep) {
5062		ret = dd->cspec->portcntrnamelen;
5063		if (pos >= ret)
5064			ret = 0; /* final read after getting everything */
5065		else
5066			*namep = (char *)portcntr7322names;
5067	} else {
5068		struct qib_pportdata *ppd = &dd->pport[port];
5069		u64 *cntr = ppd->cpspec->portcntrs;
5070		int i;
5071
5072		ret = dd->cspec->nportcntrs * sizeof(u64);
5073		if (!cntr || pos >= ret) {
5074			/* everything read, or couldn't get memory */
5075			ret = 0;
5076			goto done;
5077		}
5078		*cntrp = cntr;
5079		for (i = 0; i < dd->cspec->nportcntrs; i++) {
5080			if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
5081				*cntr++ = qib_portcntr_7322(ppd,
5082					portcntr7322indices[i] &
5083					_PORT_CNTR_IDXMASK);
5084			else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
5085				*cntr++ = read_7322_creg_port(ppd,
5086					   portcntr7322indices[i] &
5087					    _PORT_CNTR_IDXMASK);
5088			else
5089				*cntr++ = read_7322_creg32_port(ppd,
5090					   portcntr7322indices[i]);
5091		}
5092	}
5093done:
5094	return ret;
5095}
5096
5097/**
5098 * qib_get_7322_faststats - get word counters from chip before they overflow
5099 * @opaque - contains a pointer to the qlogic_ib device qib_devdata
5100 *
5101 * VESTIGIAL IBA7322 has no "small fast counters", so the only
5102 * real purpose of this function is to maintain the notion of
5103 * "active time", which in turn is only logged into the eeprom,
5104 * which we don;t have, yet, for 7322-based boards.
5105 *
5106 * called from add_timer
5107 */
5108static void qib_get_7322_faststats(struct timer_list *t)
5109{
5110	struct qib_devdata *dd = from_timer(dd, t, stats_timer);
5111	struct qib_pportdata *ppd;
5112	unsigned long flags;
5113	u64 traffic_wds;
5114	int pidx;
5115
5116	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5117		ppd = dd->pport + pidx;
5118
5119		/*
5120		 * If port isn't enabled or not operational ports, or
5121		 * diags is running (can cause memory diags to fail)
5122		 * skip this port this time.
5123		 */
5124		if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
5125		    || dd->diag_client)
5126			continue;
5127
5128		/*
5129		 * Maintain an activity timer, based on traffic
5130		 * exceeding a threshold, so we need to check the word-counts
5131		 * even if they are 64-bit.
5132		 */
5133		traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
5134			qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
5135		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5136		traffic_wds -= ppd->dd->traffic_wds;
5137		ppd->dd->traffic_wds += traffic_wds;
5138		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
5139		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5140						QIB_IB_QDR) &&
5141		    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
5142				    QIBL_LINKACTIVE)) &&
5143		    ppd->cpspec->qdr_dfe_time &&
5144		    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
5145			ppd->cpspec->qdr_dfe_on = 0;
5146
5147			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
5148					    ppd->dd->cspec->r1 ?
5149					    QDR_STATIC_ADAPT_INIT_R1 :
5150					    QDR_STATIC_ADAPT_INIT);
5151			force_h1(ppd);
5152		}
5153	}
5154	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
5155}
5156
5157/*
5158 * If we were using MSIx, try to fallback to INTx.
5159 */
5160static int qib_7322_intr_fallback(struct qib_devdata *dd)
5161{
5162	if (!dd->cspec->num_msix_entries)
5163		return 0; /* already using INTx */
5164
5165	qib_devinfo(dd->pcidev,
5166		"MSIx interrupt not detected, trying INTx interrupts\n");
5167	qib_7322_free_irq(dd);
5168	if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
5169		qib_dev_err(dd, "Failed to enable INTx\n");
5170	qib_setup_7322_interrupt(dd, 0);
5171	return 1;
5172}
5173
5174/*
5175 * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
5176 * than resetting the IBC or external link state, and useful in some
5177 * cases to cause some retraining.  To do this right, we reset IBC
5178 * as well, then return to previous state (which may be still in reset)
5179 * NOTE: some callers of this "know" this writes the current value
5180 * of cpspec->ibcctrl_a as part of it's operation, so if that changes,
5181 * check all callers.
5182 */
5183static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
5184{
5185	u64 val;
5186	struct qib_devdata *dd = ppd->dd;
5187	const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
5188		SYM_MASK(IBPCSConfig_0, xcv_treset) |
5189		SYM_MASK(IBPCSConfig_0, tx_rx_reset);
5190
5191	val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
5192	qib_write_kreg(dd, kr_hwerrmask,
5193		       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
5194	qib_write_kreg_port(ppd, krp_ibcctrl_a,
5195			    ppd->cpspec->ibcctrl_a &
5196			    ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
5197
5198	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
5199	qib_read_kreg32(dd, kr_scratch);
5200	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
5201	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
5202	qib_write_kreg(dd, kr_scratch, 0ULL);
5203	qib_write_kreg(dd, kr_hwerrclear,
5204		       SYM_MASK(HwErrClear, statusValidNoEopClear));
5205	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
5206}
5207
5208/*
5209 * This code for non-IBTA-compliant IB speed negotiation is only known to
5210 * work for the SDR to DDR transition, and only between an HCA and a switch
5211 * with recent firmware.  It is based on observed heuristics, rather than
5212 * actual knowledge of the non-compliant speed negotiation.
5213 * It has a number of hard-coded fields, since the hope is to rewrite this
5214 * when a spec is available on how the negoation is intended to work.
5215 */
5216static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5217				 u32 dcnt, u32 *data)
5218{
5219	int i;
5220	u64 pbc;
5221	u32 __iomem *piobuf;
5222	u32 pnum, control, len;
5223	struct qib_devdata *dd = ppd->dd;
5224
5225	i = 0;
5226	len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
5227	control = qib_7322_setpbc_control(ppd, len, 0, 15);
5228	pbc = ((u64) control << 32) | len;
5229	while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5230		if (i++ > 15)
5231			return;
5232		udelay(2);
5233	}
5234	/* disable header check on this packet, since it can't be valid */
5235	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
5236	writeq(pbc, piobuf);
5237	qib_flush_wc();
5238	qib_pio_copy(piobuf + 2, hdr, 7);
5239	qib_pio_copy(piobuf + 9, data, dcnt);
5240	if (dd->flags & QIB_USE_SPCL_TRIG) {
5241		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
5242
5243		qib_flush_wc();
5244		__raw_writel(0xaebecede, piobuf + spcl_off);
5245	}
5246	qib_flush_wc();
5247	qib_sendbuf_done(dd, pnum);
5248	/* and re-enable hdr check */
5249	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
5250}
5251
5252/*
5253 * _start packet gets sent twice at start, _done gets sent twice at end
5254 */
5255static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5256{
5257	struct qib_devdata *dd = ppd->dd;
5258	static u32 swapped;
5259	u32 dw, i, hcnt, dcnt, *data;
5260	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
5261	static u32 madpayload_start[0x40] = {
5262		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5263		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5264		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
5265		};
5266	static u32 madpayload_done[0x40] = {
5267		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5268		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5269		0x40000001, 0x1388, 0x15e, /* rest 0's */
5270		};
5271
5272	dcnt = ARRAY_SIZE(madpayload_start);
5273	hcnt = ARRAY_SIZE(hdr);
5274	if (!swapped) {
5275		/* for maintainability, do it at runtime */
5276		for (i = 0; i < hcnt; i++) {
5277			dw = (__force u32) cpu_to_be32(hdr[i]);
5278			hdr[i] = dw;
5279		}
5280		for (i = 0; i < dcnt; i++) {
5281			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
5282			madpayload_start[i] = dw;
5283			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
5284			madpayload_done[i] = dw;
5285		}
5286		swapped = 1;
5287	}
5288
5289	data = which ? madpayload_done : madpayload_start;
5290
5291	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5292	qib_read_kreg64(dd, kr_scratch);
5293	udelay(2);
5294	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5295	qib_read_kreg64(dd, kr_scratch);
5296	udelay(2);
5297}
5298
5299/*
5300 * Do the absolute minimum to cause an IB speed change, and make it
5301 * ready, but don't actually trigger the change.   The caller will
5302 * do that when ready (if link is in Polling training state, it will
5303 * happen immediately, otherwise when link next goes down)
5304 *
5305 * This routine should only be used as part of the DDR autonegotation
5306 * code for devices that are not compliant with IB 1.2 (or code that
5307 * fixes things up for same).
5308 *
5309 * When link has gone down, and autoneg enabled, or autoneg has
5310 * failed and we give up until next time we set both speeds, and
5311 * then we want IBTA enabled as well as "use max enabled speed.
5312 */
5313static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5314{
5315	u64 newctrlb;
5316
5317	newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5318				    IBA7322_IBC_IBTA_1_2_MASK |
5319				    IBA7322_IBC_MAX_SPEED_MASK);
5320
5321	if (speed & (speed - 1)) /* multiple speeds */
5322		newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5323				    IBA7322_IBC_IBTA_1_2_MASK |
5324				    IBA7322_IBC_MAX_SPEED_MASK;
5325	else
5326		newctrlb |= speed == QIB_IB_QDR ?
5327			IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5328			((speed == QIB_IB_DDR ?
5329			  IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5330
5331	if (newctrlb == ppd->cpspec->ibcctrl_b)
5332		return;
5333
5334	ppd->cpspec->ibcctrl_b = newctrlb;
5335	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5336	qib_write_kreg(ppd->dd, kr_scratch, 0);
5337}
5338
5339/*
5340 * This routine is only used when we are not talking to another
5341 * IB 1.2-compliant device that we think can do DDR.
5342 * (This includes all existing switch chips as of Oct 2007.)
5343 * 1.2-compliant devices go directly to DDR prior to reaching INIT
5344 */
5345static void try_7322_autoneg(struct qib_pportdata *ppd)
5346{
5347	unsigned long flags;
5348
5349	spin_lock_irqsave(&ppd->lflags_lock, flags);
5350	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5351	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5352	qib_autoneg_7322_send(ppd, 0);
5353	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5354	qib_7322_mini_pcs_reset(ppd);
5355	/* 2 msec is minimum length of a poll cycle */
5356	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5357			   msecs_to_jiffies(2));
5358}
5359
5360/*
5361 * Handle the empirically determined mechanism for auto-negotiation
5362 * of DDR speed with switches.
5363 */
5364static void autoneg_7322_work(struct work_struct *work)
5365{
5366	struct qib_pportdata *ppd;
5367	u32 i;
5368	unsigned long flags;
5369
5370	ppd = container_of(work, struct qib_chippport_specific,
5371			    autoneg_work.work)->ppd;
5372
5373	/*
5374	 * Busy wait for this first part, it should be at most a
5375	 * few hundred usec, since we scheduled ourselves for 2msec.
5376	 */
5377	for (i = 0; i < 25; i++) {
5378		if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5379		     == IB_7322_LT_STATE_POLLQUIET) {
5380			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5381			break;
5382		}
5383		udelay(100);
5384	}
5385
5386	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5387		goto done; /* we got there early or told to stop */
5388
5389	/* we expect this to timeout */
5390	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5391			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5392			       msecs_to_jiffies(90)))
5393		goto done;
5394	qib_7322_mini_pcs_reset(ppd);
5395
5396	/* we expect this to timeout */
5397	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5398			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5399			       msecs_to_jiffies(1700)))
5400		goto done;
5401	qib_7322_mini_pcs_reset(ppd);
5402
5403	set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5404
5405	/*
5406	 * Wait up to 250 msec for link to train and get to INIT at DDR;
5407	 * this should terminate early.
5408	 */
5409	wait_event_timeout(ppd->cpspec->autoneg_wait,
5410		!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5411		msecs_to_jiffies(250));
5412done:
5413	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5414		spin_lock_irqsave(&ppd->lflags_lock, flags);
5415		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5416		if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5417			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5418			ppd->cpspec->autoneg_tries = 0;
5419		}
5420		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5421		set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5422	}
5423}
5424
5425/*
5426 * This routine is used to request IPG set in the QLogic switch.
5427 * Only called if r1.
5428 */
5429static void try_7322_ipg(struct qib_pportdata *ppd)
5430{
5431	struct qib_ibport *ibp = &ppd->ibport_data;
5432	struct ib_mad_send_buf *send_buf;
5433	struct ib_mad_agent *agent;
5434	struct ib_smp *smp;
5435	unsigned delay;
5436	int ret;
5437
5438	agent = ibp->rvp.send_agent;
5439	if (!agent)
5440		goto retry;
5441
5442	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5443				      IB_MGMT_MAD_DATA, GFP_ATOMIC,
5444				      IB_MGMT_BASE_VERSION);
5445	if (IS_ERR(send_buf))
5446		goto retry;
5447
5448	if (!ibp->smi_ah) {
5449		struct ib_ah *ah;
5450
5451		ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
5452		if (IS_ERR(ah))
5453			ret = PTR_ERR(ah);
5454		else {
5455			send_buf->ah = ah;
5456			ibp->smi_ah = ibah_to_rvtah(ah);
5457			ret = 0;
5458		}
5459	} else {
5460		send_buf->ah = &ibp->smi_ah->ibah;
5461		ret = 0;
5462	}
5463
5464	smp = send_buf->mad;
5465	smp->base_version = IB_MGMT_BASE_VERSION;
5466	smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5467	smp->class_version = 1;
5468	smp->method = IB_MGMT_METHOD_SEND;
5469	smp->hop_cnt = 1;
5470	smp->attr_id = QIB_VENDOR_IPG;
5471	smp->attr_mod = 0;
5472
5473	if (!ret)
5474		ret = ib_post_send_mad(send_buf, NULL);
5475	if (ret)
5476		ib_free_send_mad(send_buf);
5477retry:
5478	delay = 2 << ppd->cpspec->ipg_tries;
5479	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5480			   msecs_to_jiffies(delay));
5481}
5482
5483/*
5484 * Timeout handler for setting IPG.
5485 * Only called if r1.
5486 */
5487static void ipg_7322_work(struct work_struct *work)
5488{
5489	struct qib_pportdata *ppd;
5490
5491	ppd = container_of(work, struct qib_chippport_specific,
5492			   ipg_work.work)->ppd;
5493	if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5494	    && ++ppd->cpspec->ipg_tries <= 10)
5495		try_7322_ipg(ppd);
5496}
5497
5498static u32 qib_7322_iblink_state(u64 ibcs)
5499{
5500	u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5501
5502	switch (state) {
5503	case IB_7322_L_STATE_INIT:
5504		state = IB_PORT_INIT;
5505		break;
5506	case IB_7322_L_STATE_ARM:
5507		state = IB_PORT_ARMED;
5508		break;
5509	case IB_7322_L_STATE_ACTIVE:
5510	case IB_7322_L_STATE_ACT_DEFER:
5511		state = IB_PORT_ACTIVE;
5512		break;
5513	default:
5514		fallthrough;
5515	case IB_7322_L_STATE_DOWN:
5516		state = IB_PORT_DOWN;
5517		break;
5518	}
5519	return state;
5520}
5521
5522/* returns the IBTA port state, rather than the IBC link training state */
5523static u8 qib_7322_phys_portstate(u64 ibcs)
5524{
5525	u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5526	return qib_7322_physportstate[state];
5527}
5528
5529static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5530{
5531	int ret = 0, symadj = 0;
5532	unsigned long flags;
5533	int mult;
5534
5535	spin_lock_irqsave(&ppd->lflags_lock, flags);
5536	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5537	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5538
5539	/* Update our picture of width and speed from chip */
5540	if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5541		ppd->link_speed_active = QIB_IB_QDR;
5542		mult = 4;
5543	} else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5544		ppd->link_speed_active = QIB_IB_DDR;
5545		mult = 2;
5546	} else {
5547		ppd->link_speed_active = QIB_IB_SDR;
5548		mult = 1;
5549	}
5550	if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5551		ppd->link_width_active = IB_WIDTH_4X;
5552		mult *= 4;
5553	} else
5554		ppd->link_width_active = IB_WIDTH_1X;
5555	ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5556
5557	if (!ibup) {
5558		u64 clr;
5559
5560		/* Link went down. */
5561		/* do IPG MAD again after linkdown, even if last time failed */
5562		ppd->cpspec->ipg_tries = 0;
5563		clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5564			(SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5565			 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5566		if (clr)
5567			qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5568		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5569				     QIBL_IB_AUTONEG_INPROG)))
5570			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5571		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5572			struct qib_qsfp_data *qd =
5573				&ppd->cpspec->qsfp_data;
5574			/* unlock the Tx settings, speed may change */
5575			qib_write_kreg_port(ppd, krp_tx_deemph_override,
5576				SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5577				reset_tx_deemphasis_override));
5578			qib_cancel_sends(ppd);
5579			/* on link down, ensure sane pcs state */
5580			qib_7322_mini_pcs_reset(ppd);
5581			/* schedule the qsfp refresh which should turn the link
5582			   off */
5583			if (ppd->dd->flags & QIB_HAS_QSFP) {
5584				qd->t_insert = jiffies;
5585				queue_work(ib_wq, &qd->work);
5586			}
5587			spin_lock_irqsave(&ppd->sdma_lock, flags);
5588			if (__qib_sdma_running(ppd))
5589				__qib_sdma_process_event(ppd,
5590					qib_sdma_event_e70_go_idle);
5591			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5592		}
5593		clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5594		if (clr == ppd->cpspec->iblnkdownsnap)
5595			ppd->cpspec->iblnkdowndelta++;
5596	} else {
5597		if (qib_compat_ddr_negotiate &&
5598		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5599				     QIBL_IB_AUTONEG_INPROG)) &&
5600		    ppd->link_speed_active == QIB_IB_SDR &&
5601		    (ppd->link_speed_enabled & QIB_IB_DDR)
5602		    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5603			/* we are SDR, and auto-negotiation enabled */
5604			++ppd->cpspec->autoneg_tries;
5605			if (!ppd->cpspec->ibdeltainprog) {
5606				ppd->cpspec->ibdeltainprog = 1;
5607				ppd->cpspec->ibsymdelta +=
5608					read_7322_creg32_port(ppd,
5609						crp_ibsymbolerr) -
5610						ppd->cpspec->ibsymsnap;
5611				ppd->cpspec->iblnkerrdelta +=
5612					read_7322_creg32_port(ppd,
5613						crp_iblinkerrrecov) -
5614						ppd->cpspec->iblnkerrsnap;
5615			}
5616			try_7322_autoneg(ppd);
5617			ret = 1; /* no other IB status change processing */
5618		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5619			   ppd->link_speed_active == QIB_IB_SDR) {
5620			qib_autoneg_7322_send(ppd, 1);
5621			set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5622			qib_7322_mini_pcs_reset(ppd);
5623			udelay(2);
5624			ret = 1; /* no other IB status change processing */
5625		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5626			   (ppd->link_speed_active & QIB_IB_DDR)) {
5627			spin_lock_irqsave(&ppd->lflags_lock, flags);
5628			ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5629					 QIBL_IB_AUTONEG_FAILED);
5630			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5631			ppd->cpspec->autoneg_tries = 0;
5632			/* re-enable SDR, for next link down */
5633			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5634			wake_up(&ppd->cpspec->autoneg_wait);
5635			symadj = 1;
5636		} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5637			/*
5638			 * Clear autoneg failure flag, and do setup
5639			 * so we'll try next time link goes down and
5640			 * back to INIT (possibly connected to a
5641			 * different device).
5642			 */
5643			spin_lock_irqsave(&ppd->lflags_lock, flags);
5644			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5645			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5646			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5647			symadj = 1;
5648		}
5649		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5650			symadj = 1;
5651			if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5652				try_7322_ipg(ppd);
5653			if (!ppd->cpspec->recovery_init)
5654				setup_7322_link_recovery(ppd, 0);
5655			ppd->cpspec->qdr_dfe_time = jiffies +
5656				msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5657		}
5658		ppd->cpspec->ibmalfusesnap = 0;
5659		ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5660			crp_errlink);
5661	}
5662	if (symadj) {
5663		ppd->cpspec->iblnkdownsnap =
5664			read_7322_creg32_port(ppd, crp_iblinkdown);
5665		if (ppd->cpspec->ibdeltainprog) {
5666			ppd->cpspec->ibdeltainprog = 0;
5667			ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5668				crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5669			ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5670				crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5671		}
5672	} else if (!ibup && qib_compat_ddr_negotiate &&
5673		   !ppd->cpspec->ibdeltainprog &&
5674			!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5675		ppd->cpspec->ibdeltainprog = 1;
5676		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5677			crp_ibsymbolerr);
5678		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5679			crp_iblinkerrrecov);
5680	}
5681
5682	if (!ret)
5683		qib_setup_7322_setextled(ppd, ibup);
5684	return ret;
5685}
5686
5687/*
5688 * Does read/modify/write to appropriate registers to
5689 * set output and direction bits selected by mask.
5690 * these are in their canonical postions (e.g. lsb of
5691 * dir will end up in D48 of extctrl on existing chips).
5692 * returns contents of GP Inputs.
5693 */
5694static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5695{
5696	u64 read_val, new_out;
5697	unsigned long flags;
5698
5699	if (mask) {
5700		/* some bits being written, lock access to GPIO */
5701		dir &= mask;
5702		out &= mask;
5703		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5704		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5705		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5706		new_out = (dd->cspec->gpio_out & ~mask) | out;
5707
5708		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5709		qib_write_kreg(dd, kr_gpio_out, new_out);
5710		dd->cspec->gpio_out = new_out;
5711		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5712	}
5713	/*
5714	 * It is unlikely that a read at this time would get valid
5715	 * data on a pin whose direction line was set in the same
5716	 * call to this function. We include the read here because
5717	 * that allows us to potentially combine a change on one pin with
5718	 * a read on another, and because the old code did something like
5719	 * this.
5720	 */
5721	read_val = qib_read_kreg64(dd, kr_extstatus);
5722	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5723}
5724
5725/* Enable writes to config EEPROM, if possible. Returns previous state */
5726static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5727{
5728	int prev_wen;
5729	u32 mask;
5730
5731	mask = 1 << QIB_EEPROM_WEN_NUM;
5732	prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5733	gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5734
5735	return prev_wen & 1;
5736}
5737
5738/*
5739 * Read fundamental info we need to use the chip.  These are
5740 * the registers that describe chip capabilities, and are
5741 * saved in shadow registers.
5742 */
5743static void get_7322_chip_params(struct qib_devdata *dd)
5744{
5745	u64 val;
5746	u32 piobufs;
5747	int mtu;
5748
5749	dd->palign = qib_read_kreg32(dd, kr_pagealign);
5750
5751	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5752
5753	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5754	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5755	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5756	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5757	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5758
5759	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5760	dd->piobcnt2k = val & ~0U;
5761	dd->piobcnt4k = val >> 32;
5762	val = qib_read_kreg64(dd, kr_sendpiosize);
5763	dd->piosize2k = val & ~0U;
5764	dd->piosize4k = val >> 32;
5765
5766	mtu = ib_mtu_enum_to_int(qib_ibmtu);
5767	if (mtu == -1)
5768		mtu = QIB_DEFAULT_MTU;
5769	dd->pport[0].ibmtu = (u32)mtu;
5770	dd->pport[1].ibmtu = (u32)mtu;
5771
5772	/* these may be adjusted in init_chip_wc_pat() */
5773	dd->pio2kbase = (u32 __iomem *)
5774		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5775	dd->pio4kbase = (u32 __iomem *)
5776		((char __iomem *) dd->kregbase +
5777		 (dd->piobufbase >> 32));
5778	/*
5779	 * 4K buffers take 2 pages; we use roundup just to be
5780	 * paranoid; we calculate it once here, rather than on
5781	 * ever buf allocate
5782	 */
5783	dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5784
5785	piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5786
5787	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5788		(sizeof(u64) * BITS_PER_BYTE / 2);
5789}
5790
5791/*
5792 * The chip base addresses in cspec and cpspec have to be set
5793 * after possible init_chip_wc_pat(), rather than in
5794 * get_7322_chip_params(), so split out as separate function
5795 */
5796static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5797{
5798	u32 cregbase;
5799
5800	cregbase = qib_read_kreg32(dd, kr_counterregbase);
5801
5802	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5803		(char __iomem *)dd->kregbase);
5804
5805	dd->egrtidbase = (u64 __iomem *)
5806		((char __iomem *) dd->kregbase + dd->rcvegrbase);
5807
5808	/* port registers are defined as relative to base of chip */
5809	dd->pport[0].cpspec->kpregbase =
5810		(u64 __iomem *)((char __iomem *)dd->kregbase);
5811	dd->pport[1].cpspec->kpregbase =
5812		(u64 __iomem *)(dd->palign +
5813		(char __iomem *)dd->kregbase);
5814	dd->pport[0].cpspec->cpregbase =
5815		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5816		kr_counterregbase) + (char __iomem *)dd->kregbase);
5817	dd->pport[1].cpspec->cpregbase =
5818		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5819		kr_counterregbase) + (char __iomem *)dd->kregbase);
5820}
5821
5822/*
5823 * This is a fairly special-purpose observer, so we only support
5824 * the port-specific parts of SendCtrl
5825 */
5826
5827#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |		\
5828			   SYM_MASK(SendCtrl_0, SDmaEnable) |		\
5829			   SYM_MASK(SendCtrl_0, SDmaIntEnable) |	\
5830			   SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5831			   SYM_MASK(SendCtrl_0, SDmaHalt) |		\
5832			   SYM_MASK(SendCtrl_0, IBVLArbiterEn) |	\
5833			   SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5834
5835static int sendctrl_hook(struct qib_devdata *dd,
5836			 const struct diag_observer *op, u32 offs,
5837			 u64 *data, u64 mask, int only_32)
5838{
5839	unsigned long flags;
5840	unsigned idx;
5841	unsigned pidx;
5842	struct qib_pportdata *ppd = NULL;
5843	u64 local_data, all_bits;
5844
5845	/*
5846	 * The fixed correspondence between Physical ports and pports is
5847	 * severed. We need to hunt for the ppd that corresponds
5848	 * to the offset we got. And we have to do that without admitting
5849	 * we know the stride, apparently.
5850	 */
5851	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5852		u64 __iomem *psptr;
5853		u32 psoffs;
5854
5855		ppd = dd->pport + pidx;
5856		if (!ppd->cpspec->kpregbase)
5857			continue;
5858
5859		psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5860		psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5861		if (psoffs == offs)
5862			break;
5863	}
5864
5865	/* If pport is not being managed by driver, just avoid shadows. */
5866	if (pidx >= dd->num_pports)
5867		ppd = NULL;
5868
5869	/* In any case, "idx" is flat index in kreg space */
5870	idx = offs / sizeof(u64);
5871
5872	all_bits = ~0ULL;
5873	if (only_32)
5874		all_bits >>= 32;
5875
5876	spin_lock_irqsave(&dd->sendctrl_lock, flags);
5877	if (!ppd || (mask & all_bits) != all_bits) {
5878		/*
5879		 * At least some mask bits are zero, so we need
5880		 * to read. The judgement call is whether from
5881		 * reg or shadow. First-cut: read reg, and complain
5882		 * if any bits which should be shadowed are different
5883		 * from their shadowed value.
5884		 */
5885		if (only_32)
5886			local_data = (u64)qib_read_kreg32(dd, idx);
5887		else
5888			local_data = qib_read_kreg64(dd, idx);
5889		*data = (local_data & ~mask) | (*data & mask);
5890	}
5891	if (mask) {
5892		/*
5893		 * At least some mask bits are one, so we need
5894		 * to write, but only shadow some bits.
5895		 */
5896		u64 sval, tval; /* Shadowed, transient */
5897
5898		/*
5899		 * New shadow val is bits we don't want to touch,
5900		 * ORed with bits we do, that are intended for shadow.
5901		 */
5902		if (ppd) {
5903			sval = ppd->p_sendctrl & ~mask;
5904			sval |= *data & SENDCTRL_SHADOWED & mask;
5905			ppd->p_sendctrl = sval;
5906		} else
5907			sval = *data & SENDCTRL_SHADOWED & mask;
5908		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5909		qib_write_kreg(dd, idx, tval);
5910		qib_write_kreg(dd, kr_scratch, 0Ull);
5911	}
5912	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5913	return only_32 ? 4 : 8;
5914}
5915
5916static const struct diag_observer sendctrl_0_observer = {
5917	sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5918	KREG_IDX(SendCtrl_0) * sizeof(u64)
5919};
5920
5921static const struct diag_observer sendctrl_1_observer = {
5922	sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5923	KREG_IDX(SendCtrl_1) * sizeof(u64)
5924};
5925
5926static ushort sdma_fetch_prio = 8;
5927module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5928MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5929
5930/* Besides logging QSFP events, we set appropriate TxDDS values */
5931static void init_txdds_table(struct qib_pportdata *ppd, int override);
5932
5933static void qsfp_7322_event(struct work_struct *work)
5934{
5935	struct qib_qsfp_data *qd;
5936	struct qib_pportdata *ppd;
5937	unsigned long pwrup;
5938	unsigned long flags;
5939	int ret;
5940	u32 le2;
5941
5942	qd = container_of(work, struct qib_qsfp_data, work);
5943	ppd = qd->ppd;
5944	pwrup = qd->t_insert +
5945		msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
5946
5947	/* Delay for 20 msecs to allow ModPrs resistor to setup */
5948	mdelay(QSFP_MODPRS_LAG_MSEC);
5949
5950	if (!qib_qsfp_mod_present(ppd)) {
5951		ppd->cpspec->qsfp_data.modpresent = 0;
5952		/* Set the physical link to disabled */
5953		qib_set_ib_7322_lstate(ppd, 0,
5954				       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
5955		spin_lock_irqsave(&ppd->lflags_lock, flags);
5956		ppd->lflags &= ~QIBL_LINKV;
5957		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5958	} else {
5959		/*
5960		 * Some QSFP's not only do not respond until the full power-up
5961		 * time, but may behave badly if we try. So hold off responding
5962		 * to insertion.
5963		 */
5964		while (1) {
5965			if (time_is_before_jiffies(pwrup))
5966				break;
5967			msleep(20);
5968		}
5969
5970		ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5971
5972		/*
5973		 * Need to change LE2 back to defaults if we couldn't
5974		 * read the cable type (to handle cable swaps), so do this
5975		 * even on failure to read cable information.  We don't
5976		 * get here for QME, so IS_QME check not needed here.
5977		 */
5978		if (!ret && !ppd->dd->cspec->r1) {
5979			if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
5980				le2 = LE2_QME;
5981			else if (qd->cache.atten[1] >= qib_long_atten &&
5982				 QSFP_IS_CU(qd->cache.tech))
5983				le2 = LE2_5m;
5984			else
5985				le2 = LE2_DEFAULT;
5986		} else
5987			le2 = LE2_DEFAULT;
5988		ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
5989		/*
5990		 * We always change parameteters, since we can choose
5991		 * values for cables without eeproms, and the cable may have
5992		 * changed from a cable with full or partial eeprom content
5993		 * to one with partial or no content.
5994		 */
5995		init_txdds_table(ppd, 0);
5996		/* The physical link is being re-enabled only when the
5997		 * previous state was DISABLED and the VALID bit is not
5998		 * set. This should only happen when  the cable has been
5999		 * physically pulled. */
6000		if (!ppd->cpspec->qsfp_data.modpresent &&
6001		    (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
6002			ppd->cpspec->qsfp_data.modpresent = 1;
6003			qib_set_ib_7322_lstate(ppd, 0,
6004				QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6005			spin_lock_irqsave(&ppd->lflags_lock, flags);
6006			ppd->lflags |= QIBL_LINKV;
6007			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
6008		}
6009	}
6010}
6011
6012/*
6013 * There is little we can do but complain to the user if QSFP
6014 * initialization fails.
6015 */
6016static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
6017{
6018	unsigned long flags;
6019	struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
6020	struct qib_devdata *dd = ppd->dd;
6021	u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
6022
6023	mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
6024	qd->ppd = ppd;
6025	qib_qsfp_init(qd, qsfp_7322_event);
6026	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
6027	dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
6028	dd->cspec->gpio_mask |= mod_prs_bit;
6029	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
6030	qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
6031	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
6032}
6033
6034/*
6035 * called at device initialization time, and also if the txselect
6036 * module parameter is changed.  This is used for cables that don't
6037 * have valid QSFP EEPROMs (not present, or attenuation is zero).
6038 * We initialize to the default, then if there is a specific
6039 * unit,port match, we use that (and set it immediately, for the
6040 * current speed, if the link is at INIT or better).
6041 * String format is "default# unit#,port#=# ... u,p=#", separators must
6042 * be a SPACE character.  A newline terminates.  The u,p=# tuples may
6043 * optionally have "u,p=#,#", where the final # is the H1 value
6044 * The last specific match is used (actually, all are used, but last
6045 * one is the one that winds up set); if none at all, fall back on default.
6046 */
6047static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
6048{
6049	char *nxt, *str;
6050	u32 pidx, unit, port, deflt, h1;
6051	unsigned long val;
6052	int any = 0, seth1;
6053	int txdds_size;
6054
6055	str = txselect_list;
6056
6057	/* default number is validated in setup_txselect() */
6058	deflt = simple_strtoul(str, &nxt, 0);
6059	for (pidx = 0; pidx < dd->num_pports; ++pidx)
6060		dd->pport[pidx].cpspec->no_eep = deflt;
6061
6062	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
6063	if (IS_QME(dd) || IS_QMH(dd))
6064		txdds_size += TXDDS_MFG_SZ;
6065
6066	while (*nxt && nxt[1]) {
6067		str = ++nxt;
6068		unit = simple_strtoul(str, &nxt, 0);
6069		if (nxt == str || !*nxt || *nxt != ',') {
6070			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6071				;
6072			continue;
6073		}
6074		str = ++nxt;
6075		port = simple_strtoul(str, &nxt, 0);
6076		if (nxt == str || *nxt != '=') {
6077			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6078				;
6079			continue;
6080		}
6081		str = ++nxt;
6082		val = simple_strtoul(str, &nxt, 0);
6083		if (nxt == str) {
6084			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6085				;
6086			continue;
6087		}
6088		if (val >= txdds_size)
6089			continue;
6090		seth1 = 0;
6091		h1 = 0; /* gcc thinks it might be used uninitted */
6092		if (*nxt == ',' && nxt[1]) {
6093			str = ++nxt;
6094			h1 = (u32)simple_strtoul(str, &nxt, 0);
6095			if (nxt == str)
6096				while (*nxt && *nxt++ != ' ') /* skip */
6097					;
6098			else
6099				seth1 = 1;
6100		}
6101		for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
6102		     ++pidx) {
6103			struct qib_pportdata *ppd = &dd->pport[pidx];
6104
6105			if (ppd->port != port || !ppd->link_speed_supported)
6106				continue;
6107			ppd->cpspec->no_eep = val;
6108			if (seth1)
6109				ppd->cpspec->h1_val = h1;
6110			/* now change the IBC and serdes, overriding generic */
6111			init_txdds_table(ppd, 1);
6112			/* Re-enable the physical state machine on mezz boards
6113			 * now that the correct settings have been set.
6114			 * QSFP boards are handles by the QSFP event handler */
6115			if (IS_QMH(dd) || IS_QME(dd))
6116				qib_set_ib_7322_lstate(ppd, 0,
6117					    QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6118			any++;
6119		}
6120		if (*nxt == '\n')
6121			break; /* done */
6122	}
6123	if (change && !any) {
6124		/* no specific setting, use the default.
6125		 * Change the IBC and serdes, but since it's
6126		 * general, don't override specific settings.
6127		 */
6128		for (pidx = 0; pidx < dd->num_pports; ++pidx)
6129			if (dd->pport[pidx].link_speed_supported)
6130				init_txdds_table(&dd->pport[pidx], 0);
6131	}
6132}
6133
6134/* handle the txselect parameter changing */
6135static int setup_txselect(const char *str, const struct kernel_param *kp)
6136{
6137	struct qib_devdata *dd;
6138	unsigned long index, val;
6139	char *n;
6140
6141	if (strlen(str) >= ARRAY_SIZE(txselect_list)) {
6142		pr_info("txselect_values string too long\n");
6143		return -ENOSPC;
6144	}
6145	val = simple_strtoul(str, &n, 0);
6146	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
6147				TXDDS_MFG_SZ)) {
6148		pr_info("txselect_values must start with a number < %d\n",
6149			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
6150		return -EINVAL;
6151	}
6152	strncpy(txselect_list, str, ARRAY_SIZE(txselect_list) - 1);
6153
6154	xa_for_each(&qib_dev_table, index, dd)
6155		if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
6156			set_no_qsfp_atten(dd, 1);
6157	return 0;
6158}
6159
6160/*
6161 * Write the final few registers that depend on some of the
6162 * init setup.  Done late in init, just before bringing up
6163 * the serdes.
6164 */
6165static int qib_late_7322_initreg(struct qib_devdata *dd)
6166{
6167	int ret = 0, n;
6168	u64 val;
6169
6170	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
6171	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
6172	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
6173	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
6174	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
6175	if (val != dd->pioavailregs_phys) {
6176		qib_dev_err(dd,
6177			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
6178			(unsigned long) dd->pioavailregs_phys,
6179			(unsigned long long) val);
6180		ret = -EINVAL;
6181	}
6182
6183	n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
6184	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
6185	/* driver sends get pkey, lid, etc. checking also, to catch bugs */
6186	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
6187
6188	qib_register_observer(dd, &sendctrl_0_observer);
6189	qib_register_observer(dd, &sendctrl_1_observer);
6190
6191	dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
6192	qib_write_kreg(dd, kr_control, dd->control);
6193	/*
6194	 * Set SendDmaFetchPriority and init Tx params, including
6195	 * QSFP handler on boards that have QSFP.
6196	 * First set our default attenuation entry for cables that
6197	 * don't have valid attenuation.
6198	 */
6199	set_no_qsfp_atten(dd, 0);
6200	for (n = 0; n < dd->num_pports; ++n) {
6201		struct qib_pportdata *ppd = dd->pport + n;
6202
6203		qib_write_kreg_port(ppd, krp_senddmaprioritythld,
6204				    sdma_fetch_prio & 0xf);
6205		/* Initialize qsfp if present on board. */
6206		if (dd->flags & QIB_HAS_QSFP)
6207			qib_init_7322_qsfp(ppd);
6208	}
6209	dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
6210	qib_write_kreg(dd, kr_control, dd->control);
6211
6212	return ret;
6213}
6214
6215/* per IB port errors.  */
6216#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
6217	MASK_ACROSS(8, 15))
6218#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
6219#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
6220	MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
6221	MASK_ACROSS(0, 11))
6222
6223/*
6224 * Write the initialization per-port registers that need to be done at
6225 * driver load and after reset completes (i.e., that aren't done as part
6226 * of other init procedures called from qib_init.c).
6227 * Some of these should be redundant on reset, but play safe.
6228 */
6229static void write_7322_init_portregs(struct qib_pportdata *ppd)
6230{
6231	u64 val;
6232	int i;
6233
6234	if (!ppd->link_speed_supported) {
6235		/* no buffer credits for this port */
6236		for (i = 1; i < 8; i++)
6237			qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
6238		qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
6239		qib_write_kreg(ppd->dd, kr_scratch, 0);
6240		return;
6241	}
6242
6243	/*
6244	 * Set the number of supported virtual lanes in IBC,
6245	 * for flow control packet handling on unsupported VLs
6246	 */
6247	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
6248	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
6249	val |= (u64)(ppd->vls_supported - 1) <<
6250		SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
6251	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
6252
6253	qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
6254
6255	/* enable tx header checking */
6256	qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
6257			    IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
6258			    IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
6259
6260	qib_write_kreg_port(ppd, krp_ncmodectrl,
6261		SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
6262
6263	/*
6264	 * Unconditionally clear the bufmask bits.  If SDMA is
6265	 * enabled, we'll set them appropriately later.
6266	 */
6267	qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
6268	qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
6269	qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
6270	if (ppd->dd->cspec->r1)
6271		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
6272}
6273
6274/*
6275 * Write the initialization per-device registers that need to be done at
6276 * driver load and after reset completes (i.e., that aren't done as part
6277 * of other init procedures called from qib_init.c).  Also write per-port
6278 * registers that are affected by overall device config, such as QP mapping
6279 * Some of these should be redundant on reset, but play safe.
6280 */
6281static void write_7322_initregs(struct qib_devdata *dd)
6282{
6283	struct qib_pportdata *ppd;
6284	int i, pidx;
6285	u64 val;
6286
6287	/* Set Multicast QPs received by port 2 to map to context one. */
6288	qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
6289
6290	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
6291		unsigned n, regno;
6292		unsigned long flags;
6293
6294		if (dd->n_krcv_queues < 2 ||
6295			!dd->pport[pidx].link_speed_supported)
6296			continue;
6297
6298		ppd = &dd->pport[pidx];
6299
6300		/* be paranoid against later code motion, etc. */
6301		spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
6302		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6303		spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
6304
6305		/* Initialize QP to context mapping */
6306		regno = krp_rcvqpmaptable;
6307		val = 0;
6308		if (dd->num_pports > 1)
6309			n = dd->first_user_ctxt / dd->num_pports;
6310		else
6311			n = dd->first_user_ctxt - 1;
6312		for (i = 0; i < 32; ) {
6313			unsigned ctxt;
6314
6315			if (dd->num_pports > 1)
6316				ctxt = (i % n) * dd->num_pports + pidx;
6317			else if (i % n)
6318				ctxt = (i % n) + 1;
6319			else
6320				ctxt = ppd->hw_pidx;
6321			val |= ctxt << (5 * (i % 6));
6322			i++;
6323			if (i % 6 == 0) {
6324				qib_write_kreg_port(ppd, regno, val);
6325				val = 0;
6326				regno++;
6327			}
6328		}
6329		qib_write_kreg_port(ppd, regno, val);
6330	}
6331
6332	/*
6333	 * Setup up interrupt mitigation for kernel contexts, but
6334	 * not user contexts (user contexts use interrupts when
6335	 * stalled waiting for any packet, so want those interrupts
6336	 * right away).
6337	 */
6338	for (i = 0; i < dd->first_user_ctxt; i++) {
6339		dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6340		qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6341	}
6342
6343	/*
6344	 * Initialize  as (disabled) rcvflow tables.  Application code
6345	 * will setup each flow as it uses the flow.
6346	 * Doesn't clear any of the error bits that might be set.
6347	 */
6348	val = TIDFLOW_ERRBITS; /* these are W1C */
6349	for (i = 0; i < dd->cfgctxts; i++) {
6350		int flow;
6351
6352		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6353			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6354	}
6355
6356	/*
6357	 * dual cards init to dual port recovery, single port cards to
6358	 * the one port.  Dual port cards may later adjust to 1 port,
6359	 * and then back to dual port if both ports are connected
6360	 * */
6361	if (dd->num_pports)
6362		setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6363}
6364
6365static int qib_init_7322_variables(struct qib_devdata *dd)
6366{
6367	struct qib_pportdata *ppd;
6368	unsigned features, pidx, sbufcnt;
6369	int ret, mtu;
6370	u32 sbufs, updthresh;
6371	resource_size_t vl15off;
6372
6373	/* pport structs are contiguous, allocated after devdata */
6374	ppd = (struct qib_pportdata *)(dd + 1);
6375	dd->pport = ppd;
6376	ppd[0].dd = dd;
6377	ppd[1].dd = dd;
6378
6379	dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6380
6381	ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6382	ppd[1].cpspec = &ppd[0].cpspec[1];
6383	ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6384	ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6385
6386	spin_lock_init(&dd->cspec->rcvmod_lock);
6387	spin_lock_init(&dd->cspec->gpio_lock);
6388
6389	/* we haven't yet set QIB_PRESENT, so use read directly */
6390	dd->revision = readq(&dd->kregbase[kr_revision]);
6391
6392	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6393		qib_dev_err(dd,
6394			"Revision register read failure, giving up initialization\n");
6395		ret = -ENODEV;
6396		goto bail;
6397	}
6398	dd->flags |= QIB_PRESENT;  /* now register routines work */
6399
6400	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6401	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6402	dd->cspec->r1 = dd->minrev == 1;
6403
6404	get_7322_chip_params(dd);
6405	features = qib_7322_boardname(dd);
6406
6407	/* now that piobcnt2k and 4k set, we can allocate these */
6408	sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6409		NUM_VL15_BUFS + BITS_PER_LONG - 1;
6410	sbufcnt /= BITS_PER_LONG;
6411	dd->cspec->sendchkenable =
6412		kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendchkenable),
6413			      GFP_KERNEL);
6414	dd->cspec->sendgrhchk =
6415		kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendgrhchk),
6416			      GFP_KERNEL);
6417	dd->cspec->sendibchk =
6418		kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendibchk),
6419			      GFP_KERNEL);
6420	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6421		!dd->cspec->sendibchk) {
6422		ret = -ENOMEM;
6423		goto bail;
6424	}
6425
6426	ppd = dd->pport;
6427
6428	/*
6429	 * GPIO bits for TWSI data and clock,
6430	 * used for serial EEPROM.
6431	 */
6432	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6433	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6434	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6435
6436	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6437		QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6438		QIB_HAS_THRESH_UPDATE |
6439		(sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6440	dd->flags |= qib_special_trigger ?
6441		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6442
6443	/*
6444	 * Setup initial values.  These may change when PAT is enabled, but
6445	 * we need these to do initial chip register accesses.
6446	 */
6447	qib_7322_set_baseaddrs(dd);
6448
6449	mtu = ib_mtu_enum_to_int(qib_ibmtu);
6450	if (mtu == -1)
6451		mtu = QIB_DEFAULT_MTU;
6452
6453	dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6454	/* all hwerrors become interrupts, unless special purposed */
6455	dd->cspec->hwerrmask = ~0ULL;
6456	/*  link_recovery setup causes these errors, so ignore them,
6457	 *  other than clearing them when they occur */
6458	dd->cspec->hwerrmask &=
6459		~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6460		  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6461		  HWE_MASK(LATriggered));
6462
6463	for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6464		struct qib_chippport_specific *cp = ppd->cpspec;
6465
6466		ppd->link_speed_supported = features & PORT_SPD_CAP;
6467		features >>=  PORT_SPD_CAP_SHIFT;
6468		if (!ppd->link_speed_supported) {
6469			/* single port mode (7340, or configured) */
6470			dd->skip_kctxt_mask |= 1 << pidx;
6471			if (pidx == 0) {
6472				/* Make sure port is disabled. */
6473				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6474				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6475				ppd[0] = ppd[1];
6476				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6477						  IBSerdesPClkNotDetectMask_0)
6478						  | SYM_MASK(HwErrMask,
6479						  SDmaMemReadErrMask_0));
6480				dd->cspec->int_enable_mask &= ~(
6481				     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6482				     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6483				     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6484				     SYM_MASK(IntMask, SDmaIntMask_0) |
6485				     SYM_MASK(IntMask, ErrIntMask_0) |
6486				     SYM_MASK(IntMask, SendDoneIntMask_0));
6487			} else {
6488				/* Make sure port is disabled. */
6489				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6490				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6491				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6492						  IBSerdesPClkNotDetectMask_1)
6493						  | SYM_MASK(HwErrMask,
6494						  SDmaMemReadErrMask_1));
6495				dd->cspec->int_enable_mask &= ~(
6496				     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6497				     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6498				     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6499				     SYM_MASK(IntMask, SDmaIntMask_1) |
6500				     SYM_MASK(IntMask, ErrIntMask_1) |
6501				     SYM_MASK(IntMask, SendDoneIntMask_1));
6502			}
6503			continue;
6504		}
6505
6506		dd->num_pports++;
6507		ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6508		if (ret) {
6509			dd->num_pports--;
6510			goto bail;
6511		}
6512
6513		ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6514		ppd->link_width_enabled = IB_WIDTH_4X;
6515		ppd->link_speed_enabled = ppd->link_speed_supported;
6516		/*
6517		 * Set the initial values to reasonable default, will be set
6518		 * for real when link is up.
6519		 */
6520		ppd->link_width_active = IB_WIDTH_4X;
6521		ppd->link_speed_active = QIB_IB_SDR;
6522		ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6523		switch (qib_num_cfg_vls) {
6524		case 1:
6525			ppd->vls_supported = IB_VL_VL0;
6526			break;
6527		case 2:
6528			ppd->vls_supported = IB_VL_VL0_1;
6529			break;
6530		default:
6531			qib_devinfo(dd->pcidev,
6532				    "Invalid num_vls %u, using 4 VLs\n",
6533				    qib_num_cfg_vls);
6534			qib_num_cfg_vls = 4;
6535			fallthrough;
6536		case 4:
6537			ppd->vls_supported = IB_VL_VL0_3;
6538			break;
6539		case 8:
6540			if (mtu <= 2048)
6541				ppd->vls_supported = IB_VL_VL0_7;
6542			else {
6543				qib_devinfo(dd->pcidev,
6544					    "Invalid num_vls %u for MTU %d , using 4 VLs\n",
6545					    qib_num_cfg_vls, mtu);
6546				ppd->vls_supported = IB_VL_VL0_3;
6547				qib_num_cfg_vls = 4;
6548			}
6549			break;
6550		}
6551		ppd->vls_operational = ppd->vls_supported;
6552
6553		init_waitqueue_head(&cp->autoneg_wait);
6554		INIT_DELAYED_WORK(&cp->autoneg_work,
6555				  autoneg_7322_work);
6556		if (ppd->dd->cspec->r1)
6557			INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6558
6559		/*
6560		 * For Mez and similar cards, no qsfp info, so do
6561		 * the "cable info" setup here.  Can be overridden
6562		 * in adapter-specific routines.
6563		 */
6564		if (!(dd->flags & QIB_HAS_QSFP)) {
6565			if (!IS_QMH(dd) && !IS_QME(dd))
6566				qib_devinfo(dd->pcidev,
6567					"IB%u:%u: Unknown mezzanine card type\n",
6568					dd->unit, ppd->port);
6569			cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6570			/*
6571			 * Choose center value as default tx serdes setting
6572			 * until changed through module parameter.
6573			 */
6574			ppd->cpspec->no_eep = IS_QMH(dd) ?
6575				TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6576		} else
6577			cp->h1_val = H1_FORCE_VAL;
6578
6579		/* Avoid writes to chip for mini_init */
6580		if (!qib_mini_init)
6581			write_7322_init_portregs(ppd);
6582
6583		timer_setup(&cp->chase_timer, reenable_chase, 0);
6584
6585		ppd++;
6586	}
6587
6588	dd->rcvhdrentsize = qib_rcvhdrentsize ?
6589		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6590	dd->rcvhdrsize = qib_rcvhdrsize ?
6591		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
6592	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6593
6594	/* we always allocate at least 2048 bytes for eager buffers */
6595	dd->rcvegrbufsize = max(mtu, 2048);
6596	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
6597
6598	qib_7322_tidtemplate(dd);
6599
6600	/*
6601	 * We can request a receive interrupt for 1 or
6602	 * more packets from current offset.
6603	 */
6604	dd->rhdrhead_intr_off =
6605		(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6606
6607	/* setup the stats timer; the add_timer is done at end of init */
6608	timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0);
6609
6610	dd->ureg_align = 0x10000;  /* 64KB alignment */
6611
6612	dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6613
6614	qib_7322_config_ctxts(dd);
6615	qib_set_ctxtcnt(dd);
6616
6617	/*
6618	 * We do not set WC on the VL15 buffers to avoid
6619	 * a rare problem with unaligned writes from
6620	 * interrupt-flushed store buffers, so we need
6621	 * to map those separately here.  We can't solve
6622	 * this for the rarely used mtrr case.
6623	 */
6624	ret = init_chip_wc_pat(dd, 0);
6625	if (ret)
6626		goto bail;
6627
6628	/* vl15 buffers start just after the 4k buffers */
6629	vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6630		  dd->piobcnt4k * dd->align4k;
6631	dd->piovl15base	= ioremap(vl15off,
6632					  NUM_VL15_BUFS * dd->align4k);
6633	if (!dd->piovl15base) {
6634		ret = -ENOMEM;
6635		goto bail;
6636	}
6637
6638	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6639
6640	ret = 0;
6641	if (qib_mini_init)
6642		goto bail;
6643	if (!dd->num_pports) {
6644		qib_dev_err(dd, "No ports enabled, giving up initialization\n");
6645		goto bail; /* no error, so can still figure out why err */
6646	}
6647
6648	write_7322_initregs(dd);
6649	ret = qib_create_ctxts(dd);
6650	init_7322_cntrnames(dd);
6651
6652	updthresh = 8U; /* update threshold */
6653
6654	/* use all of 4KB buffers for the kernel SDMA, zero if !SDMA.
6655	 * reserve the update threshold amount for other kernel use, such
6656	 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
6657	 * unless we aren't enabling SDMA, in which case we want to use
6658	 * all the 4k bufs for the kernel.
6659	 * if this was less than the update threshold, we could wait
6660	 * a long time for an update.  Coded this way because we
6661	 * sometimes change the update threshold for various reasons,
6662	 * and we want this to remain robust.
6663	 */
6664	if (dd->flags & QIB_HAS_SEND_DMA) {
6665		dd->cspec->sdmabufcnt = dd->piobcnt4k;
6666		sbufs = updthresh > 3 ? updthresh : 3;
6667	} else {
6668		dd->cspec->sdmabufcnt = 0;
6669		sbufs = dd->piobcnt4k;
6670	}
6671	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6672		dd->cspec->sdmabufcnt;
6673	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6674	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6675	dd->last_pio = dd->cspec->lastbuf_for_pio;
6676	dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6677		dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
6678
6679	/*
6680	 * If we have 16 user contexts, we will have 7 sbufs
6681	 * per context, so reduce the update threshold to match.  We
6682	 * want to update before we actually run out, at low pbufs/ctxt
6683	 * so give ourselves some margin.
6684	 */
6685	if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6686		updthresh = dd->pbufsctxt - 2;
6687	dd->cspec->updthresh_dflt = updthresh;
6688	dd->cspec->updthresh = updthresh;
6689
6690	/* before full enable, no interrupts, no locking needed */
6691	dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6692			     << SYM_LSB(SendCtrl, AvailUpdThld)) |
6693			SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6694
6695	dd->psxmitwait_supported = 1;
6696	dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6697bail:
6698	if (!dd->ctxtcnt)
6699		dd->ctxtcnt = 1; /* for other initialization code */
6700
6701	return ret;
6702}
6703
6704static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6705					u32 *pbufnum)
6706{
6707	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6708	struct qib_devdata *dd = ppd->dd;
6709
6710	/* last is same for 2k and 4k, because we use 4k if all 2k busy */
6711	if (pbc & PBC_7322_VL15_SEND) {
6712		first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6713		last = first;
6714	} else {
6715		if ((plen + 1) > dd->piosize2kmax_dwords)
6716			first = dd->piobcnt2k;
6717		else
6718			first = 0;
6719		last = dd->cspec->lastbuf_for_pio;
6720	}
6721	return qib_getsendbuf_range(dd, pbufnum, first, last);
6722}
6723
6724static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6725				     u32 start)
6726{
6727	qib_write_kreg_port(ppd, krp_psinterval, intv);
6728	qib_write_kreg_port(ppd, krp_psstart, start);
6729}
6730
6731/*
6732 * Must be called with sdma_lock held, or before init finished.
6733 */
6734static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6735{
6736	qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6737}
6738
6739/*
6740 * sdma_lock should be acquired before calling this routine
6741 */
6742static void dump_sdma_7322_state(struct qib_pportdata *ppd)
6743{
6744	u64 reg, reg1, reg2;
6745
6746	reg = qib_read_kreg_port(ppd, krp_senddmastatus);
6747	qib_dev_porterr(ppd->dd, ppd->port,
6748		"SDMA senddmastatus: 0x%016llx\n", reg);
6749
6750	reg = qib_read_kreg_port(ppd, krp_sendctrl);
6751	qib_dev_porterr(ppd->dd, ppd->port,
6752		"SDMA sendctrl: 0x%016llx\n", reg);
6753
6754	reg = qib_read_kreg_port(ppd, krp_senddmabase);
6755	qib_dev_porterr(ppd->dd, ppd->port,
6756		"SDMA senddmabase: 0x%016llx\n", reg);
6757
6758	reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
6759	reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
6760	reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
6761	qib_dev_porterr(ppd->dd, ppd->port,
6762		"SDMA senddmabufmask 0:%llx  1:%llx  2:%llx\n",
6763		 reg, reg1, reg2);
6764
6765	/* get bufuse bits, clear them, and print them again if non-zero */
6766	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6767	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
6768	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6769	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg1);
6770	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6771	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg2);
6772	/* 0 and 1 should always be zero, so print as short form */
6773	qib_dev_porterr(ppd->dd, ppd->port,
6774		 "SDMA current senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6775		 reg, reg1, reg2);
6776	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6777	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6778	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6779	/* 0 and 1 should always be zero, so print as short form */
6780	qib_dev_porterr(ppd->dd, ppd->port,
6781		 "SDMA cleared senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6782		 reg, reg1, reg2);
6783
6784	reg = qib_read_kreg_port(ppd, krp_senddmatail);
6785	qib_dev_porterr(ppd->dd, ppd->port,
6786		"SDMA senddmatail: 0x%016llx\n", reg);
6787
6788	reg = qib_read_kreg_port(ppd, krp_senddmahead);
6789	qib_dev_porterr(ppd->dd, ppd->port,
6790		"SDMA senddmahead: 0x%016llx\n", reg);
6791
6792	reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
6793	qib_dev_porterr(ppd->dd, ppd->port,
6794		"SDMA senddmaheadaddr: 0x%016llx\n", reg);
6795
6796	reg = qib_read_kreg_port(ppd, krp_senddmalengen);
6797	qib_dev_porterr(ppd->dd, ppd->port,
6798		"SDMA senddmalengen: 0x%016llx\n", reg);
6799
6800	reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
6801	qib_dev_porterr(ppd->dd, ppd->port,
6802		"SDMA senddmadesccnt: 0x%016llx\n", reg);
6803
6804	reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
6805	qib_dev_porterr(ppd->dd, ppd->port,
6806		"SDMA senddmaidlecnt: 0x%016llx\n", reg);
6807
6808	reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
6809	qib_dev_porterr(ppd->dd, ppd->port,
6810		"SDMA senddmapriorityhld: 0x%016llx\n", reg);
6811
6812	reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
6813	qib_dev_porterr(ppd->dd, ppd->port,
6814		"SDMA senddmareloadcnt: 0x%016llx\n", reg);
6815
6816	dump_sdma_state(ppd);
6817}
6818
6819static struct sdma_set_state_action sdma_7322_action_table[] = {
6820	[qib_sdma_state_s00_hw_down] = {
6821		.go_s99_running_tofalse = 1,
6822		.op_enable = 0,
6823		.op_intenable = 0,
6824		.op_halt = 0,
6825		.op_drain = 0,
6826	},
6827	[qib_sdma_state_s10_hw_start_up_wait] = {
6828		.op_enable = 0,
6829		.op_intenable = 1,
6830		.op_halt = 1,
6831		.op_drain = 0,
6832	},
6833	[qib_sdma_state_s20_idle] = {
6834		.op_enable = 1,
6835		.op_intenable = 1,
6836		.op_halt = 1,
6837		.op_drain = 0,
6838	},
6839	[qib_sdma_state_s30_sw_clean_up_wait] = {
6840		.op_enable = 0,
6841		.op_intenable = 1,
6842		.op_halt = 1,
6843		.op_drain = 0,
6844	},
6845	[qib_sdma_state_s40_hw_clean_up_wait] = {
6846		.op_enable = 1,
6847		.op_intenable = 1,
6848		.op_halt = 1,
6849		.op_drain = 0,
6850	},
6851	[qib_sdma_state_s50_hw_halt_wait] = {
6852		.op_enable = 1,
6853		.op_intenable = 1,
6854		.op_halt = 1,
6855		.op_drain = 1,
6856	},
6857	[qib_sdma_state_s99_running] = {
6858		.op_enable = 1,
6859		.op_intenable = 1,
6860		.op_halt = 0,
6861		.op_drain = 0,
6862		.go_s99_running_totrue = 1,
6863	},
6864};
6865
6866static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6867{
6868	ppd->sdma_state.set_state_action = sdma_7322_action_table;
6869}
6870
6871static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6872{
6873	struct qib_devdata *dd = ppd->dd;
6874	unsigned lastbuf, erstbuf;
6875	u64 senddmabufmask[3] = { 0 };
6876	int n;
6877
6878	qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6879	qib_sdma_7322_setlengen(ppd);
6880	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6881	qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6882	qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6883	qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6884
6885	if (dd->num_pports)
6886		n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6887	else
6888		n = dd->cspec->sdmabufcnt; /* failsafe for init */
6889	erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6890		((dd->num_pports == 1 || ppd->port == 2) ? n :
6891		dd->cspec->sdmabufcnt);
6892	lastbuf = erstbuf + n;
6893
6894	ppd->sdma_state.first_sendbuf = erstbuf;
6895	ppd->sdma_state.last_sendbuf = lastbuf;
6896	for (; erstbuf < lastbuf; ++erstbuf) {
6897		unsigned word = erstbuf / BITS_PER_LONG;
6898		unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6899
6900		senddmabufmask[word] |= 1ULL << bit;
6901	}
6902	qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6903	qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6904	qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6905	return 0;
6906}
6907
6908/* sdma_lock must be held */
6909static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6910{
6911	struct qib_devdata *dd = ppd->dd;
6912	int sane;
6913	int use_dmahead;
6914	u16 swhead;
6915	u16 swtail;
6916	u16 cnt;
6917	u16 hwhead;
6918
6919	use_dmahead = __qib_sdma_running(ppd) &&
6920		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
6921retry:
6922	hwhead = use_dmahead ?
6923		(u16) le64_to_cpu(*ppd->sdma_head_dma) :
6924		(u16) qib_read_kreg_port(ppd, krp_senddmahead);
6925
6926	swhead = ppd->sdma_descq_head;
6927	swtail = ppd->sdma_descq_tail;
6928	cnt = ppd->sdma_descq_cnt;
6929
6930	if (swhead < swtail)
6931		/* not wrapped */
6932		sane = (hwhead >= swhead) & (hwhead <= swtail);
6933	else if (swhead > swtail)
6934		/* wrapped around */
6935		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6936			(hwhead <= swtail);
6937	else
6938		/* empty */
6939		sane = (hwhead == swhead);
6940
6941	if (unlikely(!sane)) {
6942		if (use_dmahead) {
6943			/* try one more time, directly from the register */
6944			use_dmahead = 0;
6945			goto retry;
6946		}
6947		/* proceed as if no progress */
6948		hwhead = swhead;
6949	}
6950
6951	return hwhead;
6952}
6953
6954static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6955{
6956	u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6957
6958	return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6959	       (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6960	       !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6961	       !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
6962}
6963
6964/*
6965 * Compute the amount of delay before sending the next packet if the
6966 * port's send rate differs from the static rate set for the QP.
6967 * The delay affects the next packet and the amount of the delay is
6968 * based on the length of the this packet.
6969 */
6970static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6971				   u8 srate, u8 vl)
6972{
6973	u8 snd_mult = ppd->delay_mult;
6974	u8 rcv_mult = ib_rate_to_delay[srate];
6975	u32 ret;
6976
6977	ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
6978
6979	/* Indicate VL15, else set the VL in the control word */
6980	if (vl == 15)
6981		ret |= PBC_7322_VL15_SEND_CTRL;
6982	else
6983		ret |= vl << PBC_VL_NUM_LSB;
6984	ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
6985
6986	return ret;
6987}
6988
6989/*
6990 * Enable the per-port VL15 send buffers for use.
6991 * They follow the rest of the buffers, without a config parameter.
6992 * This was in initregs, but that is done before the shadow
6993 * is set up, and this has to be done after the shadow is
6994 * set up.
6995 */
6996static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
6997{
6998	unsigned vl15bufs;
6999
7000	vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
7001	qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
7002			       TXCHK_CHG_TYPE_KERN, NULL);
7003}
7004
7005static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
7006{
7007	if (rcd->ctxt < NUM_IB_PORTS) {
7008		if (rcd->dd->num_pports > 1) {
7009			rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
7010			rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
7011		} else {
7012			rcd->rcvegrcnt = KCTXT0_EGRCNT;
7013			rcd->rcvegr_tid_base = 0;
7014		}
7015	} else {
7016		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
7017		rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
7018			(rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
7019	}
7020}
7021
7022#define QTXSLEEPS 5000
7023static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
7024				  u32 len, u32 which, struct qib_ctxtdata *rcd)
7025{
7026	int i;
7027	const int last = start + len - 1;
7028	const int lastr = last / BITS_PER_LONG;
7029	u32 sleeps = 0;
7030	int wait = rcd != NULL;
7031	unsigned long flags;
7032
7033	while (wait) {
7034		unsigned long shadow = 0;
7035		int cstart, previ = -1;
7036
7037		/*
7038		 * when flipping from kernel to user, we can't change
7039		 * the checking type if the buffer is allocated to the
7040		 * driver.   It's OK the other direction, because it's
7041		 * from close, and we have just disarm'ed all the
7042		 * buffers.  All the kernel to kernel changes are also
7043		 * OK.
7044		 */
7045		for (cstart = start; cstart <= last; cstart++) {
7046			i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7047				/ BITS_PER_LONG;
7048			if (i != previ) {
7049				shadow = (unsigned long)
7050					le64_to_cpu(dd->pioavailregs_dma[i]);
7051				previ = i;
7052			}
7053			if (test_bit(((2 * cstart) +
7054				      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7055				     % BITS_PER_LONG, &shadow))
7056				break;
7057		}
7058
7059		if (cstart > last)
7060			break;
7061
7062		if (sleeps == QTXSLEEPS)
7063			break;
7064		/* make sure we see an updated copy next time around */
7065		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7066		sleeps++;
7067		msleep(20);
7068	}
7069
7070	switch (which) {
7071	case TXCHK_CHG_TYPE_DIS1:
7072		/*
7073		 * disable checking on a range; used by diags; just
7074		 * one buffer, but still written generically
7075		 */
7076		for (i = start; i <= last; i++)
7077			clear_bit(i, dd->cspec->sendchkenable);
7078		break;
7079
7080	case TXCHK_CHG_TYPE_ENAB1:
7081		/*
7082		 * (re)enable checking on a range; used by diags; just
7083		 * one buffer, but still written generically; read
7084		 * scratch to be sure buffer actually triggered, not
7085		 * just flushed from processor.
7086		 */
7087		qib_read_kreg32(dd, kr_scratch);
7088		for (i = start; i <= last; i++)
7089			set_bit(i, dd->cspec->sendchkenable);
7090		break;
7091
7092	case TXCHK_CHG_TYPE_KERN:
7093		/* usable by kernel */
7094		for (i = start; i <= last; i++) {
7095			set_bit(i, dd->cspec->sendibchk);
7096			clear_bit(i, dd->cspec->sendgrhchk);
7097		}
7098		spin_lock_irqsave(&dd->uctxt_lock, flags);
7099		/* see if we need to raise avail update threshold */
7100		for (i = dd->first_user_ctxt;
7101		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
7102		     && i < dd->cfgctxts; i++)
7103			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
7104			   ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
7105			   < dd->cspec->updthresh_dflt)
7106				break;
7107		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
7108		if (i == dd->cfgctxts) {
7109			spin_lock_irqsave(&dd->sendctrl_lock, flags);
7110			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
7111			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7112			dd->sendctrl |= (dd->cspec->updthresh &
7113					 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
7114					   SYM_LSB(SendCtrl, AvailUpdThld);
7115			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7116			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7117		}
7118		break;
7119
7120	case TXCHK_CHG_TYPE_USER:
7121		/* for user process */
7122		for (i = start; i <= last; i++) {
7123			clear_bit(i, dd->cspec->sendibchk);
7124			set_bit(i, dd->cspec->sendgrhchk);
7125		}
7126		spin_lock_irqsave(&dd->sendctrl_lock, flags);
7127		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
7128			/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
7129			dd->cspec->updthresh = (rcd->piocnt /
7130						rcd->subctxt_cnt) - 1;
7131			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7132			dd->sendctrl |= (dd->cspec->updthresh &
7133					SYM_RMASK(SendCtrl, AvailUpdThld))
7134					<< SYM_LSB(SendCtrl, AvailUpdThld);
7135			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7136			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7137		} else
7138			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7139		break;
7140
7141	default:
7142		break;
7143	}
7144
7145	for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
7146		qib_write_kreg(dd, kr_sendcheckmask + i,
7147			       dd->cspec->sendchkenable[i]);
7148
7149	for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
7150		qib_write_kreg(dd, kr_sendgrhcheckmask + i,
7151			       dd->cspec->sendgrhchk[i]);
7152		qib_write_kreg(dd, kr_sendibpktmask + i,
7153			       dd->cspec->sendibchk[i]);
7154	}
7155
7156	/*
7157	 * Be sure whatever we did was seen by the chip and acted upon,
7158	 * before we return.  Mostly important for which >= 2.
7159	 */
7160	qib_read_kreg32(dd, kr_scratch);
7161}
7162
7163
7164/* useful for trigger analyzers, etc. */
7165static void writescratch(struct qib_devdata *dd, u32 val)
7166{
7167	qib_write_kreg(dd, kr_scratch, val);
7168}
7169
7170/* Dummy for now, use chip regs soon */
7171static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
7172{
7173	return -ENXIO;
7174}
7175
7176/**
7177 * qib_init_iba7322_funcs - set up the chip-specific function pointers
7178 * @dev: the pci_dev for qlogic_ib device
7179 * @ent: pci_device_id struct for this dev
7180 *
7181 * Also allocates, inits, and returns the devdata struct for this
7182 * device instance
7183 *
7184 * This is global, and is called directly at init to set up the
7185 * chip-specific function pointers for later use.
7186 */
7187struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
7188					   const struct pci_device_id *ent)
7189{
7190	struct qib_devdata *dd;
7191	int ret, i;
7192	u32 tabsize, actual_cnt = 0;
7193
7194	dd = qib_alloc_devdata(pdev,
7195		NUM_IB_PORTS * sizeof(struct qib_pportdata) +
7196		sizeof(struct qib_chip_specific) +
7197		NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
7198	if (IS_ERR(dd))
7199		goto bail;
7200
7201	dd->f_bringup_serdes    = qib_7322_bringup_serdes;
7202	dd->f_cleanup           = qib_setup_7322_cleanup;
7203	dd->f_clear_tids        = qib_7322_clear_tids;
7204	dd->f_free_irq          = qib_7322_free_irq;
7205	dd->f_get_base_info     = qib_7322_get_base_info;
7206	dd->f_get_msgheader     = qib_7322_get_msgheader;
7207	dd->f_getsendbuf        = qib_7322_getsendbuf;
7208	dd->f_gpio_mod          = gpio_7322_mod;
7209	dd->f_eeprom_wen        = qib_7322_eeprom_wen;
7210	dd->f_hdrqempty         = qib_7322_hdrqempty;
7211	dd->f_ib_updown         = qib_7322_ib_updown;
7212	dd->f_init_ctxt         = qib_7322_init_ctxt;
7213	dd->f_initvl15_bufs     = qib_7322_initvl15_bufs;
7214	dd->f_intr_fallback     = qib_7322_intr_fallback;
7215	dd->f_late_initreg      = qib_late_7322_initreg;
7216	dd->f_setpbc_control    = qib_7322_setpbc_control;
7217	dd->f_portcntr          = qib_portcntr_7322;
7218	dd->f_put_tid           = qib_7322_put_tid;
7219	dd->f_quiet_serdes      = qib_7322_mini_quiet_serdes;
7220	dd->f_rcvctrl           = rcvctrl_7322_mod;
7221	dd->f_read_cntrs        = qib_read_7322cntrs;
7222	dd->f_read_portcntrs    = qib_read_7322portcntrs;
7223	dd->f_reset             = qib_do_7322_reset;
7224	dd->f_init_sdma_regs    = init_sdma_7322_regs;
7225	dd->f_sdma_busy         = qib_sdma_7322_busy;
7226	dd->f_sdma_gethead      = qib_sdma_7322_gethead;
7227	dd->f_sdma_sendctrl     = qib_7322_sdma_sendctrl;
7228	dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
7229	dd->f_sdma_update_tail  = qib_sdma_update_7322_tail;
7230	dd->f_sendctrl          = sendctrl_7322_mod;
7231	dd->f_set_armlaunch     = qib_set_7322_armlaunch;
7232	dd->f_set_cntr_sample   = qib_set_cntr_7322_sample;
7233	dd->f_iblink_state      = qib_7322_iblink_state;
7234	dd->f_ibphys_portstate  = qib_7322_phys_portstate;
7235	dd->f_get_ib_cfg        = qib_7322_get_ib_cfg;
7236	dd->f_set_ib_cfg        = qib_7322_set_ib_cfg;
7237	dd->f_set_ib_loopback   = qib_7322_set_loopback;
7238	dd->f_get_ib_table      = qib_7322_get_ib_table;
7239	dd->f_set_ib_table      = qib_7322_set_ib_table;
7240	dd->f_set_intr_state    = qib_7322_set_intr_state;
7241	dd->f_setextled         = qib_setup_7322_setextled;
7242	dd->f_txchk_change      = qib_7322_txchk_change;
7243	dd->f_update_usrhead    = qib_update_7322_usrhead;
7244	dd->f_wantpiobuf_intr   = qib_wantpiobuf_7322_intr;
7245	dd->f_xgxs_reset        = qib_7322_mini_pcs_reset;
7246	dd->f_sdma_hw_clean_up  = qib_7322_sdma_hw_clean_up;
7247	dd->f_sdma_hw_start_up  = qib_7322_sdma_hw_start_up;
7248	dd->f_sdma_init_early   = qib_7322_sdma_init_early;
7249	dd->f_writescratch      = writescratch;
7250	dd->f_tempsense_rd	= qib_7322_tempsense_rd;
7251#ifdef CONFIG_INFINIBAND_QIB_DCA
7252	dd->f_notify_dca	= qib_7322_notify_dca;
7253#endif
7254	/*
7255	 * Do remaining PCIe setup and save PCIe values in dd.
7256	 * Any error printing is already done by the init code.
7257	 * On return, we have the chip mapped, but chip registers
7258	 * are not set up until start of qib_init_7322_variables.
7259	 */
7260	ret = qib_pcie_ddinit(dd, pdev, ent);
7261	if (ret < 0)
7262		goto bail_free;
7263
7264	/* initialize chip-specific variables */
7265	ret = qib_init_7322_variables(dd);
7266	if (ret)
7267		goto bail_cleanup;
7268
7269	if (qib_mini_init || !dd->num_pports)
7270		goto bail;
7271
7272	/*
7273	 * Determine number of vectors we want; depends on port count
7274	 * and number of configured kernel receive queues actually used.
7275	 * Should also depend on whether sdma is enabled or not, but
7276	 * that's such a rare testing case it's not worth worrying about.
7277	 */
7278	tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
7279	for (i = 0; i < tabsize; i++)
7280		if ((i < ARRAY_SIZE(irq_table) &&
7281		     irq_table[i].port <= dd->num_pports) ||
7282		    (i >= ARRAY_SIZE(irq_table) &&
7283		     dd->rcd[i - ARRAY_SIZE(irq_table)]))
7284			actual_cnt++;
7285	/* reduce by ctxt's < 2 */
7286	if (qib_krcvq01_no_msi)
7287		actual_cnt -= dd->num_pports;
7288
7289	tabsize = actual_cnt;
7290	dd->cspec->msix_entries = kcalloc(tabsize,
7291					  sizeof(struct qib_msix_entry),
7292					  GFP_KERNEL);
7293	if (!dd->cspec->msix_entries)
7294		tabsize = 0;
7295
7296	if (qib_pcie_params(dd, 8, &tabsize))
7297		qib_dev_err(dd,
7298			"Failed to setup PCIe or interrupts; continuing anyway\n");
7299	/* may be less than we wanted, if not enough available */
7300	dd->cspec->num_msix_entries = tabsize;
7301
7302	/* setup interrupt handler */
7303	qib_setup_7322_interrupt(dd, 1);
7304
7305	/* clear diagctrl register, in case diags were running and crashed */
7306	qib_write_kreg(dd, kr_hwdiagctrl, 0);
7307#ifdef CONFIG_INFINIBAND_QIB_DCA
7308	if (!dca_add_requester(&pdev->dev)) {
7309		qib_devinfo(dd->pcidev, "DCA enabled\n");
7310		dd->flags |= QIB_DCA_ENABLED;
7311		qib_setup_dca(dd);
7312	}
7313#endif
7314	goto bail;
7315
7316bail_cleanup:
7317	qib_pcie_ddcleanup(dd);
7318bail_free:
7319	qib_free_devdata(dd);
7320	dd = ERR_PTR(ret);
7321bail:
7322	return dd;
7323}
7324
7325/*
7326 * Set the table entry at the specified index from the table specifed.
7327 * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
7328 * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
7329 * 'idx' below addresses the correct entry, while its 4 LSBs select the
7330 * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
7331 */
7332#define DDS_ENT_AMP_LSB 14
7333#define DDS_ENT_MAIN_LSB 9
7334#define DDS_ENT_POST_LSB 5
7335#define DDS_ENT_PRE_XTRA_LSB 3
7336#define DDS_ENT_PRE_LSB 0
7337
7338/*
7339 * Set one entry in the TxDDS table for spec'd port
7340 * ridx picks one of the entries, while tp points
7341 * to the appropriate table entry.
7342 */
7343static void set_txdds(struct qib_pportdata *ppd, int ridx,
7344		      const struct txdds_ent *tp)
7345{
7346	struct qib_devdata *dd = ppd->dd;
7347	u32 pack_ent;
7348	int regidx;
7349
7350	/* Get correct offset in chip-space, and in source table */
7351	regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
7352	/*
7353	 * We do not use qib_write_kreg_port() because it was intended
7354	 * only for registers in the lower "port specific" pages.
7355	 * So do index calculation  by hand.
7356	 */
7357	if (ppd->hw_pidx)
7358		regidx += (dd->palign / sizeof(u64));
7359
7360	pack_ent = tp->amp << DDS_ENT_AMP_LSB;
7361	pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
7362	pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
7363	pack_ent |= tp->post << DDS_ENT_POST_LSB;
7364	qib_write_kreg(dd, regidx, pack_ent);
7365	/* Prevent back-to-back writes by hitting scratch */
7366	qib_write_kreg(ppd->dd, kr_scratch, 0);
7367}
7368
7369static const struct vendor_txdds_ent vendor_txdds[] = {
7370	{ /* Amphenol 1m 30awg NoEq */
7371		{ 0x41, 0x50, 0x48 }, "584470002       ",
7372		{ 10,  0,  0,  5 }, { 10,  0,  0,  9 }, {  7,  1,  0, 13 },
7373	},
7374	{ /* Amphenol 3m 28awg NoEq */
7375		{ 0x41, 0x50, 0x48 }, "584470004       ",
7376		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  7, 15 },
7377	},
7378	{ /* Finisar 3m OM2 Optical */
7379		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
7380		{  0,  0,  0,  3 }, {  0,  0,  0,  4 }, {  0,  0,  0, 13 },
7381	},
7382	{ /* Finisar 30m OM2 Optical */
7383		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
7384		{  0,  0,  0,  1 }, {  0,  0,  0,  5 }, {  0,  0,  0, 11 },
7385	},
7386	{ /* Finisar Default OM2 Optical */
7387		{ 0x00, 0x90, 0x65 }, NULL,
7388		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  0,  0, 12 },
7389	},
7390	{ /* Gore 1m 30awg NoEq */
7391		{ 0x00, 0x21, 0x77 }, "QSN3300-1       ",
7392		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  0, 15 },
7393	},
7394	{ /* Gore 2m 30awg NoEq */
7395		{ 0x00, 0x21, 0x77 }, "QSN3300-2       ",
7396		{  0,  0,  0,  8 }, {  0,  0,  0, 10 }, {  0,  1,  7, 15 },
7397	},
7398	{ /* Gore 1m 28awg NoEq */
7399		{ 0x00, 0x21, 0x77 }, "QSN3800-1       ",
7400		{  0,  0,  0,  6 }, {  0,  0,  0,  8 }, {  0,  1,  0, 15 },
7401	},
7402	{ /* Gore 3m 28awg NoEq */
7403		{ 0x00, 0x21, 0x77 }, "QSN3800-3       ",
7404		{  0,  0,  0,  9 }, {  0,  0,  0, 13 }, {  0,  1,  7, 15 },
7405	},
7406	{ /* Gore 5m 24awg Eq */
7407		{ 0x00, 0x21, 0x77 }, "QSN7000-5       ",
7408		{  0,  0,  0,  7 }, {  0,  0,  0,  9 }, {  0,  1,  3, 15 },
7409	},
7410	{ /* Gore 7m 24awg Eq */
7411		{ 0x00, 0x21, 0x77 }, "QSN7000-7       ",
7412		{  0,  0,  0,  9 }, {  0,  0,  0, 11 }, {  0,  2,  6, 15 },
7413	},
7414	{ /* Gore 5m 26awg Eq */
7415		{ 0x00, 0x21, 0x77 }, "QSN7600-5       ",
7416		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  9, 13 },
7417	},
7418	{ /* Gore 7m 26awg Eq */
7419		{ 0x00, 0x21, 0x77 }, "QSN7600-7       ",
7420		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  10,  1,  8, 15 },
7421	},
7422	{ /* Intersil 12m 24awg Active */
7423		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
7424		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  3,  0,  9 },
7425	},
7426	{ /* Intersil 10m 28awg Active */
7427		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7428		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  2,  0,  2 },
7429	},
7430	{ /* Intersil 7m 30awg Active */
7431		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7432		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  1,  0,  3 },
7433	},
7434	{ /* Intersil 5m 32awg Active */
7435		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7436		{  0,  0,  0,  6 }, {  0,  0,  0,  6 }, {  0,  2,  0,  8 },
7437	},
7438	{ /* Intersil Default Active */
7439		{ 0x00, 0x30, 0xB4 }, NULL,
7440		{  0,  0,  0,  6 }, {  0,  0,  0,  5 }, {  0,  2,  0,  5 },
7441	},
7442	{ /* Luxtera 20m Active Optical */
7443		{ 0x00, 0x25, 0x63 }, NULL,
7444		{  0,  0,  0,  5 }, {  0,  0,  0,  8 }, {  0,  2,  0,  12 },
7445	},
7446	{ /* Molex 1M Cu loopback */
7447		{ 0x00, 0x09, 0x3A }, "74763-0025      ",
7448		{  2,  2,  6, 15 }, {  2,  2,  6, 15 }, {  2,  2,  6, 15 },
7449	},
7450	{ /* Molex 2m 28awg NoEq */
7451		{ 0x00, 0x09, 0x3A }, "74757-2201      ",
7452		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  1, 15 },
7453	},
7454};
7455
7456static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7457	/* amp, pre, main, post */
7458	{  2, 2, 15,  6 },	/* Loopback */
7459	{  0, 0,  0,  1 },	/*  2 dB */
7460	{  0, 0,  0,  2 },	/*  3 dB */
7461	{  0, 0,  0,  3 },	/*  4 dB */
7462	{  0, 0,  0,  4 },	/*  5 dB */
7463	{  0, 0,  0,  5 },	/*  6 dB */
7464	{  0, 0,  0,  6 },	/*  7 dB */
7465	{  0, 0,  0,  7 },	/*  8 dB */
7466	{  0, 0,  0,  8 },	/*  9 dB */
7467	{  0, 0,  0,  9 },	/* 10 dB */
7468	{  0, 0,  0, 10 },	/* 11 dB */
7469	{  0, 0,  0, 11 },	/* 12 dB */
7470	{  0, 0,  0, 12 },	/* 13 dB */
7471	{  0, 0,  0, 13 },	/* 14 dB */
7472	{  0, 0,  0, 14 },	/* 15 dB */
7473	{  0, 0,  0, 15 },	/* 16 dB */
7474};
7475
7476static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7477	/* amp, pre, main, post */
7478	{  2, 2, 15,  6 },	/* Loopback */
7479	{  0, 0,  0,  8 },	/*  2 dB */
7480	{  0, 0,  0,  8 },	/*  3 dB */
7481	{  0, 0,  0,  9 },	/*  4 dB */
7482	{  0, 0,  0,  9 },	/*  5 dB */
7483	{  0, 0,  0, 10 },	/*  6 dB */
7484	{  0, 0,  0, 10 },	/*  7 dB */
7485	{  0, 0,  0, 11 },	/*  8 dB */
7486	{  0, 0,  0, 11 },	/*  9 dB */
7487	{  0, 0,  0, 12 },	/* 10 dB */
7488	{  0, 0,  0, 12 },	/* 11 dB */
7489	{  0, 0,  0, 13 },	/* 12 dB */
7490	{  0, 0,  0, 13 },	/* 13 dB */
7491	{  0, 0,  0, 14 },	/* 14 dB */
7492	{  0, 0,  0, 14 },	/* 15 dB */
7493	{  0, 0,  0, 15 },	/* 16 dB */
7494};
7495
7496static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7497	/* amp, pre, main, post */
7498	{  2, 2, 15,  6 },	/* Loopback */
7499	{  0, 1,  0,  7 },	/*  2 dB (also QMH7342) */
7500	{  0, 1,  0,  9 },	/*  3 dB (also QMH7342) */
7501	{  0, 1,  0, 11 },	/*  4 dB */
7502	{  0, 1,  0, 13 },	/*  5 dB */
7503	{  0, 1,  0, 15 },	/*  6 dB */
7504	{  0, 1,  3, 15 },	/*  7 dB */
7505	{  0, 1,  7, 15 },	/*  8 dB */
7506	{  0, 1,  7, 15 },	/*  9 dB */
7507	{  0, 1,  8, 15 },	/* 10 dB */
7508	{  0, 1,  9, 15 },	/* 11 dB */
7509	{  0, 1, 10, 15 },	/* 12 dB */
7510	{  0, 2,  6, 15 },	/* 13 dB */
7511	{  0, 2,  7, 15 },	/* 14 dB */
7512	{  0, 2,  8, 15 },	/* 15 dB */
7513	{  0, 2,  9, 15 },	/* 16 dB */
7514};
7515
7516/*
7517 * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
7518 * These are mostly used for mez cards going through connectors
7519 * and backplane traces, but can be used to add other "unusual"
7520 * table values as well.
7521 */
7522static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
7523	/* amp, pre, main, post */
7524	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
7525	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
7526	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
7527	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
7528	{  0, 0, 0,  3 },	/* QMH7342 backplane settings */
7529	{  0, 0, 0,  4 },	/* QMH7342 backplane settings */
7530	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
7531	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
7532	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
7533	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
7534	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
7535	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
7536	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
7537	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7538	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7539	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7540	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7541	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7542};
7543
7544static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
7545	/* amp, pre, main, post */
7546	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
7547	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
7548	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
7549	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
7550	{  0, 0, 0,  9 },	/* QMH7342 backplane settings */
7551	{  0, 0, 0, 10 },	/* QMH7342 backplane settings */
7552	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
7553	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
7554	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
7555	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
7556	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
7557	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
7558	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
7559	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7560	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7561	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7562	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7563	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7564};
7565
7566static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
7567	/* amp, pre, main, post */
7568	{  0, 1,  0,  4 },	/* QMH7342 backplane settings */
7569	{  0, 1,  0,  5 },	/* QMH7342 backplane settings */
7570	{  0, 1,  0,  6 },	/* QMH7342 backplane settings */
7571	{  0, 1,  0,  8 },	/* QMH7342 backplane settings */
7572	{  0, 1,  0, 10 },	/* QMH7342 backplane settings */
7573	{  0, 1,  0, 12 },	/* QMH7342 backplane settings */
7574	{  0, 1,  4, 15 },	/* QME7342 backplane settings 1.0 */
7575	{  0, 1,  3, 15 },	/* QME7342 backplane settings 1.0 */
7576	{  0, 1,  0, 12 },	/* QME7342 backplane settings 1.0 */
7577	{  0, 1,  0, 11 },	/* QME7342 backplane settings 1.0 */
7578	{  0, 1,  0,  9 },	/* QME7342 backplane settings 1.0 */
7579	{  0, 1,  0, 14 },	/* QME7342 backplane settings 1.0 */
7580	{  0, 1,  2, 15 },	/* QME7342 backplane settings 1.0 */
7581	{  0, 1,  0, 11 },      /* QME7342 backplane settings 1.1 */
7582	{  0, 1,  0,  7 },      /* QME7342 backplane settings 1.1 */
7583	{  0, 1,  0,  9 },      /* QME7342 backplane settings 1.1 */
7584	{  0, 1,  0,  6 },      /* QME7342 backplane settings 1.1 */
7585	{  0, 1,  0,  8 },      /* QME7342 backplane settings 1.1 */
7586};
7587
7588static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7589	/* amp, pre, main, post */
7590	{ 0, 0, 0, 0 },         /* QME7342 mfg settings */
7591	{ 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
7592};
7593
7594static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7595					       unsigned atten)
7596{
7597	/*
7598	 * The attenuation table starts at 2dB for entry 1,
7599	 * with entry 0 being the loopback entry.
7600	 */
7601	if (atten <= 2)
7602		atten = 1;
7603	else if (atten > TXDDS_TABLE_SZ)
7604		atten = TXDDS_TABLE_SZ - 1;
7605	else
7606		atten--;
7607	return txdds + atten;
7608}
7609
7610/*
7611 * if override is set, the module parameter txselect has a value
7612 * for this specific port, so use it, rather than our normal mechanism.
7613 */
7614static void find_best_ent(struct qib_pportdata *ppd,
7615			  const struct txdds_ent **sdr_dds,
7616			  const struct txdds_ent **ddr_dds,
7617			  const struct txdds_ent **qdr_dds, int override)
7618{
7619	struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7620	int idx;
7621
7622	/* Search table of known cables */
7623	for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7624		const struct vendor_txdds_ent *v = vendor_txdds + idx;
7625
7626		if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7627		    (!v->partnum ||
7628		     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7629			*sdr_dds = &v->sdr;
7630			*ddr_dds = &v->ddr;
7631			*qdr_dds = &v->qdr;
7632			return;
7633		}
7634	}
7635
7636	/* Active cables don't have attenuation so we only set SERDES
7637	 * settings to account for the attenuation of the board traces. */
7638	if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7639		*sdr_dds = txdds_sdr + ppd->dd->board_atten;
7640		*ddr_dds = txdds_ddr + ppd->dd->board_atten;
7641		*qdr_dds = txdds_qdr + ppd->dd->board_atten;
7642		return;
7643	}
7644
7645	if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7646						      qd->atten[1])) {
7647		*sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7648		*ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7649		*qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7650		return;
7651	} else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7652		/*
7653		 * If we have no (or incomplete) data from the cable
7654		 * EEPROM, or no QSFP, or override is set, use the
7655		 * module parameter value to index into the attentuation
7656		 * table.
7657		 */
7658		idx = ppd->cpspec->no_eep;
7659		*sdr_dds = &txdds_sdr[idx];
7660		*ddr_dds = &txdds_ddr[idx];
7661		*qdr_dds = &txdds_qdr[idx];
7662	} else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7663		/* similar to above, but index into the "extra" table. */
7664		idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7665		*sdr_dds = &txdds_extra_sdr[idx];
7666		*ddr_dds = &txdds_extra_ddr[idx];
7667		*qdr_dds = &txdds_extra_qdr[idx];
7668	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7669		   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7670					  TXDDS_MFG_SZ)) {
7671		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7672		pr_info("IB%u:%u use idx %u into txdds_mfg\n",
7673			ppd->dd->unit, ppd->port, idx);
7674		*sdr_dds = &txdds_extra_mfg[idx];
7675		*ddr_dds = &txdds_extra_mfg[idx];
7676		*qdr_dds = &txdds_extra_mfg[idx];
7677	} else {
7678		/* this shouldn't happen, it's range checked */
7679		*sdr_dds = txdds_sdr + qib_long_atten;
7680		*ddr_dds = txdds_ddr + qib_long_atten;
7681		*qdr_dds = txdds_qdr + qib_long_atten;
7682	}
7683}
7684
7685static void init_txdds_table(struct qib_pportdata *ppd, int override)
7686{
7687	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7688	struct txdds_ent *dds;
7689	int idx;
7690	int single_ent = 0;
7691
7692	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7693
7694	/* for mez cards or override, use the selected value for all entries */
7695	if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7696		single_ent = 1;
7697
7698	/* Fill in the first entry with the best entry found. */
7699	set_txdds(ppd, 0, sdr_dds);
7700	set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7701	set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7702	if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7703		QIBL_LINKACTIVE)) {
7704		dds = (struct txdds_ent *)(ppd->link_speed_active ==
7705					   QIB_IB_QDR ?  qdr_dds :
7706					   (ppd->link_speed_active ==
7707					    QIB_IB_DDR ? ddr_dds : sdr_dds));
7708		write_tx_serdes_param(ppd, dds);
7709	}
7710
7711	/* Fill in the remaining entries with the default table values. */
7712	for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7713		set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7714		set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7715			  single_ent ? ddr_dds : txdds_ddr + idx);
7716		set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7717			  single_ent ? qdr_dds : txdds_qdr + idx);
7718	}
7719}
7720
7721#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7722#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7723#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7724#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7725#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7726#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7727#define AHB_TRANS_TRIES 10
7728
7729/*
7730 * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan4,
7731 * 5=subsystem which is why most calls have "chan + chan >> 1"
7732 * for the channel argument.
7733 */
7734static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7735		    u32 data, u32 mask)
7736{
7737	u32 rd_data, wr_data, sz_mask;
7738	u64 trans, acc, prev_acc;
7739	u32 ret = 0xBAD0BAD;
7740	int tries;
7741
7742	prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7743	/* From this point on, make sure we return access */
7744	acc = (quad << 1) | 1;
7745	qib_write_kreg(dd, KR_AHB_ACC, acc);
7746
7747	for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7748		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7749		if (trans & AHB_TRANS_RDY)
7750			break;
7751	}
7752	if (tries >= AHB_TRANS_TRIES) {
7753		qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7754		goto bail;
7755	}
7756
7757	/* If mask is not all 1s, we need to read, but different SerDes
7758	 * entities have different sizes
7759	 */
7760	sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7761	wr_data = data & mask & sz_mask;
7762	if ((~mask & sz_mask) != 0) {
7763		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7764		qib_write_kreg(dd, KR_AHB_TRANS, trans);
7765
7766		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7767			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7768			if (trans & AHB_TRANS_RDY)
7769				break;
7770		}
7771		if (tries >= AHB_TRANS_TRIES) {
7772			qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7773				    AHB_TRANS_TRIES);
7774			goto bail;
7775		}
7776		/* Re-read in case host split reads and read data first */
7777		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7778		rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7779		wr_data |= (rd_data & ~mask & sz_mask);
7780	}
7781
7782	/* If mask is not zero, we need to write. */
7783	if (mask & sz_mask) {
7784		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7785		trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7786		trans |= AHB_WR;
7787		qib_write_kreg(dd, KR_AHB_TRANS, trans);
7788
7789		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7790			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7791			if (trans & AHB_TRANS_RDY)
7792				break;
7793		}
7794		if (tries >= AHB_TRANS_TRIES) {
7795			qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7796				    AHB_TRANS_TRIES);
7797			goto bail;
7798		}
7799	}
7800	ret = wr_data;
7801bail:
7802	qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7803	return ret;
7804}
7805
7806static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7807			     unsigned mask)
7808{
7809	struct qib_devdata *dd = ppd->dd;
7810	int chan;
7811
7812	for (chan = 0; chan < SERDES_CHANS; ++chan) {
7813		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7814			data, mask);
7815		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7816			0, 0);
7817	}
7818}
7819
7820static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7821{
7822	u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7823	u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7824
7825	if (enable && !state) {
7826		pr_info("IB%u:%u Turning LOS on\n",
7827			ppd->dd->unit, ppd->port);
7828		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7829	} else if (!enable && state) {
7830		pr_info("IB%u:%u Turning LOS off\n",
7831			ppd->dd->unit, ppd->port);
7832		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7833	}
7834	qib_write_kreg_port(ppd, krp_serdesctrl, data);
7835}
7836
7837static int serdes_7322_init(struct qib_pportdata *ppd)
7838{
7839	int ret = 0;
7840
7841	if (ppd->dd->cspec->r1)
7842		ret = serdes_7322_init_old(ppd);
7843	else
7844		ret = serdes_7322_init_new(ppd);
7845	return ret;
7846}
7847
7848static int serdes_7322_init_old(struct qib_pportdata *ppd)
7849{
7850	u32 le_val;
7851
7852	/*
7853	 * Initialize the Tx DDS tables.  Also done every QSFP event,
7854	 * for adapters with QSFP
7855	 */
7856	init_txdds_table(ppd, 0);
7857
7858	/* ensure no tx overrides from earlier driver loads */
7859	qib_write_kreg_port(ppd, krp_tx_deemph_override,
7860		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7861		reset_tx_deemphasis_override));
7862
7863	/* Patch some SerDes defaults to "Better for IB" */
7864	/* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7865	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7866
7867	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7868	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7869	/* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7870	ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7871
7872	/* May be overridden in qsfp_7322_event */
7873	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7874	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7875
7876	/* enable LE1 adaptation for all but QME, which is disabled */
7877	le_val = IS_QME(ppd->dd) ? 0 : 1;
7878	ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7879
7880	/* Clear cmode-override, may be set from older driver */
7881	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7882
7883	/* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7884	ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7885
7886	/* setup LoS params; these are subsystem, so chan == 5 */
7887	/* LoS filter threshold_count on, ch 0-3, set to 8 */
7888	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7889	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7890	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7891	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7892
7893	/* LoS filter threshold_count off, ch 0-3, set to 4 */
7894	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7895	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7896	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7897	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7898
7899	/* LoS filter select enabled */
7900	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7901
7902	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
7903	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7904	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7905	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7906
7907	serdes_7322_los_enable(ppd, 1);
7908
7909	/* rxbistena; set 0 to avoid effects of it switch later */
7910	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7911
7912	/* Configure 4 DFE taps, and only they adapt */
7913	ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7914
7915	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7916	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7917	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7918
7919	/*
7920	 * Set receive adaptation mode.  SDR and DDR adaptation are
7921	 * always on, and QDR is initially enabled; later disabled.
7922	 */
7923	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7924	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7925	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7926			    ppd->dd->cspec->r1 ?
7927			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7928	ppd->cpspec->qdr_dfe_on = 1;
7929
7930	/* FLoop LOS gate: PPM filter  enabled */
7931	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7932
7933	/* rx offset center enabled */
7934	ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7935
7936	if (!ppd->dd->cspec->r1) {
7937		ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7938		ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7939	}
7940
7941	/* Set the frequency loop bandwidth to 15 */
7942	ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7943
7944	return 0;
7945}
7946
7947static int serdes_7322_init_new(struct qib_pportdata *ppd)
7948{
7949	unsigned long tend;
7950	u32 le_val, rxcaldone;
7951	int chan, chan_done = (1 << SERDES_CHANS) - 1;
7952
7953	/* Clear cmode-override, may be set from older driver */
7954	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7955
7956	/* ensure no tx overrides from earlier driver loads */
7957	qib_write_kreg_port(ppd, krp_tx_deemph_override,
7958		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7959		reset_tx_deemphasis_override));
7960
7961	/* START OF LSI SUGGESTED SERDES BRINGUP */
7962	/* Reset - Calibration Setup */
7963	/*       Stop DFE adaptaion */
7964	ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
7965	/*       Disable LE1 */
7966	ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
7967	/*       Disable autoadapt for LE1 */
7968	ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
7969	/*       Disable LE2 */
7970	ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
7971	/*       Disable VGA */
7972	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7973	/*       Disable AFE Offset Cancel */
7974	ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
7975	/*       Disable Timing Loop */
7976	ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
7977	/*       Disable Frequency Loop */
7978	ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
7979	/*       Disable Baseline Wander Correction */
7980	ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
7981	/*       Disable RX Calibration */
7982	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7983	/*       Disable RX Offset Calibration */
7984	ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
7985	/*       Select BB CDR */
7986	ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
7987	/*       CDR Step Size */
7988	ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
7989	/*       Enable phase Calibration */
7990	ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
7991	/*       DFE Bandwidth [2:14-12] */
7992	ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
7993	/*       DFE Config (4 taps only) */
7994	ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
7995	/*       Gain Loop Bandwidth */
7996	if (!ppd->dd->cspec->r1) {
7997		ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
7998		ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
7999	} else {
8000		ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
8001	}
8002	/*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
8003	/*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
8004	/*       Data Rate Select [5:7-6] (leave as default) */
8005	/*       RX Parallel Word Width [3:10-8] (leave as default) */
8006
8007	/* RX REST */
8008	/*       Single- or Multi-channel reset */
8009	/*       RX Analog reset */
8010	/*       RX Digital reset */
8011	ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
8012	msleep(20);
8013	/*       RX Analog reset */
8014	ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
8015	msleep(20);
8016	/*       RX Digital reset */
8017	ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
8018	msleep(20);
8019
8020	/* setup LoS params; these are subsystem, so chan == 5 */
8021	/* LoS filter threshold_count on, ch 0-3, set to 8 */
8022	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
8023	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
8024	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
8025	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
8026
8027	/* LoS filter threshold_count off, ch 0-3, set to 4 */
8028	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
8029	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
8030	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
8031	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
8032
8033	/* LoS filter select enabled */
8034	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
8035
8036	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
8037	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
8038	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
8039	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
8040
8041	/* Turn on LOS on initial SERDES init */
8042	serdes_7322_los_enable(ppd, 1);
8043	/* FLoop LOS gate: PPM filter  enabled */
8044	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
8045
8046	/* RX LATCH CALIBRATION */
8047	/*       Enable Eyefinder Phase Calibration latch */
8048	ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
8049	/*       Enable RX Offset Calibration latch */
8050	ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
8051	msleep(20);
8052	/*       Start Calibration */
8053	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
8054	tend = jiffies + msecs_to_jiffies(500);
8055	while (chan_done && !time_is_before_jiffies(tend)) {
8056		msleep(20);
8057		for (chan = 0; chan < SERDES_CHANS; ++chan) {
8058			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8059					    (chan + (chan >> 1)),
8060					    25, 0, 0);
8061			if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
8062			    (~chan_done & (1 << chan)) == 0)
8063				chan_done &= ~(1 << chan);
8064		}
8065	}
8066	if (chan_done) {
8067		pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
8068			 IBSD(ppd->hw_pidx), chan_done);
8069	} else {
8070		for (chan = 0; chan < SERDES_CHANS; ++chan) {
8071			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8072					    (chan + (chan >> 1)),
8073					    25, 0, 0);
8074			if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
8075				pr_info("Serdes %d chan %d calibration failed\n",
8076					IBSD(ppd->hw_pidx), chan);
8077		}
8078	}
8079
8080	/*       Turn off Calibration */
8081	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8082	msleep(20);
8083
8084	/* BRING RX UP */
8085	/*       Set LE2 value (May be overridden in qsfp_7322_event) */
8086	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
8087	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
8088	/*       Set LE2 Loop bandwidth */
8089	ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
8090	/*       Enable LE2 */
8091	ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
8092	msleep(20);
8093	/*       Enable H0 only */
8094	ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
8095	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
8096	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
8097	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
8098	/*       Enable VGA */
8099	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8100	msleep(20);
8101	/*       Set Frequency Loop Bandwidth */
8102	ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
8103	/*       Enable Frequency Loop */
8104	ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
8105	/*       Set Timing Loop Bandwidth */
8106	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
8107	/*       Enable Timing Loop */
8108	ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
8109	msleep(50);
8110	/*       Enable DFE
8111	 *       Set receive adaptation mode.  SDR and DDR adaptation are
8112	 *       always on, and QDR is initially enabled; later disabled.
8113	 */
8114	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
8115	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
8116	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
8117			    ppd->dd->cspec->r1 ?
8118			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
8119	ppd->cpspec->qdr_dfe_on = 1;
8120	/*       Disable LE1  */
8121	ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
8122	/*       Disable auto adapt for LE1 */
8123	ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
8124	msleep(20);
8125	/*       Enable AFE Offset Cancel */
8126	ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
8127	/*       Enable Baseline Wander Correction */
8128	ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
8129	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
8130	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
8131	/* VGA output common mode */
8132	ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
8133
8134	/*
8135	 * Initialize the Tx DDS tables.  Also done every QSFP event,
8136	 * for adapters with QSFP
8137	 */
8138	init_txdds_table(ppd, 0);
8139
8140	return 0;
8141}
8142
8143/* start adjust QMH serdes parameters */
8144
8145static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
8146{
8147	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8148		9, code << 9, 0x3f << 9);
8149}
8150
8151static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
8152	int enable, u32 tapenable)
8153{
8154	if (enable)
8155		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8156			1, 3 << 10, 0x1f << 10);
8157	else
8158		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8159			1, 0, 0x1f << 10);
8160}
8161
8162/* Set clock to 1, 0, 1, 0 */
8163static void clock_man(struct qib_pportdata *ppd, int chan)
8164{
8165	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8166		4, 0x4000, 0x4000);
8167	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8168		4, 0, 0x4000);
8169	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8170		4, 0x4000, 0x4000);
8171	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8172		4, 0, 0x4000);
8173}
8174
8175/*
8176 * write the current Tx serdes pre,post,main,amp settings into the serdes.
8177 * The caller must pass the settings appropriate for the current speed,
8178 * or not care if they are correct for the current speed.
8179 */
8180static void write_tx_serdes_param(struct qib_pportdata *ppd,
8181				  struct txdds_ent *txdds)
8182{
8183	u64 deemph;
8184
8185	deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
8186	/* field names for amp, main, post, pre, respectively */
8187	deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
8188		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
8189		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
8190		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
8191
8192	deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8193			   tx_override_deemphasis_select);
8194	deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8195		    txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8196				       txampcntl_d2a);
8197	deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8198		     txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8199				   txc0_ena);
8200	deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8201		     txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8202				    txcp1_ena);
8203	deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8204		     txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8205				    txcn1_ena);
8206	qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
8207}
8208
8209/*
8210 * Set the parameters for mez cards on link bounce, so they are
8211 * always exactly what was requested.  Similar logic to init_txdds
8212 * but does just the serdes.
8213 */
8214static void adj_tx_serdes(struct qib_pportdata *ppd)
8215{
8216	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
8217	struct txdds_ent *dds;
8218
8219	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
8220	dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
8221		qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
8222				ddr_dds : sdr_dds));
8223	write_tx_serdes_param(ppd, dds);
8224}
8225
8226/* set QDR forced value for H1, if needed */
8227static void force_h1(struct qib_pportdata *ppd)
8228{
8229	int chan;
8230
8231	ppd->cpspec->qdr_reforce = 0;
8232	if (!ppd->dd->cspec->r1)
8233		return;
8234
8235	for (chan = 0; chan < SERDES_CHANS; chan++) {
8236		set_man_mode_h1(ppd, chan, 1, 0);
8237		set_man_code(ppd, chan, ppd->cpspec->h1_val);
8238		clock_man(ppd, chan);
8239		set_man_mode_h1(ppd, chan, 0, 0);
8240	}
8241}
8242
8243#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
8244#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
8245
8246#define R_OPCODE_LSB 3
8247#define R_OP_NOP 0
8248#define R_OP_SHIFT 2
8249#define R_OP_UPDATE 3
8250#define R_TDI_LSB 2
8251#define R_TDO_LSB 1
8252#define R_RDY 1
8253
8254static int qib_r_grab(struct qib_devdata *dd)
8255{
8256	u64 val = SJA_EN;
8257
8258	qib_write_kreg(dd, kr_r_access, val);
8259	qib_read_kreg32(dd, kr_scratch);
8260	return 0;
8261}
8262
8263/* qib_r_wait_for_rdy() not only waits for the ready bit, it
8264 * returns the current state of R_TDO
8265 */
8266static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8267{
8268	u64 val;
8269	int timeout;
8270
8271	for (timeout = 0; timeout < 100 ; ++timeout) {
8272		val = qib_read_kreg32(dd, kr_r_access);
8273		if (val & R_RDY)
8274			return (val >> R_TDO_LSB) & 1;
8275	}
8276	return -1;
8277}
8278
8279static int qib_r_shift(struct qib_devdata *dd, int bisten,
8280		       int len, u8 *inp, u8 *outp)
8281{
8282	u64 valbase, val;
8283	int ret, pos;
8284
8285	valbase = SJA_EN | (bisten << BISTEN_LSB) |
8286		(R_OP_SHIFT << R_OPCODE_LSB);
8287	ret = qib_r_wait_for_rdy(dd);
8288	if (ret < 0)
8289		goto bail;
8290	for (pos = 0; pos < len; ++pos) {
8291		val = valbase;
8292		if (outp) {
8293			outp[pos >> 3] &= ~(1 << (pos & 7));
8294			outp[pos >> 3] |= (ret << (pos & 7));
8295		}
8296		if (inp) {
8297			int tdi = inp[pos >> 3] >> (pos & 7);
8298
8299			val |= ((tdi & 1) << R_TDI_LSB);
8300		}
8301		qib_write_kreg(dd, kr_r_access, val);
8302		qib_read_kreg32(dd, kr_scratch);
8303		ret = qib_r_wait_for_rdy(dd);
8304		if (ret < 0)
8305			break;
8306	}
8307	/* Restore to NOP between operations. */
8308	val =  SJA_EN | (bisten << BISTEN_LSB);
8309	qib_write_kreg(dd, kr_r_access, val);
8310	qib_read_kreg32(dd, kr_scratch);
8311	ret = qib_r_wait_for_rdy(dd);
8312
8313	if (ret >= 0)
8314		ret = pos;
8315bail:
8316	return ret;
8317}
8318
8319static int qib_r_update(struct qib_devdata *dd, int bisten)
8320{
8321	u64 val;
8322	int ret;
8323
8324	val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
8325	ret = qib_r_wait_for_rdy(dd);
8326	if (ret >= 0) {
8327		qib_write_kreg(dd, kr_r_access, val);
8328		qib_read_kreg32(dd, kr_scratch);
8329	}
8330	return ret;
8331}
8332
8333#define BISTEN_PORT_SEL 15
8334#define LEN_PORT_SEL 625
8335#define BISTEN_AT 17
8336#define LEN_AT 156
8337#define BISTEN_ETM 16
8338#define LEN_ETM 632
8339
8340#define BIT2BYTE(x) (((x) +  BITS_PER_BYTE - 1) / BITS_PER_BYTE)
8341
8342/* these are common for all IB port use cases. */
8343static u8 reset_at[BIT2BYTE(LEN_AT)] = {
8344	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8345	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8346};
8347static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
8348	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8349	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8350	0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
8351	0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
8352	0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
8353	0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
8354	0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8355	0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
8356};
8357static u8 at[BIT2BYTE(LEN_AT)] = {
8358	0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
8359	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8360};
8361
8362/* used for IB1 or IB2, only one in use */
8363static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
8364	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8365	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8366	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8367	0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
8368	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8369	0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
8370	0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
8371	0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
8372};
8373
8374/* used when both IB1 and IB2 are in use */
8375static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
8376	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8377	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
8378	0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8379	0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
8380	0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
8381	0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
8382	0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
8383	0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
8384};
8385
8386/* used when only IB1 is in use */
8387static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
8388	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8389	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8390	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8391	0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8392	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8393	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8394	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8395	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8396};
8397
8398/* used when only IB2 is in use */
8399static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
8400	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
8401	0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
8402	0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8403	0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8404	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
8405	0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8406	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8407	0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
8408};
8409
8410/* used when both IB1 and IB2 are in use */
8411static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
8412	0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8413	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8414	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8415	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8416	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8417	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
8418	0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8419	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8420};
8421
8422/*
8423 * Do setup to properly handle IB link recovery; if port is zero, we
8424 * are initializing to cover both ports; otherwise we are initializing
8425 * to cover a single port card, or the port has reached INIT and we may
8426 * need to switch coverage types.
8427 */
8428static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8429{
8430	u8 *portsel, *etm;
8431	struct qib_devdata *dd = ppd->dd;
8432
8433	if (!ppd->dd->cspec->r1)
8434		return;
8435	if (!both) {
8436		dd->cspec->recovery_ports_initted++;
8437		ppd->cpspec->recovery_init = 1;
8438	}
8439	if (!both && dd->cspec->recovery_ports_initted == 1) {
8440		portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8441		etm = atetm_1port;
8442	} else {
8443		portsel = portsel_2port;
8444		etm = atetm_2port;
8445	}
8446
8447	if (qib_r_grab(dd) < 0 ||
8448		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8449		qib_r_update(dd, BISTEN_ETM) < 0 ||
8450		qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8451		qib_r_update(dd, BISTEN_AT) < 0 ||
8452		qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8453			    portsel, NULL) < 0 ||
8454		qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8455		qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8456		qib_r_update(dd, BISTEN_AT) < 0 ||
8457		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8458		qib_r_update(dd, BISTEN_ETM) < 0)
8459		qib_dev_err(dd, "Failed IB link recovery setup\n");
8460}
8461
8462static void check_7322_rxe_status(struct qib_pportdata *ppd)
8463{
8464	struct qib_devdata *dd = ppd->dd;
8465	u64 fmask;
8466
8467	if (dd->cspec->recovery_ports_initted != 1)
8468		return; /* rest doesn't apply to dualport */
8469	qib_write_kreg(dd, kr_control, dd->control |
8470		       SYM_MASK(Control, FreezeMode));
8471	(void)qib_read_kreg64(dd, kr_scratch);
8472	udelay(3); /* ibcreset asserted 400ns, be sure that's over */
8473	fmask = qib_read_kreg64(dd, kr_act_fmask);
8474	if (!fmask) {
8475		/*
8476		 * require a powercycle before we'll work again, and make
8477		 * sure we get no more interrupts, and don't turn off
8478		 * freeze.
8479		 */
8480		ppd->dd->cspec->stay_in_freeze = 1;
8481		qib_7322_set_intr_state(ppd->dd, 0);
8482		qib_write_kreg(dd, kr_fmask, 0ULL);
8483		qib_dev_err(dd, "HCA unusable until powercycled\n");
8484		return; /* eventually reset */
8485	}
8486
8487	qib_write_kreg(ppd->dd, kr_hwerrclear,
8488	    SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8489
8490	/* don't do the full clear_freeze(), not needed for this */
8491	qib_write_kreg(dd, kr_control, dd->control);
8492	qib_read_kreg32(dd, kr_scratch);
8493	/* take IBC out of reset */
8494	if (ppd->link_speed_supported) {
8495		ppd->cpspec->ibcctrl_a &=
8496			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8497		qib_write_kreg_port(ppd, krp_ibcctrl_a,
8498				    ppd->cpspec->ibcctrl_a);
8499		qib_read_kreg32(dd, kr_scratch);
8500		if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8501			qib_set_ib_7322_lstate(ppd, 0,
8502				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
8503	}
8504}
8505