Lines matching refs: dd (every listed line references the struct qib_devdata pointer dd)
298 * @dd: device
306 static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
309 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
312 if (dd->userbase)
314 ((char __iomem *)dd->userbase +
315 dd->ureg_align * ctxt));
318 (dd->uregbase +
319 (char __iomem *)dd->kregbase +
320 dd->ureg_align * ctxt));
325 * @dd: device
332 static inline void qib_write_ureg(const struct qib_devdata *dd,
337 if (dd->userbase)
339 ((char __iomem *) dd->userbase +
340 dd->ureg_align * ctxt);
343 (dd->uregbase +
344 (char __iomem *) dd->kregbase +
345 dd->ureg_align * ctxt);
347 if (dd->kregbase && (dd->flags & QIB_PRESENT))
351 static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
354 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
356 return readl((u32 __iomem *)&dd->kregbase[regno]);
359 static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
362 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
365 return readq(&dd->kregbase[regno]);
368 static inline void qib_write_kreg(const struct qib_devdata *dd,
371 if (dd->kregbase && (dd->flags & QIB_PRESENT))
372 writeq(value, &dd->kregbase[regno]);
377 * @dd: the qlogic_ib device
382 static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
386 qib_write_kreg(dd, regno + ctxt, value);
389 static inline void write_6120_creg(const struct qib_devdata *dd,
392 if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
393 writeq(value, &dd->cspec->cregbase[regno]);
396 static inline u64 read_6120_creg(const struct qib_devdata *dd, u16 regno)
398 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
400 return readq(&dd->cspec->cregbase[regno]);
403 static inline u32 read_6120_creg32(const struct qib_devdata *dd, u16 regno)
405 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
407 return readl(&dd->cspec->cregbase[regno]);
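The accessors above (qib_read_ureg32, qib_read_kreg32/64, qib_write_kreg and the read/write_6120_creg helpers) all repeat one guard: do nothing unless the register window is mapped and the QIB_PRESENT flag is set, so a resetting or surprise-removed chip is never touched. A minimal user-space sketch of that guarded-access pattern, using hypothetical names (struct dev_regs, DEV_PRESENT) in place of the qib structures and plain pointer dereference in place of readq/writeq:

#include <stdint.h>

#define DEV_PRESENT 0x1u                  /* hypothetical stand-in for QIB_PRESENT */

struct dev_regs {                         /* hypothetical stand-in for qib_devdata */
	volatile uint64_t *kregbase;      /* mapped register window, or NULL       */
	unsigned int flags;
};

/* Guarded 64-bit read: never touch hardware that is unmapped or marked absent. */
static inline uint64_t guarded_read64(const struct dev_regs *dd, unsigned int regno)
{
	if (!dd->kregbase || !(dd->flags & DEV_PRESENT))
		return 0;                 /* benign fallback when the device is absent */
	return dd->kregbase[regno];
}

/* Guarded write: silently dropped when the device is gone (e.g. across a reset). */
static inline void guarded_write64(struct dev_regs *dd, unsigned int regno, uint64_t value)
{
	if (dd->kregbase && (dd->flags & DEV_PRESENT))
		dd->kregbase[regno] = value;
}

The real driver additionally picks between userbase and kregbase + uregbase for the per-context user registers; only the guard itself is shown here.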
670 static void qib_6120_txe_recover(struct qib_devdata *dd)
673 qib_devinfo(dd->pcidev,
678 static void qib_6120_set_intr_state(struct qib_devdata *dd, u32 enable)
681 if (dd->flags & QIB_BADINTR)
683 qib_write_kreg(dd, kr_intmask, ~0ULL);
685 qib_write_kreg(dd, kr_intclear, 0ULL);
687 qib_write_kreg(dd, kr_intmask, 0ULL);
705 static void qib_6120_clear_freeze(struct qib_devdata *dd)
708 qib_write_kreg(dd, kr_errmask, 0ULL);
711 qib_6120_set_intr_state(dd, 0);
713 qib_cancel_sends(dd->pport);
716 qib_write_kreg(dd, kr_control, dd->control);
717 qib_read_kreg32(dd, kr_scratch);
720 qib_force_pio_avail_update(dd);
728 qib_write_kreg(dd, kr_hwerrclear, 0ULL);
729 qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
730 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
731 qib_6120_set_intr_state(dd, 1);
736 * @dd: the qlogic_ib device
745 static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
753 hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
757 qib_dev_err(dd,
768 qib_write_kreg(dd, kr_hwerrclear,
771 hwerrs &= dd->cspec->hwerrmask;
778 qib_devinfo(dd->pcidev,
783 qib_dev_err(dd,
787 ctrl = qib_read_kreg32(dd, kr_control);
788 if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
798 qib_6120_txe_recover(dd);
803 qib_6120_clear_freeze(dd);
816 dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
817 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
823 bitsmsg = dd->cspec->bitsmsgbuf;
829 snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
836 snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
841 dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
842 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
850 dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
851 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
861 qib_dev_err(dd, "%s hardware error\n", msg);
865 if (isfatal && !dd->diag_client) {
866 qib_dev_err(dd,
868 dd->serial);
873 if (dd->freezemsg)
874 snprintf(dd->freezemsg, dd->freezelen,
876 qib_disable_after_error(dd);
886 static int qib_decode_6120_err(struct qib_devdata *dd, char *buf, size_t blen,
962 struct qib_devdata *dd = ppd->dd;
968 sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
969 sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
972 qib_disarm_piobufs_set(dd, sbuf,
973 dd->piobcnt2k + dd->piobcnt4k);
976 static int chk_6120_linkrecovery(struct qib_devdata *dd, u64 ibcs)
980 u32 linkrecov = read_6120_creg32(dd, cr_iblinkerrrecov);
982 if (linkrecov != dd->cspec->lastlinkrecov) {
984 dd->cspec->lastlinkrecov = 0;
985 qib_set_linkstate(dd->pport, QIB_IB_LINKDOWN);
989 dd->cspec->lastlinkrecov =
990 read_6120_creg32(dd, cr_iblinkerrrecov);
994 static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
999 struct qib_pportdata *ppd = dd->pport;
1003 errs &= dd->cspec->errormask;
1004 msg = dd->cspec->emsgbuf;
1008 qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1011 qib_dev_err(dd,
1040 qib_write_kreg(dd, kr_errclear, errs);
1052 qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
1062 u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
1066 if (ibstate != IB_PORT_INIT && dd->cspec->lastlinkrecov)
1067 handle = chk_6120_linkrecovery(dd, ibcs);
1083 qib_dev_err(dd,
1085 dd->flags &= ~QIB_INITTED; /* needs re-init */
1087 *dd->devstatusp |= QIB_STATUS_HWERROR;
1088 *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
1092 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1105 qib_handle_urcv(dd, ~0U);
1117 * @dd: the qlogic_ib device
1125 static void qib_6120_init_hwerrors(struct qib_devdata *dd)
1130 extsval = qib_read_kreg64(dd, kr_extstatus);
1133 qib_dev_err(dd, "MemBIST did not complete!\n");
1137 if (dd->minrev < 2) {
1147 dd->cspec->hwerrmask = val;
1149 qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
1150 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1153 qib_write_kreg(dd, kr_errclear, ~0ULL);
1155 qib_write_kreg(dd, kr_errmask, ~0ULL);
1156 dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
1158 qib_write_kreg(dd, kr_intclear, ~0ULL);
1160 qib_write_kreg(dd, kr_rcvbthqp,
1161 dd->qpn_mask << (QIB_6120_RcvBTHQP_BTHQP_Mask_LSB - 1) |
1171 static void qib_set_6120_armlaunch(struct qib_devdata *dd, u32 enable)
1174 qib_write_kreg(dd, kr_errclear,
1176 dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
1178 dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
1179 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1191 struct qib_devdata *dd = ppd->dd;
1216 qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl | mod_wd);
1218 qib_write_kreg(dd, kr_scratch, 0);
1227 struct qib_devdata *dd = ppd->dd;
1231 dd->control &= ~QLOGIC_IB_C_LINKENABLE;
1232 qib_write_kreg(dd, kr_control, 0ULL);
1234 dd->cspec->ibdeltainprog = 1;
1235 dd->cspec->ibsymsnap = read_6120_creg32(dd, cr_ibsymbolerr);
1236 dd->cspec->iblnkerrsnap = read_6120_creg32(dd, cr_iblinkerrrecov);
1247 dd->cspec->lli_thresh = 0xf;
1248 ibc |= (u64) dd->cspec->lli_thresh << SYM_LSB(IBCCtrl, PhyerrThreshold);
1258 dd->cspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
1261 val = dd->cspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
1263 qib_write_kreg(dd, kr_ibcctrl, val);
1265 val = qib_read_kreg64(dd, kr_serdes_cfg0);
1266 config1 = qib_read_kreg64(dd, kr_serdes_cfg1);
1280 qib_write_kreg(dd, kr_serdes_cfg0, val);
1282 qib_read_kreg64(dd, kr_scratch);
1300 qib_write_kreg(dd, kr_serdes_cfg0, val);
1302 (void) qib_read_kreg64(dd, kr_scratch);
1312 qib_write_kreg(dd, kr_serdes_cfg0, val);
1314 (void) qib_read_kreg64(dd, kr_scratch);
1316 val = qib_read_kreg64(dd, kr_xgxs_cfg);
1326 qib_write_kreg(dd, kr_xgxs_cfg, val);
1328 val = qib_read_kreg64(dd, kr_serdes_cfg0);
1336 qib_write_kreg(dd, kr_serdes_cfg1, config1);
1339 ppd->guid = dd->base_guid;
1346 hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
1349 qib_write_kreg(dd, kr_hwerrclear, hwstat);
1350 qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
1353 dd->control |= QLOGIC_IB_C_LINKENABLE;
1354 dd->control &= ~QLOGIC_IB_C_FREEZEMODE;
1355 qib_write_kreg(dd, kr_control, dd->control);
1367 struct qib_devdata *dd = ppd->dd;
1373 dd->control &= ~QLOGIC_IB_C_LINKENABLE;
1374 qib_write_kreg(dd, kr_control,
1375 dd->control | QLOGIC_IB_C_FREEZEMODE);
1377 if (dd->cspec->ibsymdelta || dd->cspec->iblnkerrdelta ||
1378 dd->cspec->ibdeltainprog) {
1382 diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
1383 qib_write_kreg(dd, kr_hwdiagctrl,
1386 if (dd->cspec->ibsymdelta || dd->cspec->ibdeltainprog) {
1387 val = read_6120_creg32(dd, cr_ibsymbolerr);
1388 if (dd->cspec->ibdeltainprog)
1389 val -= val - dd->cspec->ibsymsnap;
1390 val -= dd->cspec->ibsymdelta;
1391 write_6120_creg(dd, cr_ibsymbolerr, val);
1393 if (dd->cspec->iblnkerrdelta || dd->cspec->ibdeltainprog) {
1394 val = read_6120_creg32(dd, cr_iblinkerrrecov);
1395 if (dd->cspec->ibdeltainprog)
1396 val -= val - dd->cspec->iblnkerrsnap;
1397 val -= dd->cspec->iblnkerrdelta;
1398 write_6120_creg(dd, cr_iblinkerrrecov, val);
1402 qib_write_kreg(dd, kr_hwdiagctrl, diagc);
1405 val = qib_read_kreg64(dd, kr_serdes_cfg0);
1407 qib_write_kreg(dd, kr_serdes_cfg0, val);
1436 struct qib_devdata *dd = ppd->dd;
1442 if (dd->diag_client)
1452 val = qib_read_kreg64(dd, kr_ibcstatus);
1460 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
1461 extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
1468 dd->cspec->extctrl = extctl;
1469 qib_write_kreg(dd, kr_extctrl, extctl);
1470 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
1475 * @dd: the qlogic_ib device
1479 static void qib_6120_setup_cleanup(struct qib_devdata *dd)
1481 qib_free_irq(dd);
1482 kfree(dd->cspec->cntrs);
1483 kfree(dd->cspec->portcntrs);
1484 if (dd->cspec->dummy_hdrq) {
1485 dma_free_coherent(&dd->pcidev->dev,
1486 ALIGN(dd->rcvhdrcnt *
1487 dd->rcvhdrentsize *
1489 dd->cspec->dummy_hdrq,
1490 dd->cspec->dummy_hdrq_phys);
1491 dd->cspec->dummy_hdrq = NULL;
1495 static void qib_wantpiobuf_6120_intr(struct qib_devdata *dd, u32 needint)
1499 spin_lock_irqsave(&dd->sendctrl_lock, flags);
1501 dd->sendctrl |= SYM_MASK(SendCtrl, PIOIntBufAvail);
1503 dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOIntBufAvail);
1504 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
1505 qib_write_kreg(dd, kr_scratch, 0ULL);
1506 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
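qib_wantpiobuf_6120_intr (and sendctrl_6120_mod below) show the driver's shadow-register discipline: a software copy of SendCtrl is modified under sendctrl_lock, the whole copy is written back to the chip, and a dummy write to the scratch register flushes it out. A condensed pthread-based sketch of the same idea, with hypothetical names (struct shadow_dev, REG_SENDCTRL, CTRL_BUFAVAIL_INTR); the mutex must be initialised with pthread_mutex_init before use:

#include <pthread.h>
#include <stdint.h>

enum { REG_SENDCTRL, REG_SCRATCH, NREGS };      /* hypothetical register map */
#define CTRL_BUFAVAIL_INTR (1ull << 3)          /* hypothetical control bit  */

struct shadow_dev {
	pthread_mutex_t lock;           /* plays the role of dd->sendctrl_lock     */
	uint64_t sendctrl;              /* software shadow of the control register */
	volatile uint64_t regs[NREGS];  /* stands in for the mapped chip registers */
};

/* Set or clear one control bit without losing the others. */
static void want_bufavail_intr(struct shadow_dev *dd, int enable)
{
	pthread_mutex_lock(&dd->lock);
	if (enable)
		dd->sendctrl |= CTRL_BUFAVAIL_INTR;
	else
		dd->sendctrl &= ~CTRL_BUFAVAIL_INTR;
	dd->regs[REG_SENDCTRL] = dd->sendctrl;  /* push the full shadow to hardware  */
	dd->regs[REG_SCRATCH] = 0;              /* dummy write flushes posted writes */
	pthread_mutex_unlock(&dd->lock);
}

Keeping a shadow avoids read-modify-write cycles on the hardware register and gives the lock a single source of truth for the current control-bit state.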
1513 static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat)
1516 qib_dev_err(dd, "interrupt with unknown interrupts %Lx set\n",
1523 estat = qib_read_kreg64(dd, kr_errstatus);
1525 qib_devinfo(dd->pcidev,
1528 handle_6120_errors(dd, estat);
1539 gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
1550 dd->cspec->rxfc_unsupvl_errs++;
1552 dd->cspec->overrun_thresh_errs++;
1554 dd->cspec->lli_errs++;
1564 const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
1573 dd->cspec->gpio_mask &= ~(gpiostatus & mask);
1574 qib_write_kreg(dd, kr_gpio_mask,
1575 dd->cspec->gpio_mask);
1579 qib_write_kreg(dd, kr_gpio_clear, (u64) to_clear);
1585 struct qib_devdata *dd = data;
1590 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
1601 istat = qib_read_kreg32(dd, kr_intstatus);
1608 qib_bad_intrstatus(dd);
1614 this_cpu_inc(*dd->int_counter);
1618 unlikely_6120_intr(dd, istat);
1626 qib_write_kreg(dd, kr_intclear, istat);
1639 for (i = 0; i < dd->first_user_ctxt; i++) {
1642 crcs += qib_kreceive(dd->rcd[i],
1643 &dd->cspec->lli_counter,
1649 u32 cntr = dd->cspec->lli_counter;
1653 if (cntr > dd->cspec->lli_thresh) {
1654 dd->cspec->lli_counter = 0;
1655 dd->cspec->lli_errs++;
1657 dd->cspec->lli_counter += cntr;
1666 qib_handle_urcv(dd, ctxtrbits);
1670 if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
1671 qib_ib_piobufavail(dd);
1683 static void qib_setup_6120_interrupt(struct qib_devdata *dd)
1693 if (SYM_FIELD(dd->revision, Revision_R,
1696 dd->cspec->gpio_mask |= GPIO_ERRINTR_MASK;
1697 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
1700 ret = pci_request_irq(dd->pcidev, 0, qib_6120intr, NULL, dd,
1703 qib_dev_err(dd,
1705 pci_irq_vector(dd->pcidev, 0), ret);
1710 * @dd: the qlogic_ib device
1714 static void pe_boardname(struct qib_devdata *dd)
1718 boardid = SYM_FIELD(dd->revision, Revision,
1723 dd->boardname = "InfiniPath_QLE7140";
1726 qib_dev_err(dd, "Unknown 6120 board with ID %u\n", boardid);
1727 dd->boardname = "Unknown_InfiniPath_6120";
1731 if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
1732 qib_dev_err(dd,
1734 dd->majrev, dd->minrev);
1736 snprintf(dd->boardversion, sizeof(dd->boardversion),
1738 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
1739 (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
1740 dd->majrev, dd->minrev,
1741 (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
1749 static int qib_6120_setup_reset(struct qib_devdata *dd)
1757 qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
1760 qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
1763 qib_6120_set_intr_state(dd, 0);
1765 dd->cspec->ibdeltainprog = 0;
1766 dd->cspec->ibsymdelta = 0;
1767 dd->cspec->iblnkerrdelta = 0;
1774 dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
1776 dd->z_int_counter = qib_int_counter(dd);
1777 val = dd->control | QLOGIC_IB_C_RESET;
1778 writeq(val, &dd->kregbase[kr_control]);
1789 qib_pcie_reenable(dd, cmdval, int_line, clinesz);
1795 val = readq(&dd->kregbase[kr_revision]);
1796 if (val == dd->revision) {
1797 dd->flags |= QIB_PRESENT; /* it's back */
1798 ret = qib_reinit_intr(dd);
1806 if (qib_pcie_params(dd, dd->lbus_width, NULL))
1807 qib_dev_err(dd,
1810 qib_6120_init_hwerrors(dd);
1812 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
1814 qib_6120_init_hwerrors(dd);
1821 * @dd: the qlogic_ib device
1831 static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
1839 if (!dd->kregbase)
1842 if (pa != dd->tidinvalid) {
1844 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
1850 qib_dev_err(dd,
1857 pa |= dd->tidtemplate;
1875 tidx = tidptr - dd->egrtidbase;
1877 tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->rcvhdrcnt)
1878 ? &dd->cspec->kernel_tid_lock : &dd->cspec->user_tid_lock;
1880 qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
1882 qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
1888 * @dd: the qlogic_ib device
1898 static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
1903 if (!dd->kregbase)
1906 if (pa != dd->tidinvalid) {
1908 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
1914 qib_dev_err(dd,
1921 pa |= dd->tidtemplate;
1931 * @dd: the qlogic_ib device
1939 static void qib_6120_clear_tids(struct qib_devdata *dd,
1947 if (!dd->kregbase || !rcd)
1952 tidinv = dd->tidinvalid;
1954 ((char __iomem *)(dd->kregbase) +
1955 dd->rcvtidbase +
1956 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
1958 for (i = 0; i < dd->rcvtidcnt; i++)
1960 dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
1964 ((char __iomem *)(dd->kregbase) +
1965 dd->rcvegrbase +
1970 dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
1976 * @dd: the qlogic_ib device
1980 static void qib_6120_tidtemplate(struct qib_devdata *dd)
1982 u32 egrsize = dd->rcvegrbufsize;
1994 dd->tidtemplate = 1U << 29;
1996 dd->tidtemplate = 2U << 29;
1997 dd->tidinvalid = 0;
2026 qib_6120_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
2032 static void qib_6120_config_ctxts(struct qib_devdata *dd)
2034 dd->ctxtcnt = qib_read_kreg32(dd, kr_portcnt);
2036 dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
2037 if (dd->first_user_ctxt > dd->ctxtcnt)
2038 dd->first_user_ctxt = dd->ctxtcnt;
2039 dd->qpn_mask = dd->first_user_ctxt <= 2 ? 2 : 6;
2041 dd->first_user_ctxt = dd->num_pports;
2042 dd->n_krcv_queues = dd->first_user_ctxt;
2049 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
2050 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
2057 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
2061 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
2070 static void alloc_dummy_hdrq(struct qib_devdata *dd)
2072 dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
2073 dd->rcd[0]->rcvhdrq_size,
2074 &dd->cspec->dummy_hdrq_phys,
2076 if (!dd->cspec->dummy_hdrq) {
2077 qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
2079 dd->cspec->dummy_hdrq_phys = 0UL;
2093 struct qib_devdata *dd = ppd->dd;
2097 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2100 dd->rcvctrl |= (1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
2102 dd->rcvctrl &= ~(1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
2104 dd->rcvctrl &= ~(1ULL << IBA6120_R_PKEY_DIS_SHIFT);
2106 dd->rcvctrl |= (1ULL << IBA6120_R_PKEY_DIS_SHIFT);
2108 mask = (1ULL << dd->ctxtcnt) - 1;
2113 dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
2114 if (!(dd->flags & QIB_NODMA_RTAIL))
2115 dd->rcvctrl |= 1ULL << QLOGIC_IB_R_TAILUPD_SHIFT;
2117 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
2118 dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
2119 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
2120 dd->rcd[ctxt]->rcvhdrq_phys);
2122 if (ctxt == 0 && !dd->cspec->dummy_hdrq)
2123 alloc_dummy_hdrq(dd);
2126 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
2128 dd->rcvctrl |= (mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
2130 dd->rcvctrl &= ~(mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
2131 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
2132 if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
2134 val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
2135 dd->rhdrhead_intr_off;
2136 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
2145 val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
2146 qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
2148 val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
2149 dd->rcd[ctxt]->head = val;
2151 if (ctxt < dd->first_user_ctxt)
2152 val |= dd->rhdrhead_intr_off;
2153 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
2166 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
2167 dd->cspec->dummy_hdrq_phys);
2168 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
2169 dd->cspec->dummy_hdrq_phys);
2173 for (i = 0; i < dd->cfgctxts; i++) {
2174 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
2175 i, dd->cspec->dummy_hdrq_phys);
2176 qib_write_kreg_ctxt(dd, kr_rcvhdraddr,
2177 i, dd->cspec->dummy_hdrq_phys);
2181 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2194 struct qib_devdata *dd = ppd->dd;
2198 spin_lock_irqsave(&dd->sendctrl_lock, flags);
2202 dd->sendctrl = 0;
2204 dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOEnable);
2206 dd->sendctrl |= SYM_MASK(SendCtrl, PIOEnable);
2208 dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
2210 dd->sendctrl |= SYM_MASK(SendCtrl, PIOBufAvailUpd);
2215 tmp_dd_sendctrl = dd->sendctrl;
2220 last = dd->piobcnt2k + dd->piobcnt4k;
2225 qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl |
2227 qib_write_kreg(dd, kr_scratch, 0);
2231 tmp_dd_sendctrl = dd->sendctrl;
2242 qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
2243 qib_write_kreg(dd, kr_scratch, 0);
2246 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2247 qib_write_kreg(dd, kr_scratch, 0);
2250 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2260 v = qib_read_kreg32(dd, kr_scratch);
2261 qib_write_kreg(dd, kr_scratch, v);
2262 v = qib_read_kreg32(dd, kr_scratch);
2263 qib_write_kreg(dd, kr_scratch, v);
2264 qib_read_kreg32(dd, kr_scratch);
2276 struct qib_devdata *dd = ppd->dd;
2317 qib_devinfo(ppd->dd->pcidev,
2325 ret = dd->cspec->lli_errs;
2327 ret = dd->cspec->overrun_thresh_errs;
2332 for (i = 0; i < dd->first_user_ctxt; i++)
2333 ret += read_6120_creg32(dd, cr_portovfl + i);
2335 ret = dd->cspec->pma_sample_status;
2345 ret = read_6120_creg(dd, creg);
2347 ret = read_6120_creg32(dd, creg);
2349 if (dd->cspec->ibdeltainprog)
2350 ret -= ret - dd->cspec->ibsymsnap;
2351 ret -= dd->cspec->ibsymdelta;
2353 if (dd->cspec->ibdeltainprog)
2354 ret -= ret - dd->cspec->iblnkerrsnap;
2355 ret -= dd->cspec->iblnkerrdelta;
2358 ret += dd->cspec->rxfc_unsupvl_errs;
2471 static void init_6120_cntrnames(struct qib_devdata *dd)
2476 for (i = 0, s = (char *)cntr6120names; s && j <= dd->cfgctxts;
2485 dd->cspec->ncntrs = i;
2488 dd->cspec->cntrnamelen = sizeof(cntr6120names) - 1;
2490 dd->cspec->cntrnamelen = 1 + s - cntr6120names;
2491 dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
2496 dd->cspec->nportcntrs = i - 1;
2497 dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
2498 dd->cspec->portcntrs = kmalloc_array(dd->cspec->nportcntrs,
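init_6120_cntrnames derives the counter count by walking a single newline-separated name blob, then allocates a parallel u64 array of the same length with kmalloc_array. A small user-space sketch of that bookkeeping, assuming a hypothetical name blob (the real cntr6120names contents are not reproduced here):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical name blob in the same "one name per line" form. */
static const char cntr_names[] =
	"PktSendCnt\nPktRecvCnt\nWordSendCnt\nWordRecvCnt\n";

struct cntr_table {
	size_t ncntrs;       /* number of names found                   */
	size_t namelen;      /* bytes of name text exposed to readers   */
	uint64_t *values;    /* parallel value array, one slot per name */
};

static int init_cntr_table(struct cntr_table *t)
{
	const char *s;

	t->ncntrs = 0;
	for (s = cntr_names; (s = strchr(s, '\n')) != NULL; s++)
		t->ncntrs++;                        /* one counter per newline  */
	t->namelen = sizeof(cntr_names) - 1;        /* exclude the trailing NUL */
	t->values = calloc(t->ncntrs, sizeof(*t->values));
	return t->values ? 0 : -1;
}

The driver does this walk twice, once for device counters and once for per-port counters, and keeps the name length around so the read paths (qib_read_6120cntrs, qib_read_6120portcntrs) can hand the text back to userspace.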
2503 static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
2509 ret = dd->cspec->cntrnamelen;
2515 u64 *cntr = dd->cspec->cntrs;
2518 ret = dd->cspec->ncntrs * sizeof(u64);
2529 for (i = 0; i < dd->cspec->ncntrs; i++)
2530 *cntr++ = read_6120_creg32(dd, cntr6120indices[i]);
2536 static u32 qib_read_6120portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
2542 ret = dd->cspec->portcntrnamelen;
2548 u64 *cntr = dd->cspec->portcntrs;
2549 struct qib_pportdata *ppd = &dd->pport[port];
2552 ret = dd->cspec->nportcntrs * sizeof(u64);
2559 for (i = 0; i < dd->cspec->nportcntrs; i++) {
2565 *cntr++ = read_6120_creg32(dd,
2573 static void qib_chk_6120_errormask(struct qib_devdata *dd)
2580 if (!dd->cspec->errormask || !(dd->flags & QIB_INITTED))
2583 errormask = qib_read_kreg64(dd, kr_errmask);
2585 if (errormask == dd->cspec->errormask)
2589 hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2590 ctrl = qib_read_kreg32(dd, kr_control);
2592 qib_write_kreg(dd, kr_errmask,
2593 dd->cspec->errormask);
2595 if ((hwerrs & dd->cspec->hwerrmask) ||
2597 qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2598 qib_write_kreg(dd, kr_errclear, 0ULL);
2600 qib_write_kreg(dd, kr_intclear, 0ULL);
2601 qib_devinfo(dd->pcidev,
2603 fixed, errormask, (unsigned long)dd->cspec->errormask,
2618 struct qib_devdata *dd = from_timer(dd, t, stats_timer);
2619 struct qib_pportdata *ppd = dd->pport;
2627 if (!(dd->flags & QIB_INITTED) || dd->diag_client)
2638 spin_lock_irqsave(&dd->eep_st_lock, flags);
2639 traffic_wds -= dd->traffic_wds;
2640 dd->traffic_wds += traffic_wds;
2641 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
2643 qib_chk_6120_errormask(dd);
2645 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
2649 static int qib_6120_nointr_fallback(struct qib_devdata *dd)
2663 struct qib_devdata *dd = ppd->dd;
2665 prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
2668 qib_write_kreg(dd, kr_control,
2669 dd->control & ~QLOGIC_IB_C_LINKENABLE);
2670 qib_write_kreg(dd, kr_xgxs_cfg, val);
2671 qib_read_kreg32(dd, kr_scratch);
2672 qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
2673 qib_write_kreg(dd, kr_control, dd->control);
2710 ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
2715 ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
2721 ret = (ppd->dd->cspec->ibcctrl &
2746 struct qib_devdata *dd = ppd->dd;
2761 val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
2764 dd->cspec->ibcctrl &=
2766 dd->cspec->ibcctrl |= (u64) val <<
2768 qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
2769 qib_write_kreg(dd, kr_scratch, 0);
2774 val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
2777 dd->cspec->ibcctrl &=
2779 dd->cspec->ibcctrl |= (u64) val <<
2781 qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
2782 qib_write_kreg(dd, kr_scratch, 0);
2790 qib_write_kreg(dd, kr_partitionkey, val64);
2796 dd->cspec->ibcctrl &=
2799 dd->cspec->ibcctrl |=
2801 qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
2802 qib_write_kreg(dd, kr_scratch, 0);
2814 dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
2815 dd->cspec->ibcctrl |= (u64)val <<
2817 qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
2818 qib_write_kreg(dd, kr_scratch, 0);
2825 if (!dd->cspec->ibdeltainprog) {
2826 dd->cspec->ibdeltainprog = 1;
2827 dd->cspec->ibsymsnap =
2828 read_6120_creg32(dd, cr_ibsymbolerr);
2829 dd->cspec->iblnkerrsnap =
2830 read_6120_creg32(dd, cr_iblinkerrrecov);
2844 qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
2866 qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
2889 ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
2890 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
2891 ppd->dd->unit, ppd->port);
2893 ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
2894 qib_devinfo(ppd->dd->pcidev,
2896 ppd->dd->unit, ppd->port);
2900 qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->dd->cspec->ibcctrl);
2901 qib_write_kreg(ppd->dd, kr_scratch, 0);
2941 struct qib_chip_specific *cs = ppd->dd->cspec;
3001 if (ppd->dd->cspec->ibdeltainprog) {
3002 ppd->dd->cspec->ibdeltainprog = 0;
3003 ppd->dd->cspec->ibsymdelta +=
3004 read_6120_creg32(ppd->dd, cr_ibsymbolerr) -
3005 ppd->dd->cspec->ibsymsnap;
3006 ppd->dd->cspec->iblnkerrdelta +=
3007 read_6120_creg32(ppd->dd, cr_iblinkerrrecov) -
3008 ppd->dd->cspec->iblnkerrsnap;
3012 ppd->dd->cspec->lli_counter = 0;
3013 if (!ppd->dd->cspec->ibdeltainprog) {
3014 ppd->dd->cspec->ibdeltainprog = 1;
3015 ppd->dd->cspec->ibsymsnap =
3016 read_6120_creg32(ppd->dd, cr_ibsymbolerr);
3017 ppd->dd->cspec->iblnkerrsnap =
3018 read_6120_creg32(ppd->dd, cr_iblinkerrrecov);
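The ibsymsnap/ibsymdelta handling that recurs in qib_6120_bringup_serdes, qib_portcntr_6120, qib_6120_quiet_serdes and qib_6120_ib_updown is a snapshot-and-delta scheme: snapshot the raw chip counter when link training starts, fold the increase seen during training into a running delta once the link is up, and subtract that delta (plus any in-progress portion) whenever the counter is reported, so training-time symbol and link-recovery errors stay out of the statistics. A condensed sketch of that bookkeeping, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

struct masked_cntr {
	bool     deltainprog;   /* link training currently in progress       */
	uint64_t snap;          /* raw chip value when training started      */
	uint64_t delta;         /* total increase hidden from earlier rounds */
};

/* Called when link training (serdes bringup) begins. */
static void training_start(struct masked_cntr *c, uint64_t raw)
{
	if (!c->deltainprog) {
		c->deltainprog = true;
		c->snap = raw;
	}
}

/* Called once the link reaches a stable state. */
static void training_done(struct masked_cntr *c, uint64_t raw)
{
	if (c->deltainprog) {
		c->deltainprog = false;
		c->delta += raw - c->snap;
	}
}

/* Reported value excludes anything that accrued during training. */
static uint64_t cntr_report(const struct masked_cntr *c, uint64_t raw)
{
	uint64_t val = raw;

	if (c->deltainprog)
		val -= raw - c->snap;   /* in-progress round          */
	return val - c->delta;          /* rounds already closed out  */
}

quiet_serdes writes the adjusted value back into the chip counter (write_6120_creg) instead of adjusting at read time, but the arithmetic is the same.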
3034 static int gpio_6120_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
3043 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
3044 dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
3045 dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
3046 new_out = (dd->cspec->gpio_out & ~mask) | out;
3048 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
3049 qib_write_kreg(dd, kr_gpio_out, new_out);
3050 dd->cspec->gpio_out = new_out;
3051 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
3061 read_val = qib_read_kreg64(dd, kr_extstatus);
3070 static void get_6120_chip_params(struct qib_devdata *dd)
3076 dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
3078 dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
3079 dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
3080 dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
3081 dd->palign = qib_read_kreg32(dd, kr_palign);
3082 dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
3083 dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
3085 dd->rcvhdrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3087 val = qib_read_kreg64(dd, kr_sendpiosize);
3088 dd->piosize2k = val & ~0U;
3089 dd->piosize4k = val >> 32;
3094 dd->pport->ibmtu = (u32)mtu;
3096 val = qib_read_kreg64(dd, kr_sendpiobufcnt);
3097 dd->piobcnt2k = val & ~0U;
3098 dd->piobcnt4k = val >> 32;
3099 dd->last_pio = dd->piobcnt4k + dd->piobcnt2k - 1;
3101 dd->pio2kbase = (u32 __iomem *)
3102 (((char __iomem *)dd->kregbase) + dd->pio2k_bufbase);
3103 if (dd->piobcnt4k) {
3104 dd->pio4kbase = (u32 __iomem *)
3105 (((char __iomem *) dd->kregbase) +
3106 (dd->piobufbase >> 32));
3112 dd->align4k = ALIGN(dd->piosize4k, dd->palign);
3115 piobufs = dd->piobcnt4k + dd->piobcnt2k;
3117 dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
3126 static void set_6120_baseaddrs(struct qib_devdata *dd)
3130 cregbase = qib_read_kreg32(dd, kr_counterregbase);
3131 dd->cspec->cregbase = (u64 __iomem *)
3132 ((char __iomem *) dd->kregbase + cregbase);
3134 dd->egrtidbase = (u64 __iomem *)
3135 ((char __iomem *) dd->kregbase + dd->rcvegrbase);
3143 static int qib_late_6120_initreg(struct qib_devdata *dd)
3148 qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
3149 qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
3150 qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
3151 qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
3152 val = qib_read_kreg64(dd, kr_sendpioavailaddr);
3153 if (val != dd->pioavailregs_phys) {
3154 qib_dev_err(dd,
3156 (unsigned long) dd->pioavailregs_phys,
3163 static int init_6120_variables(struct qib_devdata *dd)
3169 ppd = (struct qib_pportdata *)(dd + 1);
3170 dd->pport = ppd;
3171 dd->num_pports = 1;
3173 dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports);
3174 dd->cspec->ppd = ppd;
3177 spin_lock_init(&dd->cspec->kernel_tid_lock);
3178 spin_lock_init(&dd->cspec->user_tid_lock);
3179 spin_lock_init(&dd->cspec->rcvmod_lock);
3180 spin_lock_init(&dd->cspec->gpio_lock);
3183 dd->revision = readq(&dd->kregbase[kr_revision]);
3185 if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
3186 qib_dev_err(dd,
3191 dd->flags |= QIB_PRESENT; /* now register routines work */
3193 dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
3195 dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
3198 get_6120_chip_params(dd);
3199 pe_boardname(dd); /* fill in boardname */
3205 dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
3206 dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
3207 dd->twsi_eeprom_dev = QIB_TWSI_NO_DEV;
3210 dd->flags |= QIB_PIO_FLUSH_WC;
3212 ret = qib_init_pportdata(ppd, dd, 0, 1);
3225 dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
3226 dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
3227 dd->rhf_offset = 0;
3231 dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
3232 dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
3234 qib_6120_tidtemplate(dd);
3241 dd->rhdrhead_intr_off = 1ULL << 32;
3244 timer_setup(&dd->stats_timer, qib_get_6120_faststats, 0);
3245 timer_setup(&dd->cspec->pma_timer, pma_6120_timer, 0);
3247 dd->ureg_align = qib_read_kreg32(dd, kr_palign);
3249 dd->piosize2kmax_dwords = dd->piosize2k >> 2;
3250 qib_6120_config_ctxts(dd);
3251 qib_set_ctxtcnt(dd);
3253 ret = init_chip_wc_pat(dd, 0);
3256 set_6120_baseaddrs(dd); /* set chip access pointers now */
3264 ret = qib_create_ctxts(dd);
3265 init_6120_cntrnames(dd);
3268 sbufs = dd->piobcnt4k ? dd->piobcnt4k : 16;
3270 dd->lastctxt_piobuf = dd->piobcnt2k + dd->piobcnt4k - sbufs;
3271 dd->pbufsctxt = dd->lastctxt_piobuf /
3272 (dd->cfgctxts - dd->first_user_ctxt);
3298 u32 lbuf = ppd->dd->piobcnt2k + ppd->dd->piobcnt4k - 1;
3304 sendctrl_6120_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
3305 qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
3306 buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
3312 ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
3313 qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
3314 buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
3323 struct qib_devdata *dd = ppd->dd;
3331 if ((plen + 1) > dd->piosize2kmax_dwords)
3332 first = dd->piobcnt2k;
3336 last = dd->piobcnt2k + dd->piobcnt4k - 1;
3337 buf = qib_getsendbuf_range(dd, pbufnum, first, last);
3379 static void qib_6120_initvl15_bufs(struct qib_devdata *dd)
3385 rcd->rcvegrcnt = rcd->dd->rcvhdrcnt;
3389 static void qib_6120_txchk_change(struct qib_devdata *dd, u32 start,
3394 static void writescratch(struct qib_devdata *dd, u32 val)
3396 (void) qib_write_kreg(dd, kr_scratch, val);
3399 static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum)
3405 static int qib_6120_notify_dca(struct qib_devdata *dd, unsigned long event)
3412 static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen)
3431 struct qib_devdata *dd;
3434 dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) +
3436 if (IS_ERR(dd))
3439 dd->f_bringup_serdes = qib_6120_bringup_serdes;
3440 dd->f_cleanup = qib_6120_setup_cleanup;
3441 dd->f_clear_tids = qib_6120_clear_tids;
3442 dd->f_free_irq = qib_free_irq;
3443 dd->f_get_base_info = qib_6120_get_base_info;
3444 dd->f_get_msgheader = qib_6120_get_msgheader;
3445 dd->f_getsendbuf = qib_6120_getsendbuf;
3446 dd->f_gpio_mod = gpio_6120_mod;
3447 dd->f_eeprom_wen = qib_6120_eeprom_wen;
3448 dd->f_hdrqempty = qib_6120_hdrqempty;
3449 dd->f_ib_updown = qib_6120_ib_updown;
3450 dd->f_init_ctxt = qib_6120_init_ctxt;
3451 dd->f_initvl15_bufs = qib_6120_initvl15_bufs;
3452 dd->f_intr_fallback = qib_6120_nointr_fallback;
3453 dd->f_late_initreg = qib_late_6120_initreg;
3454 dd->f_setpbc_control = qib_6120_setpbc_control;
3455 dd->f_portcntr = qib_portcntr_6120;
3456 dd->f_put_tid = (dd->minrev >= 2) ?
3459 dd->f_quiet_serdes = qib_6120_quiet_serdes;
3460 dd->f_rcvctrl = rcvctrl_6120_mod;
3461 dd->f_read_cntrs = qib_read_6120cntrs;
3462 dd->f_read_portcntrs = qib_read_6120portcntrs;
3463 dd->f_reset = qib_6120_setup_reset;
3464 dd->f_init_sdma_regs = init_sdma_6120_regs;
3465 dd->f_sdma_busy = qib_sdma_6120_busy;
3466 dd->f_sdma_gethead = qib_sdma_6120_gethead;
3467 dd->f_sdma_sendctrl = qib_6120_sdma_sendctrl;
3468 dd->f_sdma_set_desc_cnt = qib_sdma_set_6120_desc_cnt;
3469 dd->f_sdma_update_tail = qib_sdma_update_6120_tail;
3470 dd->f_sendctrl = sendctrl_6120_mod;
3471 dd->f_set_armlaunch = qib_set_6120_armlaunch;
3472 dd->f_set_cntr_sample = qib_set_cntr_6120_sample;
3473 dd->f_iblink_state = qib_6120_iblink_state;
3474 dd->f_ibphys_portstate = qib_6120_phys_portstate;
3475 dd->f_get_ib_cfg = qib_6120_get_ib_cfg;
3476 dd->f_set_ib_cfg = qib_6120_set_ib_cfg;
3477 dd->f_set_ib_loopback = qib_6120_set_loopback;
3478 dd->f_set_intr_state = qib_6120_set_intr_state;
3479 dd->f_setextled = qib_6120_setup_setextled;
3480 dd->f_txchk_change = qib_6120_txchk_change;
3481 dd->f_update_usrhead = qib_update_6120_usrhead;
3482 dd->f_wantpiobuf_intr = qib_wantpiobuf_6120_intr;
3483 dd->f_xgxs_reset = qib_6120_xgxs_reset;
3484 dd->f_writescratch = writescratch;
3485 dd->f_tempsense_rd = qib_6120_tempsense_rd;
3487 dd->f_notify_dca = qib_6120_notify_dca;
3490 * Do remaining pcie setup and save pcie values in dd.
3496 ret = qib_pcie_ddinit(dd, pdev, ent);
3501 ret = init_6120_variables(dd);
3508 if (qib_pcie_params(dd, 8, NULL))
3509 qib_dev_err(dd,
3512 qib_write_kreg(dd, kr_hwdiagctrl, 0);
3514 if (qib_read_kreg64(dd, kr_hwerrstatus) &
3516 qib_write_kreg(dd, kr_hwerrclear,
3520 qib_setup_6120_interrupt(dd);
3522 qib_6120_init_hwerrors(dd);
3527 qib_pcie_ddcleanup(dd);
3529 qib_free_devdata(dd);
3530 dd = ERR_PTR(ret);
3532 return dd;
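The long run of dd->f_* assignments near the end is the chip-abstraction layer: the 6120-specific probe fills per-device function pointers so the common qib code never needs to know which chip it is driving. The kernel driver stores them as individual f_* members; a stripped-down sketch of the same idea using a hypothetical ops structure (struct chip_ops, struct devdata) looks like this:

#include <stdint.h>

struct devdata;                               /* forward declaration */

/* Hypothetical per-chip operations, mirroring a few of the dd->f_* pointers above. */
struct chip_ops {
	int      (*reset)(struct devdata *dd);
	void     (*set_intr_state)(struct devdata *dd, uint32_t enable);
	uint64_t (*portcntr)(struct devdata *dd, uint32_t which);
};

struct devdata {
	const struct chip_ops *ops;           /* filled in by the chip-specific probe */
	/* ... chip-agnostic state ... */
};

/* 6120-flavoured implementations (bodies elided for brevity). */
static int chip6120_reset(struct devdata *dd) { (void)dd; return 0; }
static void chip6120_set_intr_state(struct devdata *dd, uint32_t enable)
{ (void)dd; (void)enable; }
static uint64_t chip6120_portcntr(struct devdata *dd, uint32_t which)
{ (void)dd; (void)which; return 0; }

static const struct chip_ops chip6120_ops = {
	.reset          = chip6120_reset,
	.set_intr_state = chip6120_set_intr_state,
	.portcntr       = chip6120_portcntr,
};

static void bind_chip6120(struct devdata *dd)
{
	dd->ops = &chip6120_ops;   /* common code then calls dd->ops->reset(dd), etc. */
}

Grouping the pointers into one const ops table is a common alternative layout; the per-member form used by the driver lets individual entries vary per device instance, as with f_put_tid being chosen from dd->minrev above.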