Lines Matching refs:ppd

63 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
74 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
626 struct qib_pportdata *ppd;
848 static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
851 if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
853 return readq(&ppd->cpspec->kpregbase[regno]);
856 static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
859 if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
860 (ppd->dd->flags & QIB_PRESENT))
861 writeq(value, &ppd->cpspec->kpregbase[regno]);
896 static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
899 if (ppd->cpspec && ppd->cpspec->cpregbase &&
900 (ppd->dd->flags & QIB_PRESENT))
901 writeq(value, &ppd->cpspec->cpregbase[regno]);
904 static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
907 if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
908 !(ppd->dd->flags & QIB_PRESENT))
910 return readq(&ppd->cpspec->cpregbase[regno]);
913 static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
916 if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
917 !(ppd->dd->flags & QIB_PRESENT))
919 return readl(&ppd->cpspec->cpregbase[regno]);
1317 static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1319 struct qib_devdata *dd = ppd->dd;
1394 static void flush_fifo(struct qib_pportdata *ppd)
1396 struct qib_devdata *dd = ppd->dd;
1420 (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1422 piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1441 static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1443 struct qib_devdata *dd = ppd->dd;
1475 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1476 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1480 ppd->p_sendctrl |= set_sendctrl;
1481 ppd->p_sendctrl &= ~clr_sendctrl;
1484 qib_write_kreg_port(ppd, krp_sendctrl,
1485 ppd->p_sendctrl |
1488 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1492 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1493 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1499 if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1500 flush_fifo(ppd);
1503 static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1505 __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1508 static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1515 qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1516 qib_write_kreg_port(ppd, krp_senddmalengen,
1517 ppd->sdma_descq_cnt |
1524 static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1528 ppd->sdma_descq_tail = tail;
1529 qib_write_kreg_port(ppd, krp_senddmatail, tail);
1535 static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1543 sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1545 qib_sdma_7322_setlengen(ppd);
1546 qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1547 ppd->sdma_head_dma[0] = 0;
1548 qib_7322_sdma_sendctrl(ppd,
1549 ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1564 static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1567 struct qib_devdata *dd = ppd->dd;
1570 err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
1575 ppd->port);
1577 spin_lock_irqsave(&ppd->sdma_lock, flags);
1581 qib_dev_porterr(dd, ppd->port,
1583 qib_sdma_state_names[ppd->sdma_state.current_state],
1584 errs, ppd->cpspec->sdmamsgbuf);
1585 dump_sdma_7322_state(ppd);
1588 switch (ppd->sdma_state.current_state) {
1594 __qib_sdma_process_event(ppd,
1606 __qib_sdma_process_event(ppd,
1612 __qib_sdma_process_event(ppd,
1617 __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1618 __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1622 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1725 struct qib_pportdata *ppd = cp->ppd;
1727 ppd->cpspec->chase_timer.expires = 0;
1728 qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1732 static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
1735 ppd->cpspec->chase_end = 0;
1740 qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1742 ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1743 add_timer(&ppd->cpspec->chase_timer);
1746 static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1765 if (ppd->cpspec->chase_end &&
1766 time_after(tnow, ppd->cpspec->chase_end))
1767 disable_chase(ppd, tnow, ibclt);
1768 else if (!ppd->cpspec->chase_end)
1769 ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1772 ppd->cpspec->chase_end = 0;
1780 force_h1(ppd);
1781 ppd->cpspec->qdr_reforce = 1;
1782 if (!ppd->dd->cspec->r1)
1783 serdes_7322_los_enable(ppd, 0);
1784 } else if (ppd->cpspec->qdr_reforce &&
1789 force_h1(ppd);
1791 if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1792 ppd->link_speed_enabled == QIB_IB_QDR &&
1797 adj_tx_serdes(ppd);
1801 u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1803 if (!ppd->dd->cspec->r1 &&
1811 serdes_7322_los_enable(ppd, 1);
1812 if (!ppd->cpspec->qdr_dfe_on &&
1814 ppd->cpspec->qdr_dfe_on = 1;
1815 ppd->cpspec->qdr_dfe_time = 0;
1817 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1818 ppd->dd->cspec->r1 ?
1823 ppd->dd->unit, ppd->port, ibclt);
1835 static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1839 struct qib_devdata *dd = ppd->dd;
1844 check_7322_rxe_status(ppd);
1846 errs = qib_read_kreg_port(ppd, krp_errstatus);
1850 ppd->port);
1856 msg = ppd->cpspec->epmsgbuf;
1860 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1863 snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
1865 qib_dev_porterr(dd, ppd->port,
1875 symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1876 qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1877 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
1885 !(ppd->lflags & QIBL_LINKACTIVE)) {
1893 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1899 qib_disarm_7322_senderrbufs(ppd);
1901 !(ppd->lflags & QIBL_LINKACTIVE)) {
1909 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
1915 qib_write_kreg_port(ppd, krp_errclear, errs);
1929 sdma_7322_p_errors(ppd, errs);
1935 ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1938 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1939 handle_serdes_issues(ppd, ibcs);
1940 if (!(ppd->cpspec->ibcctrl_a &
1947 ppd->cpspec->ibcctrl_a |=
1949 qib_write_kreg_port(ppd, krp_ibcctrl_a,
1950 ppd->cpspec->ibcctrl_a);
1954 ppd->link_width_active =
1957 ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1962 if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1964 qib_set_ib_7322_lstate(ppd, 0,
1979 qib_handle_e_ibstatuschanged(ppd, ibcs);
1982 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1984 if (ppd->state_wanted & ppd->lflags)
1985 wake_up_interruptible(&ppd->state_wait);
2147 struct qib_pportdata *ppd = dd->pport;
2149 for (; pidx < dd->num_pports; ++pidx, ppd++) {
2158 spin_lock_irqsave(&ppd->sdma_lock, flags);
2159 dump_sdma_7322_state(ppd);
2160 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
2237 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2241 struct qib_devdata *dd = ppd->dd;
2252 qib_7322_mini_pcs_reset(ppd);
2253 spin_lock_irqsave(&ppd->lflags_lock, flags);
2254 ppd->lflags |= QIBL_IB_LINK_DISABLED;
2255 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2262 spin_lock_irqsave(&ppd->lflags_lock, flags);
2263 ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2264 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2269 ppd->cpspec->ibcctrl_a &=
2276 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2294 static void set_vls(struct qib_pportdata *ppd)
2297 struct qib_devdata *dd = ppd->dd;
2300 numvls = qib_num_vls(ppd->vls_operational);
2312 qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2315 qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2317 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2319 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2322 val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2324 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2327 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2330 val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2331 val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2334 ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2337 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2345 static int serdes_7322_init(struct qib_pportdata *ppd);
2349 * @ppd: physical port on the qlogic_ib device
2351 static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2353 struct qib_devdata *dd = ppd->dd;
2363 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2364 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2368 qib_write_kreg_port(ppd, krp_tx_deemph_override,
2373 ppd->cpspec->ibdeltainprog = 1;
2374 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2376 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2396 ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2398 ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2404 qib_7322_mini_pcs_reset(ppd);
2406 if (!ppd->cpspec->ibcctrl_b) {
2407 unsigned lse = ppd->link_speed_enabled;
2413 ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2415 ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2421 ppd->cpspec->ibcctrl_b |=
2426 ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2432 if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2434 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2436 ppd->cpspec->ibcctrl_b |=
2437 ppd->link_width_enabled == IB_WIDTH_4X ?
2442 ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2445 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2448 val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2451 qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2453 serdes_7322_init(ppd);
2455 guid = be64_to_cpu(ppd->guid);
2458 guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2459 ppd->guid = cpu_to_be64(guid);
2462 qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2467 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2468 set_vls(ppd);
2471 val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2473 qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2476 ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2480 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2481 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2485 val = qib_read_kreg_port(ppd, krp_errmask);
2486 qib_write_kreg_port(ppd, krp_errmask,
2495 * @ppd: the qlogic_ib device
2498 static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2503 qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2505 spin_lock_irqsave(&ppd->lflags_lock, flags);
2506 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2507 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2508 wake_up(&ppd->cpspec->autoneg_wait);
2509 cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2510 if (ppd->dd->cspec->r1)
2511 cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2513 ppd->cpspec->chase_end = 0;
2514 if (ppd->cpspec->chase_timer.function) /* if initted */
2515 del_timer_sync(&ppd->cpspec->chase_timer);
2524 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2525 qib_7322_mini_pcs_reset(ppd);
2531 if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2532 ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2533 struct qib_devdata *dd = ppd->dd;
2541 if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2542 val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2543 if (ppd->cpspec->ibdeltainprog)
2544 val -= val - ppd->cpspec->ibsymsnap;
2545 val -= ppd->cpspec->ibsymdelta;
2546 write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2548 if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2549 val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2550 if (ppd->cpspec->ibdeltainprog)
2551 val -= val - ppd->cpspec->iblnkerrsnap;
2552 val -= ppd->cpspec->iblnkerrdelta;
2553 write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2555 if (ppd->cpspec->iblnkdowndelta) {
2556 val = read_7322_creg32_port(ppd, crp_iblinkdown);
2557 val += ppd->cpspec->iblnkdowndelta;
2558 write_7322_creg_port(ppd, crp_iblinkdown, val);
2572 * @ppd: physical port on the qlogic_ib device
2592 static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2594 struct qib_devdata *dd = ppd->dd;
2607 if (ppd->led_override) {
2608 grn = (ppd->led_override & QIB_LED_PHYS);
2609 yel = (ppd->led_override & QIB_LED_LOG);
2611 val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2621 extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2624 extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2634 extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2640 qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2695 static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
2697 struct qib_devdata *dd = ppd->dd;
2699 unsigned pidx = ppd->port - 1;
2705 cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2710 (ppd->hw_pidx ?
2714 "sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
2718 cspec->dca_ctrl |= ppd->hw_pidx ?
2776 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2778 qib_update_sdma_dca(ppd, cpu);
2793 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2795 dd = ppd->dd;
2956 struct qib_pportdata *ppd;
2963 ppd = dd->pport + pidx;
2964 mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2968 qd = &ppd->cpspec->qsfp_data;
3007 handle_7322_p_errors(dd->rcd[0]->ppd);
3009 handle_7322_p_errors(dd->rcd[1]->ppd);
3197 struct qib_pportdata *ppd = data;
3198 struct qib_devdata *dd = ppd->dd;
3212 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3214 qib_sdma_intr(ppd);
3224 struct qib_pportdata *ppd = data;
3225 struct qib_devdata *dd = ppd->dd;
3239 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3241 qib_sdma_intr(ppd);
3251 struct qib_pportdata *ppd = data;
3252 struct qib_devdata *dd = ppd->dd;
3266 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3269 qib_sdma_intr(ppd);
3279 struct qib_pportdata *ppd = data;
3280 struct qib_devdata *dd = ppd->dd;
3294 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3297 qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3723 struct qib_pportdata *ppd = &dd->pport[i];
3725 spin_lock_irqsave(&ppd->lflags_lock, flags);
3726 ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3727 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3728 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3939 static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3948 ret = ppd->link_width_enabled;
3952 ret = ppd->link_width_active;
3956 ret = ppd->link_speed_enabled;
3960 ret = ppd->link_speed_active;
3974 ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
3979 ret = ppd->vls_operational;
3991 ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3996 ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4002 ret = (ppd->cpspec->ibcctrl_a &
4017 if (ppd->link_speed_active == QIB_IB_QDR)
4019 else if (ppd->link_speed_active == QIB_IB_DDR)
4029 ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
4042 static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
4044 struct qib_devdata *dd = ppd->dd;
4065 qib_write_kreg_port(ppd, krp_sendslid,
4067 qib_write_kreg_port(ppd, krp_sendslidmask,
4072 ppd->link_width_enabled = val;
4092 ppd->link_speed_enabled = val;
4100 spin_lock_irqsave(&ppd->lflags_lock, flags);
4101 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
4102 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4120 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4123 ppd->cpspec->ibcctrl_a &=
4125 ppd->cpspec->ibcctrl_a |= (u64) val <<
4127 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4128 ppd->cpspec->ibcctrl_a);
4134 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4137 ppd->cpspec->ibcctrl_a &=
4139 ppd->cpspec->ibcctrl_a |= (u64) val <<
4141 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4142 ppd->cpspec->ibcctrl_a);
4148 maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
4149 ((u64) ppd->pkeys[2] << 32) |
4150 ((u64) ppd->pkeys[3] << 48);
4151 qib_write_kreg_port(ppd, krp_partitionkey, maskr);
4157 ppd->cpspec->ibcctrl_a &=
4160 ppd->cpspec->ibcctrl_a |=
4162 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4174 val = (ppd->ibmaxlen >> 2) + 1;
4175 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
4176 ppd->cpspec->ibcctrl_a |= (u64)val <<
4178 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4179 ppd->cpspec->ibcctrl_a);
4187 ppd->cpspec->ibmalfusesnap = 1;
4188 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
4190 if (!ppd->cpspec->ibdeltainprog &&
4192 ppd->cpspec->ibdeltainprog = 1;
4193 ppd->cpspec->ibsymsnap =
4194 read_7322_creg32_port(ppd,
4196 ppd->cpspec->iblnkerrsnap =
4197 read_7322_creg32_port(ppd,
4204 if (ppd->cpspec->ibmalfusesnap) {
4205 ppd->cpspec->ibmalfusesnap = 0;
4206 ppd->cpspec->ibmalfdelta +=
4207 read_7322_creg32_port(ppd,
4209 ppd->cpspec->ibmalfsnap;
4237 ppd->cpspec->chase_end = 0;
4240 * wait for pending timer, but don't clear .data (ppd)!
4242 if (ppd->cpspec->chase_timer.expires) {
4243 del_timer_sync(&ppd->cpspec->chase_timer);
4244 ppd->cpspec->chase_timer.expires = 0;
4254 qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4258 if (ppd->vls_operational != val) {
4259 ppd->vls_operational = val;
4260 set_vls(ppd);
4265 qib_write_kreg_port(ppd, krp_highprio_limit, val);
4279 if (ppd->dd->cspec->r1) {
4280 cancel_delayed_work(&ppd->cpspec->ipg_work);
4281 ppd->cpspec->ipg_tries = 0;
4289 ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4290 ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4291 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4297 static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4304 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4307 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4308 ppd->dd->unit, ppd->port);
4310 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4314 qib_devinfo(ppd->dd->pcidev,
4316 ppd->dd->unit, ppd->port);
4320 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4321 ppd->cpspec->ibcctrl_a);
4322 ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4324 ppd->cpspec->ibcctrl_b = ctrlb | val;
4325 qib_write_kreg_port(ppd, krp_ibcctrl_b,
4326 ppd->cpspec->ibcctrl_b);
4327 qib_write_kreg(ppd->dd, kr_scratch, 0);
4332 static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4338 u32 val = qib_read_kreg_port(ppd, regno);
4347 static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4359 qib_write_kreg_port(ppd, regno, val);
4361 if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4362 struct qib_devdata *dd = ppd->dd;
4366 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4367 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4373 static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4377 get_vl_weights(ppd, krp_highprio_0, t);
4381 get_vl_weights(ppd, krp_lowprio_0, t);
4390 static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4394 set_vl_weights(ppd, krp_highprio_0, t);
4398 set_vl_weights(ppd, krp_lowprio_0, t);
4457 static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4460 struct qib_devdata *dd = ppd->dd;
4476 ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4478 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4487 ppd->p_rcvctrl |=
4501 ppd->p_rcvctrl &=
4519 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4597 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4599 struct qib_devdata *dd = ppd->dd;
4616 /* Then the ppd ones that are "sticky", saved in shadow */
4618 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4620 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4641 u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4651 qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4671 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4704 * @ppd: the qlogic_ib pport
4707 static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4709 struct qib_devdata *dd = ppd->dd;
4757 qib_devinfo(ppd->dd->pcidev,
4771 if (!rcd || rcd->ppd != ppd)
4786 ret = qib_read_kreg_port(ppd, creg);
4795 ret = read_7322_creg_port(ppd, creg);
4797 ret = read_7322_creg32_port(ppd, creg);
4799 if (ppd->cpspec->ibdeltainprog)
4800 ret -= ret - ppd->cpspec->ibsymsnap;
4801 ret -= ppd->cpspec->ibsymdelta;
4803 if (ppd->cpspec->ibdeltainprog)
4804 ret -= ret - ppd->cpspec->iblnkerrsnap;
4805 ret -= ppd->cpspec->iblnkerrdelta;
4807 ret -= ppd->cpspec->ibmalfdelta;
4809 ret += ppd->cpspec->iblnkdowndelta;
5046 struct qib_pportdata *ppd = &dd->pport[port];
5047 u64 *cntr = ppd->cpspec->portcntrs;
5059 *cntr++ = qib_portcntr_7322(ppd,
5063 *cntr++ = read_7322_creg_port(ppd,
5067 *cntr++ = read_7322_creg32_port(ppd,
5089 struct qib_pportdata *ppd;
5095 ppd = dd->pport + pidx;
5102 if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
5111 traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
5112 qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
5113 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5114 traffic_wds -= ppd->dd->traffic_wds;
5115 ppd->dd->traffic_wds += traffic_wds;
5116 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
5117 if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5119 (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
5121 ppd->cpspec->qdr_dfe_time &&
5122 time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
5123 ppd->cpspec->qdr_dfe_on = 0;
5125 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
5126 ppd->dd->cspec->r1 ?
5129 force_h1(ppd);
5161 static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
5164 struct qib_devdata *dd = ppd->dd;
5169 val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
5172 qib_write_kreg_port(ppd, krp_ibcctrl_a,
5173 ppd->cpspec->ibcctrl_a &
5176 qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
5178 qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
5179 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
5194 static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5201 struct qib_devdata *dd = ppd->dd;
5205 control = qib_7322_setpbc_control(ppd, len, 0, 15);
5207 while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5233 static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5235 struct qib_devdata *dd = ppd->dd;
5269 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5272 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5291 static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5295 newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5309 if (newctrlb == ppd->cpspec->ibcctrl_b)
5312 ppd->cpspec->ibcctrl_b = newctrlb;
5313 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5314 qib_write_kreg(ppd->dd, kr_scratch, 0);
5323 static void try_7322_autoneg(struct qib_pportdata *ppd)
5327 spin_lock_irqsave(&ppd->lflags_lock, flags);
5328 ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5329 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5330 qib_autoneg_7322_send(ppd, 0);
5331 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5332 qib_7322_mini_pcs_reset(ppd);
5334 queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5344 struct qib_pportdata *ppd;
5348 ppd = container_of(work, struct qib_chippport_specific,
5349 autoneg_work.work)->ppd;
5356 if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5358 qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5364 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5368 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5369 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5372 qib_7322_mini_pcs_reset(ppd);
5375 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5376 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5379 qib_7322_mini_pcs_reset(ppd);
5381 set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5387 wait_event_timeout(ppd->cpspec->autoneg_wait,
5388 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5391 if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5392 spin_lock_irqsave(&ppd->lflags_lock, flags);
5393 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5394 if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5395 ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5396 ppd->cpspec->autoneg_tries = 0;
5398 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5399 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5407 static void try_7322_ipg(struct qib_pportdata *ppd)
5409 struct qib_ibport *ibp = &ppd->ibport_data;
5456 delay = 2 << ppd->cpspec->ipg_tries;
5457 queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5467 struct qib_pportdata *ppd;
5469 ppd = container_of(work, struct qib_chippport_specific,
5470 ipg_work.work)->ppd;
5471 if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5472 && ++ppd->cpspec->ipg_tries <= 10)
5473 try_7322_ipg(ppd);
5507 static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5513 spin_lock_irqsave(&ppd->lflags_lock, flags);
5514 ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5515 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5519 ppd->link_speed_active = QIB_IB_QDR;
5522 ppd->link_speed_active = QIB_IB_DDR;
5525 ppd->link_speed_active = QIB_IB_SDR;
5529 ppd->link_width_active = IB_WIDTH_4X;
5532 ppd->link_width_active = IB_WIDTH_1X;
5533 ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5540 ppd->cpspec->ipg_tries = 0;
5541 clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5545 qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5546 if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5548 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5549 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5551 &ppd->cpspec->qsfp_data;
5553 qib_write_kreg_port(ppd, krp_tx_deemph_override,
5556 qib_cancel_sends(ppd);
5558 qib_7322_mini_pcs_reset(ppd);
5561 if (ppd->dd->flags & QIB_HAS_QSFP) {
5565 spin_lock_irqsave(&ppd->sdma_lock, flags);
5566 if (__qib_sdma_running(ppd))
5567 __qib_sdma_process_event(ppd,
5569 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5571 clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5572 if (clr == ppd->cpspec->iblnkdownsnap)
5573 ppd->cpspec->iblnkdowndelta++;
5576 !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5578 ppd->link_speed_active == QIB_IB_SDR &&
5579 (ppd->link_speed_enabled & QIB_IB_DDR)
5580 && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5582 ++ppd->cpspec->autoneg_tries;
5583 if (!ppd->cpspec->ibdeltainprog) {
5584 ppd->cpspec->ibdeltainprog = 1;
5585 ppd->cpspec->ibsymdelta +=
5586 read_7322_creg32_port(ppd,
5588 ppd->cpspec->ibsymsnap;
5589 ppd->cpspec->iblnkerrdelta +=
5590 read_7322_creg32_port(ppd,
5592 ppd->cpspec->iblnkerrsnap;
5594 try_7322_autoneg(ppd);
5596 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5597 ppd->link_speed_active == QIB_IB_SDR) {
5598 qib_autoneg_7322_send(ppd, 1);
5599 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5600 qib_7322_mini_pcs_reset(ppd);
5603 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5604 (ppd->link_speed_active & QIB_IB_DDR)) {
5605 spin_lock_irqsave(&ppd->lflags_lock, flags);
5606 ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5608 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5609 ppd->cpspec->autoneg_tries = 0;
5611 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5612 wake_up(&ppd->cpspec->autoneg_wait);
5614 } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5621 spin_lock_irqsave(&ppd->lflags_lock, flags);
5622 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5623 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5624 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5627 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5629 if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5630 try_7322_ipg(ppd);
5631 if (!ppd->cpspec->recovery_init)
5632 setup_7322_link_recovery(ppd, 0);
5633 ppd->cpspec->qdr_dfe_time = jiffies +
5636 ppd->cpspec->ibmalfusesnap = 0;
5637 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5641 ppd->cpspec->iblnkdownsnap =
5642 read_7322_creg32_port(ppd, crp_iblinkdown);
5643 if (ppd->cpspec->ibdeltainprog) {
5644 ppd->cpspec->ibdeltainprog = 0;
5645 ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5646 crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5647 ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5648 crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5651 !ppd->cpspec->ibdeltainprog &&
5652 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5653 ppd->cpspec->ibdeltainprog = 1;
5654 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5656 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5661 qib_setup_7322_setextled(ppd, ibup);
5820 struct qib_pportdata *ppd = NULL;
5825 * severed. We need to hunt for the ppd that corresponds
5833 ppd = dd->pport + pidx;
5834 if (!ppd->cpspec->kpregbase)
5837 psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5845 ppd = NULL;
5855 if (!ppd || (mask & all_bits) != all_bits) {
5880 if (ppd) {
5881 sval = ppd->p_sendctrl & ~mask;
5883 ppd->p_sendctrl = sval;
5909 static void init_txdds_table(struct qib_pportdata *ppd, int override);
5914 struct qib_pportdata *ppd;
5921 ppd = qd->ppd;
5928 if (!qib_qsfp_mod_present(ppd)) {
5929 ppd->cpspec->qsfp_data.modpresent = 0;
5931 qib_set_ib_7322_lstate(ppd, 0,
5933 spin_lock_irqsave(&ppd->lflags_lock, flags);
5934 ppd->lflags &= ~QIBL_LINKV;
5935 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5948 ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5956 if (!ret && !ppd->dd->cspec->r1) {
5966 ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
5973 init_txdds_table(ppd, 0);
5978 if (!ppd->cpspec->qsfp_data.modpresent &&
5979 (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
5980 ppd->cpspec->qsfp_data.modpresent = 1;
5981 qib_set_ib_7322_lstate(ppd, 0,
5983 spin_lock_irqsave(&ppd->lflags_lock, flags);
5984 ppd->lflags |= QIBL_LINKV;
5985 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5994 static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
5997 struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
5998 struct qib_devdata *dd = ppd->dd;
6001 mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
6002 qd->ppd = ppd;
6081 struct qib_pportdata *ppd = &dd->pport[pidx];
6083 if (ppd->port != port || !ppd->link_speed_supported)
6085 ppd->cpspec->no_eep = val;
6087 ppd->cpspec->h1_val = h1;
6089 init_txdds_table(ppd, 1);
6094 qib_set_ib_7322_lstate(ppd, 0,
6179 struct qib_pportdata *ppd = dd->pport + n;
6181 qib_write_kreg_port(ppd, krp_senddmaprioritythld,
6185 qib_init_7322_qsfp(ppd);
6207 static void write_7322_init_portregs(struct qib_pportdata *ppd)
6212 if (!ppd->link_speed_supported) {
6215 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
6216 qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
6217 qib_write_kreg(ppd->dd, kr_scratch, 0);
6225 val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
6227 val |= (u64)(ppd->vls_supported - 1) <<
6229 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
6231 qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
6234 qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
6238 qib_write_kreg_port(ppd, krp_ncmodectrl,
6245 qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
6246 qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
6247 qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
6248 if (ppd->dd->cspec->r1)
6249 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
6261 struct qib_pportdata *ppd;
6276 ppd = &dd->pport[pidx];
6280 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6298 ctxt = ppd->hw_pidx;
6302 qib_write_kreg_port(ppd, regno, val);
6307 qib_write_kreg_port(ppd, regno, val);
6345 struct qib_pportdata *ppd;
6352 ppd = (struct qib_pportdata *)(dd + 1);
6353 dd->pport = ppd;
6354 ppd[0].dd = dd;
6355 ppd[1].dd = dd;
6357 dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6359 ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6360 ppd[1].cpspec = &ppd[0].cpspec[1];
6361 ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6362 ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6397 ppd = dd->pport;
6435 struct qib_chippport_specific *cp = ppd->cpspec;
6437 ppd->link_speed_supported = features & PORT_SPD_CAP;
6439 if (!ppd->link_speed_supported) {
6444 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6445 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6446 ppd[0] = ppd[1];
6460 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6461 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6478 ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6484 ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6485 ppd->link_width_enabled = IB_WIDTH_4X;
6486 ppd->link_speed_enabled = ppd->link_speed_supported;
6491 ppd->link_width_active = IB_WIDTH_4X;
6492 ppd->link_speed_active = QIB_IB_SDR;
6493 ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6496 ppd->vls_supported = IB_VL_VL0;
6499 ppd->vls_supported = IB_VL_VL0_1;
6508 ppd->vls_supported = IB_VL_VL0_3;
6512 ppd->vls_supported = IB_VL_VL0_7;
6517 ppd->vls_supported = IB_VL_VL0_3;
6522 ppd->vls_operational = ppd->vls_supported;
6527 if (ppd->dd->cspec->r1)
6539 dd->unit, ppd->port);
6545 ppd->cpspec->no_eep = IS_QMH(dd) ?
6552 write_7322_init_portregs(ppd);
6556 ppd++;
6675 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6679 struct qib_devdata *dd = ppd->dd;
6683 first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6695 static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6698 qib_write_kreg_port(ppd, krp_psinterval, intv);
6699 qib_write_kreg_port(ppd, krp_psstart, start);
6705 static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6707 qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6713 static void dump_sdma_7322_state(struct qib_pportdata *ppd)
6717 reg = qib_read_kreg_port(ppd, krp_senddmastatus);
6718 qib_dev_porterr(ppd->dd, ppd->port,
6721 reg = qib_read_kreg_port(ppd, krp_sendctrl);
6722 qib_dev_porterr(ppd->dd, ppd->port,
6725 reg = qib_read_kreg_port(ppd, krp_senddmabase);
6726 qib_dev_porterr(ppd->dd, ppd->port,
6729 reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
6730 reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
6731 reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
6732 qib_dev_porterr(ppd->dd, ppd->port,
6737 reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6738 qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
6739 reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6740 qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg1);
6741 reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6742 qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg2);
6744 qib_dev_porterr(ppd->dd, ppd->port,
6747 reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6748 reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6749 reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6751 qib_dev_porterr(ppd->dd, ppd->port,
6755 reg = qib_read_kreg_port(ppd, krp_senddmatail);
6756 qib_dev_porterr(ppd->dd, ppd->port,
6759 reg = qib_read_kreg_port(ppd, krp_senddmahead);
6760 qib_dev_porterr(ppd->dd, ppd->port,
6763 reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
6764 qib_dev_porterr(ppd->dd, ppd->port,
6767 reg = qib_read_kreg_port(ppd, krp_senddmalengen);
6768 qib_dev_porterr(ppd->dd, ppd->port,
6771 reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
6772 qib_dev_porterr(ppd->dd, ppd->port,
6775 reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
6776 qib_dev_porterr(ppd->dd, ppd->port,
6779 reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
6780 qib_dev_porterr(ppd->dd, ppd->port,
6783 reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
6784 qib_dev_porterr(ppd->dd, ppd->port,
6787 dump_sdma_state(ppd);
6837 static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6839 ppd->sdma_state.set_state_action = sdma_7322_action_table;
6842 static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6844 struct qib_devdata *dd = ppd->dd;
6849 qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6850 qib_sdma_7322_setlengen(ppd);
6851 qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6852 qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6853 qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6854 qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6861 ((dd->num_pports == 1 || ppd->port == 2) ? n :
6865 ppd->sdma_state.first_sendbuf = erstbuf;
6866 ppd->sdma_state.last_sendbuf = lastbuf;
6873 qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6874 qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6875 qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6880 static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6882 struct qib_devdata *dd = ppd->dd;
6890 use_dmahead = __qib_sdma_running(ppd) &&
6894 (u16) le64_to_cpu(*ppd->sdma_head_dma) :
6895 (u16) qib_read_kreg_port(ppd, krp_senddmahead);
6897 swhead = ppd->sdma_descq_head;
6898 swtail = ppd->sdma_descq_tail;
6899 cnt = ppd->sdma_descq_cnt;
6925 static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6927 u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6941 static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6944 u8 snd_mult = ppd->delay_mult;
6955 ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
7314 static void set_txdds(struct qib_pportdata *ppd, int ridx,
7317 struct qib_devdata *dd = ppd->dd;
7328 if (ppd->hw_pidx)
7337 qib_write_kreg(ppd->dd, kr_scratch, 0);
7585 static void find_best_ent(struct qib_pportdata *ppd,
7590 struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7610 *sdr_dds = txdds_sdr + ppd->dd->board_atten;
7611 *ddr_dds = txdds_ddr + ppd->dd->board_atten;
7612 *qdr_dds = txdds_qdr + ppd->dd->board_atten;
7622 } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7629 idx = ppd->cpspec->no_eep;
7633 } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7635 idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7639 } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7640 ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7642 idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7644 ppd->dd->unit, ppd->port, idx);
7656 static void init_txdds_table(struct qib_pportdata *ppd, int override)
7663 find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7666 if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7670 set_txdds(ppd, 0, sdr_dds);
7671 set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7672 set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7673 if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7675 dds = (struct txdds_ent *)(ppd->link_speed_active ==
7677 (ppd->link_speed_active ==
7679 write_tx_serdes_param(ppd, dds);
7684 set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7685 set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7687 set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7777 static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7780 struct qib_devdata *dd = ppd->dd;
7784 ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7786 ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7791 static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7793 u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7798 ppd->dd->unit, ppd->port);
7802 ppd->dd->unit, ppd->port);
7805 qib_write_kreg_port(ppd, krp_serdesctrl, data);
7808 static int serdes_7322_init(struct qib_pportdata *ppd)
7812 if (ppd->dd->cspec->r1)
7813 ret = serdes_7322_init_old(ppd);
7815 ret = serdes_7322_init_new(ppd);
7819 static int serdes_7322_init_old(struct qib_pportdata *ppd)
7827 init_txdds_table(ppd, 0);
7830 qib_write_kreg_port(ppd, krp_tx_deemph_override,
7836 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7839 ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7841 ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7844 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7845 ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7848 le_val = IS_QME(ppd->dd) ? 0 : 1;
7849 ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7852 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7855 ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7859 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7860 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7861 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7862 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7865 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7866 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7867 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7868 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7871 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7874 ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7875 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7876 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7878 serdes_7322_los_enable(ppd, 1);
7881 ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7884 ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7887 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7888 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7894 qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7895 qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7896 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7897 ppd->dd->cspec->r1 ?
7899 ppd->cpspec->qdr_dfe_on = 1;
7902 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7905 ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7907 if (!ppd->dd->cspec->r1) {
7908 ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7909 ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7913 ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7918 static int serdes_7322_init_new(struct qib_pportdata *ppd)
7925 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7928 qib_write_kreg_port(ppd, krp_tx_deemph_override,
7935 ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
7937 ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
7939 ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
7941 ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
7943 ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7945 ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
7947 ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
7949 ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
7951 ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
7953 ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7955 ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
7957 ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
7959 ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
7961 ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
7963 ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
7965 ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
7967 if (!ppd->dd->cspec->r1) {
7968 ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
7969 ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
7971 ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
7982 ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
7985 ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
7988 ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
7993 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7994 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7995 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7996 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7999 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
8000 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
8001 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
8002 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
8005 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
8008 ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
8009 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
8010 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
8013 serdes_7322_los_enable(ppd, 1);
8015 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
8019 ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
8021 ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
8024 ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
8029 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8039 IBSD(ppd->hw_pidx), chan_done);
8042 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8047 IBSD(ppd->hw_pidx), chan);
8052 ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8057 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
8058 ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
8060 ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
8062 ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
8065 ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
8067 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
8068 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
8070 ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8073 ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
8075 ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
8077 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
8079 ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
8085 qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
8086 qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
8087 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
8088 ppd->dd->cspec->r1 ?
8090 ppd->cpspec->qdr_dfe_on = 1;
8092 ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
8094 ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
8097 ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
8099 ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
8101 ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
8103 ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
8109 init_txdds_table(ppd, 0);
8116 static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
8118 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8122 static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
8126 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8129 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8134 static void clock_man(struct qib_pportdata *ppd, int chan)
8136 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8138 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8140 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8142 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8151 static void write_tx_serdes_param(struct qib_pportdata *ppd,
8156 deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
8177 qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
8185 static void adj_tx_serdes(struct qib_pportdata *ppd)
8190 find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
8191 dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
8192 qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
8194 write_tx_serdes_param(ppd, dds);
8198 static void force_h1(struct qib_pportdata *ppd)
8202 ppd->cpspec->qdr_reforce = 0;
8203 if (!ppd->dd->cspec->r1)
8207 set_man_mode_h1(ppd, chan, 1, 0);
8208 set_man_code(ppd, chan, ppd->cpspec->h1_val);
8209 clock_man(ppd, chan);
8210 set_man_mode_h1(ppd, chan, 0, 0);
8399 static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8402 struct qib_devdata *dd = ppd->dd;
8404 if (!ppd->dd->cspec->r1)
8408 ppd->cpspec->recovery_init = 1;
8411 portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8433 static void check_7322_rxe_status(struct qib_pportdata *ppd)
8435 struct qib_devdata *dd = ppd->dd;
8451 ppd->dd->cspec->stay_in_freeze = 1;
8452 qib_7322_set_intr_state(ppd->dd, 0);
8458 qib_write_kreg(ppd->dd, kr_hwerrclear,
8465 if (ppd->link_speed_supported) {
8466 ppd->cpspec->ibcctrl_a &=
8468 qib_write_kreg_port(ppd, krp_ibcctrl_a,
8469 ppd->cpspec->ibcctrl_a);
8471 if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8472 qib_set_ib_7322_lstate(ppd, 0,