Lines Matching refs:ppd
63 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
74 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
626 struct qib_pportdata *ppd;
870 static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
873 if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
875 return readq(&ppd->cpspec->kpregbase[regno]);
878 static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
881 if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
882 (ppd->dd->flags & QIB_PRESENT))
883 writeq(value, &ppd->cpspec->kpregbase[regno]);
918 static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
921 if (ppd->cpspec && ppd->cpspec->cpregbase &&
922 (ppd->dd->flags & QIB_PRESENT))
923 writeq(value, &ppd->cpspec->cpregbase[regno]);
926 static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
929 if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
930 !(ppd->dd->flags & QIB_PRESENT))
932 return readq(&ppd->cpspec->cpregbase[regno]);
935 static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
938 if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
939 !(ppd->dd->flags & QIB_PRESENT))
941 return readl(&ppd->cpspec->cpregbase[regno]);
1339 static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1341 struct qib_devdata *dd = ppd->dd;
1416 static void flush_fifo(struct qib_pportdata *ppd)
1418 struct qib_devdata *dd = ppd->dd;
1442 (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1444 piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1463 static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1465 struct qib_devdata *dd = ppd->dd;
1497 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1498 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1502 ppd->p_sendctrl |= set_sendctrl;
1503 ppd->p_sendctrl &= ~clr_sendctrl;
1506 qib_write_kreg_port(ppd, krp_sendctrl,
1507 ppd->p_sendctrl |
1510 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1514 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1515 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1521 if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1522 flush_fifo(ppd);
1525 static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1527 __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1530 static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1537 qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1538 qib_write_kreg_port(ppd, krp_senddmalengen,
1539 ppd->sdma_descq_cnt |
1546 static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1550 ppd->sdma_descq_tail = tail;
1551 qib_write_kreg_port(ppd, krp_senddmatail, tail);
1557 static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1565 sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1567 qib_sdma_7322_setlengen(ppd);
1568 qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1569 ppd->sdma_head_dma[0] = 0;
1570 qib_7322_sdma_sendctrl(ppd,
1571 ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1586 static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1589 struct qib_devdata *dd = ppd->dd;
1592 err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
1597 ppd->port);
1599 spin_lock_irqsave(&ppd->sdma_lock, flags);
1603 qib_dev_porterr(dd, ppd->port,
1605 qib_sdma_state_names[ppd->sdma_state.current_state],
1606 errs, ppd->cpspec->sdmamsgbuf);
1607 dump_sdma_7322_state(ppd);
1610 switch (ppd->sdma_state.current_state) {
1616 __qib_sdma_process_event(ppd,
1628 __qib_sdma_process_event(ppd,
1634 __qib_sdma_process_event(ppd,
1639 __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1640 __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1644 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1747 struct qib_pportdata *ppd = cp->ppd;
1749 ppd->cpspec->chase_timer.expires = 0;
1750 qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1754 static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
1757 ppd->cpspec->chase_end = 0;
1762 qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1764 ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1765 add_timer(&ppd->cpspec->chase_timer);
1768 static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1787 if (ppd->cpspec->chase_end &&
1788 time_after(tnow, ppd->cpspec->chase_end))
1789 disable_chase(ppd, tnow, ibclt);
1790 else if (!ppd->cpspec->chase_end)
1791 ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1794 ppd->cpspec->chase_end = 0;
1802 force_h1(ppd);
1803 ppd->cpspec->qdr_reforce = 1;
1804 if (!ppd->dd->cspec->r1)
1805 serdes_7322_los_enable(ppd, 0);
1806 } else if (ppd->cpspec->qdr_reforce &&
1811 force_h1(ppd);
1813 if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1814 ppd->link_speed_enabled == QIB_IB_QDR &&
1819 adj_tx_serdes(ppd);
1823 u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1825 if (!ppd->dd->cspec->r1 &&
1833 serdes_7322_los_enable(ppd, 1);
1834 if (!ppd->cpspec->qdr_dfe_on &&
1836 ppd->cpspec->qdr_dfe_on = 1;
1837 ppd->cpspec->qdr_dfe_time = 0;
1839 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1840 ppd->dd->cspec->r1 ?
1845 ppd->dd->unit, ppd->port, ibclt);
1857 static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1861 struct qib_devdata *dd = ppd->dd;
1866 check_7322_rxe_status(ppd);
1868 errs = qib_read_kreg_port(ppd, krp_errstatus);
1872 ppd->port);
1878 msg = ppd->cpspec->epmsgbuf;
1882 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1885 snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
1887 qib_dev_porterr(dd, ppd->port,
1897 symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1898 qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1899 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
1907 !(ppd->lflags & QIBL_LINKACTIVE)) {
1915 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1921 qib_disarm_7322_senderrbufs(ppd);
1923 !(ppd->lflags & QIBL_LINKACTIVE)) {
1931 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
1937 qib_write_kreg_port(ppd, krp_errclear, errs);
1951 sdma_7322_p_errors(ppd, errs);
1957 ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1960 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1961 handle_serdes_issues(ppd, ibcs);
1962 if (!(ppd->cpspec->ibcctrl_a &
1969 ppd->cpspec->ibcctrl_a |=
1971 qib_write_kreg_port(ppd, krp_ibcctrl_a,
1972 ppd->cpspec->ibcctrl_a);
1976 ppd->link_width_active =
1979 ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1984 if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1986 qib_set_ib_7322_lstate(ppd, 0,
2001 qib_handle_e_ibstatuschanged(ppd, ibcs);
2004 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
2006 if (ppd->state_wanted & ppd->lflags)
2007 wake_up_interruptible(&ppd->state_wait);
2169 struct qib_pportdata *ppd = dd->pport;
2171 for (; pidx < dd->num_pports; ++pidx, ppd++) {
2180 spin_lock_irqsave(&ppd->sdma_lock, flags);
2181 dump_sdma_7322_state(ppd);
2182 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
2259 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2263 struct qib_devdata *dd = ppd->dd;
2274 qib_7322_mini_pcs_reset(ppd);
2275 spin_lock_irqsave(&ppd->lflags_lock, flags);
2276 ppd->lflags |= QIBL_IB_LINK_DISABLED;
2277 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2284 spin_lock_irqsave(&ppd->lflags_lock, flags);
2285 ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2286 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2291 ppd->cpspec->ibcctrl_a &=
2298 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2316 static void set_vls(struct qib_pportdata *ppd)
2319 struct qib_devdata *dd = ppd->dd;
2322 numvls = qib_num_vls(ppd->vls_operational);
2334 qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2337 qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2339 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2341 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2344 val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2346 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2349 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2352 val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2353 val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2356 ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2359 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2367 static int serdes_7322_init(struct qib_pportdata *ppd);
2371 * @ppd: physical port on the qlogic_ib device
2373 static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2375 struct qib_devdata *dd = ppd->dd;
2385 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2386 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2390 qib_write_kreg_port(ppd, krp_tx_deemph_override,
2395 ppd->cpspec->ibdeltainprog = 1;
2396 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2398 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2418 ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2420 ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2426 qib_7322_mini_pcs_reset(ppd);
2428 if (!ppd->cpspec->ibcctrl_b) {
2429 unsigned lse = ppd->link_speed_enabled;
2435 ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2437 ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2443 ppd->cpspec->ibcctrl_b |=
2448 ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2454 if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2456 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2458 ppd->cpspec->ibcctrl_b |=
2459 ppd->link_width_enabled == IB_WIDTH_4X ?
2464 ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2467 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2470 val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2473 qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2475 serdes_7322_init(ppd);
2477 guid = be64_to_cpu(ppd->guid);
2480 guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2481 ppd->guid = cpu_to_be64(guid);
2484 qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2489 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2490 set_vls(ppd);
2493 val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2495 qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2498 ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2502 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2503 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2507 val = qib_read_kreg_port(ppd, krp_errmask);
2508 qib_write_kreg_port(ppd, krp_errmask,
2520 static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2525 qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2527 spin_lock_irqsave(&ppd->lflags_lock, flags);
2528 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2529 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2530 wake_up(&ppd->cpspec->autoneg_wait);
2531 cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2532 if (ppd->dd->cspec->r1)
2533 cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2535 ppd->cpspec->chase_end = 0;
2536 if (ppd->cpspec->chase_timer.function) /* if initted */
2537 del_timer_sync(&ppd->cpspec->chase_timer);
2546 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2547 qib_7322_mini_pcs_reset(ppd);
2553 if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2554 ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2555 struct qib_devdata *dd = ppd->dd;
2563 if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2564 val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2565 if (ppd->cpspec->ibdeltainprog)
2566 val -= val - ppd->cpspec->ibsymsnap;
2567 val -= ppd->cpspec->ibsymdelta;
2568 write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2570 if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2571 val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2572 if (ppd->cpspec->ibdeltainprog)
2573 val -= val - ppd->cpspec->iblnkerrsnap;
2574 val -= ppd->cpspec->iblnkerrdelta;
2575 write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2577 if (ppd->cpspec->iblnkdowndelta) {
2578 val = read_7322_creg32_port(ppd, crp_iblinkdown);
2579 val += ppd->cpspec->iblnkdowndelta;
2580 write_7322_creg_port(ppd, crp_iblinkdown, val);
2594 * @ppd: physical port on the qlogic_ib device
2614 static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2616 struct qib_devdata *dd = ppd->dd;
2629 if (ppd->led_override) {
2630 grn = (ppd->led_override & QIB_LED_PHYS);
2631 yel = (ppd->led_override & QIB_LED_LOG);
2633 val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2643 extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2646 extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2656 extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2662 qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2717 static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
2719 struct qib_devdata *dd = ppd->dd;
2721 unsigned pidx = ppd->port - 1;
2727 cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2732 (ppd->hw_pidx ?
2736 "sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
2740 cspec->dca_ctrl |= ppd->hw_pidx ?
2798 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2800 qib_update_sdma_dca(ppd, cpu);
2815 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2817 dd = ppd->dd;
2978 struct qib_pportdata *ppd;
2985 ppd = dd->pport + pidx;
2986 mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2990 qd = &ppd->cpspec->qsfp_data;
3029 handle_7322_p_errors(dd->rcd[0]->ppd);
3031 handle_7322_p_errors(dd->rcd[1]->ppd);
3219 struct qib_pportdata *ppd = data;
3220 struct qib_devdata *dd = ppd->dd;
3234 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3236 qib_sdma_intr(ppd);
3246 struct qib_pportdata *ppd = data;
3247 struct qib_devdata *dd = ppd->dd;
3261 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3263 qib_sdma_intr(ppd);
3273 struct qib_pportdata *ppd = data;
3274 struct qib_devdata *dd = ppd->dd;
3288 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3291 qib_sdma_intr(ppd);
3301 struct qib_pportdata *ppd = data;
3302 struct qib_devdata *dd = ppd->dd;
3316 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3319 qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3745 struct qib_pportdata *ppd = &dd->pport[i];
3747 spin_lock_irqsave(&ppd->lflags_lock, flags);
3748 ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3749 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3750 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3961 static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3970 ret = ppd->link_width_enabled;
3974 ret = ppd->link_width_active;
3978 ret = ppd->link_speed_enabled;
3982 ret = ppd->link_speed_active;
3996 ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
4001 ret = ppd->vls_operational;
4013 ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4018 ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4024 ret = (ppd->cpspec->ibcctrl_a &
4039 if (ppd->link_speed_active == QIB_IB_QDR)
4041 else if (ppd->link_speed_active == QIB_IB_DDR)
4051 ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
4064 static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
4066 struct qib_devdata *dd = ppd->dd;
4087 qib_write_kreg_port(ppd, krp_sendslid,
4089 qib_write_kreg_port(ppd, krp_sendslidmask,
4094 ppd->link_width_enabled = val;
4114 ppd->link_speed_enabled = val;
4122 spin_lock_irqsave(&ppd->lflags_lock, flags);
4123 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
4124 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4142 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4145 ppd->cpspec->ibcctrl_a &=
4147 ppd->cpspec->ibcctrl_a |= (u64) val <<
4149 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4150 ppd->cpspec->ibcctrl_a);
4156 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4159 ppd->cpspec->ibcctrl_a &=
4161 ppd->cpspec->ibcctrl_a |= (u64) val <<
4163 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4164 ppd->cpspec->ibcctrl_a);
4170 maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
4171 ((u64) ppd->pkeys[2] << 32) |
4172 ((u64) ppd->pkeys[3] << 48);
4173 qib_write_kreg_port(ppd, krp_partitionkey, maskr);
4179 ppd->cpspec->ibcctrl_a &=
4182 ppd->cpspec->ibcctrl_a |=
4184 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4196 val = (ppd->ibmaxlen >> 2) + 1;
4197 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
4198 ppd->cpspec->ibcctrl_a |= (u64)val <<
4200 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4201 ppd->cpspec->ibcctrl_a);
4209 ppd->cpspec->ibmalfusesnap = 1;
4210 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
4212 if (!ppd->cpspec->ibdeltainprog &&
4214 ppd->cpspec->ibdeltainprog = 1;
4215 ppd->cpspec->ibsymsnap =
4216 read_7322_creg32_port(ppd,
4218 ppd->cpspec->iblnkerrsnap =
4219 read_7322_creg32_port(ppd,
4226 if (ppd->cpspec->ibmalfusesnap) {
4227 ppd->cpspec->ibmalfusesnap = 0;
4228 ppd->cpspec->ibmalfdelta +=
4229 read_7322_creg32_port(ppd,
4231 ppd->cpspec->ibmalfsnap;
4259 ppd->cpspec->chase_end = 0;
4262 					 * wait for pending timer, but don't clear .data (ppd)!
4264 if (ppd->cpspec->chase_timer.expires) {
4265 del_timer_sync(&ppd->cpspec->chase_timer);
4266 ppd->cpspec->chase_timer.expires = 0;
4276 qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4280 if (ppd->vls_operational != val) {
4281 ppd->vls_operational = val;
4282 set_vls(ppd);
4287 qib_write_kreg_port(ppd, krp_highprio_limit, val);
4301 if (ppd->dd->cspec->r1) {
4302 cancel_delayed_work(&ppd->cpspec->ipg_work);
4303 ppd->cpspec->ipg_tries = 0;
4311 ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4312 ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4313 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4319 static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4326 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4329 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4330 ppd->dd->unit, ppd->port);
4332 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4336 qib_devinfo(ppd->dd->pcidev,
4338 ppd->dd->unit, ppd->port);
4342 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4343 ppd->cpspec->ibcctrl_a);
4344 ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4346 ppd->cpspec->ibcctrl_b = ctrlb | val;
4347 qib_write_kreg_port(ppd, krp_ibcctrl_b,
4348 ppd->cpspec->ibcctrl_b);
4349 qib_write_kreg(ppd->dd, kr_scratch, 0);
4354 static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4360 u32 val = qib_read_kreg_port(ppd, regno);
4369 static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4381 qib_write_kreg_port(ppd, regno, val);
4383 if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4384 struct qib_devdata *dd = ppd->dd;
4388 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4389 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4395 static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4399 get_vl_weights(ppd, krp_highprio_0, t);
4403 get_vl_weights(ppd, krp_lowprio_0, t);
4412 static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4416 set_vl_weights(ppd, krp_highprio_0, t);
4420 set_vl_weights(ppd, krp_lowprio_0, t);
4479 static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4482 struct qib_devdata *dd = ppd->dd;
4498 ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4500 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4509 ppd->p_rcvctrl |=
4523 ppd->p_rcvctrl &=
4541 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4619 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4621 struct qib_devdata *dd = ppd->dd;
4638 /* Then the ppd ones that are "sticky", saved in shadow */
4640 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4642 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4663 u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4673 qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4693 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4726 * @ppd: the qlogic_ib pport
4729 static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4731 struct qib_devdata *dd = ppd->dd;
4779 qib_devinfo(ppd->dd->pcidev,
4793 if (!rcd || rcd->ppd != ppd)
4808 ret = qib_read_kreg_port(ppd, creg);
4817 ret = read_7322_creg_port(ppd, creg);
4819 ret = read_7322_creg32_port(ppd, creg);
4821 if (ppd->cpspec->ibdeltainprog)
4822 ret -= ret - ppd->cpspec->ibsymsnap;
4823 ret -= ppd->cpspec->ibsymdelta;
4825 if (ppd->cpspec->ibdeltainprog)
4826 ret -= ret - ppd->cpspec->iblnkerrsnap;
4827 ret -= ppd->cpspec->iblnkerrdelta;
4829 ret -= ppd->cpspec->ibmalfdelta;
4831 ret += ppd->cpspec->iblnkdowndelta;
5068 struct qib_pportdata *ppd = &dd->pport[port];
5069 u64 *cntr = ppd->cpspec->portcntrs;
5081 *cntr++ = qib_portcntr_7322(ppd,
5085 *cntr++ = read_7322_creg_port(ppd,
5089 *cntr++ = read_7322_creg32_port(ppd,
5111 struct qib_pportdata *ppd;
5117 ppd = dd->pport + pidx;
5124 if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
5133 traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
5134 qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
5135 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5136 traffic_wds -= ppd->dd->traffic_wds;
5137 ppd->dd->traffic_wds += traffic_wds;
5138 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
5139 if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5141 (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
5143 ppd->cpspec->qdr_dfe_time &&
5144 time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
5145 ppd->cpspec->qdr_dfe_on = 0;
5147 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
5148 ppd->dd->cspec->r1 ?
5151 force_h1(ppd);
5183 static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
5186 struct qib_devdata *dd = ppd->dd;
5191 val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
5194 qib_write_kreg_port(ppd, krp_ibcctrl_a,
5195 ppd->cpspec->ibcctrl_a &
5198 qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
5200 qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
5201 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
5216 static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5223 struct qib_devdata *dd = ppd->dd;
5227 control = qib_7322_setpbc_control(ppd, len, 0, 15);
5229 while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5255 static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5257 struct qib_devdata *dd = ppd->dd;
5291 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5294 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5313 static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5317 newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5331 if (newctrlb == ppd->cpspec->ibcctrl_b)
5334 ppd->cpspec->ibcctrl_b = newctrlb;
5335 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5336 qib_write_kreg(ppd->dd, kr_scratch, 0);
5345 static void try_7322_autoneg(struct qib_pportdata *ppd)
5349 spin_lock_irqsave(&ppd->lflags_lock, flags);
5350 ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5351 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5352 qib_autoneg_7322_send(ppd, 0);
5353 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5354 qib_7322_mini_pcs_reset(ppd);
5356 queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5366 struct qib_pportdata *ppd;
5370 ppd = container_of(work, struct qib_chippport_specific,
5371 autoneg_work.work)->ppd;
5378 if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5380 qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5386 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5390 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5391 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5394 qib_7322_mini_pcs_reset(ppd);
5397 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5398 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5401 qib_7322_mini_pcs_reset(ppd);
5403 set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5409 wait_event_timeout(ppd->cpspec->autoneg_wait,
5410 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5413 if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5414 spin_lock_irqsave(&ppd->lflags_lock, flags);
5415 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5416 if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5417 ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5418 ppd->cpspec->autoneg_tries = 0;
5420 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5421 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5429 static void try_7322_ipg(struct qib_pportdata *ppd)
5431 struct qib_ibport *ibp = &ppd->ibport_data;
5478 delay = 2 << ppd->cpspec->ipg_tries;
5479 queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5489 struct qib_pportdata *ppd;
5491 ppd = container_of(work, struct qib_chippport_specific,
5492 ipg_work.work)->ppd;
5493 if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5494 && ++ppd->cpspec->ipg_tries <= 10)
5495 try_7322_ipg(ppd);
5529 static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5535 spin_lock_irqsave(&ppd->lflags_lock, flags);
5536 ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5537 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5541 ppd->link_speed_active = QIB_IB_QDR;
5544 ppd->link_speed_active = QIB_IB_DDR;
5547 ppd->link_speed_active = QIB_IB_SDR;
5551 ppd->link_width_active = IB_WIDTH_4X;
5554 ppd->link_width_active = IB_WIDTH_1X;
5555 ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5562 ppd->cpspec->ipg_tries = 0;
5563 clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5567 qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5568 if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5570 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5571 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5573 &ppd->cpspec->qsfp_data;
5575 qib_write_kreg_port(ppd, krp_tx_deemph_override,
5578 qib_cancel_sends(ppd);
5580 qib_7322_mini_pcs_reset(ppd);
5583 if (ppd->dd->flags & QIB_HAS_QSFP) {
5587 spin_lock_irqsave(&ppd->sdma_lock, flags);
5588 if (__qib_sdma_running(ppd))
5589 __qib_sdma_process_event(ppd,
5591 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5593 clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5594 if (clr == ppd->cpspec->iblnkdownsnap)
5595 ppd->cpspec->iblnkdowndelta++;
5598 !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5600 ppd->link_speed_active == QIB_IB_SDR &&
5601 (ppd->link_speed_enabled & QIB_IB_DDR)
5602 && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5604 ++ppd->cpspec->autoneg_tries;
5605 if (!ppd->cpspec->ibdeltainprog) {
5606 ppd->cpspec->ibdeltainprog = 1;
5607 ppd->cpspec->ibsymdelta +=
5608 read_7322_creg32_port(ppd,
5610 ppd->cpspec->ibsymsnap;
5611 ppd->cpspec->iblnkerrdelta +=
5612 read_7322_creg32_port(ppd,
5614 ppd->cpspec->iblnkerrsnap;
5616 try_7322_autoneg(ppd);
5618 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5619 ppd->link_speed_active == QIB_IB_SDR) {
5620 qib_autoneg_7322_send(ppd, 1);
5621 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5622 qib_7322_mini_pcs_reset(ppd);
5625 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5626 (ppd->link_speed_active & QIB_IB_DDR)) {
5627 spin_lock_irqsave(&ppd->lflags_lock, flags);
5628 ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5630 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5631 ppd->cpspec->autoneg_tries = 0;
5633 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5634 wake_up(&ppd->cpspec->autoneg_wait);
5636 } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5643 spin_lock_irqsave(&ppd->lflags_lock, flags);
5644 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5645 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5646 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5649 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5651 if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5652 try_7322_ipg(ppd);
5653 if (!ppd->cpspec->recovery_init)
5654 setup_7322_link_recovery(ppd, 0);
5655 ppd->cpspec->qdr_dfe_time = jiffies +
5658 ppd->cpspec->ibmalfusesnap = 0;
5659 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5663 ppd->cpspec->iblnkdownsnap =
5664 read_7322_creg32_port(ppd, crp_iblinkdown);
5665 if (ppd->cpspec->ibdeltainprog) {
5666 ppd->cpspec->ibdeltainprog = 0;
5667 ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5668 crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5669 ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5670 crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5673 !ppd->cpspec->ibdeltainprog &&
5674 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5675 ppd->cpspec->ibdeltainprog = 1;
5676 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5678 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5683 qib_setup_7322_setextled(ppd, ibup);
5842 struct qib_pportdata *ppd = NULL;
5847 * severed. We need to hunt for the ppd that corresponds
5855 ppd = dd->pport + pidx;
5856 if (!ppd->cpspec->kpregbase)
5859 psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5867 ppd = NULL;
5877 if (!ppd || (mask & all_bits) != all_bits) {
5902 if (ppd) {
5903 sval = ppd->p_sendctrl & ~mask;
5905 ppd->p_sendctrl = sval;
5931 static void init_txdds_table(struct qib_pportdata *ppd, int override);
5936 struct qib_pportdata *ppd;
5943 ppd = qd->ppd;
5950 if (!qib_qsfp_mod_present(ppd)) {
5951 ppd->cpspec->qsfp_data.modpresent = 0;
5953 qib_set_ib_7322_lstate(ppd, 0,
5955 spin_lock_irqsave(&ppd->lflags_lock, flags);
5956 ppd->lflags &= ~QIBL_LINKV;
5957 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5970 ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5978 if (!ret && !ppd->dd->cspec->r1) {
5988 ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
5995 init_txdds_table(ppd, 0);
6000 if (!ppd->cpspec->qsfp_data.modpresent &&
6001 (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
6002 ppd->cpspec->qsfp_data.modpresent = 1;
6003 qib_set_ib_7322_lstate(ppd, 0,
6005 spin_lock_irqsave(&ppd->lflags_lock, flags);
6006 ppd->lflags |= QIBL_LINKV;
6007 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
6016 static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
6019 struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
6020 struct qib_devdata *dd = ppd->dd;
6023 mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
6024 qd->ppd = ppd;
6103 struct qib_pportdata *ppd = &dd->pport[pidx];
6105 if (ppd->port != port || !ppd->link_speed_supported)
6107 ppd->cpspec->no_eep = val;
6109 ppd->cpspec->h1_val = h1;
6111 init_txdds_table(ppd, 1);
6116 qib_set_ib_7322_lstate(ppd, 0,
6201 struct qib_pportdata *ppd = dd->pport + n;
6203 qib_write_kreg_port(ppd, krp_senddmaprioritythld,
6207 qib_init_7322_qsfp(ppd);
6229 static void write_7322_init_portregs(struct qib_pportdata *ppd)
6234 if (!ppd->link_speed_supported) {
6237 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
6238 qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
6239 qib_write_kreg(ppd->dd, kr_scratch, 0);
6247 val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
6249 val |= (u64)(ppd->vls_supported - 1) <<
6251 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
6253 qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
6256 qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
6260 qib_write_kreg_port(ppd, krp_ncmodectrl,
6267 qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
6268 qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
6269 qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
6270 if (ppd->dd->cspec->r1)
6271 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
6283 struct qib_pportdata *ppd;
6298 ppd = &dd->pport[pidx];
6302 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6320 ctxt = ppd->hw_pidx;
6324 qib_write_kreg_port(ppd, regno, val);
6329 qib_write_kreg_port(ppd, regno, val);
6367 struct qib_pportdata *ppd;
6374 ppd = (struct qib_pportdata *)(dd + 1);
6375 dd->pport = ppd;
6376 ppd[0].dd = dd;
6377 ppd[1].dd = dd;
6379 dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6381 ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6382 ppd[1].cpspec = &ppd[0].cpspec[1];
6383 ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6384 ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6426 ppd = dd->pport;
6464 struct qib_chippport_specific *cp = ppd->cpspec;
6466 ppd->link_speed_supported = features & PORT_SPD_CAP;
6468 if (!ppd->link_speed_supported) {
6473 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6474 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6475 ppd[0] = ppd[1];
6489 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6490 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6507 ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6513 ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6514 ppd->link_width_enabled = IB_WIDTH_4X;
6515 ppd->link_speed_enabled = ppd->link_speed_supported;
6520 ppd->link_width_active = IB_WIDTH_4X;
6521 ppd->link_speed_active = QIB_IB_SDR;
6522 ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6525 ppd->vls_supported = IB_VL_VL0;
6528 ppd->vls_supported = IB_VL_VL0_1;
6537 ppd->vls_supported = IB_VL_VL0_3;
6541 ppd->vls_supported = IB_VL_VL0_7;
6546 ppd->vls_supported = IB_VL_VL0_3;
6551 ppd->vls_operational = ppd->vls_supported;
6556 if (ppd->dd->cspec->r1)
6568 dd->unit, ppd->port);
6574 ppd->cpspec->no_eep = IS_QMH(dd) ?
6581 write_7322_init_portregs(ppd);
6585 ppd++;
6704 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6708 struct qib_devdata *dd = ppd->dd;
6712 first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6724 static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6727 qib_write_kreg_port(ppd, krp_psinterval, intv);
6728 qib_write_kreg_port(ppd, krp_psstart, start);
6734 static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6736 qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6742 static void dump_sdma_7322_state(struct qib_pportdata *ppd)
6746 reg = qib_read_kreg_port(ppd, krp_senddmastatus);
6747 qib_dev_porterr(ppd->dd, ppd->port,
6750 reg = qib_read_kreg_port(ppd, krp_sendctrl);
6751 qib_dev_porterr(ppd->dd, ppd->port,
6754 reg = qib_read_kreg_port(ppd, krp_senddmabase);
6755 qib_dev_porterr(ppd->dd, ppd->port,
6758 reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
6759 reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
6760 reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
6761 qib_dev_porterr(ppd->dd, ppd->port,
6766 reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6767 qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
6768 reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6769 qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg1);
6770 reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6771 qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg2);
6773 qib_dev_porterr(ppd->dd, ppd->port,
6776 reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6777 reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6778 reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6780 qib_dev_porterr(ppd->dd, ppd->port,
6784 reg = qib_read_kreg_port(ppd, krp_senddmatail);
6785 qib_dev_porterr(ppd->dd, ppd->port,
6788 reg = qib_read_kreg_port(ppd, krp_senddmahead);
6789 qib_dev_porterr(ppd->dd, ppd->port,
6792 reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
6793 qib_dev_porterr(ppd->dd, ppd->port,
6796 reg = qib_read_kreg_port(ppd, krp_senddmalengen);
6797 qib_dev_porterr(ppd->dd, ppd->port,
6800 reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
6801 qib_dev_porterr(ppd->dd, ppd->port,
6804 reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
6805 qib_dev_porterr(ppd->dd, ppd->port,
6808 reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
6809 qib_dev_porterr(ppd->dd, ppd->port,
6812 reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
6813 qib_dev_porterr(ppd->dd, ppd->port,
6816 dump_sdma_state(ppd);
6866 static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6868 ppd->sdma_state.set_state_action = sdma_7322_action_table;
6871 static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6873 struct qib_devdata *dd = ppd->dd;
6878 qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6879 qib_sdma_7322_setlengen(ppd);
6880 qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6881 qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6882 qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6883 qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6890 ((dd->num_pports == 1 || ppd->port == 2) ? n :
6894 ppd->sdma_state.first_sendbuf = erstbuf;
6895 ppd->sdma_state.last_sendbuf = lastbuf;
6902 qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6903 qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6904 qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6909 static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6911 struct qib_devdata *dd = ppd->dd;
6919 use_dmahead = __qib_sdma_running(ppd) &&
6923 (u16) le64_to_cpu(*ppd->sdma_head_dma) :
6924 (u16) qib_read_kreg_port(ppd, krp_senddmahead);
6926 swhead = ppd->sdma_descq_head;
6927 swtail = ppd->sdma_descq_tail;
6928 cnt = ppd->sdma_descq_cnt;
6954 static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6956 u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6970 static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6973 u8 snd_mult = ppd->delay_mult;
6984 ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
7343 static void set_txdds(struct qib_pportdata *ppd, int ridx,
7346 struct qib_devdata *dd = ppd->dd;
7357 if (ppd->hw_pidx)
7366 qib_write_kreg(ppd->dd, kr_scratch, 0);
7614 static void find_best_ent(struct qib_pportdata *ppd,
7619 struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7639 *sdr_dds = txdds_sdr + ppd->dd->board_atten;
7640 *ddr_dds = txdds_ddr + ppd->dd->board_atten;
7641 *qdr_dds = txdds_qdr + ppd->dd->board_atten;
7651 } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7658 idx = ppd->cpspec->no_eep;
7662 } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7664 idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7668 } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7669 ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7671 idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7673 ppd->dd->unit, ppd->port, idx);
7685 static void init_txdds_table(struct qib_pportdata *ppd, int override)
7692 find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7695 if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7699 set_txdds(ppd, 0, sdr_dds);
7700 set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7701 set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7702 if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7704 dds = (struct txdds_ent *)(ppd->link_speed_active ==
7706 (ppd->link_speed_active ==
7708 write_tx_serdes_param(ppd, dds);
7713 set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7714 set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7716 set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7806 static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7809 struct qib_devdata *dd = ppd->dd;
7813 ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7815 ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7820 static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7822 u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7827 ppd->dd->unit, ppd->port);
7831 ppd->dd->unit, ppd->port);
7834 qib_write_kreg_port(ppd, krp_serdesctrl, data);
7837 static int serdes_7322_init(struct qib_pportdata *ppd)
7841 if (ppd->dd->cspec->r1)
7842 ret = serdes_7322_init_old(ppd);
7844 ret = serdes_7322_init_new(ppd);
7848 static int serdes_7322_init_old(struct qib_pportdata *ppd)
7856 init_txdds_table(ppd, 0);
7859 qib_write_kreg_port(ppd, krp_tx_deemph_override,
7865 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7868 ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7870 ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7873 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7874 ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7877 le_val = IS_QME(ppd->dd) ? 0 : 1;
7878 ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7881 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7884 ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7888 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7889 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7890 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7891 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7894 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7895 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7896 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7897 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7900 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7903 ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7904 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7905 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7907 serdes_7322_los_enable(ppd, 1);
7910 ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7913 ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7916 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7917 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7923 qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7924 qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7925 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7926 ppd->dd->cspec->r1 ?
7928 ppd->cpspec->qdr_dfe_on = 1;
7931 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7934 ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7936 if (!ppd->dd->cspec->r1) {
7937 ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7938 ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7942 ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7947 static int serdes_7322_init_new(struct qib_pportdata *ppd)
7954 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7957 qib_write_kreg_port(ppd, krp_tx_deemph_override,
7964 ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
7966 ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
7968 ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
7970 ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
7972 ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7974 ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
7976 ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
7978 ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
7980 ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
7982 ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7984 ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
7986 ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
7988 ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
7990 ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
7992 ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
7994 ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
7996 if (!ppd->dd->cspec->r1) {
7997 ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
7998 ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
8000 ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
8011 ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
8014 ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
8017 ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
8022 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
8023 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
8024 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
8025 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
8028 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
8029 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
8030 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
8031 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
8034 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
8037 ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
8038 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
8039 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
8042 serdes_7322_los_enable(ppd, 1);
8044 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
8048 ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
8050 ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
8053 ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
8058 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8068 IBSD(ppd->hw_pidx), chan_done);
8071 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8076 IBSD(ppd->hw_pidx), chan);
8081 ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8086 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
8087 ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
8089 ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
8091 ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
8094 ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
8096 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
8097 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
8099 ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8102 ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
8104 ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
8106 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
8108 ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
8114 qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
8115 qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
8116 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
8117 ppd->dd->cspec->r1 ?
8119 ppd->cpspec->qdr_dfe_on = 1;
8121 ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
8123 ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
8126 ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
8128 ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
8130 ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
8132 ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
8138 init_txdds_table(ppd, 0);
8145 static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
8147 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8151 static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
8155 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8158 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8163 static void clock_man(struct qib_pportdata *ppd, int chan)
8165 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8167 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8169 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8171 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8180 static void write_tx_serdes_param(struct qib_pportdata *ppd,
8185 deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
8206 qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
8214 static void adj_tx_serdes(struct qib_pportdata *ppd)
8219 find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
8220 dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
8221 qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
8223 write_tx_serdes_param(ppd, dds);
8227 static void force_h1(struct qib_pportdata *ppd)
8231 ppd->cpspec->qdr_reforce = 0;
8232 if (!ppd->dd->cspec->r1)
8236 set_man_mode_h1(ppd, chan, 1, 0);
8237 set_man_code(ppd, chan, ppd->cpspec->h1_val);
8238 clock_man(ppd, chan);
8239 set_man_mode_h1(ppd, chan, 0, 0);
8428 static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8431 struct qib_devdata *dd = ppd->dd;
8433 if (!ppd->dd->cspec->r1)
8437 ppd->cpspec->recovery_init = 1;
8440 portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8462 static void check_7322_rxe_status(struct qib_pportdata *ppd)
8464 struct qib_devdata *dd = ppd->dd;
8480 ppd->dd->cspec->stay_in_freeze = 1;
8481 qib_7322_set_intr_state(ppd->dd, 0);
8487 qib_write_kreg(ppd->dd, kr_hwerrclear,
8494 if (ppd->link_speed_supported) {
8495 ppd->cpspec->ibcctrl_a &=
8497 qib_write_kreg_port(ppd, krp_ibcctrl_a,
8498 ppd->cpspec->ibcctrl_a);
8500 if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8501 qib_set_ib_7322_lstate(ppd, 0,
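
The per-port accessors indexed at source lines 870-941 above all follow the same guard pattern: check that the per-port register mapping exists and that the device is still flagged present before touching mapped memory. Below is a minimal, self-contained userspace sketch of that pattern, not the driver's code: the struct layouts, the QIB_PRESENT stand-in, and the plain pointer dereferences used in place of readq()/writeq() are simplified illustrations only.

/*
 * Illustrative sketch of the guarded per-port register accessors.
 * All names here are stand-ins; only the guard structure mirrors the
 * excerpts listed above.
 */
#include <stdint.h>
#include <stdio.h>

#define QIB_PRESENT 0x1u	/* stand-in for the driver's "device present" flag */

struct fake_cpspec {
	volatile uint64_t *kpregbase;	/* mapped per-port kernel registers */
};

struct fake_devdata {
	unsigned flags;
};

struct fake_pportdata {
	struct fake_devdata *dd;
	struct fake_cpspec *cpspec;
};

/* Read a per-port register; return a benign 0 if the mapping is unusable. */
static uint64_t read_kreg_port(const struct fake_pportdata *ppd, unsigned regno)
{
	if (!ppd->cpspec || !ppd->cpspec->kpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0;			/* device gone or not mapped */
	return ppd->cpspec->kpregbase[regno];	/* readq() in the real driver */
}

/* Write a per-port register only when the mapping and device are valid. */
static void write_kreg_port(const struct fake_pportdata *ppd, unsigned regno,
			    uint64_t value)
{
	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		ppd->cpspec->kpregbase[regno] = value;	/* writeq() in the real driver */
}

int main(void)
{
	uint64_t regs[16] = { 0 };
	struct fake_cpspec cp = { .kpregbase = regs };
	struct fake_devdata dd = { .flags = QIB_PRESENT };
	struct fake_pportdata ppd = { .dd = &dd, .cpspec = &cp };

	write_kreg_port(&ppd, 3, 0xabcdULL);
	printf("reg3 = 0x%llx\n",
	       (unsigned long long)read_kreg_port(&ppd, 3));

	dd.flags = 0;	/* simulate device removal: reads now return 0, writes are dropped */
	printf("reg3 after removal = 0x%llx\n",
	       (unsigned long long)read_kreg_port(&ppd, 3));
	return 0;
}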