Lines matching references to bp (the struct bnxt * driver-private pointer) in the bnxt driver
261 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), \
268 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
271 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
273 if (bp->flags & BNXT_FLAG_CHIP_P5)
279 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
281 if (bp->flags & BNXT_FLAG_CHIP_P5)
287 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
289 if (bp->flags & BNXT_FLAG_CHIP_P5)
290 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
296 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
298 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
301 if (BNXT_PF(bp))
302 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
304 schedule_delayed_work(&bp->fw_reset_task, delay);
307 static void __bnxt_queue_sp_work(struct bnxt *bp)
309 if (BNXT_PF(bp))
310 queue_work(bnxt_pf_wq, &bp->sp_task);
312 schedule_work(&bp->sp_task);
315 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
317 set_bit(event, &bp->sp_event);
318 __bnxt_queue_sp_work(bp);
321 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
325 if (bp->flags & BNXT_FLAG_CHIP_P5)
326 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
328 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
329 __bnxt_queue_sp_work(bp);
334 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
342 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_pkts:%d cons:%u prod:%u i:%d)",
347 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
382 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
385 bnxt_db_write(bp, &txr->tx_db, prod);
391 struct bnxt *bp = netdev_priv(dev);
400 struct pci_dev *pdev = bp->pdev;
406 if (unlikely(i >= bp->tx_nr_rings)) {
413 txr = &bp->tx_ring[bp->tx_ring_map[i]];
416 free_size = bnxt_tx_avail(bp, txr);
420 netif_warn(bp, tx_err, dev,
422 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
423 bp->tx_wake_thresh))
455 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
466 atomic_inc(&bp->ptp_cfg->tx_avail);
474 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
643 bnxt_txr_db_kick(bp, txr, prod);
649 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
651 bnxt_txr_db_kick(bp, txr, prod);
653 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
654 bp->tx_wake_thresh);
660 atomic_inc(&bp->ptp_cfg->tx_avail);
684 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
690 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
693 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
695 struct pci_dev *pdev = bp->pdev;
711 bnxt_sched_reset_txr(bp, txr, i);
736 if (bp->flags & BNXT_FLAG_CHIP_P5) {
738 if (!bnxt_get_tx_ts_p5(bp, skb))
741 atomic_inc(&bp->ptp_cfg->tx_avail);
755 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
759 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
780 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
784 struct pci_dev *pdev = bp->pdev;
787 data = napi_alloc_frag(bp->rx_buf_size);
789 data = netdev_alloc_frag(bp->rx_buf_size);
793 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
794 bp->rx_buf_use_size, bp->rx_dir,
804 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
811 if (BNXT_RX_PAGE_MODE(bp)) {
814 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
819 mapping += bp->rx_dma_offset;
821 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
823 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
829 rx_buf->data_ptr = data + bp->rx_offset;
867 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
879 page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
899 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
911 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
924 struct bnxt *bp = bnapi->bp;
931 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
942 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
944 agg = bnxt_get_agg(bp, cpr, idx, start + i);
977 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
989 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
994 dma_addr -= bp->rx_dma_offset;
995 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
996 bp->rx_dir);
997 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
1003 skb_reserve(skb, bp->rx_offset);
1009 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1023 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1028 dma_addr -= bp->rx_dma_offset;
1029 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1030 bp->rx_dir);
1033 payload = eth_get_headlen(bp->dev, data_ptr, len);
1056 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1066 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1072 skb = napi_build_skb(data, bp->rx_buf_size);
1073 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1074 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1080 skb_reserve(skb, bp->rx_offset);
1085 static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
1092 struct pci_dev *pdev = bp->pdev;
1098 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1110 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1112 agg = bnxt_get_agg(bp, cpr, idx, i);
1134 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1147 bp->rx_dir);
1156 static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
1164 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
1178 static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
1189 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
1199 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1216 struct bnxt *bp = bnapi->bp;
1217 struct pci_dev *pdev = bp->pdev;
1224 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1225 bp->rx_dir);
1230 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1231 bp->rx_dir);
1237 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1253 if (bp->flags & BNXT_FLAG_CHIP_P5)
1260 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1294 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1304 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1318 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1321 bnxt_sched_reset_rxr(bp, rxr);
1359 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1549 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1567 if (bp->flags & BNXT_FLAG_CHIP_P5)
1571 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1581 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1583 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1586 return dev ? dev : bp->dev;
1589 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1608 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1615 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1621 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1629 gro = !!(bp->flags & BNXT_FLAG_GRO);
1636 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1653 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1658 if (len <= bp->rx_copy_thresh) {
1669 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
1677 tpa_info->data_ptr = new_data + bp->rx_offset;
1680 skb = napi_build_skb(data, bp->rx_buf_size);
1681 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1682 bp->rx_buf_use_size, bp->rx_dir,
1691 skb_reserve(skb, bp->rx_offset);
1696 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
1705 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1732 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1737 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1749 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1754 if (skb->dev != bp->dev) {
1756 bnxt_vf_rep_rx(bp, skb);
1763 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
1770 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
1785 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1790 struct net_device *dev = bp->dev;
1813 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1832 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1839 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1848 bnxt_deliver_skb(bp, bnapi, skb);
1857 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1861 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1863 bnxt_sched_reset_rxr(bp, rxr);
1877 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1897 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1898 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1899 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1901 bnxt_sched_reset_rxr(bp, rxr);
1911 if (bnxt_xdp_attached(bp, rxr)) {
1912 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
1914 u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
1927 if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
1933 if (len <= bp->rx_copy_thresh) {
1955 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1966 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
1973 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
1995 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
2026 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2027 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2030 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2031 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2042 bnxt_deliver_skb(bp, bnapi, skb);
2062 static int bnxt_force_rx_discard(struct bnxt *bp,
2100 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2106 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2108 struct bnxt_fw_health *fw_health = bp->fw_health;
2116 pci_read_config_dword(bp->pdev, reg_off, &val);
2122 val = readl(bp->bar0 + reg_off);
2125 val = readl(bp->bar1 + reg_off);
2133 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2137 for (i = 0; i < bp->rx_nr_rings; i++) {
2138 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2141 grp_info = &bp->grp_info[grp_idx];
2148 static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2154 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2158 netdev_warn(bp->dev, "Pause Storm detected!\n");
2161 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2164 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2192 static int bnxt_async_event_process(struct bnxt *bp,
2199 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2205 struct bnxt_link_info *link_info = &bp->link_info;
2207 if (BNXT_VF(bp))
2217 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2220 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2225 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2228 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2231 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2236 if (BNXT_VF(bp))
2239 if (bp->pf.port_id != port_id)
2242 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2246 if (BNXT_PF(bp))
2248 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2253 if (!bp->fw_health)
2256 bp->fw_reset_timestamp = jiffies;
2257 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2258 if (!bp->fw_reset_min_dsecs)
2259 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2260 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2261 if (!bp->fw_reset_max_dsecs)
2262 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2264 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2267 bp->fw_health->fatalities++;
2268 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2272 bp->fw_health->survivals++;
2273 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2275 netif_warn(bp, hw, bp->dev,
2278 bp->fw_reset_min_dsecs * 100,
2279 bp->fw_reset_max_dsecs * 100);
2280 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2284 struct bnxt_fw_health *fw_health = bp->fw_health;
2293 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2299 bp->current_interval * 10);
2303 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2305 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2306 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2309 netif_info(bp, drv, bp->dev,
2323 netif_notice(bp, hw, bp->dev,
2331 if (bp->flags & BNXT_FLAG_CHIP_P5)
2334 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2339 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2341 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2345 rxr = bp->bnapi[grp_idx]->rx_ring;
2346 bnxt_sched_reset_rxr(bp, rxr);
2350 struct bnxt_fw_health *fw_health = bp->fw_health;
2352 netif_notice(bp, hw, bp->dev,
2358 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2364 bnxt_ptp_pps_event(bp, data1, data2);
2368 bnxt_event_error_report(bp, data1, data2);
2374 if (BNXT_PTP_USE_RTC(bp)) {
2375 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2382 bnxt_ptp_update_current_time(bp);
2395 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2401 __bnxt_queue_sp_work(bp);
2406 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2416 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2422 if ((vf_id < bp->pf.first_vf_id) ||
2423 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2424 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2429 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2430 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2434 bnxt_async_event_process(bp,
2448 struct bnxt *bp = bnapi->bp;
2458 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2472 struct bnxt *bp = bnapi->bp;
2479 if (!bnxt_has_work(bp, cpr)) {
2480 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2490 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2497 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2526 if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
2535 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2537 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2556 bnxt_hwrm_handler(bp, txcmp);
2576 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2585 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2589 bnapi->tx_int(bp, bnapi, budget);
2594 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2599 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2604 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2610 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2616 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2618 __bnxt_poll_work_done(bp, bnapi, budget);
2625 struct bnxt *bp = bnapi->bp;
2662 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2671 bnxt_hwrm_handler(bp, txcmp);
2673 netdev_err(bp->dev,
2684 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2687 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2691 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2701 struct bnxt *bp = bnapi->bp;
2705 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2710 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2718 if (!bnxt_has_work(bp, cpr)) {
2724 if (bp->flags & BNXT_FLAG_DIM) {
2736 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2745 work_done += __bnxt_poll_work(bp, cpr2,
2753 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2765 bnxt_writeq(bp, db->db_key64 | dbr_type |
2770 __bnxt_poll_work_done(bp, bnapi, budget);
2779 struct bnxt *bp = bnapi->bp;
2784 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2790 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2800 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2823 work_done += __bnxt_poll_work(bp, cpr2,
2827 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2831 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
2838 if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) {
2850 static void bnxt_free_tx_skbs(struct bnxt *bp)
2853 struct pci_dev *pdev = bp->pdev;
2855 if (!bp->tx_ring)
2858 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2859 for (i = 0; i < bp->tx_nr_rings; i++) {
2860 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2871 if (i < bp->tx_nr_rings_xdp &&
2906 int ring_idx = j & bp->tx_ring_mask;
2917 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2921 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2923 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2924 struct pci_dev *pdev = bp->pdev;
2928 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2929 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2933 for (i = 0; i < bp->max_tpa; i++) {
2941 bp->rx_buf_use_size, bp->rx_dir,
2962 if (BNXT_RX_PAGE_MODE(bp)) {
2966 bp->rx_buf_use_size, bp->rx_dir,
2995 static void bnxt_free_rx_skbs(struct bnxt *bp)
2999 if (!bp->rx_ring)
3002 for (i = 0; i < bp->rx_nr_rings; i++)
3003 bnxt_free_one_rx_ring_skbs(bp, i);
3006 static void bnxt_free_skbs(struct bnxt *bp)
3008 bnxt_free_tx_skbs(bp);
3009 bnxt_free_rx_skbs(bp);
3029 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3031 struct pci_dev *pdev = bp->pdev;
3062 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3064 struct pci_dev *pdev = bp->pdev;
3115 static void bnxt_free_tpa_info(struct bnxt *bp)
3119 for (i = 0; i < bp->rx_nr_rings; i++) {
3120 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3125 for (j = 0; j < bp->max_tpa; j++) {
3135 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3139 bp->max_tpa = MAX_TPA;
3140 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3141 if (!bp->max_tpa_v2)
3143 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3146 for (i = 0; i < bp->rx_nr_rings; i++) {
3147 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3150 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3155 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3157 for (j = 0; j < bp->max_tpa; j++) {
3171 static void bnxt_free_rx_rings(struct bnxt *bp)
3175 if (!bp->rx_ring)
3178 bnxt_free_tpa_info(bp);
3179 for (i = 0; i < bp->rx_nr_rings; i++) {
3180 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3196 bnxt_free_ring(bp, &ring->ring_mem);
3199 bnxt_free_ring(bp, &ring->ring_mem);
3203 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3208 pp.pool_size = bp->rx_agg_ring_size;
3209 if (BNXT_RX_PAGE_MODE(bp))
3210 pp.pool_size += bp->rx_ring_size;
3211 pp.nid = dev_to_node(&bp->pdev->dev);
3213 pp.dev = &bp->pdev->dev;
3214 pp.dma_dir = bp->rx_dir;
3230 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3234 if (!bp->rx_ring)
3237 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3240 for (i = 0; i < bp->rx_nr_rings; i++) {
3241 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3246 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3250 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3262 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3271 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3276 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3283 if (bp->flags & BNXT_FLAG_TPA)
3284 rc = bnxt_alloc_tpa_info(bp);
3288 static void bnxt_free_tx_rings(struct bnxt *bp)
3291 struct pci_dev *pdev = bp->pdev;
3293 if (!bp->tx_ring)
3296 for (i = 0; i < bp->tx_nr_rings; i++) {
3297 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3301 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3308 bnxt_free_ring(bp, &ring->ring_mem);
3312 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3315 struct pci_dev *pdev = bp->pdev;
3317 bp->tx_push_size = 0;
3318 if (bp->tx_push_thresh) {
3322 bp->tx_push_thresh);
3326 bp->tx_push_thresh = 0;
3329 bp->tx_push_size = push_size;
3332 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3333 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3339 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3344 if (bp->tx_push_size) {
3351 bp->tx_push_size,
3362 qidx = bp->tc_to_qidx[j];
3363 ring->queue_id = bp->q_info[qidx].queue_id;
3365 if (i < bp->tx_nr_rings_xdp)
3367 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3397 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3401 if (!bp->bnapi)
3403 for (i = 0; i < bp->cp_nr_rings; i++) {
3404 struct bnxt_napi *bnapi = bp->bnapi[i];
3412 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3414 int i, n = bp->cp_nr_pages;
3416 for (i = 0; i < bp->cp_nr_rings; i++) {
3417 struct bnxt_napi *bnapi = bp->bnapi[i];
3429 static void bnxt_free_cp_rings(struct bnxt *bp)
3433 if (!bp->bnapi)
3436 for (i = 0; i < bp->cp_nr_rings; i++) {
3437 struct bnxt_napi *bnapi = bp->bnapi[i];
3448 bnxt_free_ring(bp, &ring->ring_mem);
3455 bnxt_free_ring(bp, &ring->ring_mem);
3464 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3475 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3483 rmem->nr_pages = bp->cp_nr_pages;
3488 rc = bnxt_alloc_ring(bp, rmem);
3490 bnxt_free_ring(bp, rmem);
3498 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3500 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3503 ulp_msix = bnxt_get_ulp_msix_num(bp);
3504 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3505 for (i = 0; i < bp->cp_nr_rings; i++) {
3506 struct bnxt_napi *bnapi = bp->bnapi[i];
3517 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3526 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3529 if (i < bp->rx_nr_rings) {
3531 bnxt_alloc_cp_sub_ring(bp);
3538 if ((sh && i < bp->tx_nr_rings) ||
3539 (!sh && i >= bp->rx_nr_rings)) {
3541 bnxt_alloc_cp_sub_ring(bp);
3552 static void bnxt_init_ring_struct(struct bnxt *bp)
3556 for (i = 0; i < bp->cp_nr_rings; i++) {
3557 struct bnxt_napi *bnapi = bp->bnapi[i];
3570 rmem->nr_pages = bp->cp_nr_pages;
3582 rmem->nr_pages = bp->rx_nr_pages;
3586 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3591 rmem->nr_pages = bp->rx_agg_nr_pages;
3595 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3605 rmem->nr_pages = bp->tx_nr_pages;
3609 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3636 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3638 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3639 struct net_device *dev = bp->dev;
3644 for (i = 0; i < bp->rx_ring_size; i++) {
3645 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3647 ring_nr, i, bp->rx_ring_size);
3654 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3658 for (i = 0; i < bp->rx_agg_ring_size; i++) {
3659 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3661 ring_nr, i, bp->rx_ring_size);
3672 for (i = 0; i < bp->max_tpa; i++) {
3673 data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
3678 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3685 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3691 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3697 rxr = &bp->rx_ring[ring_nr];
3701 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3702 bpf_prog_add(bp->xdp_prog, 1);
3703 rxr->xdp_prog = bp->xdp_prog;
3710 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3717 return bnxt_alloc_one_rx_ring(bp, ring_nr);
3720 static void bnxt_init_cp_rings(struct bnxt *bp)
3724 for (i = 0; i < bp->cp_nr_rings; i++) {
3725 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3729 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3730 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3739 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3740 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3745 static int bnxt_init_rx_rings(struct bnxt *bp)
3749 if (BNXT_RX_PAGE_MODE(bp)) {
3750 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3751 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3753 bp->rx_offset = BNXT_RX_OFFSET;
3754 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3757 for (i = 0; i < bp->rx_nr_rings; i++) {
3758 rc = bnxt_init_one_rx_ring(bp, i);
3766 static int bnxt_init_tx_rings(struct bnxt *bp)
3770 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3773 for (i = 0; i < bp->tx_nr_rings; i++) {
3774 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3783 static void bnxt_free_ring_grps(struct bnxt *bp)
3785 kfree(bp->grp_info);
3786 bp->grp_info = NULL;
3789 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3794 bp->grp_info = kcalloc(bp->cp_nr_rings,
3797 if (!bp->grp_info)
3800 for (i = 0; i < bp->cp_nr_rings; i++) {
3802 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3803 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3804 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3805 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3806 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3811 static void bnxt_free_vnics(struct bnxt *bp)
3813 kfree(bp->vnic_info);
3814 bp->vnic_info = NULL;
3815 bp->nr_vnics = 0;
3818 static int bnxt_alloc_vnics(struct bnxt *bp)
3823 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3824 num_vnics += bp->rx_nr_rings;
3827 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3830 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3832 if (!bp->vnic_info)
3835 bp->nr_vnics = num_vnics;
3839 static void bnxt_init_vnics(struct bnxt *bp)
3843 for (i = 0; i < bp->nr_vnics; i++) {
3844 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3853 if (bp->vnic_info[i].rss_hash_key) {
3859 bp->vnic_info[0].rss_hash_key,
3882 void bnxt_set_tpa_flags(struct bnxt *bp)
3884 bp->flags &= ~BNXT_FLAG_TPA;
3885 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3887 if (bp->dev->features & NETIF_F_LRO)
3888 bp->flags |= BNXT_FLAG_LRO;
3889 else if (bp->dev->features & NETIF_F_GRO_HW)
3890 bp->flags |= BNXT_FLAG_GRO;
3893 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3896 void bnxt_set_ring_params(struct bnxt *bp)
3902 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3907 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3908 ring_size = bp->rx_ring_size;
3909 bp->rx_agg_ring_size = 0;
3910 bp->rx_agg_nr_pages = 0;
3912 if (bp->flags & BNXT_FLAG_TPA)
3915 bp->flags &= ~BNXT_FLAG_JUMBO;
3916 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3919 bp->flags |= BNXT_FLAG_JUMBO;
3920 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3927 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
3928 bp->rx_ring_size, ring_size);
3929 bp->rx_ring_size = ring_size;
3933 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3935 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3938 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3940 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3943 bp->rx_agg_ring_size = agg_ring_size;
3944 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3946 if (BNXT_RX_PAGE_MODE(bp)) {
3958 bp->rx_buf_use_size = rx_size;
3959 bp->rx_buf_size = rx_space;
3961 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3962 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3964 ring_size = bp->tx_ring_size;
3965 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3966 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3968 max_rx_cmpl = bp->rx_ring_size;
3973 if (bp->flags & BNXT_FLAG_TPA)
3974 max_rx_cmpl += bp->max_tpa;
3976 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3977 bp->cp_ring_size = ring_size;
3979 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3980 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3981 bp->cp_nr_pages = MAX_CP_PAGES;
3982 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3983 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3984 ring_size, bp->cp_ring_size);
3986 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3987 bp->cp_ring_mask = bp->cp_bit - 1;
3993 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3995 struct net_device *dev = bp->dev;
3998 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3999 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4001 if (bp->xdp_prog->aux->xdp_has_frags)
4002 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4005 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4007 bp->flags |= BNXT_FLAG_JUMBO;
4008 bp->rx_skb_func = bnxt_rx_multi_page_skb;
4010 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4011 bp->rx_skb_func = bnxt_rx_page_skb;
4013 bp->rx_dir = DMA_BIDIRECTIONAL;
4017 dev->max_mtu = bp->max_mtu;
4018 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4019 bp->rx_dir = DMA_FROM_DEVICE;
4020 bp->rx_skb_func = bnxt_rx_skb;
4025 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4029 struct pci_dev *pdev = bp->pdev;
4031 if (!bp->vnic_info)
4034 for (i = 0; i < bp->nr_vnics; i++) {
4035 vnic = &bp->vnic_info[i];
4061 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4065 struct pci_dev *pdev = bp->pdev;
4068 for (i = 0; i < bp->nr_vnics; i++) {
4069 vnic = &bp->vnic_info[i];
4096 if (bp->flags & BNXT_FLAG_CHIP_P5)
4100 max_rings = bp->rx_nr_rings;
4110 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
4116 if (bp->flags & BNXT_FLAG_CHIP_P5)
4138 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4142 dma_pool_destroy(bp->hwrm_dma_pool);
4143 bp->hwrm_dma_pool = NULL;
4146 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4151 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4153 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4156 if (!bp->hwrm_dma_pool)
4159 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4164 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4171 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4177 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4180 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4197 bnxt_free_stats_mem(bp, stats);
4217 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4225 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4226 !(bp->flags & BNXT_FLAG_CHIP_P5))
4229 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4236 resp = hwrm_req_hold(bp, req);
4237 rc = hwrm_req_send(bp, req);
4242 hwrm_req_drop(bp, req);
4246 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4247 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4249 static void bnxt_init_stats(struct bnxt *bp)
4251 struct bnxt_napi *bnapi = bp->bnapi[0];
4262 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4264 if (bp->flags & BNXT_FLAG_CHIP_P5)
4270 if (bp->flags & BNXT_FLAG_PORT_STATS) {
4271 stats = &bp->port_stats;
4280 rc = bnxt_hwrm_port_qstats(bp, flags);
4289 bnxt_hwrm_port_qstats(bp, 0);
4292 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4293 stats = &bp->rx_port_stats_ext;
4297 stats = &bp->tx_port_stats_ext;
4303 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4315 bnxt_hwrm_port_qstats_ext(bp, 0);
4320 static void bnxt_free_port_stats(struct bnxt *bp)
4322 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4323 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4325 bnxt_free_stats_mem(bp, &bp->port_stats);
4326 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4327 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4330 static void bnxt_free_ring_stats(struct bnxt *bp)
4334 if (!bp->bnapi)
4337 for (i = 0; i < bp->cp_nr_rings; i++) {
4338 struct bnxt_napi *bnapi = bp->bnapi[i];
4341 bnxt_free_stats_mem(bp, &cpr->stats);
4345 static int bnxt_alloc_stats(struct bnxt *bp)
4350 size = bp->hw_ring_stats_size;
4352 for (i = 0; i < bp->cp_nr_rings; i++) {
4353 struct bnxt_napi *bnapi = bp->bnapi[i];
4357 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4364 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4367 if (bp->port_stats.hw_stats)
4370 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4371 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4375 bp->flags |= BNXT_FLAG_PORT_STATS;
4379 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4380 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4383 if (bp->rx_port_stats_ext.hw_stats)
4386 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4387 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4393 if (bp->tx_port_stats_ext.hw_stats)
4396 if (bp->hwrm_spec_code >= 0x10902 ||
4397 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4398 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4399 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4404 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4408 static void bnxt_clear_ring_indices(struct bnxt *bp)
4412 if (!bp->bnapi)
4415 for (i = 0; i < bp->cp_nr_rings; i++) {
4416 struct bnxt_napi *bnapi = bp->bnapi[i];
4443 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4456 head = &bp->ntp_fltr_hash_tbl[i];
4463 bitmap_free(bp->ntp_fltr_bmap);
4464 bp->ntp_fltr_bmap = NULL;
4466 bp->ntp_fltr_count = 0;
4470 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4475 if (!(bp->flags & BNXT_FLAG_RFS))
4479 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4481 bp->ntp_fltr_count = 0;
4482 bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
4484 if (!bp->ntp_fltr_bmap)
4493 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4495 bnxt_free_vnic_attributes(bp);
4496 bnxt_free_tx_rings(bp);
4497 bnxt_free_rx_rings(bp);
4498 bnxt_free_cp_rings(bp);
4499 bnxt_free_all_cp_arrays(bp);
4500 bnxt_free_ntp_fltrs(bp, irq_re_init);
4502 bnxt_free_ring_stats(bp);
4503 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4504 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4505 bnxt_free_port_stats(bp);
4506 bnxt_free_ring_grps(bp);
4507 bnxt_free_vnics(bp);
4508 kfree(bp->tx_ring_map);
4509 bp->tx_ring_map = NULL;
4510 kfree(bp->tx_ring);
4511 bp->tx_ring = NULL;
4512 kfree(bp->rx_ring);
4513 bp->rx_ring = NULL;
4514 kfree(bp->bnapi);
4515 bp->bnapi = NULL;
4517 bnxt_clear_ring_indices(bp);
4521 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4531 bp->cp_nr_rings);
4533 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4537 bp->bnapi = bnapi;
4539 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4540 bp->bnapi[i] = bnapi;
4541 bp->bnapi[i]->index = i;
4542 bp->bnapi[i]->bp = bp;
4543 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4545 &bp->bnapi[i]->cp_ring;
4552 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4555 if (!bp->rx_ring)
4558 for (i = 0; i < bp->rx_nr_rings; i++) {
4559 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4561 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4567 rxr->bnapi = bp->bnapi[i];
4568 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4571 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4574 if (!bp->tx_ring)
4577 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4580 if (!bp->tx_ring_map)
4583 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4586 j = bp->rx_nr_rings;
4588 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4589 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4591 if (bp->flags & BNXT_FLAG_CHIP_P5)
4594 txr->bnapi = bp->bnapi[j];
4595 bp->bnapi[j]->tx_ring = txr;
4596 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4597 if (i >= bp->tx_nr_rings_xdp) {
4598 txr->txq_index = i - bp->tx_nr_rings_xdp;
4599 bp->bnapi[j]->tx_int = bnxt_tx_int;
4601 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4602 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4606 rc = bnxt_alloc_stats(bp);
4609 bnxt_init_stats(bp);
4611 rc = bnxt_alloc_ntp_fltrs(bp);
4615 rc = bnxt_alloc_vnics(bp);
4620 rc = bnxt_alloc_all_cp_arrays(bp);
4624 bnxt_init_ring_struct(bp);
4626 rc = bnxt_alloc_rx_rings(bp);
4630 rc = bnxt_alloc_tx_rings(bp);
4634 rc = bnxt_alloc_cp_rings(bp);
4638 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4640 rc = bnxt_alloc_vnic_attributes(bp);
4646 bnxt_free_mem(bp, true);
4650 static void bnxt_disable_int(struct bnxt *bp)
4654 if (!bp->bnapi)
4657 for (i = 0; i < bp->cp_nr_rings; i++) {
4658 struct bnxt_napi *bnapi = bp->bnapi[i];
4663 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4667 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4669 struct bnxt_napi *bnapi = bp->bnapi[n];
4676 static void bnxt_disable_int_sync(struct bnxt *bp)
4680 if (!bp->irq_tbl)
4683 atomic_inc(&bp->intr_sem);
4685 bnxt_disable_int(bp);
4686 for (i = 0; i < bp->cp_nr_rings; i++) {
4687 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4689 synchronize_irq(bp->irq_tbl[map_idx].vector);
4693 static void bnxt_enable_int(struct bnxt *bp)
4697 atomic_set(&bp->intr_sem, 0);
4698 for (i = 0; i < bp->cp_nr_rings; i++) {
4699 struct bnxt_napi *bnapi = bp->bnapi[i];
4702 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4706 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4716 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
4726 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4728 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4739 if (BNXT_PF(bp)) {
4760 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4769 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4772 !bp->ptp_cfg)
4789 resp = hwrm_req_hold(bp, req);
4790 rc = hwrm_req_send(bp, req);
4792 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4795 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4797 hwrm_req_drop(bp, req);
4801 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4806 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4809 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
4812 return hwrm_req_send(bp, req);
4815 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4821 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
4824 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
4827 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
4835 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4836 bp->vxlan_port = 0;
4837 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4840 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4841 bp->nge_port = 0;
4842 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4848 rc = hwrm_req_send(bp, req);
4850 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4855 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4862 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
4869 resp = hwrm_req_hold(bp, req);
4870 rc = hwrm_req_send(bp, req);
4872 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4879 bp->vxlan_port = port;
4880 bp->vxlan_fw_dst_port_id =
4884 bp->nge_port = port;
4885 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4892 hwrm_req_drop(bp, req);
4896 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4899 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4902 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
4912 return hwrm_req_send_silent(bp, req);
4916 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4922 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
4927 return hwrm_req_send(bp, req);
4949 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4959 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
4963 req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4965 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4969 vnic = &bp->vnic_info[fltr->rxq + 1];
5011 resp = hwrm_req_hold(bp, req);
5012 rc = hwrm_req_send(bp, req);
5015 hwrm_req_drop(bp, req);
5020 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
5027 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
5032 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
5035 req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
5048 resp = hwrm_req_hold(bp, req);
5049 rc = hwrm_req_send(bp, req);
5051 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
5053 hwrm_req_drop(bp, req);
5057 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5064 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
5067 hwrm_req_hold(bp, req);
5069 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5074 rc = hwrm_req_send(bp, req);
5078 hwrm_req_drop(bp, req);
5082 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5084 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5092 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
5097 u16 mss = bp->dev->mtu - 40;
5128 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5130 max_aggs = bp->max_tpa;
5141 return hwrm_req_send(bp, req);
5144 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5148 grp_info = &bp->grp_info[ring->grp_idx];
5152 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5154 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5161 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5165 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5167 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5174 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5178 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5182 if (bp->flags & BNXT_FLAG_CHIP_P5)
5187 bp->rss_indir_tbl_entries = entries;
5188 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5190 if (!bp->rss_indir_tbl)
5195 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5199 if (!bp->rx_nr_rings)
5202 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5203 max_rings = bp->rx_nr_rings - 1;
5205 max_rings = bp->rx_nr_rings;
5207 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5210 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5212 pad = bp->rss_indir_tbl_entries - max_entries;
5214 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5217 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5221 if (!bp->rss_indir_tbl)
5224 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5226 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5230 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5232 if (bp->flags & BNXT_FLAG_CHIP_P5)
5234 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5239 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5247 j = bp->rss_indir_tbl[i];
5252 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5259 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5264 j = bp->rss_indir_tbl[i];
5265 rxr = &bp->rx_ring[j];
5269 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5275 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
5278 if (bp->flags & BNXT_FLAG_CHIP_P5)
5279 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5281 bnxt_fill_hw_rss_tbl(bp, vnic);
5283 if (bp->rss_hash_delta) {
5284 req->hash_type = cpu_to_le32(bp->rss_hash_delta);
5285 if (bp->rss_hash_cfg & bp->rss_hash_delta)
5290 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5297 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5299 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5303 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5307 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5312 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
5314 return hwrm_req_send(bp, req);
5317 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5319 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5325 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5331 return hwrm_req_send(bp, req);
5333 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
5335 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5337 hwrm_req_hold(bp, req);
5342 rc = hwrm_req_send(bp, req);
5348 hwrm_req_drop(bp, req);
5352 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
5354 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5358 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
5364 resp = hwrm_req_hold(bp, req);
5365 if (!hwrm_req_send(bp, req)) {
5366 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
5367 bp->rss_hash_delta = 0;
5369 hwrm_req_drop(bp, req);
5372 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5374 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5378 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
5385 if (BNXT_RX_PAGE_MODE(bp)) {
5386 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
5392 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5393 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5396 return hwrm_req_send(bp, req);
5399 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5404 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
5408 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5410 hwrm_req_send(bp, req);
5411 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5414 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5418 for (i = 0; i < bp->nr_vnics; i++) {
5419 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5423 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5426 bp->rsscos_nr_ctxs = 0;
5429 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5435 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
5439 resp = hwrm_req_hold(bp, req);
5440 rc = hwrm_req_send(bp, req);
5442 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5444 hwrm_req_drop(bp, req);
5449 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5451 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5456 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5458 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5464 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
5468 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5469 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5474 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5488 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5496 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5508 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5509 ring = bp->rx_nr_rings - 1;
5511 grp_idx = bp->rx_ring[ring].bnapi->index;
5512 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5515 req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5519 if (BNXT_VF(bp))
5520 def_vlan = bp->vf.vlan;
5522 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5524 if (!vnic_id && bnxt_ulp_registered(bp->edev))
5525 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5527 return hwrm_req_send(bp, req);
5530 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5532 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5535 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
5539 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5541 hwrm_req_send(bp, req);
5542 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5546 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5550 for (i = 0; i < bp->nr_vnics; i++)
5551 bnxt_hwrm_vnic_free_one(bp, i);
5554 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5559 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5564 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
5568 if (bp->flags & BNXT_FLAG_CHIP_P5)
5573 grp_idx = bp->rx_ring[i].bnapi->index;
5574 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5575 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5579 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5588 resp = hwrm_req_hold(bp, req);
5589 rc = hwrm_req_send(bp, req);
5592 hwrm_req_drop(bp, req);
5596 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5602 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5603 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5604 if (bp->hwrm_spec_code < 0x10600)
5607 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
5611 resp = hwrm_req_hold(bp, req);
5612 rc = hwrm_req_send(bp, req);
5616 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5618 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5621 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5627 (BNXT_CHIP_P5_THOR(bp) &&
5628 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5629 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5631 bp->fw_cap |= BNXT_FW_CAP_RSS_HASH_TYPE_DELTA;
5632 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5633 if (bp->max_tpa_v2) {
5634 if (BNXT_CHIP_P5_THOR(bp))
5635 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5637 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5640 hwrm_req_drop(bp, req);
5644 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5651 if (bp->flags & BNXT_FLAG_CHIP_P5)
5654 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
5658 resp = hwrm_req_hold(bp, req);
5659 for (i = 0; i < bp->rx_nr_rings; i++) {
5660 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5662 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5663 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5664 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5665 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5667 rc = hwrm_req_send(bp, req);
5672 bp->grp_info[grp_idx].fw_grp_id =
5675 hwrm_req_drop(bp, req);
5679 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5684 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5687 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
5690 hwrm_req_hold(bp, req);
5691 for (i = 0; i < bp->cp_nr_rings; i++) {
5692 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5695 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5697 hwrm_req_send(bp, req);
5698 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5700 hwrm_req_drop(bp, req);
5703 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5714 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
5739 grp_info = &bp->grp_info[ring->grp_idx];
5740 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5741 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
5748 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
5749 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5753 grp_info = &bp->grp_info[ring->grp_idx];
5754 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5764 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5767 grp_info = &bp->grp_info[ring->grp_idx];
5777 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5781 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5782 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5784 grp_info = &bp->grp_info[map_index];
5789 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5795 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5796 if (bp->flags & BNXT_FLAG_USING_MSIX)
5800 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5805 resp = hwrm_req_hold(bp, req);
5806 rc = hwrm_req_send(bp, req);
5809 hwrm_req_drop(bp, req);
5813 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5821 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5825 if (BNXT_PF(bp)) {
5828 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
5835 return hwrm_req_send(bp, req);
5839 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
5846 return hwrm_req_send(bp, req);
5850 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5853 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5854 if (BNXT_PF(bp))
5855 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5857 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5875 db->doorbell = bp->bar1 + map_idx * 0x80;
5891 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5893 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5897 if (bp->flags & BNXT_FLAG_CHIP_P5)
5901 for (i = 0; i < bp->cp_nr_rings; i++) {
5902 struct bnxt_napi *bnapi = bp->bnapi[i];
5908 vector = bp->irq_tbl[map_idx].vector;
5910 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5915 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5916 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5918 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5921 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5923 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5928 for (i = 0; i < bp->tx_nr_rings; i++) {
5929 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5933 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5943 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5946 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5948 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5952 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5955 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5959 for (i = 0; i < bp->rx_nr_rings; i++) {
5960 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5965 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5968 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5971 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5972 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5973 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5981 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5984 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5986 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5992 for (i = 0; i < bp->rx_nr_rings; i++) {
5993 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5997 u32 map_idx = grp_idx + bp->rx_nr_rings;
5999 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6003 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
6005 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
6006 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
6007 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
6014 static int hwrm_ring_free_send_msg(struct bnxt *bp,
6023 if (BNXT_NO_FW_ACCESS(bp))
6026 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
6034 resp = hwrm_req_hold(bp, req);
6035 rc = hwrm_req_send(bp, req);
6037 hwrm_req_drop(bp, req);
6040 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
6047 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
6052 if (!bp->bnapi)
6055 for (i = 0; i < bp->tx_nr_rings; i++) {
6056 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
6060 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
6062 hwrm_ring_free_send_msg(bp, ring,
6070 for (i = 0; i < bp->rx_nr_rings; i++) {
6071 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6076 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6078 hwrm_ring_free_send_msg(bp, ring,
6083 bp->grp_info[grp_idx].rx_fw_ring_id =
6088 if (bp->flags & BNXT_FLAG_CHIP_P5)
6092 for (i = 0; i < bp->rx_nr_rings; i++) {
6093 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6098 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6100 hwrm_ring_free_send_msg(bp, ring, type,
6104 bp->grp_info[grp_idx].agg_fw_ring_id =
6113 bnxt_disable_int_sync(bp);
6115 if (bp->flags & BNXT_FLAG_CHIP_P5)
6119 for (i = 0; i < bp->cp_nr_rings; i++) {
6120 struct bnxt_napi *bnapi = bp->bnapi[i];
6132 hwrm_ring_free_send_msg(bp, ring,
6140 hwrm_ring_free_send_msg(bp, ring, type,
6143 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
6148 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6151 static int bnxt_hwrm_get_rings(struct bnxt *bp)
6153 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6158 if (bp->hwrm_spec_code < 0x10601)
6161 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6166 resp = hwrm_req_hold(bp, req);
6167 rc = hwrm_req_send(bp, req);
6169 hwrm_req_drop(bp, req);
6174 if (BNXT_NEW_RM(bp)) {
6184 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6188 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6191 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6192 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6203 hwrm_req_drop(bp, req);
6207 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6213 if (bp->hwrm_spec_code < 0x10601)
6216 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6221 resp = hwrm_req_hold(bp, req);
6222 rc = hwrm_req_send(bp, req);
6226 hwrm_req_drop(bp, req);
6230 static bool bnxt_rfs_supported(struct bnxt *bp);
6233 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6239 if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
6245 if (BNXT_NEW_RM(bp)) {
6248 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6264 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6273 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6274 bnxt_rfs_supported(bp))
6286 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6292 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
6299 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6314 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6330 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6336 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6342 hwrm_req_drop(bp, req);
6346 rc = hwrm_req_send(bp, req);
6350 if (bp->hwrm_spec_code < 0x10601)
6351 bp->hw_resc.resv_tx_rings = tx_rings;
6353 return bnxt_hwrm_get_rings(bp);
6357 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6363 if (!BNXT_NEW_RM(bp)) {
6364 bp->hw_resc.resv_tx_rings = tx_rings;
6368 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6373 rc = hwrm_req_send(bp, req);
6377 return bnxt_hwrm_get_rings(bp);
6380 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6383 if (BNXT_PF(bp))
6384 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6387 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6391 int bnxt_nq_rings_in_use(struct bnxt *bp)
6393 int cp = bp->cp_nr_rings;
6396 ulp_msix = bnxt_get_ulp_msix_num(bp);
6398 ulp_base = bnxt_get_ulp_msix_base(bp);
6406 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6410 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6411 return bnxt_nq_rings_in_use(bp);
6413 cp = bp->tx_nr_rings + bp->rx_nr_rings;
6417 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6419 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6420 int cp = bp->cp_nr_rings;
6425 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6426 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6434 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6436 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6439 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6440 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6441 if (!netif_is_rxfh_configured(bp->dev))
6442 bnxt_set_dflt_rss_indir_tbl(bp);
6446 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6448 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6449 int cp = bnxt_cp_rings_in_use(bp);
6450 int nq = bnxt_nq_rings_in_use(bp);
6451 int rx = bp->rx_nr_rings, stat;
6454 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6455 bp->hwrm_spec_code >= 0x10601)
6463 if (!BNXT_NEW_RM(bp)) {
6464 bnxt_check_rss_tbl_no_rmgr(bp);
6467 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6469 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6471 stat = bnxt_get_func_stat_ctxs(bp);
6475 !(bp->flags & BNXT_FLAG_CHIP_P5)))
6477 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6483 static int __bnxt_reserve_rings(struct bnxt *bp)
6485 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6486 int cp = bnxt_nq_rings_in_use(bp);
6487 int tx = bp->tx_nr_rings;
6488 int rx = bp->rx_nr_rings;
6493 if (!bnxt_need_reserve_rings(bp))
6496 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6498 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6500 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6502 grp = bp->rx_nr_rings;
6503 stat = bnxt_get_func_stat_ctxs(bp);
6505 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6510 if (BNXT_NEW_RM(bp)) {
6519 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6523 if (netif_running(bp->dev))
6526 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6527 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6528 bp->dev->hw_features &= ~NETIF_F_LRO;
6529 bp->dev->features &= ~NETIF_F_LRO;
6530 bnxt_set_ring_params(bp);
6534 cp = min_t(int, cp, bp->cp_nr_rings);
6535 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6536 stat -= bnxt_get_ulp_stat_ctxs(bp);
6538 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6539 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6542 bp->tx_nr_rings = tx;
6547 if (rx_rings != bp->rx_nr_rings) {
6548 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6549 rx_rings, bp->rx_nr_rings);
6550 if (netif_is_rxfh_configured(bp->dev) &&
6551 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6552 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6553 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6554 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6555 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6558 bp->rx_nr_rings = rx_rings;
6559 bp->cp_nr_rings = cp;
6564 if (!netif_is_rxfh_configured(bp->dev))
6565 bnxt_set_dflt_rss_indir_tbl(bp);
6570 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6577 if (!BNXT_NEW_RM(bp))
6580 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6588 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6592 return hwrm_req_send_silent(bp, req);
6595 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6602 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6605 if (BNXT_NEW_RM(bp)) {
6610 if (bp->flags & BNXT_FLAG_CHIP_P5)
6618 return hwrm_req_send_silent(bp, req);
6621 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6625 if (bp->hwrm_spec_code < 0x10801)
6628 if (BNXT_PF(bp))
6629 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6633 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6637 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6639 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6654 if (bp->hwrm_spec_code < 0x10902)
6657 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
6660 resp = hwrm_req_hold(bp, req);
6661 rc = hwrm_req_send_silent(bp, req);
6681 hwrm_req_drop(bp, req);
6684 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6686 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
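bnxt_usec_to_coal_tmr() appears here only as a conversion between microseconds and the device's coalescing timer units queried via HWRM_RING_AGGINT_QCAPS. Below is a stand-alone sketch of that kind of conversion, assuming the capability structure reports the timer granularity in nanoseconds; the field name is illustrative, not the driver's.

/* Stand-alone sketch: convert a user-visible microsecond value into
 * device coalescing-timer units, assuming the queried capability gives
 * the timer granularity in nanoseconds.  Field names are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

struct coal_cap_model {
	uint32_t timer_units_ns;	/* granularity reported by firmware */
};

static uint16_t usec_to_coal_tmr(const struct coal_cap_model *cap,
				 uint16_t usec)
{
	if (!cap->timer_units_ns)
		return 0;
	return (uint16_t)((uint32_t)usec * 1000 / cap->timer_units_ns);
}

int main(void)
{
	struct coal_cap_model cap = { .timer_units_ns = 80 };

	/* 25 usec of coalescing -> 312 timer ticks at 80 ns per tick */
	printf("%u ticks\n", (unsigned)usec_to_coal_tmr(&cap, 25));
	return 0;
}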
6691 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6695 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6714 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6732 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6747 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6752 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6760 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6768 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6772 return hwrm_req_send(bp, req);
6775 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6785 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6793 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6797 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
6799 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6801 return hwrm_req_send(bp, req_rx);
6804 int bnxt_hwrm_set_coal(struct bnxt *bp)
6810 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6814 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6816 hwrm_req_drop(bp, req_rx);
6820 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
6821 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
6823 hwrm_req_hold(bp, req_rx);
6824 hwrm_req_hold(bp, req_tx);
6825 for (i = 0; i < bp->cp_nr_rings; i++) {
6826 struct bnxt_napi *bnapi = bp->bnapi[i];
6832 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6835 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6839 rc = hwrm_req_send(bp, req);
6843 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6848 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6850 rc = hwrm_req_send(bp, req);
6855 hw_coal = &bp->rx_coal;
6857 hw_coal = &bp->tx_coal;
6858 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6860 hwrm_req_drop(bp, req_rx);
6861 hwrm_req_drop(bp, req_tx);
6865 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6871 if (!bp->bnapi)
6874 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6877 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
6879 if (BNXT_FW_MAJ(bp) <= 20) {
6880 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
6881 hwrm_req_drop(bp, req);
6884 hwrm_req_hold(bp, req0);
6886 hwrm_req_hold(bp, req);
6887 for (i = 0; i < bp->cp_nr_rings; i++) {
6888 struct bnxt_napi *bnapi = bp->bnapi[i];
6895 hwrm_req_send(bp, req0);
6897 hwrm_req_send(bp, req);
6902 hwrm_req_drop(bp, req);
6904 hwrm_req_drop(bp, req0);
6907 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6913 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6916 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
6920 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6921 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6923 resp = hwrm_req_hold(bp, req);
6924 for (i = 0; i < bp->cp_nr_rings; i++) {
6925 struct bnxt_napi *bnapi = bp->bnapi[i];
6930 rc = hwrm_req_send(bp, req);
6936 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6938 hwrm_req_drop(bp, req);
6942 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6950 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6955 resp = hwrm_req_hold(bp, req);
6956 rc = hwrm_req_send(bp, req);
6961 if (BNXT_VF(bp)) {
6962 struct bnxt_vf_info *vf = &bp->vf;
6966 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6972 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6974 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6976 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6977 bp->flags |= BNXT_FLAG_MULTI_HOST;
6980 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6986 bp->port_partition_type = resp->port_partition_type;
6989 if (bp->hwrm_spec_code < 0x10707 ||
6991 bp->br_mode = BRIDGE_MODE_VEB;
6993 bp->br_mode = BRIDGE_MODE_VEPA;
6995 bp->br_mode = BRIDGE_MODE_UNDEF;
6997 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6998 if (!bp->max_mtu)
6999 bp->max_mtu = BNXT_MAX_MTU;
7001 if (bp->db_size)
7004 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7005 if (BNXT_PF(bp))
7010 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
7012 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
7013 bp->db_size <= min_db_offset)
7014 bp->db_size = pci_resource_len(bp->pdev, 2);
7017 hwrm_req_drop(bp, req);
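The tail of bnxt_hwrm_func_qcfg() above sizes the doorbell window: the firmware-reported size in KiB is page-aligned and then clamped to the BAR 2 length when the value is zero, too small, or larger than the BAR. A small user-space sketch of that clamping logic:

/* Sketch of the doorbell-size clamping visible above: page-align the
 * firmware-reported size and fall back to the BAR length when the value
 * is missing, too small, or larger than the BAR.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SZ 4096ULL
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

static uint64_t clamp_db_size(uint16_t fw_db_kb, uint64_t bar_len,
			      uint64_t min_db_offset)
{
	uint64_t db_size = ALIGN_UP((uint64_t)fw_db_kb * 1024, PAGE_SZ);

	if (!db_size || db_size > bar_len || db_size <= min_db_offset)
		db_size = bar_len;
	return db_size;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)clamp_db_size(64, 1 << 20, 4096));
	printf("%llu\n", (unsigned long long)clamp_db_size(0, 1 << 20, 4096));
	return 0;
}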
7054 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
7060 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
7063 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
7067 resp = hwrm_req_hold(bp, req);
7068 rc = hwrm_req_send_silent(bp, req);
7115 ctx->tqm_fp_rings_count = bp->max_q;
7128 bp->ctx = ctx;
7133 hwrm_req_drop(bp, req);
7162 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
7165 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7180 if (req_len > bp->hwrm_max_ext_req_len)
7182 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
7268 return hwrm_req_send(bp, req);
7271 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7282 return bnxt_alloc_ring(bp, rmem);
7285 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7310 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7332 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7341 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7346 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7363 bnxt_free_ring(bp, rmem2);
7371 bnxt_free_ring(bp, rmem);
7375 void bnxt_free_ctx_mem(struct bnxt *bp)
7377 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7385 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7390 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7391 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7392 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7393 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7394 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7395 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7396 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7400 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7413 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7415 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7419 ctx = bp->ctx;
7423 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7435 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7445 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7455 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7466 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7476 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7482 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7495 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
7509 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
7528 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
7536 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7538 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
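The backing-store allocations above pass a page-table level (1 or 2) for each context region. The sketch below shows one rough way a region size could translate into a page count and an indirection level; the threshold constant is an assumption for illustration, not the driver's value.

/* Rough sketch: translate a context-memory region size into a page count
 * and a page-table depth.  MAX_DIRECT_PAGES is an illustrative threshold,
 * not the driver's actual constant.
 */
#include <stdio.h>
#include <stdint.h>

#define CTX_PAGE_SIZE    4096ULL
#define MAX_DIRECT_PAGES 512ULL	/* assumption: one level covers this many */

struct ctx_pg_model {
	uint64_t nr_pages;
	int pg_level;		/* 1 = direct, 2 = one level of indirection */
};

static void size_ctx_region(struct ctx_pg_model *pg, uint64_t mem_size)
{
	pg->nr_pages = (mem_size + CTX_PAGE_SIZE - 1) / CTX_PAGE_SIZE;
	pg->pg_level = pg->nr_pages > MAX_DIRECT_PAGES ? 2 : 1;
}

int main(void)
{
	struct ctx_pg_model pg;

	size_ctx_region(&pg, 8ULL << 20);	/* 8 MiB region */
	printf("%llu pages, level %d\n",
	       (unsigned long long)pg.nr_pages, pg.pg_level);
	return 0;
}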
7546 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7550 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7553 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
7558 resp = hwrm_req_hold(bp, req);
7559 rc = hwrm_req_send_silent(bp, req);
7584 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7591 if (BNXT_PF(bp)) {
7592 struct bnxt_pf_info *pf = &bp->pf;
7600 hwrm_req_drop(bp, req);
7604 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7608 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7613 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_THOR(bp)) {
7618 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
7622 req->port_id = cpu_to_le16(bp->pf.port_id);
7623 resp = hwrm_req_hold(bp, req);
7624 rc = hwrm_req_send(bp, req);
7639 ptp->bp = bp;
7640 bp->ptp_cfg = ptp;
7645 } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7653 rc = bnxt_ptp_init(bp, phc_cfg);
7655 netdev_warn(bp->dev, "PTP initialization failed.\n");
7657 hwrm_req_drop(bp, req);
7662 bnxt_ptp_clear(bp);
7664 bp->ptp_cfg = NULL;
7668 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7672 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7676 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
7681 resp = hwrm_req_hold(bp, req);
7682 rc = hwrm_req_send(bp, req);
7688 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7690 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7692 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7694 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7696 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7698 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7700 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7702 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7704 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
7708 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7709 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
7710 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
7712 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
7713 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
7714 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
7715 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
7716 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
7720 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
7722 bp->tx_push_thresh = 0;
7724 BNXT_FW_MAJ(bp) > 217)
7725 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7738 if (BNXT_PF(bp)) {
7739 struct bnxt_pf_info *pf = &bp->pf;
7752 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7754 bp->flags |= BNXT_FLAG_WOL_CAP;
7756 bp->fw_cap |= BNXT_FW_CAP_PTP;
7758 bnxt_ptp_clear(bp);
7759 kfree(bp->ptp_cfg);
7760 bp->ptp_cfg = NULL;
7764 struct bnxt_vf_info *vf = &bp->vf;
7772 hwrm_req_drop(bp, req);
7776 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
7782 bp->fw_dbg_cap = 0;
7783 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
7786 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
7791 resp = hwrm_req_hold(bp, req);
7792 rc = hwrm_req_send(bp, req);
7796 bp->fw_dbg_cap = le32_to_cpu(resp->flags);
7799 hwrm_req_drop(bp, req);
7802 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7804 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7808 rc = __bnxt_hwrm_func_qcaps(bp);
7812 bnxt_hwrm_dbg_qcaps(bp);
7814 rc = bnxt_hwrm_queue_qportcfg(bp);
7816 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7819 if (bp->hwrm_spec_code >= 0x10803) {
7820 rc = bnxt_alloc_ctx_mem(bp);
7823 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7825 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7830 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7837 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7840 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
7844 resp = hwrm_req_hold(bp, req);
7845 rc = hwrm_req_send(bp, req);
7852 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7855 hwrm_req_drop(bp, req);
7859 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7861 if (bp->fw_health)
7864 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7865 if (!bp->fw_health)
7868 mutex_init(&bp->fw_health->lock);
7872 static int bnxt_alloc_fw_health(struct bnxt *bp)
7876 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7877 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7880 rc = __bnxt_alloc_fw_health(bp);
7882 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7883 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7890 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7892 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7897 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7899 struct bnxt_fw_health *fw_health = bp->fw_health;
7914 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7921 if (bp->fw_health)
7922 bp->fw_health->status_reliable = false;
7924 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7925 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7929 if (!bp->chip_num) {
7930 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7931 bp->chip_num = readl(bp->bar0 +
7935 if (!BNXT_CHIP_P5(bp))
7945 if (__bnxt_alloc_fw_health(bp)) {
7946 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7950 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7953 __bnxt_map_fw_health_reg(bp, status_loc);
7954 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7958 bp->fw_health->status_reliable = true;
7961 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7963 struct bnxt_fw_health *fw_health = bp->fw_health;
7967 bp->fw_health->status_reliable = false;
7968 bp->fw_health->resets_reliable = false;
7981 bp->fw_health->status_reliable = true;
7982 bp->fw_health->resets_reliable = true;
7986 __bnxt_map_fw_health_reg(bp, reg_base);
7990 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
7992 if (!bp->fw_health)
7995 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
7996 bp->fw_health->status_reliable = true;
7997 bp->fw_health->resets_reliable = true;
7999 bnxt_try_map_fw_health_reg(bp);
8003 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
8005 struct bnxt_fw_health *fw_health = bp->fw_health;
8010 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
8013 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
8017 resp = hwrm_req_hold(bp, req);
8018 rc = hwrm_req_send(bp, req);
8023 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
8060 hwrm_req_drop(bp, req);
8062 rc = bnxt_map_fw_health_regs(bp);
8064 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
8068 static int bnxt_hwrm_func_reset(struct bnxt *bp)
8073 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
8078 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
8079 return hwrm_req_send(bp, req);
8082 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
8086 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
8087 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
8092 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
8100 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
8104 resp = hwrm_req_hold(bp, req);
8105 rc = hwrm_req_send(bp, req);
8113 bp->max_tc = resp->max_configurable_queues;
8114 bp->max_lltc = resp->max_configurable_lossless_queues;
8115 if (bp->max_tc > BNXT_MAX_QUEUE)
8116 bp->max_tc = BNXT_MAX_QUEUE;
8118 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
8120 for (i = 0, j = 0; i < bp->max_tc; i++) {
8121 bp->q_info[j].queue_id = *qptr;
8122 bp->q_ids[i] = *qptr++;
8123 bp->q_info[j].queue_profile = *qptr++;
8124 bp->tc_to_qidx[j] = j;
8125 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
8126 (no_rdma && BNXT_PF(bp)))
8129 bp->max_q = bp->max_tc;
8130 bp->max_tc = max_t(u8, j, 1);
8133 bp->max_tc = 1;
8135 if (bp->max_lltc > bp->max_tc)
8136 bp->max_lltc = bp->max_tc;
8139 hwrm_req_drop(bp, req);
8143 static int bnxt_hwrm_poll(struct bnxt *bp)
8148 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
8156 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
8157 rc = hwrm_req_send(bp, req);
8161 static int bnxt_hwrm_ver_get(struct bnxt *bp)
8169 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
8173 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
8174 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
8179 resp = hwrm_req_hold(bp, req);
8180 rc = hwrm_req_send(bp, req);
8184 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
8186 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
8190 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
8193 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
8199 if (bp->hwrm_spec_code > hwrm_ver)
8200 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8204 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8209 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
8221 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
8222 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
8226 int fw_ver_len = strlen(bp->fw_ver_str);
8228 snprintf(bp->fw_ver_str + fw_ver_len,
8231 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
8234 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
8235 if (!bp->hwrm_cmd_timeout)
8236 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
8237 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
8238 if (!bp->hwrm_cmd_max_timeout)
8239 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
8240 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
8241 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
8242 bp->hwrm_cmd_max_timeout / 1000);
8245 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
8246 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
8248 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
8249 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
8251 bp->chip_num = le16_to_cpu(resp->chip_num);
8252 bp->chip_rev = resp->chip_rev;
8253 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
8255 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
8260 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
8263 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8267 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8271 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8275 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8278 hwrm_req_drop(bp, req);
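bnxt_hwrm_ver_get() above packs the interface version into hwrm_spec_code (8 bits each for major/minor/update) and the firmware version into a wider fw_ver_code via BNXT_FW_VER_CODE(). A stand-alone sketch of that style of packing; the 16-bit field widths used for fw_ver_code below are an assumption.

/* Sketch of version packing in the style seen above: three 8-bit fields
 * for the interface spec code, and four 16-bit fields for a firmware
 * version code.  The 16-bit widths are an assumption for illustration.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t spec_code(uint8_t maj, uint8_t min, uint8_t upd)
{
	return (uint32_t)maj << 16 | (uint32_t)min << 8 | upd;
}

static uint64_t fw_ver_code(uint16_t maj, uint16_t min,
			    uint16_t bld, uint16_t rsv)
{
	return (uint64_t)maj << 48 | (uint64_t)min << 32 |
	       (uint64_t)bld << 16 | rsv;
}

int main(void)
{
	/* 1.10.2 interface, firmware 220.0.59.0 */
	printf("spec 0x%06x\n", (unsigned)spec_code(1, 10, 2));
	printf("fw   0x%016llx\n",
	       (unsigned long long)fw_ver_code(220, 0, 59, 0));
	return 0;
}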
8282 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8289 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8290 bp->hwrm_spec_code < 0x10400)
8294 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
8304 return hwrm_req_send(bp, req);
8345 static void bnxt_accumulate_all_stats(struct bnxt *bp)
8352 if (bp->flags & BNXT_FLAG_CHIP_P5)
8355 for (i = 0; i < bp->cp_nr_rings; i++) {
8356 struct bnxt_napi *bnapi = bp->bnapi[i];
8368 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8369 struct bnxt_stats_mem *stats = &bp->port_stats;
8384 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8385 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8386 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
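bnxt_accumulate_all_stats() above folds per-ring and port counters into software totals. The driver's exact masking scheme is not visible in this listing, so the sketch below shows a generic wraparound-safe way to accumulate a narrow hardware counter into a 64-bit software counter, which is the usual technique for this kind of loop.

/* Generic wraparound-safe accumulation of a narrow hardware counter into
 * a 64-bit software total.  This illustrates the technique only; the
 * driver's own masking scheme is not shown in the listing above.
 */
#include <stdio.h>
#include <stdint.h>

struct accum_ctr {
	uint64_t sw_total;	/* monotonically growing software value */
	uint64_t last_hw;	/* last raw hardware reading */
	uint64_t hw_mask;	/* e.g. 0xffffffff for a 32-bit counter */
};

static void accumulate(struct accum_ctr *c, uint64_t hw_now)
{
	/* Difference modulo the counter width absorbs a single wrap. */
	c->sw_total += (hw_now - c->last_hw) & c->hw_mask;
	c->last_hw = hw_now;
}

int main(void)
{
	struct accum_ctr c = { .hw_mask = 0xffffffffULL };

	accumulate(&c, 0xfffffff0ULL);	/* near the 32-bit limit */
	accumulate(&c, 0x00000010ULL);	/* counter wrapped */
	printf("total %llu\n", (unsigned long long)c.sw_total);
	return 0;
}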
8390 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
8393 struct bnxt_pf_info *pf = &bp->pf;
8396 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8399 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8402 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
8408 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8410 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8411 return hwrm_req_send(bp, req);
8414 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
8420 struct bnxt_pf_info *pf = &bp->pf;
8424 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8427 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8430 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
8437 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8438 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8441 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8442 resp_qs = hwrm_req_hold(bp, req_qs);
8443 rc = hwrm_req_send(bp, req_qs);
8445 bp->fw_rx_stats_ext_size =
8447 if (BNXT_FW_MAJ(bp) < 220 &&
8448 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
8449 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
8451 bp->fw_tx_stats_ext_size = tx_stat_size ?
8454 bp->fw_rx_stats_ext_size = 0;
8455 bp->fw_tx_stats_ext_size = 0;
8457 hwrm_req_drop(bp, req_qs);
8462 if (bp->fw_tx_stats_ext_size <=
8464 bp->pri2cos_valid = 0;
8468 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
8474 resp_qc = hwrm_req_hold(bp, req_qc);
8475 rc = hwrm_req_send(bp, req_qc);
8488 bp->pri2cos_valid = false;
8489 hwrm_req_drop(bp, req_qc);
8492 for (j = 0; j < bp->max_q; j++) {
8493 if (bp->q_ids[j] == queue_id)
8494 bp->pri2cos_idx[i] = queue_idx;
8497 bp->pri2cos_valid = true;
8499 hwrm_req_drop(bp, req_qc);
8504 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8506 bnxt_hwrm_tunnel_dst_port_free(bp,
8508 bnxt_hwrm_tunnel_dst_port_free(bp,
8512 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8518 tpa_flags = bp->flags & BNXT_FLAG_TPA;
8519 else if (BNXT_NO_FW_ACCESS(bp))
8521 for (i = 0; i < bp->nr_vnics; i++) {
8522 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8524 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
8532 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8536 for (i = 0; i < bp->nr_vnics; i++)
8537 bnxt_hwrm_vnic_set_rss(bp, i, false);
8540 static void bnxt_clear_vnic(struct bnxt *bp)
8542 if (!bp->vnic_info)
8545 bnxt_hwrm_clear_vnic_filter(bp);
8546 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8548 bnxt_hwrm_clear_vnic_rss(bp);
8549 bnxt_hwrm_vnic_ctx_free(bp);
8552 if (bp->flags & BNXT_FLAG_TPA)
8553 bnxt_set_tpa(bp, false);
8554 bnxt_hwrm_vnic_free(bp);
8555 if (bp->flags & BNXT_FLAG_CHIP_P5)
8556 bnxt_hwrm_vnic_ctx_free(bp);
8559 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8562 bnxt_clear_vnic(bp);
8563 bnxt_hwrm_ring_free(bp, close_path);
8564 bnxt_hwrm_ring_grp_free(bp);
8566 bnxt_hwrm_stat_ctx_free(bp);
8567 bnxt_hwrm_free_tunnel_ports(bp);
8571 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8584 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8591 return hwrm_req_send(bp, req);
8594 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8599 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8602 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8612 return hwrm_req_send(bp, req);
8615 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8617 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8624 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8626 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8630 bp->rsscos_nr_ctxs++;
8632 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8633 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8635 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8639 bp->rsscos_nr_ctxs++;
8644 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8646 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8652 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8654 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8659 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8660 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8662 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8671 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8675 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8677 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8679 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8683 bp->rsscos_nr_ctxs++;
8688 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8690 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8694 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8696 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8700 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8701 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8703 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8710 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8712 if (bp->flags & BNXT_FLAG_CHIP_P5)
8713 return __bnxt_setup_vnic_p5(bp, vnic_id);
8715 return __bnxt_setup_vnic(bp, vnic_id);
8718 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8723 if (bp->flags & BNXT_FLAG_CHIP_P5)
8726 for (i = 0; i < bp->rx_nr_rings; i++) {
8731 if (vnic_id >= bp->nr_vnics)
8734 vnic = &bp->vnic_info[vnic_id];
8736 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8738 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8740 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8744 rc = bnxt_setup_vnic(bp, vnic_id);
8755 static bool bnxt_promisc_ok(struct bnxt *bp)
8758 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8764 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8768 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8770 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8775 rc = bnxt_hwrm_vnic_cfg(bp, 1);
8777 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8787 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8789 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8791 unsigned int rx_nr_rings = bp->rx_nr_rings;
8794 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8796 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8802 rc = bnxt_hwrm_ring_alloc(bp);
8804 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8808 rc = bnxt_hwrm_ring_grp_alloc(bp);
8810 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8814 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8818 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8820 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8824 if (BNXT_VF(bp))
8825 bnxt_hwrm_func_qcfg(bp);
8827 rc = bnxt_setup_vnic(bp, 0);
8830 if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
8831 bnxt_hwrm_update_rss_hash_cfg(bp);
8833 if (bp->flags & BNXT_FLAG_RFS) {
8834 rc = bnxt_alloc_rfs_vnics(bp);
8839 if (bp->flags & BNXT_FLAG_TPA) {
8840 rc = bnxt_set_tpa(bp, true);
8845 if (BNXT_VF(bp))
8846 bnxt_update_vf_mac(bp);
8849 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8851 if (BNXT_VF(bp) && rc == -ENODEV)
8852 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
8854 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8860 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
8863 if (bp->dev->flags & IFF_BROADCAST)
8866 if (bp->dev->flags & IFF_PROMISC)
8869 if (bp->dev->flags & IFF_ALLMULTI) {
8872 } else if (bp->dev->flags & IFF_MULTICAST) {
8875 bnxt_mc_list_updated(bp, &mask);
8879 rc = bnxt_cfg_rx_mode(bp);
8884 rc = bnxt_hwrm_set_coal(bp);
8886 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8889 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8890 rc = bnxt_setup_nitroa0_vnic(bp);
8892 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8896 if (BNXT_VF(bp)) {
8897 bnxt_hwrm_func_qcfg(bp);
8898 netdev_update_features(bp->dev);
8904 bnxt_hwrm_resource_free(bp, 0, true);
8909 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8911 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8915 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8917 bnxt_init_cp_rings(bp);
8918 bnxt_init_rx_rings(bp);
8919 bnxt_init_tx_rings(bp);
8920 bnxt_init_ring_grps(bp, irq_re_init);
8921 bnxt_init_vnics(bp);
8923 return bnxt_init_chip(bp, irq_re_init);
8926 static int bnxt_set_real_num_queues(struct bnxt *bp)
8929 struct net_device *dev = bp->dev;
8931 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8932 bp->tx_nr_rings_xdp);
8936 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8941 if (bp->flags & BNXT_FLAG_RFS)
8942 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8948 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
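bnxt_trim_rings() only shows its prototype in this listing; its callers pass the RX/TX ring counts, a cap, and a "shared" flag. Below is a user-space sketch of one plausible trimming strategy (clamp both counts when rings are shared, otherwise shave the larger side until the sum fits); the exact policy is an assumption.

/* Plausible sketch of ring trimming: with shared completion rings each
 * count is clamped to the cap; otherwise the larger of RX/TX is reduced
 * until the sum fits.  The exact policy is an assumption, since only the
 * prototype appears in the listing above.
 */
#include <stdio.h>
#include <stdbool.h>

static int trim_rings(int *rx, int *tx, int max, bool shared)
{
	int _rx = *rx, _tx = *tx;

	if (shared) {
		*rx = _rx < max ? _rx : max;
		*tx = _tx < max ? _tx : max;
		return 0;
	}

	if (max < 2)
		return -1;	/* need at least one RX and one TX ring */

	while (_rx + _tx > max) {
		if (_rx > _tx && _rx > 1)
			_rx--;
		else if (_tx > 1)
			_tx--;
		else
			break;
	}
	*rx = _rx;
	*tx = _tx;
	return 0;
}

int main(void)
{
	int rx = 8, tx = 8;

	trim_rings(&rx, &tx, 12, false);
	printf("rx=%d tx=%d\n", rx, tx);	/* 6 and 6 */
	return 0;
}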
8972 static void bnxt_setup_msix(struct bnxt *bp)
8974 const int len = sizeof(bp->irq_tbl[0].name);
8975 struct net_device *dev = bp->dev;
8983 count = bp->tx_nr_rings_per_tc;
8989 for (i = 0; i < bp->cp_nr_rings; i++) {
8990 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8993 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8995 else if (i < bp->rx_nr_rings)
9000 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
9002 bp->irq_tbl[map_idx].handler = bnxt_msix;
9006 static void bnxt_setup_inta(struct bnxt *bp)
9008 const int len = sizeof(bp->irq_tbl[0].name);
9010 if (netdev_get_num_tc(bp->dev))
9011 netdev_reset_tc(bp->dev);
9013 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
9015 bp->irq_tbl[0].handler = bnxt_inta;
9018 static int bnxt_init_int_mode(struct bnxt *bp);
9020 static int bnxt_setup_int_mode(struct bnxt *bp)
9024 if (!bp->irq_tbl) {
9025 rc = bnxt_init_int_mode(bp);
9026 if (rc || !bp->irq_tbl)
9030 if (bp->flags & BNXT_FLAG_USING_MSIX)
9031 bnxt_setup_msix(bp);
9033 bnxt_setup_inta(bp);
9035 rc = bnxt_set_real_num_queues(bp);
9040 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
9042 return bp->hw_resc.max_rsscos_ctxs;
9045 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
9047 return bp->hw_resc.max_vnics;
9051 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
9053 return bp->hw_resc.max_stat_ctxs;
9056 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
9058 return bp->hw_resc.max_cp_rings;
9061 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
9063 unsigned int cp = bp->hw_resc.max_cp_rings;
9065 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9066 cp -= bnxt_get_ulp_msix_num(bp);
9071 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
9073 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9075 if (bp->flags & BNXT_FLAG_CHIP_P5)
9081 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
9083 bp->hw_resc.max_irqs = max_irqs;
9086 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
9090 cp = bnxt_get_max_func_cp_rings_for_en(bp);
9091 if (bp->flags & BNXT_FLAG_CHIP_P5)
9092 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
9094 return cp - bp->cp_nr_rings;
9097 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
9099 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
9102 int bnxt_get_avail_msix(struct bnxt *bp, int num)
9104 int max_cp = bnxt_get_max_func_cp_rings(bp);
9105 int max_irq = bnxt_get_max_func_irqs(bp);
9106 int total_req = bp->cp_nr_rings + num;
9109 max_idx = bp->total_irqs;
9110 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9111 max_idx = min_t(int, bp->total_irqs, max_cp);
9112 avail_msix = max_idx - bp->cp_nr_rings;
9113 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
9117 num = max_irq - bp->cp_nr_rings;
9124 static int bnxt_get_num_msix(struct bnxt *bp)
9126 if (!BNXT_NEW_RM(bp))
9127 return bnxt_get_max_func_irqs(bp);
9129 return bnxt_nq_rings_in_use(bp);
9132 static int bnxt_init_msix(struct bnxt *bp)
9137 total_vecs = bnxt_get_num_msix(bp);
9138 max = bnxt_get_max_func_irqs(bp);
9154 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
9157 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
9158 ulp_msix = bnxt_get_ulp_msix_num(bp);
9164 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
9165 if (bp->irq_tbl) {
9167 bp->irq_tbl[i].vector = msix_ent[i].vector;
9169 bp->total_irqs = total_vecs;
9171 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
9176 bp->cp_nr_rings = (min == 1) ?
9177 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
9178 bp->tx_nr_rings + bp->rx_nr_rings;
9184 bp->flags |= BNXT_FLAG_USING_MSIX;
9189 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
9190 kfree(bp->irq_tbl);
9191 bp->irq_tbl = NULL;
9192 pci_disable_msix(bp->pdev);
9197 static int bnxt_init_inta(struct bnxt *bp)
9199 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
9200 if (!bp->irq_tbl)
9203 bp->total_irqs = 1;
9204 bp->rx_nr_rings = 1;
9205 bp->tx_nr_rings = 1;
9206 bp->cp_nr_rings = 1;
9207 bp->flags |= BNXT_FLAG_SHARED_RINGS;
9208 bp->irq_tbl[0].vector = bp->pdev->irq;
9212 static int bnxt_init_int_mode(struct bnxt *bp)
9216 if (bp->flags & BNXT_FLAG_MSIX_CAP)
9217 rc = bnxt_init_msix(bp);
9219 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
9221 rc = bnxt_init_inta(bp);
9226 static void bnxt_clear_int_mode(struct bnxt *bp)
9228 if (bp->flags & BNXT_FLAG_USING_MSIX)
9229 pci_disable_msix(bp->pdev);
9231 kfree(bp->irq_tbl);
9232 bp->irq_tbl = NULL;
9233 bp->flags &= ~BNXT_FLAG_USING_MSIX;
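The interrupt setup above tries MSI-X first and falls back to a single shared-ring legacy interrupt on PFs. The small stand-alone model below treats the number of vectors actually granted as an input (in the driver it comes from pci_enable_msix_range()); the decision logic is a simplified assumption, not the driver's exact policy.

/* Model of the interrupt-mode decision above: use however many MSI-X
 * vectors were granted (at least the minimum), otherwise fall back to a
 * single shared-ring legacy interrupt.  The granted count is an input
 * here; the driver obtains it from pci_enable_msix_range().
 */
#include <stdio.h>
#include <stdbool.h>

struct irq_plan {
	bool using_msix;
	bool shared_rings;
	int  rx_rings, tx_rings, cp_rings;
};

static void plan_irqs(struct irq_plan *p, int granted, int min_vecs,
		      int want_rx, int want_tx)
{
	if (granted >= min_vecs) {
		p->using_msix = true;
		p->shared_rings = false;
		/* Ring counts would then be trimmed to fit 'granted'. */
		p->rx_rings = want_rx;
		p->tx_rings = want_tx;
		p->cp_rings = granted;
		return;
	}
	/* Legacy interrupt: one of everything, rings shared. */
	p->using_msix = false;
	p->shared_rings = true;
	p->rx_rings = p->tx_rings = p->cp_rings = 1;
}

int main(void)
{
	struct irq_plan p;

	plan_irqs(&p, 0, 2, 8, 8);	/* nothing granted */
	printf("msix=%d rx=%d tx=%d cp=%d\n",
	       p.using_msix, p.rx_rings, p.tx_rings, p.cp_rings);
	return 0;
}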
9236 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
9238 int tcs = netdev_get_num_tc(bp->dev);
9242 if (!bnxt_need_reserve_rings(bp))
9245 if (irq_re_init && BNXT_NEW_RM(bp) &&
9246 bnxt_get_num_msix(bp) != bp->total_irqs) {
9247 bnxt_ulp_irq_stop(bp);
9248 bnxt_clear_int_mode(bp);
9251 rc = __bnxt_reserve_rings(bp);
9254 rc = bnxt_init_int_mode(bp);
9255 bnxt_ulp_irq_restart(bp, rc);
9258 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
9261 if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
9262 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
9263 netdev_err(bp->dev, "tx ring reservation failure\n");
9264 netdev_reset_tc(bp->dev);
9265 if (bp->tx_nr_rings_xdp)
9266 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
9268 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
9274 static void bnxt_free_irq(struct bnxt *bp)
9280 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
9281 bp->dev->rx_cpu_rmap = NULL;
9283 if (!bp->irq_tbl || !bp->bnapi)
9286 for (i = 0; i < bp->cp_nr_rings; i++) {
9287 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9289 irq = &bp->irq_tbl[map_idx];
9296 free_irq(irq->vector, bp->bnapi[i]);
9303 static int bnxt_request_irq(struct bnxt *bp)
9311 rc = bnxt_setup_int_mode(bp);
9313 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9318 rmap = bp->dev->rx_cpu_rmap;
9320 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9323 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
9324 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9325 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9328 if (rmap && bp->bnapi[i]->rx_ring) {
9331 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
9337 bp->bnapi[i]);
9344 int numa_node = dev_to_node(&bp->pdev->dev);
9351 netdev_warn(bp->dev,
9361 static void bnxt_del_napi(struct bnxt *bp)
9365 if (!bp->bnapi)
9368 for (i = 0; i < bp->cp_nr_rings; i++) {
9369 struct bnxt_napi *bnapi = bp->bnapi[i];
9379 static void bnxt_init_napi(struct bnxt *bp)
9382 unsigned int cp_nr_rings = bp->cp_nr_rings;
9385 if (bp->flags & BNXT_FLAG_USING_MSIX) {
9388 if (bp->flags & BNXT_FLAG_CHIP_P5)
9390 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9393 bnapi = bp->bnapi[i];
9394 netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
9396 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9397 bnapi = bp->bnapi[cp_nr_rings];
9398 netif_napi_add(bp->dev, &bnapi->napi,
9402 bnapi = bp->bnapi[0];
9403 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
9407 static void bnxt_disable_napi(struct bnxt *bp)
9411 if (!bp->bnapi ||
9412 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9415 for (i = 0; i < bp->cp_nr_rings; i++) {
9416 struct bnxt_napi *bnapi = bp->bnapi[i];
9430 static void bnxt_enable_napi(struct bnxt *bp)
9434 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9435 for (i = 0; i < bp->cp_nr_rings; i++) {
9436 struct bnxt_napi *bnapi = bp->bnapi[i];
9454 void bnxt_tx_disable(struct bnxt *bp)
9459 if (bp->tx_ring) {
9460 for (i = 0; i < bp->tx_nr_rings; i++) {
9461 txr = &bp->tx_ring[i];
9468 netif_carrier_off(bp->dev);
9470 netif_tx_disable(bp->dev);
9473 void bnxt_tx_enable(struct bnxt *bp)
9478 for (i = 0; i < bp->tx_nr_rings; i++) {
9479 txr = &bp->tx_ring[i];
9484 netif_tx_wake_all_queues(bp->dev);
9485 if (BNXT_LINK_IS_UP(bp))
9486 netif_carrier_on(bp->dev);
9513 void bnxt_report_link(struct bnxt *bp)
9515 if (BNXT_LINK_IS_UP(bp)) {
9522 netif_carrier_on(bp->dev);
9523 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9525 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9528 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9532 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9534 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9536 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9540 if (bp->link_info.phy_qcfg_resp.option_flags &
9542 u8 sig_mode = bp->link_info.active_fec_sig_mode &
9555 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9557 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9558 netdev_info(bp->dev, "EEE is %s\n",
9559 bp->eee.eee_active ? "active" :
9561 fec = bp->link_info.fec_cfg;
9563 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9565 bnxt_report_fec(&bp->link_info));
9567 netif_carrier_off(bp->dev);
9568 netdev_err(bp->dev, "NIC Link is Down\n");
9582 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9584 struct bnxt_link_info *link_info = &bp->link_info;
9589 if (bp->hwrm_spec_code < 0x10201)
9592 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
9596 resp = hwrm_req_hold(bp, req);
9597 rc = hwrm_req_send(bp, req);
9601 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
9603 struct ethtool_eee *eee = &bp->eee;
9607 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9609 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9613 if (bp->hwrm_spec_code >= 0x10a01) {
9616 netdev_warn(bp->dev, "Ethernet link disabled\n");
9619 netdev_info(bp->dev, "Ethernet link enabled\n");
9632 bp->port_count = resp->port_cnt;
9635 hwrm_req_drop(bp, req);
9646 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9648 struct bnxt_link_info *link_info = &bp->link_info;
9655 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
9659 resp = hwrm_req_hold(bp, req);
9660 rc = hwrm_req_send(bp, req);
9662 hwrm_req_drop(bp, req);
9663 if (BNXT_VF(bp) && rc == -ENODEV) {
9664 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
9673 if (bp->hwrm_spec_code >= 0x10800)
9708 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9709 struct ethtool_eee *eee = &bp->eee;
9745 if (bp->hwrm_spec_code >= 0x10504) {
9756 bnxt_report_link(bp);
9761 hwrm_req_drop(bp, req);
9763 if (!BNXT_PHY_CFG_ABLE(bp))
9780 bnxt_hwrm_set_link_setting(bp, true, false);
9784 static void bnxt_get_port_module_status(struct bnxt *bp)
9786 struct bnxt_link_info *link_info = &bp->link_info;
9790 if (bnxt_update_link(bp, true))
9798 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9799 bp->pf.port_id);
9800 if (bp->hwrm_spec_code >= 0x10201) {
9801 netdev_warn(bp->dev, "Module part number %s\n",
9805 netdev_warn(bp->dev, "TX is disabled\n");
9807 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9812 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9814 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9815 if (bp->hwrm_spec_code >= 0x10201)
9818 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9820 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9825 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9827 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9831 if (bp->hwrm_spec_code >= 0x10201) {
9839 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9841 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9843 if (bp->link_info.advertising) {
9845 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9847 if (bp->link_info.advertising_pam4) {
9851 cpu_to_le16(bp->link_info.advertising_pam4);
9857 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9858 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9861 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9869 int bnxt_hwrm_set_pause(struct bnxt *bp)
9874 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9878 bnxt_hwrm_set_pause_common(bp, req);
9880 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9881 bp->link_info.force_link_chng)
9882 bnxt_hwrm_set_link_common(bp, req);
9884 rc = hwrm_req_send(bp, req);
9885 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9890 bp->link_info.pause =
9891 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9892 bp->link_info.auto_pause_setting = 0;
9893 if (!bp->link_info.force_link_chng)
9894 bnxt_report_link(bp);
9896 bp->link_info.force_link_chng = false;
9900 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9903 struct ethtool_eee *eee = &bp->eee;
9923 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9928 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9933 bnxt_hwrm_set_pause_common(bp, req);
9935 bnxt_hwrm_set_link_common(bp, req);
9938 bnxt_hwrm_set_eee(bp, req);
9939 return hwrm_req_send(bp, req);
9942 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9947 if (!BNXT_SINGLE_PF(bp))
9950 if (pci_num_vf(bp->pdev) &&
9951 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9954 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9959 rc = hwrm_req_send(bp, req);
9961 mutex_lock(&bp->link_lock);
9967 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
9968 mutex_unlock(&bp->link_lock);
9973 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9979 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9983 netdev_err(bp->dev, "OP-TEE not supported\n");
9988 static int bnxt_try_recover_fw(struct bnxt *bp)
9990 if (bp->fw_health && bp->fw_health->status_reliable) {
9995 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9996 rc = bnxt_hwrm_poll(bp);
10004 netdev_err(bp->dev,
10010 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
10011 return bnxt_fw_reset_via_optee(bp);
10019 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
10021 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10023 if (!BNXT_NEW_RM(bp))
10034 bp->tx_nr_rings = 0;
10035 bp->rx_nr_rings = 0;
10039 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
10043 if (!BNXT_NEW_RM(bp))
10046 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
10048 netdev_err(bp->dev, "resc_qcaps failed\n");
10050 bnxt_clear_reservations(bp, fw_reset);
10055 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
10059 bool fw_reset = !bp->irq_tbl;
10064 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
10067 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
10073 resp = hwrm_req_hold(bp, req);
10075 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10077 rc = hwrm_req_send(bp, req);
10086 hwrm_req_drop(bp, req);
10091 rc = bnxt_try_recover_fw(bp);
10094 hwrm_req_drop(bp, req);
10099 bnxt_inv_fw_health_reg(bp);
10106 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
10109 bnxt_remap_fw_health_regs(bp);
10111 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
10112 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
10113 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10118 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10119 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10120 bnxt_ulp_stop(bp);
10121 bnxt_free_ctx_mem(bp);
10122 kfree(bp->ctx);
10123 bp->ctx = NULL;
10124 bnxt_dcb_free(bp);
10125 rc = bnxt_fw_init_one(bp);
10127 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10128 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10131 bnxt_clear_int_mode(bp);
10132 rc = bnxt_init_int_mode(bp);
10134 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10135 netdev_err(bp->dev, "init int mode failed\n");
10139 rc = bnxt_cancel_reservations(bp, fw_reset);
10144 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
10148 struct bnxt_pf_info *pf = &bp->pf;
10151 bp->num_leds = 0;
10152 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
10155 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
10160 resp = hwrm_req_hold(bp, req);
10161 rc = hwrm_req_send(bp, req);
10163 hwrm_req_drop(bp, req);
10169 bp->num_leds = resp->num_leds;
10170 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
10171 bp->num_leds);
10172 for (i = 0; i < bp->num_leds; i++) {
10173 struct bnxt_led_info *led = &bp->leds[i];
10178 bp->num_leds = 0;
10183 hwrm_req_drop(bp, req);
10187 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
10193 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
10197 req->port_id = cpu_to_le16(bp->pf.port_id);
10200 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
10202 resp = hwrm_req_hold(bp, req);
10203 rc = hwrm_req_send(bp, req);
10205 bp->wol_filter_id = resp->wol_filter_id;
10206 hwrm_req_drop(bp, req);
10210 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
10215 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
10219 req->port_id = cpu_to_le16(bp->pf.port_id);
10221 req->wol_filter_id = bp->wol_filter_id;
10223 return hwrm_req_send(bp, req);
10226 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
10233 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
10237 req->port_id = cpu_to_le16(bp->pf.port_id);
10239 resp = hwrm_req_hold(bp, req);
10240 rc = hwrm_req_send(bp, req);
10246 bp->wol = 1;
10247 bp->wol_filter_id = resp->wol_filter_id;
10251 hwrm_req_drop(bp, req);
10255 static void bnxt_get_wol_settings(struct bnxt *bp)
10259 bp->wol = 0;
10260 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
10264 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
10274 struct bnxt *bp = dev_get_drvdata(dev);
10278 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10281 resp = hwrm_req_hold(bp, req);
10282 rc = hwrm_req_send(bp, req);
10285 hwrm_req_drop(bp, req);
10298 static void bnxt_hwmon_close(struct bnxt *bp)
10300 if (bp->hwmon_dev) {
10301 hwmon_device_unregister(bp->hwmon_dev);
10302 bp->hwmon_dev = NULL;
10306 static void bnxt_hwmon_open(struct bnxt *bp)
10309 struct pci_dev *pdev = bp->pdev;
10312 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10314 rc = hwrm_req_send_silent(bp, req);
10316 bnxt_hwmon_close(bp);
10320 if (bp->hwmon_dev)
10323 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
10324 DRV_MODULE_NAME, bp,
10326 if (IS_ERR(bp->hwmon_dev)) {
10327 bp->hwmon_dev = NULL;
10332 static void bnxt_hwmon_close(struct bnxt *bp)
10336 static void bnxt_hwmon_open(struct bnxt *bp)
10341 static bool bnxt_eee_config_ok(struct bnxt *bp)
10343 struct ethtool_eee *eee = &bp->eee;
10344 struct bnxt_link_info *link_info = &bp->link_info;
10346 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
10365 static int bnxt_update_phy_setting(struct bnxt *bp)
10371 struct bnxt_link_info *link_info = &bp->link_info;
10373 rc = bnxt_update_link(bp, true);
10375 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10379 if (!BNXT_SINGLE_PF(bp))
10411 if (!BNXT_LINK_IS_UP(bp))
10414 if (!bnxt_eee_config_ok(bp))
10418 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
10420 rc = bnxt_hwrm_set_pause(bp);
10422 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10435 static void bnxt_preset_reg_win(struct bnxt *bp)
10437 if (BNXT_PF(bp)) {
10440 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10444 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10446 static int bnxt_reinit_after_abort(struct bnxt *bp)
10450 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10453 if (bp->dev->reg_state == NETREG_UNREGISTERED)
10456 rc = bnxt_fw_init_one(bp);
10458 bnxt_clear_int_mode(bp);
10459 rc = bnxt_init_int_mode(bp);
10461 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10462 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10468 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10472 bnxt_preset_reg_win(bp);
10473 netif_carrier_off(bp->dev);
10476 rc = bnxt_init_dflt_ring_mode(bp);
10478 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10482 rc = bnxt_reserve_rings(bp, irq_re_init);
10485 if ((bp->flags & BNXT_FLAG_RFS) &&
10486 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10488 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10489 bp->flags &= ~BNXT_FLAG_RFS;
10492 rc = bnxt_alloc_mem(bp, irq_re_init);
10494 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10499 bnxt_init_napi(bp);
10500 rc = bnxt_request_irq(bp);
10502 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10507 rc = bnxt_init_nic(bp, irq_re_init);
10509 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10513 bnxt_enable_napi(bp);
10514 bnxt_debug_dev_init(bp);
10517 mutex_lock(&bp->link_lock);
10518 rc = bnxt_update_phy_setting(bp);
10519 mutex_unlock(&bp->link_lock);
10521 netdev_warn(bp->dev, "failed to update phy settings\n");
10522 if (BNXT_SINGLE_PF(bp)) {
10523 bp->link_info.phy_retry = true;
10524 bp->link_info.phy_retry_expires =
10531 udp_tunnel_nic_reset_ntf(bp->dev);
10533 if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
10539 set_bit(BNXT_STATE_OPEN, &bp->state);
10540 bnxt_enable_int(bp);
10542 bnxt_tx_enable(bp);
10543 mod_timer(&bp->timer, jiffies + bp->current_interval);
10545 mutex_lock(&bp->link_lock);
10546 bnxt_get_port_module_status(bp);
10547 mutex_unlock(&bp->link_lock);
10550 if (BNXT_PF(bp))
10551 bnxt_vf_reps_open(bp);
10552 bnxt_ptp_init_rtc(bp, true);
10553 bnxt_ptp_cfg_tstamp_filters(bp);
10557 bnxt_del_napi(bp);
10560 bnxt_free_skbs(bp);
10561 bnxt_free_irq(bp);
10562 bnxt_free_mem(bp, true);
10567 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10571 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10574 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10576 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10577 dev_close(bp->dev);
10586 int bnxt_half_open_nic(struct bnxt *bp)
10590 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10591 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10596 rc = bnxt_alloc_mem(bp, true);
10598 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10601 bnxt_init_napi(bp);
10602 set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10603 rc = bnxt_init_nic(bp, true);
10605 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10606 bnxt_del_napi(bp);
10607 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10613 bnxt_free_skbs(bp);
10614 bnxt_free_mem(bp, true);
10615 dev_close(bp->dev);
10622 void bnxt_half_close_nic(struct bnxt *bp)
10624 bnxt_hwrm_resource_free(bp, false, true);
10625 bnxt_del_napi(bp);
10626 bnxt_free_skbs(bp);
10627 bnxt_free_mem(bp, true);
10628 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10631 void bnxt_reenable_sriov(struct bnxt *bp)
10633 if (BNXT_PF(bp)) {
10634 struct bnxt_pf_info *pf = &bp->pf;
10638 bnxt_cfg_hw_sriov(bp, &n, true);
10644 struct bnxt *bp = netdev_priv(dev);
10647 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10648 rc = bnxt_reinit_after_abort(bp);
10651 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10653 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10658 rc = bnxt_hwrm_if_change(bp, true);
10662 rc = __bnxt_open_nic(bp, true, true);
10664 bnxt_hwrm_if_change(bp, false);
10666 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10667 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10668 bnxt_ulp_start(bp, 0);
10669 bnxt_reenable_sriov(bp);
10672 bnxt_hwmon_open(bp);
10678 static bool bnxt_drv_busy(struct bnxt *bp)
10680 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10681 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10684 static void bnxt_get_ring_stats(struct bnxt *bp,
10687 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10691 if (BNXT_PF(bp))
10692 bnxt_vf_reps_close(bp);
10695 bnxt_tx_disable(bp);
10697 clear_bit(BNXT_STATE_OPEN, &bp->state);
10699 while (bnxt_drv_busy(bp))
10703 bnxt_shutdown_nic(bp, irq_re_init);
10707 bnxt_debug_dev_exit(bp);
10708 bnxt_disable_napi(bp);
10709 del_timer_sync(&bp->timer);
10710 bnxt_free_skbs(bp);
10713 if (bp->bnapi && irq_re_init) {
10714 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10715 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
10718 bnxt_free_irq(bp);
10719 bnxt_del_napi(bp);
10721 bnxt_free_mem(bp, irq_re_init);
10724 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10726 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10734 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10735 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10739 if (bp->sriov_cfg) {
10742 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10743 !bp->sriov_cfg,
10746 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
10748 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
10751 __bnxt_close_nic(bp, irq_re_init, link_re_init);
10756 struct bnxt *bp = netdev_priv(dev);
10758 bnxt_hwmon_close(bp);
10759 bnxt_close_nic(bp, true, true);
10760 bnxt_hwrm_shutdown_link(bp);
10761 bnxt_hwrm_if_change(bp, false);
10765 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10772 if (bp->hwrm_spec_code < 0x10a00)
10775 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
10779 req->port_id = cpu_to_le16(bp->pf.port_id);
10789 resp = hwrm_req_hold(bp, req);
10790 rc = hwrm_req_send(bp, req);
10793 hwrm_req_drop(bp, req);
10797 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10803 if (bp->hwrm_spec_code < 0x10a00)
10806 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
10810 req->port_id = cpu_to_le16(bp->pf.port_id);
10821 return hwrm_req_send(bp, req);
10828 struct bnxt *bp = netdev_priv(dev);
10833 mdio->phy_id = bp->link_info.phy_addr;
10842 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10852 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10868 static void bnxt_get_ring_stats(struct bnxt *bp,
10873 for (i = 0; i < bp->cp_nr_rings; i++) {
10874 struct bnxt_napi *bnapi = bp->bnapi[i];
10907 static void bnxt_add_prev_stats(struct bnxt *bp,
10910 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10925 struct bnxt *bp = netdev_priv(dev);
10927 set_bit(BNXT_STATE_READ_STATS, &bp->state);
10932 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10933 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10934 *stats = bp->net_stats_prev;
10938 bnxt_get_ring_stats(bp, stats);
10939 bnxt_add_prev_stats(bp, stats);
10941 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10942 u64 *rx = bp->port_stats.sw_stats;
10943 u64 *tx = bp->port_stats.sw_stats +
10963 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10966 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
10986 void bnxt_get_ring_err_stats(struct bnxt *bp,
10991 for (i = 0; i < bp->cp_nr_rings; i++)
10992 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
10995 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10997 struct net_device *dev = bp->dev;
10998 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11029 static bool bnxt_uc_list_updated(struct bnxt *bp)
11031 struct net_device *dev = bp->dev;
11032 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11050 struct bnxt *bp = netdev_priv(dev);
11056 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
11059 vnic = &bp->vnic_info[0];
11069 uc_update = bnxt_uc_list_updated(bp);
11077 mc_update = bnxt_mc_list_updated(bp, &mask);
11083 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
11087 static int bnxt_cfg_rx_mode(struct bnxt *bp)
11089 struct net_device *dev = bp->dev;
11090 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11097 uc_update = bnxt_uc_list_updated(bp);
11103 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
11106 hwrm_req_hold(bp, req);
11110 rc = hwrm_req_send(bp, req);
11112 hwrm_req_drop(bp, req);
11129 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
11131 if (BNXT_VF(bp) && rc == -ENODEV) {
11132 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11133 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
11135 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
11138 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11144 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11145 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
11149 !bnxt_promisc_ok(bp))
11151 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
11153 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
11158 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
11161 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
11167 static bool bnxt_can_reserve_rings(struct bnxt *bp)
11170 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
11171 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11179 if (!netif_running(bp->dev))
11187 static bool bnxt_rfs_supported(struct bnxt *bp)
11189 if (bp->flags & BNXT_FLAG_CHIP_P5) {
11190 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
11195 if (BNXT_FW_MAJ(bp) == 212)
11197 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
11199 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
11205 static bool bnxt_rfs_capable(struct bnxt *bp)
11210 if (bp->flags & BNXT_FLAG_CHIP_P5)
11211 return bnxt_rfs_supported(bp);
11212 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
11215 vnics = 1 + bp->rx_nr_rings;
11216 max_vnics = bnxt_get_max_func_vnics(bp);
11217 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
11220 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
11223 if (bp->rx_nr_rings > 1)
11224 netdev_warn(bp->dev,
11230 if (!BNXT_NEW_RM(bp))
11233 if (vnics == bp->hw_resc.resv_vnics)
11236 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
11237 if (vnics <= bp->hw_resc.resv_vnics)
11240 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
11241 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
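bnxt_rfs_capable above checks whether aRFS can be supported: one default VNIC plus one VNIC per RX ring must fit under the function's VNIC and RSS-context maxima, and on resource-managed firmware it additionally tries a reservation and verifies it took effect. A tiny sketch of just the capacity arithmetic (hypothetical names; the real reservation is a firmware call):

/* Sketch of the "1 default VNIC + one per RX ring" capacity check. */
#include <stdbool.h>
#include <stdio.h>

static bool rfs_capable(int rx_rings, int max_vnics, int max_rss_ctxs)
{
    int vnics = 1 + rx_rings;          /* default VNIC + one per RX ring */

    return vnics <= max_vnics && vnics <= max_rss_ctxs;
}

int main(void)
{
    printf("%d\n", rfs_capable(8, 16, 16));    /* 1: 9 VNICs fit     */
    printf("%d\n", rfs_capable(31, 16, 64));   /* 0: needs 32 VNICs  */
    return 0;
}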
11251 struct bnxt *bp = netdev_priv(dev);
11254 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
11257 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
11277 if (BNXT_VF(bp) && bp->vf.vlan)
11285 struct bnxt *bp = netdev_priv(dev);
11286 u32 flags = bp->flags;
11298 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
11307 changes = flags ^ bp->flags;
11310 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
11312 (bp->flags & BNXT_FLAG_CHIP_P5))
11319 if (flags != bp->flags) {
11320 u32 old_flags = bp->flags;
11322 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11323 bp->flags = flags;
11325 bnxt_set_ring_params(bp);
11330 bnxt_close_nic(bp, false, false);
11331 bp->flags = flags;
11333 bnxt_set_ring_params(bp);
11335 return bnxt_open_nic(bp, false, false);
11338 bp->flags = flags;
11339 rc = bnxt_set_tpa(bp,
11343 bp->flags = old_flags;
11349 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
11416 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11421 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11430 return bnxt_exthdr_check(bp, skb,
11438 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11442 return bnxt_udp_tunl_check(bp, skb);
11457 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11467 struct bnxt *bp = netdev_priv(dev);
11476 if (bnxt_tunl_check(bp, skb, *l4_proto))
11480 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11483 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11490 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11499 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
11503 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
11512 resp = hwrm_req_hold(bp, req);
11516 rc = hwrm_req_send(bp, req);
11525 hwrm_req_drop(bp, req);
11529 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11536 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
11542 resp = hwrm_req_hold(bp, req);
11543 rc = hwrm_req_send(bp, req);
11548 hwrm_req_drop(bp, req);
11560 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11573 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11584 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11588 static void bnxt_dbg_dump_states(struct bnxt *bp)
11593 for (i = 0; i < bp->cp_nr_rings; i++) {
11594 bnapi = bp->bnapi[i];
11595 if (netif_msg_drv(bp)) {
11603 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11605 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11612 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
11620 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11621 return hwrm_req_send_silent(bp, req);
11624 static void bnxt_reset_task(struct bnxt *bp, bool silent)
11627 bnxt_dbg_dump_states(bp);
11628 if (netif_running(bp->dev)) {
11632 bnxt_close_nic(bp, false, false);
11633 bnxt_open_nic(bp, false, false);
11635 bnxt_ulp_stop(bp);
11636 bnxt_close_nic(bp, true, false);
11637 rc = bnxt_open_nic(bp, true, false);
11638 bnxt_ulp_start(bp, rc);
11645 struct bnxt *bp = netdev_priv(dev);
11647 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11648 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
11651 static void bnxt_fw_health_check(struct bnxt *bp)
11653 struct bnxt_fw_health *fw_health = bp->fw_health;
11654 struct pci_dev *pdev = bp->pdev;
11657 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11667 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11675 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11685 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
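The health check above reads the firmware heartbeat and reset-count registers on each timer tick and schedules the FW exception event when the firmware appears dead. A simplified standalone sketch of the heartbeat comparison (hypothetical names; the register read is replaced by a plain counter, and the real check tolerates a few missed ticks before firing):

/* Watchdog sketch (hypothetical names): flag a failure when the heartbeat
 * counter does not advance between two consecutive polls.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fw_health {
    uint32_t last_heartbeat;
    bool fw_failed;
};

static void fw_health_check(struct fw_health *h, uint32_t heartbeat_now)
{
    if (heartbeat_now == h->last_heartbeat) {
        h->fw_failed = true;          /* would queue the exception event */
        return;
    }
    h->last_heartbeat = heartbeat_now;
}

int main(void)
{
    struct fw_health h = { .last_heartbeat = 41 };

    fw_health_check(&h, 42);          /* advanced: still healthy */
    fw_health_check(&h, 42);          /* stalled: flag failure   */
    printf("fw_failed=%d\n", h.fw_failed);
    return 0;
}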
11690 struct bnxt *bp = from_timer(bp, t, timer);
11691 struct net_device *dev = bp->dev;
11693 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11696 if (atomic_read(&bp->intr_sem) != 0)
11699 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11700 bnxt_fw_health_check(bp);
11702 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
11703 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
11705 if (bnxt_tc_flower_enabled(bp))
11706 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
11709 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
11710 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
11713 if (bp->link_info.phy_retry) {
11714 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11715 bp->link_info.phy_retry = false;
11716 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11718 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
11722 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11723 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
11725 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11727 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
11730 mod_timer(&bp->timer, jiffies + bp->current_interval);
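bnxt_timer above is a self-rearming periodic tick: it queues whatever slow-path work is currently due (stats, PHY retry, health check, filter retry) and then re-arms itself with mod_timer for the next interval. A minimal userspace analogy of that pattern (hypothetical conditions and delays):

/* Periodic-tick sketch (hypothetical names): check a few conditions, queue
 * follow-up work, and re-arm for the next interval.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool stats_due(int tick)     { return tick % 2 == 0; }
static bool phy_retry_due(int tick) { return tick == 3; }

static void timer_tick(int tick)
{
    if (stats_due(tick))
        printf("tick %d: queue periodic stats work\n", tick);
    if (phy_retry_due(tick))
        printf("tick %d: queue PHY update retry\n", tick);
}

int main(void)
{
    struct timespec interval = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

    for (int tick = 0; tick < 5; tick++) {   /* the driver re-arms forever */
        timer_tick(tick);
        nanosleep(&interval, NULL);          /* the "mod_timer" analogue */
    }
    return 0;
}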
11733 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11740 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11744 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11746 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11751 static void bnxt_reset(struct bnxt *bp, bool silent)
11753 bnxt_rtnl_lock_sp(bp);
11754 if (test_bit(BNXT_STATE_OPEN, &bp->state))
11755 bnxt_reset_task(bp, silent);
11756 bnxt_rtnl_unlock_sp(bp);
11760 static void bnxt_rx_ring_reset(struct bnxt *bp)
11764 bnxt_rtnl_lock_sp(bp);
11765 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11766 bnxt_rtnl_unlock_sp(bp);
11770 if (bp->flags & BNXT_FLAG_TPA)
11771 bnxt_set_tpa(bp, false);
11772 for (i = 0; i < bp->rx_nr_rings; i++) {
11773 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11780 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11783 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11785 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11787 bnxt_reset_task(bp, true);
11790 bnxt_free_one_rx_ring_skbs(bp, i);
11796 bnxt_alloc_one_rx_ring(bp, i);
11799 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11800 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11801 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11803 if (bp->flags & BNXT_FLAG_TPA)
11804 bnxt_set_tpa(bp, true);
11805 bnxt_rtnl_unlock_sp(bp);
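bnxt_rx_ring_reset above attempts a per-ring firmware reset first and escalates to a full reset only when firmware rejects the command or the per-ring reset fails, then refills the ring and rewrites the doorbells. A sketch of that try-cheap-then-escalate recovery pattern (hypothetical functions, not the driver's HWRM calls):

/* Recovery escalation sketch: per-ring reset first, global reset on
 * failure or lack of firmware support.
 */
#include <errno.h>
#include <stdio.h>

static int hw_reset_one_ring(int ring)
{
    (void)ring;
    return -EOPNOTSUPP;               /* pretend firmware lacks support */
}

static void global_reset(void)
{
    printf("falling back to global reset\n");
}

static void reset_rx_rings(int nr_rings)
{
    for (int i = 0; i < nr_rings; i++) {
        int rc = hw_reset_one_ring(i);

        if (rc) {
            if (rc == -EOPNOTSUPP)
                printf("per-ring reset not supported\n");
            global_reset();
            return;                   /* global reset covers every ring */
        }
        printf("ring %d reset and refilled\n", i);
    }
}

int main(void)
{
    reset_rx_rings(4);
    return 0;
}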
11808 static void bnxt_fw_reset_close(struct bnxt *bp)
11810 bnxt_ulp_stop(bp);
11815 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11818 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11820 bp->fw_reset_min_dsecs = 0;
11821 bnxt_tx_disable(bp);
11822 bnxt_disable_napi(bp);
11823 bnxt_disable_int_sync(bp);
11824 bnxt_free_irq(bp);
11825 bnxt_clear_int_mode(bp);
11826 pci_disable_device(bp->pdev);
11828 __bnxt_close_nic(bp, true, false);
11829 bnxt_vf_reps_free(bp);
11830 bnxt_clear_int_mode(bp);
11831 bnxt_hwrm_func_drv_unrgtr(bp);
11832 if (pci_is_enabled(bp->pdev))
11833 pci_disable_device(bp->pdev);
11834 bnxt_free_ctx_mem(bp);
11835 kfree(bp->ctx);
11836 bp->ctx = NULL;
11839 static bool is_bnxt_fw_ok(struct bnxt *bp)
11841 struct bnxt_fw_health *fw_health = bp->fw_health;
11845 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11849 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11860 static void bnxt_force_fw_reset(struct bnxt *bp)
11862 struct bnxt_fw_health *fw_health = bp->fw_health;
11863 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11866 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11867 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11872 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11875 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11877 bnxt_fw_reset_close(bp);
11882 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11884 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11886 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11889 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11890 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11891 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11894 void bnxt_fw_exception(struct bnxt *bp)
11896 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11897 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11898 bnxt_rtnl_lock_sp(bp);
11899 bnxt_force_fw_reset(bp);
11900 bnxt_rtnl_unlock_sp(bp);
11906 static int bnxt_get_registered_vfs(struct bnxt *bp)
11911 if (!BNXT_PF(bp))
11914 rc = bnxt_hwrm_func_qcfg(bp);
11916 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11919 if (bp->pf.registered_vfs)
11920 return bp->pf.registered_vfs;
11921 if (bp->sriov_cfg)
11927 void bnxt_fw_reset(struct bnxt *bp)
11929 bnxt_rtnl_lock_sp(bp);
11930 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11931 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11932 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11937 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11940 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11942 if (bp->pf.active_vfs &&
11943 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11944 n = bnxt_get_registered_vfs(bp);
11946 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11948 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11949 dev_close(bp->dev);
11954 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11955 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11956 bp->fw_reset_state =
11958 bnxt_queue_fw_reset_work(bp, HZ / 10);
11961 bnxt_fw_reset_close(bp);
11962 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11963 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11966 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11967 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11969 bnxt_queue_fw_reset_work(bp, tmo);
11972 bnxt_rtnl_unlock_sp(bp);
11975 static void bnxt_chk_missed_irq(struct bnxt *bp)
11979 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11982 for (i = 0; i < bp->cp_nr_rings; i++) {
11983 struct bnxt_napi *bnapi = bp->bnapi[i];
11997 !bnxt_has_work(bp, cpr2))
12005 bnxt_dbg_hwrm_ring_info_get(bp,
12015 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
12017 struct bnxt_link_info *link_info = &bp->link_info;
12021 if (bp->hwrm_spec_code >= 0x10201) {
12047 static void bnxt_fw_echo_reply(struct bnxt *bp)
12049 struct bnxt_fw_health *fw_health = bp->fw_health;
12053 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
12058 hwrm_req_send(bp, req);
12063 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
12065 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12067 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12068 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12072 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
12073 bnxt_cfg_rx_mode(bp);
12075 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
12076 bnxt_cfg_ntp_filters(bp);
12077 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
12078 bnxt_hwrm_exec_fwd_req(bp);
12079 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
12080 netdev_info(bp->dev, "Receive PF driver unload event!\n");
12081 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
12082 bnxt_hwrm_port_qstats(bp, 0);
12083 bnxt_hwrm_port_qstats_ext(bp, 0);
12084 bnxt_accumulate_all_stats(bp);
12087 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
12090 mutex_lock(&bp->link_lock);
12092 &bp->sp_event))
12093 bnxt_hwrm_phy_qcaps(bp);
12095 rc = bnxt_update_link(bp, true);
12097 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
12101 &bp->sp_event))
12102 bnxt_init_ethtool_link_settings(bp);
12103 mutex_unlock(&bp->link_lock);
12105 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
12108 mutex_lock(&bp->link_lock);
12109 rc = bnxt_update_phy_setting(bp);
12110 mutex_unlock(&bp->link_lock);
12112 netdev_warn(bp->dev, "update phy settings retry failed\n");
12114 bp->link_info.phy_retry = false;
12115 netdev_info(bp->dev, "update phy settings retry succeeded\n");
12118 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
12119 mutex_lock(&bp->link_lock);
12120 bnxt_get_port_module_status(bp);
12121 mutex_unlock(&bp->link_lock);
12124 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
12125 bnxt_tc_flow_stats_work(bp);
12127 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
12128 bnxt_chk_missed_irq(bp);
12130 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
12131 bnxt_fw_echo_reply(bp);
12136 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
12137 bnxt_reset(bp, false);
12139 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
12140 bnxt_reset(bp, true);
12142 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
12143 bnxt_rx_ring_reset(bp);
12145 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
12146 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
12147 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
12148 bnxt_devlink_health_fw_report(bp);
12150 bnxt_fw_reset(bp);
12153 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
12154 if (!is_bnxt_fw_ok(bp))
12155 bnxt_devlink_health_fw_report(bp);
12159 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
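bnxt_sp_task above drains bp->sp_event one bit at a time with test_and_clear_bit, so producers can set event bits from any context and the single work item handles each event at most once per set. A standalone sketch of the same set-bit / test-and-clear-bit dispatch using C11 atomics (hypothetical event names):

/* Event-bit dispatch sketch (hypothetical event names; C11 atomics in place
 * of the kernel bitops).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { EV_RX_MASK, EV_LINK_CHNG, EV_RESET, EV_MAX };

static atomic_ulong sp_event;

static void queue_sp_event(int ev)
{
    atomic_fetch_or(&sp_event, 1UL << ev);   /* then kick the worker */
}

static bool test_and_clear_event(int ev)
{
    unsigned long mask = 1UL << ev;

    return atomic_fetch_and(&sp_event, ~mask) & mask;
}

static void sp_task(void)
{
    if (test_and_clear_event(EV_RX_MASK))
        printf("reprogram RX mask\n");
    if (test_and_clear_event(EV_LINK_CHNG))
        printf("update link state\n");
    if (test_and_clear_event(EV_RESET))
        printf("run reset task\n");
}

int main(void)
{
    queue_sp_event(EV_LINK_CHNG);
    queue_sp_event(EV_RESET);
    sp_task();
    return 0;
}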
12163 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
12174 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
12186 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
12189 if (bp->flags & BNXT_FLAG_AGG_RINGS)
12193 if (BNXT_NEW_RM(bp)) {
12194 cp += bnxt_get_ulp_msix_num(bp);
12195 stats += bnxt_get_ulp_stat_ctxs(bp);
12197 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
12201 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
12203 if (bp->bar2) {
12204 pci_iounmap(pdev, bp->bar2);
12205 bp->bar2 = NULL;
12208 if (bp->bar1) {
12209 pci_iounmap(pdev, bp->bar1);
12210 bp->bar1 = NULL;
12213 if (bp->bar0) {
12214 pci_iounmap(pdev, bp->bar0);
12215 bp->bar0 = NULL;
12219 static void bnxt_cleanup_pci(struct bnxt *bp)
12221 bnxt_unmap_bars(bp, bp->pdev);
12222 pci_release_regions(bp->pdev);
12223 if (pci_is_enabled(bp->pdev))
12224 pci_disable_device(bp->pdev);
12227 static void bnxt_init_dflt_coal(struct bnxt *bp)
12229 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
12240 coal = &bp->rx_coal;
12250 coal = &bp->tx_coal;
12258 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
12261 static int bnxt_fw_init_one_p1(struct bnxt *bp)
12265 bp->fw_cap = 0;
12266 rc = bnxt_hwrm_ver_get(bp);
12272 bnxt_try_map_fw_health_reg(bp);
12274 rc = bnxt_try_recover_fw(bp);
12277 rc = bnxt_hwrm_ver_get(bp);
12282 bnxt_nvm_cfg_ver_get(bp);
12284 rc = bnxt_hwrm_func_reset(bp);
12288 bnxt_hwrm_fw_set_time(bp);
12292 static int bnxt_fw_init_one_p2(struct bnxt *bp)
12297 rc = bnxt_hwrm_func_qcaps(bp);
12299 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
12304 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
12306 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
12309 if (bnxt_alloc_fw_health(bp)) {
12310 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
12312 rc = bnxt_hwrm_error_recovery_qcfg(bp);
12314 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
12318 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
12322 bnxt_hwrm_func_qcfg(bp);
12323 bnxt_hwrm_vnic_qcaps(bp);
12324 bnxt_hwrm_port_led_qcaps(bp);
12325 bnxt_ethtool_init(bp);
12326 if (bp->fw_cap & BNXT_FW_CAP_PTP)
12327 __bnxt_hwrm_ptp_qcfg(bp);
12328 bnxt_dcb_init(bp);
12332 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
12334 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
12335 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
12339 if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
12340 bp->rss_hash_delta = bp->rss_hash_cfg;
12341 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
12342 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
12343 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
12348 static void bnxt_set_dflt_rfs(struct bnxt *bp)
12350 struct net_device *dev = bp->dev;
12354 bp->flags &= ~BNXT_FLAG_RFS;
12355 if (bnxt_rfs_supported(bp)) {
12357 if (bnxt_rfs_capable(bp)) {
12358 bp->flags |= BNXT_FLAG_RFS;
12364 static void bnxt_fw_init_one_p3(struct bnxt *bp)
12366 struct pci_dev *pdev = bp->pdev;
12368 bnxt_set_dflt_rss_hash_type(bp);
12369 bnxt_set_dflt_rfs(bp);
12371 bnxt_get_wol_settings(bp);
12372 if (bp->flags & BNXT_FLAG_WOL_CAP)
12373 device_set_wakeup_enable(&pdev->dev, bp->wol);
12377 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
12378 bnxt_hwrm_coal_params_qcaps(bp);
12381 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
12383 int bnxt_fw_init_one(struct bnxt *bp)
12387 rc = bnxt_fw_init_one_p1(bp);
12389 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
12392 rc = bnxt_fw_init_one_p2(bp);
12394 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
12397 rc = bnxt_probe_phy(bp, false);
12400 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
12404 bnxt_fw_init_one_p3(bp);
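bnxt_fw_init_one above runs the three firmware-init phases in order and stops at the first failure: phase 1 handles the version handshake and function reset, phase 2 the capability queries and driver registration, phase 3 default settings. A sketch of that phased-init-with-early-return shape (hypothetical phase functions):

/* Phased init sketch (hypothetical phase functions), mirroring the
 * p1/p2/p3 split above.
 */
#include <stdio.h>

static int init_phase1(void) { return 0; }   /* version query, func reset */
static int init_phase2(void) { return 0; }   /* capabilities, registration */
static void init_phase3(void) { }            /* defaults: RSS, RFS, WoL */

static int fw_init_one(void)
{
    int rc;

    rc = init_phase1();
    if (rc) {
        fprintf(stderr, "phase 1 failed: %d\n", rc);
        return rc;
    }
    rc = init_phase2();
    if (rc) {
        fprintf(stderr, "phase 2 failed: %d\n", rc);
        return rc;
    }
    init_phase3();                    /* best-effort, no error to return */
    return 0;
}

int main(void)
{
    return fw_init_one();
}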
12408 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
12410 struct bnxt_fw_health *fw_health = bp->fw_health;
12420 pci_write_config_dword(bp->pdev, reg_off, val);
12424 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
12428 writel(val, bp->bar0 + reg_off);
12431 writel(val, bp->bar1 + reg_off);
12435 pci_read_config_dword(bp->pdev, 0, &val);
12440 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
12446 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
12449 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
12453 resp = hwrm_req_hold(bp, req);
12454 if (!hwrm_req_send(bp, req))
12457 hwrm_req_drop(bp, req);
12461 static void bnxt_reset_all(struct bnxt *bp)
12463 struct bnxt_fw_health *fw_health = bp->fw_health;
12466 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12467 bnxt_fw_reset_via_optee(bp);
12468 bp->fw_reset_timestamp = jiffies;
12474 bnxt_fw_reset_writel(bp, i);
12478 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
12484 rc = hwrm_req_send(bp, req);
12487 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12489 bp->fw_reset_timestamp = jiffies;
12492 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12494 return time_after(jiffies, bp->fw_reset_timestamp +
12495 (bp->fw_reset_max_dsecs * HZ / 10));
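bnxt_fw_reset_timeout above converts the firmware-supplied budget in deciseconds into jiffies (dsecs * HZ / 10) and compares against the saved reset timestamp. The same deadline test in a standalone form using CLOCK_MONOTONIC milliseconds (hypothetical names):

/* Deadline-check sketch: start timestamp plus a budget in deciseconds,
 * here expressed in monotonic milliseconds (1 dsec = 100 ms).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ms(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static bool fw_reset_timeout(uint64_t start_ms, unsigned int max_dsecs)
{
    return now_ms() > start_ms + (uint64_t)max_dsecs * 100;
}

int main(void)
{
    uint64_t start = now_ms();

    printf("timed out: %d\n", fw_reset_timeout(start, 600)); /* 60 s budget */
    return 0;
}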
12498 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12500 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12501 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12502 bnxt_ulp_start(bp, rc);
12503 bnxt_dl_health_fw_status_update(bp, false);
12505 bp->fw_reset_state = 0;
12506 dev_close(bp->dev);
12511 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
12514 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12515 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12519 switch (bp->fw_reset_state) {
12521 int n = bnxt_get_registered_vfs(bp);
12525 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
12527 bp->fw_reset_timestamp));
12530 if (bnxt_fw_reset_timeout(bp)) {
12531 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12532 bp->fw_reset_state = 0;
12533 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12537 bnxt_queue_fw_reset_work(bp, HZ / 10);
12540 bp->fw_reset_timestamp = jiffies;
12542 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12543 bnxt_fw_reset_abort(bp, rc);
12547 bnxt_fw_reset_close(bp);
12548 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12549 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12552 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12553 tmo = bp->fw_reset_min_dsecs * HZ / 10;
12556 bnxt_queue_fw_reset_work(bp, tmo);
12562 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12564 !bnxt_fw_reset_timeout(bp)) {
12565 bnxt_queue_fw_reset_work(bp, HZ / 5);
12569 if (!bp->fw_health->primary) {
12570 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12572 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12573 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12576 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12580 bnxt_reset_all(bp);
12581 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12582 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
12585 bnxt_inv_fw_health_reg(bp);
12586 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12587 !bp->fw_reset_min_dsecs) {
12590 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12592 if (bnxt_fw_reset_timeout(bp)) {
12593 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
12597 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12601 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
12602 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
12603 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
12604 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
12605 bnxt_dl_remote_reload(bp);
12606 if (pci_enable_device(bp->pdev)) {
12607 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
12611 pci_set_master(bp->pdev);
12612 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
12615 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12616 rc = bnxt_hwrm_poll(bp);
12618 if (bnxt_fw_reset_timeout(bp)) {
12619 netdev_err(bp->dev, "Firmware reset aborted\n");
12622 bnxt_queue_fw_reset_work(bp, HZ / 5);
12625 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12626 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
12630 bnxt_queue_fw_reset_work(bp, HZ / 10);
12633 rc = bnxt_open(bp->dev);
12635 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12636 bnxt_fw_reset_abort(bp, rc);
12641 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
12642 bp->fw_health->enabled) {
12643 bp->fw_health->last_fw_reset_cnt =
12644 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
12646 bp->fw_reset_state = 0;
12649 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12650 bnxt_ulp_start(bp, 0);
12651 bnxt_reenable_sriov(bp);
12652 bnxt_vf_reps_alloc(bp);
12653 bnxt_vf_reps_open(bp);
12654 bnxt_ptp_reapply_pps(bp);
12655 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
12656 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
12657 bnxt_dl_health_fw_recovery_done(bp);
12658 bnxt_dl_health_fw_status_update(bp, true);
12666 if (bp->fw_health->status_reliable ||
12667 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12668 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12670 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12674 bnxt_fw_reset_abort(bp, rc);
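bnxt_fw_reset_task above is a delayed work item that walks a state machine (poll VFs, reset firmware, re-enable the device, poll for firmware to come back, reopen), re-queuing itself with a state-specific delay until the reset completes or the deadline expires. A compact sketch of that requeue-until-done shape (hypothetical states and delays, not the driver's exact sequence):

/* State-machine sketch of a self-requeuing reset task (hypothetical states
 * and delays; the real task is a delayed workqueue item with a jiffies
 * deadline).
 */
#include <stdio.h>

enum reset_state { POLL_VF, RESET_FW, ENABLE_DEV, POLL_FW, OPENING, DONE };

struct reset_ctx {
    enum reset_state state;
    int polls_left;                 /* stand-in for the timeout deadline */
};

/* Returns the delay (ms) before the next invocation, or -1 when finished. */
static int fw_reset_step(struct reset_ctx *c)
{
    switch (c->state) {
    case POLL_VF:    c->state = RESET_FW;   return 100;
    case RESET_FW:   c->state = ENABLE_DEV; return 1000;
    case ENABLE_DEV: c->state = POLL_FW;    return 100;
    case POLL_FW:
        if (--c->polls_left > 0)
            return 200;             /* firmware not back yet: requeue */
        c->state = OPENING;
        return 100;
    case OPENING:    c->state = DONE;       return -1;
    default:                                 return -1;
    }
}

int main(void)
{
    struct reset_ctx c = { .state = POLL_VF, .polls_left = 3 };
    int delay;

    while ((delay = fw_reset_step(&c)) >= 0)
        printf("state now %d, requeue in %d ms\n", c.state, delay);
    return 0;
}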
12681 struct bnxt *bp = netdev_priv(dev);
12714 bp->dev = dev;
12715 bp->pdev = pdev;
12717 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12720 bp->bar0 = pci_ioremap_bar(pdev, 0);
12721 if (!bp->bar0) {
12727 bp->bar2 = pci_ioremap_bar(pdev, 4);
12728 if (!bp->bar2) {
12734 INIT_WORK(&bp->sp_task, bnxt_sp_task);
12735 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
12737 spin_lock_init(&bp->ntp_fltr_lock);
12739 spin_lock_init(&bp->db_lock);
12742 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12743 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12745 timer_setup(&bp->timer, bnxt_timer, 0);
12746 bp->current_interval = BNXT_TIMER_INTERVAL;
12748 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12749 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12751 clear_bit(BNXT_STATE_OPEN, &bp->state);
12755 bnxt_unmap_bars(bp, pdev);
12769 struct bnxt *bp = netdev_priv(dev);
12778 rc = bnxt_approve_mac(bp, addr->sa_data, true);
12784 bnxt_close_nic(bp, false, false);
12785 rc = bnxt_open_nic(bp, false, false);
12794 struct bnxt *bp = netdev_priv(dev);
12797 bnxt_close_nic(bp, true, false);
12800 bnxt_set_ring_params(bp);
12803 return bnxt_open_nic(bp, true, false);
12810 struct bnxt *bp = netdev_priv(dev);
12814 if (tc > bp->max_tc) {
12816 tc, bp->max_tc);
12823 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12826 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12827 sh, tc, bp->tx_nr_rings_xdp);
12832 if (netif_running(bp->dev))
12833 bnxt_close_nic(bp, true, false);
12836 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12839 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12842 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12843 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12844 bp->tx_nr_rings + bp->rx_nr_rings;
12846 if (netif_running(bp->dev))
12847 return bnxt_open_nic(bp, true, false);
12855 struct bnxt *bp = cb_priv;
12857 if (!bnxt_tc_flower_enabled(bp) ||
12858 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12863 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12874 struct bnxt *bp = netdev_priv(dev);
12881 bp, bp, true);
12929 struct bnxt *bp = netdev_priv(dev);
12938 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12971 bp->hwrm_spec_code < 0x10601) {
12977 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12986 head = &bp->ntp_fltr_hash_tbl[idx];
12997 spin_lock_bh(&bp->ntp_fltr_lock);
12998 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
13001 spin_unlock_bh(&bp->ntp_fltr_lock);
13011 bp->ntp_fltr_count++;
13012 spin_unlock_bh(&bp->ntp_fltr_lock);
13014 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
13023 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
13033 head = &bp->ntp_fltr_hash_tbl[i];
13038 if (rps_may_expire_flow(bp->dev, fltr->rxq,
13041 bnxt_hwrm_cfa_ntuple_filter_free(bp,
13046 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
13055 spin_lock_bh(&bp->ntp_fltr_lock);
13057 bp->ntp_fltr_count--;
13058 spin_unlock_bh(&bp->ntp_fltr_lock);
13060 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
13069 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
13078 struct bnxt *bp = netdev_priv(netdev);
13086 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
13092 struct bnxt *bp = netdev_priv(netdev);
13100 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
13118 struct bnxt *bp = netdev_priv(dev);
13120 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
13127 struct bnxt *bp = netdev_priv(dev);
13131 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
13145 if (mode == bp->br_mode)
13148 rc = bnxt_hwrm_set_br_mode(bp, mode);
13150 bp->br_mode = mode;
13159 struct bnxt *bp = netdev_priv(dev);
13161 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
13165 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
13168 ppid->id_len = sizeof(bp->dsn);
13169 memcpy(ppid->id, bp->dsn, ppid->id_len);
13210 struct bnxt *bp = netdev_priv(dev);
13212 if (BNXT_PF(bp))
13213 bnxt_sriov_disable(bp);
13215 bnxt_rdma_aux_device_uninit(bp);
13217 bnxt_ptp_clear(bp);
13219 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13221 cancel_work_sync(&bp->sp_task);
13222 cancel_delayed_work_sync(&bp->fw_reset_task);
13223 bp->sp_event = 0;
13225 bnxt_dl_fw_reporters_destroy(bp);
13226 bnxt_dl_unregister(bp);
13227 bnxt_shutdown_tc(bp);
13229 bnxt_clear_int_mode(bp);
13230 bnxt_hwrm_func_drv_unrgtr(bp);
13231 bnxt_free_hwrm_resources(bp);
13232 bnxt_ethtool_free(bp);
13233 bnxt_dcb_free(bp);
13234 kfree(bp->ptp_cfg);
13235 bp->ptp_cfg = NULL;
13236 kfree(bp->fw_health);
13237 bp->fw_health = NULL;
13238 bnxt_cleanup_pci(bp);
13239 bnxt_free_ctx_mem(bp);
13240 kfree(bp->ctx);
13241 bp->ctx = NULL;
13242 kfree(bp->rss_indir_tbl);
13243 bp->rss_indir_tbl = NULL;
13244 bnxt_free_port_stats(bp);
13248 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
13251 struct bnxt_link_info *link_info = &bp->link_info;
13253 bp->phy_flags = 0;
13254 rc = bnxt_hwrm_phy_qcaps(bp);
13256 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
13260 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
13261 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
13263 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
13267 mutex_lock(&bp->link_lock);
13268 rc = bnxt_update_link(bp, false);
13270 mutex_unlock(&bp->link_lock);
13271 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
13282 bnxt_init_ethtool_link_settings(bp);
13283 mutex_unlock(&bp->link_lock);
13298 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13301 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13306 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
13307 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
13308 bnxt_get_ulp_msix_num(bp),
13309 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
13310 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
13313 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
13317 if (bp->flags & BNXT_FLAG_AGG_RINGS)
13319 if (bp->flags & BNXT_FLAG_CHIP_P5) {
13320 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
13327 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
13331 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
13337 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
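bnxt_get_max_rings above derives the usable ring counts by taking the minimum of several hardware limits (ring groups, completion rings, IRQs left after the ULP's share, stat contexts) and halving RX when aggregation rings are in use. A loose sketch of that min-of-limits derivation (hypothetical limit names; the real code also handles P5 trimming and shared mode):

/* Sketch: usable rings are the minimum of several resource limits, with RX
 * halved when each RX ring also needs an aggregation ring.
 */
#include <stdio.h>

static int min3(int a, int b, int c)
{
    int m = a < b ? a : b;

    return m < c ? m : c;
}

static void get_max_rings(int max_ring_grps, int max_cp, int max_irq,
                          int agg_rings, int *max_rx, int *max_tx)
{
    *max_rx = min3(max_ring_grps, max_cp, max_irq);
    if (agg_rings)
        *max_rx >>= 1;              /* each RX ring also needs an agg ring */
    *max_tx = min3(max_ring_grps, max_cp, max_irq);
}

int main(void)
{
    int rx, tx;

    get_max_rings(32, 24, 16, 1, &rx, &tx);
    printf("max_rx=%d max_tx=%d\n", rx, tx);   /* 8 and 16 */
    return 0;
}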
13340 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13345 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
13346 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
13348 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
13349 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
13352 bp->flags |= BNXT_FLAG_AGG_RINGS;
13355 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
13356 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13357 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13358 bnxt_set_ring_params(bp);
13361 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
13365 max_cp = bnxt_get_max_func_cp_rings(bp);
13366 max_stat = bnxt_get_max_func_stat_ctxs(bp);
13367 max_irq = bnxt_get_max_func_irqs(bp);
13378 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
13388 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
13390 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
13391 bp->rx_nr_rings = bp->cp_nr_rings;
13392 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
13393 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13396 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
13400 if (!bnxt_can_reserve_rings(bp))
13404 bp->flags |= BNXT_FLAG_SHARED_RINGS;
13409 if (bp->port_count > 1) {
13411 max_t(int, num_online_cpus() / bp->port_count, 1);
13415 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
13418 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
13419 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
13421 bnxt_trim_dflt_sh_rings(bp);
13423 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
13424 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13426 rc = __bnxt_reserve_rings(bp);
13428 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
13429 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13431 bnxt_trim_dflt_sh_rings(bp);
13434 if (bnxt_need_reserve_rings(bp)) {
13435 rc = __bnxt_reserve_rings(bp);
13437 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
13438 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13440 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
13441 bp->rx_nr_rings++;
13442 bp->cp_nr_rings++;
13445 bp->tx_nr_rings = 0;
13446 bp->rx_nr_rings = 0;
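bnxt_set_dflt_rings above sizes the default rings from the online CPU count (split across ports on multi-port adapters), clamps them to the hardware maxima, and in shared-ring mode trims RX/TX/CP down to one common count. A sketch of that defaulting arithmetic (hypothetical names):

/* Default ring sizing sketch: CPU count per port, clamped to hardware
 * maxima, with a common count in shared mode.
 */
#include <stdio.h>

static int imin(int a, int b) { return a < b ? a : b; }

static void set_dflt_rings(int online_cpus, int port_count, int max_rx,
                           int max_tx, int shared, int *rx, int *tx, int *cp)
{
    int dflt = online_cpus / (port_count > 1 ? port_count : 1);

    if (dflt < 1)
        dflt = 1;
    *rx = imin(dflt, max_rx);
    *tx = imin(dflt, max_tx);
    if (shared) {
        *cp = imin(*rx, *tx);       /* shared: one vector per RX/TX pair */
        *rx = *tx = *cp;
    } else {
        *cp = *rx + *tx;
    }
}

int main(void)
{
    int rx, tx, cp;

    set_dflt_rings(16, 2, 12, 6, 1, &rx, &tx, &cp);
    printf("rx=%d tx=%d cp=%d\n", rx, tx, cp);   /* 6 6 6 */
    return 0;
}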
13451 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
13455 if (bp->tx_nr_rings)
13458 bnxt_ulp_irq_stop(bp);
13459 bnxt_clear_int_mode(bp);
13460 rc = bnxt_set_dflt_rings(bp, true);
13462 if (BNXT_VF(bp) && rc == -ENODEV)
13463 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
13465 netdev_err(bp->dev, "Not enough rings available.\n");
13468 rc = bnxt_init_int_mode(bp);
13472 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13474 bnxt_set_dflt_rfs(bp);
13477 bnxt_ulp_irq_restart(bp, rc);
13481 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
13486 bnxt_hwrm_func_qcaps(bp);
13488 if (netif_running(bp->dev))
13489 __bnxt_close_nic(bp, true, false);
13491 bnxt_ulp_irq_stop(bp);
13492 bnxt_clear_int_mode(bp);
13493 rc = bnxt_init_int_mode(bp);
13494 bnxt_ulp_irq_restart(bp, rc);
13496 if (netif_running(bp->dev)) {
13498 dev_close(bp->dev);
13500 rc = bnxt_open_nic(bp, true, false);
13506 static int bnxt_init_mac_addr(struct bnxt *bp)
13510 if (BNXT_PF(bp)) {
13511 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
13514 struct bnxt_vf_info *vf = &bp->vf;
13519 eth_hw_addr_set(bp->dev, vf->mac_addr);
13525 eth_hw_addr_random(bp->dev);
13527 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
13533 static void bnxt_vpd_read_info(struct bnxt *bp)
13535 struct pci_dev *pdev = bp->pdev;
13552 memcpy(bp->board_partno, &vpd_data[pos], size);
13562 memcpy(bp->board_serialno, &vpd_data[pos], size);
13567 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13569 struct pci_dev *pdev = bp->pdev;
13574 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
13580 bp->flags |= BNXT_FLAG_DSN_VALID;
13584 static int bnxt_map_db_bar(struct bnxt *bp)
13586 if (!bp->db_size)
13588 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13589 if (!bp->bar1)
13594 void bnxt_print_device_info(struct bnxt *bp)
13596 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
13597 board_info[bp->board_idx].name,
13598 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
13600 pcie_print_link_status(bp->pdev);
13606 struct bnxt *bp;
13621 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13625 bp = netdev_priv(dev);
13626 bp->board_idx = ent->driver_data;
13627 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
13628 bnxt_set_max_func_irqs(bp, max_irqs);
13630 if (bnxt_vf_pciid(bp->board_idx))
13631 bp->flags |= BNXT_FLAG_VF;
13634 if (BNXT_PF(bp))
13635 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
13638 bp->flags |= BNXT_FLAG_MSIX_CAP;
13649 rc = bnxt_alloc_hwrm_resources(bp);
13653 mutex_init(&bp->hwrm_cmd_lock);
13654 mutex_init(&bp->link_lock);
13656 rc = bnxt_fw_init_one_p1(bp);
13660 if (BNXT_PF(bp))
13661 bnxt_vpd_read_info(bp);
13663 if (BNXT_CHIP_P5(bp)) {
13664 bp->flags |= BNXT_FLAG_CHIP_P5;
13665 if (BNXT_CHIP_SR2(bp))
13666 bp->flags |= BNXT_FLAG_CHIP_SR2;
13669 rc = bnxt_alloc_rss_indir_tbl(bp);
13673 rc = bnxt_fw_init_one_p2(bp);
13677 rc = bnxt_map_db_bar(bp);
13692 if (BNXT_SUPPORTS_TPA(bp))
13706 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13708 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13710 if (BNXT_SUPPORTS_TPA(bp))
13723 init_waitqueue_head(&bp->sriov_cfg_wait);
13725 if (BNXT_SUPPORTS_TPA(bp)) {
13726 bp->gro_func = bnxt_gro_func_5730x;
13727 if (BNXT_CHIP_P4(bp))
13728 bp->gro_func = bnxt_gro_func_5731x;
13729 else if (BNXT_CHIP_P5(bp))
13730 bp->gro_func = bnxt_gro_func_5750x;
13732 if (!BNXT_CHIP_P4_PLUS(bp))
13733 bp->flags |= BNXT_FLAG_DOUBLE_DB;
13735 rc = bnxt_init_mac_addr(bp);
13742 if (BNXT_PF(bp)) {
13744 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13749 dev->max_mtu = bp->max_mtu;
13751 rc = bnxt_probe_phy(bp, true);
13755 bnxt_set_rx_skb_mode(bp, false);
13756 bnxt_set_tpa_flags(bp);
13757 bnxt_set_ring_params(bp);
13758 rc = bnxt_set_dflt_rings(bp, true);
13760 if (BNXT_VF(bp) && rc == -ENODEV) {
13761 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
13763 netdev_err(bp->dev, "Not enough rings available.\n");
13769 bnxt_fw_init_one_p3(bp);
13771 bnxt_init_dflt_coal(bp);
13774 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13776 rc = bnxt_init_int_mode(bp);
13783 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13785 if (BNXT_PF(bp)) {
13795 rc = bnxt_init_tc(bp);
13801 bnxt_inv_fw_health_reg(bp);
13802 rc = bnxt_dl_register(bp);
13810 bnxt_dl_fw_reporters_create(bp);
13812 bnxt_rdma_aux_device_init(bp);
13814 bnxt_print_device_info(bp);
13820 bnxt_dl_unregister(bp);
13822 bnxt_shutdown_tc(bp);
13823 bnxt_clear_int_mode(bp);
13826 bnxt_hwrm_func_drv_unrgtr(bp);
13827 bnxt_free_hwrm_resources(bp);
13828 bnxt_ethtool_free(bp);
13829 bnxt_ptp_clear(bp);
13830 kfree(bp->ptp_cfg);
13831 bp->ptp_cfg = NULL;
13832 kfree(bp->fw_health);
13833 bp->fw_health = NULL;
13834 bnxt_cleanup_pci(bp);
13835 bnxt_free_ctx_mem(bp);
13836 kfree(bp->ctx);
13837 bp->ctx = NULL;
13838 kfree(bp->rss_indir_tbl);
13839 bp->rss_indir_tbl = NULL;
13849 struct bnxt *bp;
13855 bp = netdev_priv(dev);
13856 if (!bp)
13862 bnxt_clear_int_mode(bp);
13866 pci_wake_from_d3(pdev, bp->wol);
13878 struct bnxt *bp = netdev_priv(dev);
13882 bnxt_ulp_stop(bp);
13887 bnxt_hwrm_func_drv_unrgtr(bp);
13888 pci_disable_device(bp->pdev);
13889 bnxt_free_ctx_mem(bp);
13890 kfree(bp->ctx);
13891 bp->ctx = NULL;
13899 struct bnxt *bp = netdev_priv(dev);
13903 rc = pci_enable_device(bp->pdev);
13909 pci_set_master(bp->pdev);
13910 if (bnxt_hwrm_ver_get(bp)) {
13914 rc = bnxt_hwrm_func_reset(bp);
13920 rc = bnxt_hwrm_func_qcaps(bp);
13924 bnxt_clear_reservations(bp, true);
13926 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13931 bnxt_get_wol_settings(bp);
13939 bnxt_ulp_start(bp, rc);
13941 bnxt_reenable_sriov(bp);
13967 struct bnxt *bp = netdev_priv(netdev);
13974 bnxt_ulp_stop(bp);
13982 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13989 bnxt_free_ctx_mem(bp);
13990 kfree(bp->ctx);
13991 bp->ctx = NULL;
14011 struct bnxt *bp = netdev_priv(netdev);
14016 netdev_info(bp->dev, "PCI Slot Reset\n");
14034 &bp->state)) {
14037 pci_write_config_dword(bp->pdev, off, 0);
14042 bnxt_inv_fw_health_reg(bp);
14043 bnxt_try_map_fw_health_reg(bp);
14049 err = bnxt_try_recover_fw(bp);
14060 err = bnxt_hwrm_func_reset(bp);
14064 bnxt_ulp_irq_stop(bp);
14065 bnxt_clear_int_mode(bp);
14066 err = bnxt_init_int_mode(bp);
14067 bnxt_ulp_irq_restart(bp, err);
14071 bnxt_clear_reservations(bp, true);
14087 struct bnxt *bp = netdev_priv(netdev);
14090 netdev_info(bp->dev, "PCI Slot Resume\n");
14093 err = bnxt_hwrm_func_qcaps(bp);
14097 bnxt_ulp_start(bp, err);
14099 bnxt_reenable_sriov(bp);