Lines Matching refs:bp
308 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
310 if (bp->flags & BNXT_FLAG_CHIP_P5)
316 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
318 if (bp->flags & BNXT_FLAG_CHIP_P5)
324 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
326 if (bp->flags & BNXT_FLAG_CHIP_P5)
365 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
368 bnxt_db_write(bp, &txr->tx_db, prod);
372 static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
384 if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
394 struct bnxt *bp = netdev_priv(dev);
403 struct pci_dev *pdev = bp->pdev;
408 if (unlikely(i >= bp->tx_nr_rings)) {
415 txr = &bp->tx_ring[bp->tx_ring_map[i]];
418 free_size = bnxt_tx_avail(bp, txr);
422 netif_warn(bp, tx_err, dev,
424 if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
452 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
621 bnxt_txr_db_kick(bp, txr, prod);
627 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
629 bnxt_txr_db_kick(bp, txr, prod);
631 bnxt_txr_netif_try_stop_queue(bp, txr, txq);
658 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
664 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
667 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
669 struct pci_dev *pdev = bp->pdev;
720 bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
725 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
729 struct device *dev = &bp->pdev->dev;
736 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
742 *mapping += bp->rx_dma_offset;
746 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
750 struct pci_dev *pdev = bp->pdev;
752 data = kmalloc(bp->rx_buf_size, gfp);
756 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
757 bp->rx_buf_use_size, bp->rx_dir,
767 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
774 if (BNXT_RX_PAGE_MODE(bp)) {
776 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
782 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
784 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
790 rx_buf->data_ptr = data + bp->rx_offset;
828 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
835 struct pci_dev *pdev = bp->pdev;
885 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
897 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
910 struct bnxt *bp = bnapi->bp;
917 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
928 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
930 agg = bnxt_get_agg(bp, cpr, idx, start + i);
963 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
977 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
982 dma_addr -= bp->rx_dma_offset;
983 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
988 payload = eth_get_headlen(bp->dev, data_ptr, len);
1010 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1020 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1027 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1028 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1034 skb_reserve(skb, bp->rx_offset);
1039 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
1045 struct pci_dev *pdev = bp->pdev;
1051 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1062 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1064 agg = bnxt_get_agg(bp, cpr, idx, i);
1082 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1116 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1133 struct bnxt *bp = bnapi->bp;
1134 struct pci_dev *pdev = bp->pdev;
1141 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1142 bp->rx_dir);
1147 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1148 bp->rx_dir);
1154 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1170 if (bp->flags & BNXT_FLAG_CHIP_P5)
1177 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1184 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
1186 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1189 if (BNXT_PF(bp))
1190 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1192 schedule_delayed_work(&bp->fw_reset_task, delay);
1195 static void bnxt_queue_sp_work(struct bnxt *bp)
1197 if (BNXT_PF(bp))
1198 queue_work(bnxt_pf_wq, &bp->sp_task);
1200 schedule_work(&bp->sp_task);
1203 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1207 if (bp->flags & BNXT_FLAG_CHIP_P5)
1208 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1210 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
1211 bnxt_queue_sp_work(bp);
1243 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1253 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1267 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1270 bnxt_sched_reset(bp, rxr);
1308 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1498 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1516 if (bp->flags & BNXT_FLAG_CHIP_P5)
1520 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1530 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1532 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1535 return dev ? dev : bp->dev;
1538 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1557 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1564 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1570 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1578 gro = !!(bp->flags & BNXT_FLAG_GRO);
1585 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1602 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1607 if (len <= bp->rx_copy_thresh) {
1617 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1624 tpa_info->data_ptr = new_data + bp->rx_offset;
1628 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1629 bp->rx_buf_use_size, bp->rx_dir,
1637 skb_reserve(skb, bp->rx_offset);
1642 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1650 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1677 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1682 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1694 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1697 if (skb->dev != bp->dev) {
1699 bnxt_vf_rep_rx(bp, skb);
1713 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1718 struct net_device *dev = bp->dev;
1738 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1757 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1764 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1773 bnxt_deliver_skb(bp, bnapi, skb);
1782 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1786 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1788 bnxt_sched_reset(bp, rxr);
1802 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1822 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1823 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1824 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1826 bnxt_sched_reset(bp, rxr);
1835 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1840 if (len <= bp->rx_copy_thresh) {
1857 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1866 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1884 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1915 bnxt_deliver_skb(bp, bnapi, skb);
1935 static int bnxt_force_rx_discard(struct bnxt *bp,
1972 return bnxt_rx_pkt(bp, cpr, raw_cons, event);
1975 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
1977 struct bnxt_fw_health *fw_health = bp->fw_health;
1985 pci_read_config_dword(bp->pdev, reg_off, &val);
1991 val = readl(bp->bar0 + reg_off);
1994 val = readl(bp->bar1 + reg_off);
2002 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2006 for (i = 0; i < bp->rx_nr_rings; i++) {
2007 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2010 grp_info = &bp->grp_info[grp_idx];
2029 static int bnxt_async_event_process(struct bnxt *bp,
2039 struct bnxt_link_info *link_info = &bp->link_info;
2041 if (BNXT_VF(bp))
2051 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2054 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2059 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2062 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2065 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2070 if (BNXT_VF(bp))
2073 if (bp->pf.port_id != port_id)
2076 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2080 if (BNXT_PF(bp))
2082 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2087 if (!bp->fw_health)
2090 bp->fw_reset_timestamp = jiffies;
2091 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2092 if (!bp->fw_reset_min_dsecs)
2093 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2094 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2095 if (!bp->fw_reset_max_dsecs)
2096 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2099 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2101 netif_warn(bp, hw, bp->dev,
2104 bp->fw_reset_min_dsecs * 100,
2105 bp->fw_reset_max_dsecs * 100);
2106 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2110 struct bnxt_fw_health *fw_health = bp->fw_health;
2117 netif_info(bp, drv, bp->dev,
2124 bp->current_interval * 10);
2128 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2130 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2131 netif_info(bp, drv, bp->dev,
2134 bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
2145 netif_notice(bp, hw, bp->dev,
2153 if (bp->flags & BNXT_FLAG_CHIP_P5)
2156 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2161 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2163 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2167 rxr = bp->bnapi[grp_idx]->rx_ring;
2168 bnxt_sched_reset(bp, rxr);
2174 bnxt_queue_sp_work(bp);
2176 bnxt_ulp_async_events(bp, cmpl);
2180 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2190 if (seq_id == bp->hwrm_intr_seq_id)
2191 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
2193 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2199 if ((vf_id < bp->pf.first_vf_id) ||
2200 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2201 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2206 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2207 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2208 bnxt_queue_sp_work(bp);
2212 bnxt_async_event_process(bp,
2225 struct bnxt *bp = bnapi->bp;
2235 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2249 struct bnxt *bp = bnapi->bp;
2256 if (!bnxt_has_work(bp, cpr)) {
2257 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2267 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2274 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2303 if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
2312 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2314 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2333 bnxt_hwrm_handler(bp, txcmp);
2353 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2362 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2365 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2373 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2374 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2379 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2385 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2391 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2393 __bnxt_poll_work_done(bp, bnapi);
2400 struct bnxt *bp = bnapi->bp;
2437 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2446 bnxt_hwrm_handler(bp, txcmp);
2448 netdev_err(bp->dev,
2459 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2462 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2466 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2476 struct bnxt *bp = bnapi->bp;
2481 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2489 if (!bnxt_has_work(bp, cpr)) {
2495 if (bp->flags & BNXT_FLAG_DIM) {
2507 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2516 work_done += __bnxt_poll_work(bp, cpr2,
2524 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2541 __bnxt_poll_work_done(bp, bnapi);
2549 struct bnxt *bp = bnapi->bp;
2556 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2566 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2588 work_done += __bnxt_poll_work(bp, cpr2,
2592 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2596 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2604 static void bnxt_free_tx_skbs(struct bnxt *bp)
2607 struct pci_dev *pdev = bp->pdev;
2609 if (!bp->tx_ring)
2612 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2613 for (i = 0; i < bp->tx_nr_rings; i++) {
2614 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2625 if (i < bp->tx_nr_rings_xdp &&
2660 int ring_idx = j & bp->tx_ring_mask;
2671 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2675 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2677 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2678 struct pci_dev *pdev = bp->pdev;
2682 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2683 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2687 for (i = 0; i < bp->max_tpa; i++) {
2695 bp->rx_buf_use_size, bp->rx_dir,
2716 if (BNXT_RX_PAGE_MODE(bp)) {
2717 mapping -= bp->rx_dma_offset;
2719 bp->rx_dir,
2724 bp->rx_buf_use_size, bp->rx_dir,
2761 static void bnxt_free_rx_skbs(struct bnxt *bp)
2765 if (!bp->rx_ring)
2768 for (i = 0; i < bp->rx_nr_rings; i++)
2769 bnxt_free_one_rx_ring_skbs(bp, i);
2772 static void bnxt_free_skbs(struct bnxt *bp)
2774 bnxt_free_tx_skbs(bp);
2775 bnxt_free_rx_skbs(bp);
2778 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2780 struct pci_dev *pdev = bp->pdev;
2807 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2809 struct pci_dev *pdev = bp->pdev;
2860 static void bnxt_free_tpa_info(struct bnxt *bp)
2864 for (i = 0; i < bp->rx_nr_rings; i++) {
2865 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2870 for (j = 0; j < bp->max_tpa; j++) {
2880 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2884 bp->max_tpa = MAX_TPA;
2885 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2886 if (!bp->max_tpa_v2)
2888 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2891 for (i = 0; i < bp->rx_nr_rings; i++) {
2892 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2895 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2900 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2902 for (j = 0; j < bp->max_tpa; j++) {
2916 static void bnxt_free_rx_rings(struct bnxt *bp)
2920 if (!bp->rx_ring)
2923 bnxt_free_tpa_info(bp);
2924 for (i = 0; i < bp->rx_nr_rings; i++) {
2925 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2941 bnxt_free_ring(bp, &ring->ring_mem);
2944 bnxt_free_ring(bp, &ring->ring_mem);
2948 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2953 pp.pool_size = bp->rx_ring_size;
2954 pp.nid = dev_to_node(&bp->pdev->dev);
2955 pp.dev = &bp->pdev->dev;
2968 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2972 if (!bp->rx_ring)
2975 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2978 for (i = 0; i < bp->rx_nr_rings; i++) {
2979 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2984 rc = bnxt_alloc_rx_page_pool(bp, rxr);
2988 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
3000 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3009 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3014 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3021 if (bp->flags & BNXT_FLAG_TPA)
3022 rc = bnxt_alloc_tpa_info(bp);
3026 static void bnxt_free_tx_rings(struct bnxt *bp)
3029 struct pci_dev *pdev = bp->pdev;
3031 if (!bp->tx_ring)
3034 for (i = 0; i < bp->tx_nr_rings; i++) {
3035 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3039 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3046 bnxt_free_ring(bp, &ring->ring_mem);
3050 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3053 struct pci_dev *pdev = bp->pdev;
3055 bp->tx_push_size = 0;
3056 if (bp->tx_push_thresh) {
3060 bp->tx_push_thresh);
3064 bp->tx_push_thresh = 0;
3067 bp->tx_push_size = push_size;
3070 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3071 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3077 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3082 if (bp->tx_push_size) {
3089 bp->tx_push_size,
3100 qidx = bp->tc_to_qidx[j];
3101 ring->queue_id = bp->q_info[qidx].queue_id;
3102 if (i < bp->tx_nr_rings_xdp)
3104 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3110 static void bnxt_free_cp_rings(struct bnxt *bp)
3114 if (!bp->bnapi)
3117 for (i = 0; i < bp->cp_nr_rings; i++) {
3118 struct bnxt_napi *bnapi = bp->bnapi[i];
3129 bnxt_free_ring(bp, &ring->ring_mem);
3136 bnxt_free_ring(bp, &ring->ring_mem);
3144 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3157 rmem->nr_pages = bp->cp_nr_pages;
3162 rc = bnxt_alloc_ring(bp, rmem);
3164 bnxt_free_ring(bp, rmem);
3171 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3173 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3176 ulp_msix = bnxt_get_ulp_msix_num(bp);
3177 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3178 for (i = 0; i < bp->cp_nr_rings; i++) {
3179 struct bnxt_napi *bnapi = bp->bnapi[i];
3190 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3199 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3202 if (i < bp->rx_nr_rings) {
3204 bnxt_alloc_cp_sub_ring(bp);
3211 if ((sh && i < bp->tx_nr_rings) ||
3212 (!sh && i >= bp->rx_nr_rings)) {
3214 bnxt_alloc_cp_sub_ring(bp);
3225 static void bnxt_init_ring_struct(struct bnxt *bp)
3229 for (i = 0; i < bp->cp_nr_rings; i++) {
3230 struct bnxt_napi *bnapi = bp->bnapi[i];
3243 rmem->nr_pages = bp->cp_nr_pages;
3255 rmem->nr_pages = bp->rx_nr_pages;
3259 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3264 rmem->nr_pages = bp->rx_agg_nr_pages;
3268 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3278 rmem->nr_pages = bp->tx_nr_pages;
3282 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3309 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3311 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3312 struct net_device *dev = bp->dev;
3317 for (i = 0; i < bp->rx_ring_size; i++) {
3318 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3320 ring_nr, i, bp->rx_ring_size);
3327 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3331 for (i = 0; i < bp->rx_agg_ring_size; i++) {
3332 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3334 ring_nr, i, bp->rx_ring_size);
3345 for (i = 0; i < bp->max_tpa; i++) {
3346 data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3351 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3358 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3364 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3370 rxr = &bp->rx_ring[ring_nr];
3374 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3375 bpf_prog_add(bp->xdp_prog, 1);
3376 rxr->xdp_prog = bp->xdp_prog;
3383 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3390 return bnxt_alloc_one_rx_ring(bp, ring_nr);
3393 static void bnxt_init_cp_rings(struct bnxt *bp)
3397 for (i = 0; i < bp->cp_nr_rings; i++) {
3398 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3402 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3403 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3412 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3413 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3418 static int bnxt_init_rx_rings(struct bnxt *bp)
3422 if (BNXT_RX_PAGE_MODE(bp)) {
3423 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3424 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3426 bp->rx_offset = BNXT_RX_OFFSET;
3427 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3430 for (i = 0; i < bp->rx_nr_rings; i++) {
3431 rc = bnxt_init_one_rx_ring(bp, i);
3439 static int bnxt_init_tx_rings(struct bnxt *bp)
3443 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3446 for (i = 0; i < bp->tx_nr_rings; i++) {
3447 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3456 static void bnxt_free_ring_grps(struct bnxt *bp)
3458 kfree(bp->grp_info);
3459 bp->grp_info = NULL;
3462 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3467 bp->grp_info = kcalloc(bp->cp_nr_rings,
3470 if (!bp->grp_info)
3473 for (i = 0; i < bp->cp_nr_rings; i++) {
3475 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3476 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3477 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3478 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3479 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3484 static void bnxt_free_vnics(struct bnxt *bp)
3486 kfree(bp->vnic_info);
3487 bp->vnic_info = NULL;
3488 bp->nr_vnics = 0;
3491 static int bnxt_alloc_vnics(struct bnxt *bp)
3496 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3497 num_vnics += bp->rx_nr_rings;
3500 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3503 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3505 if (!bp->vnic_info)
3508 bp->nr_vnics = num_vnics;
3512 static void bnxt_init_vnics(struct bnxt *bp)
3516 for (i = 0; i < bp->nr_vnics; i++) {
3517 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3526 if (bp->vnic_info[i].rss_hash_key) {
3532 bp->vnic_info[0].rss_hash_key,
3555 void bnxt_set_tpa_flags(struct bnxt *bp)
3557 bp->flags &= ~BNXT_FLAG_TPA;
3558 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3560 if (bp->dev->features & NETIF_F_LRO)
3561 bp->flags |= BNXT_FLAG_LRO;
3562 else if (bp->dev->features & NETIF_F_GRO_HW)
3563 bp->flags |= BNXT_FLAG_GRO;
3566 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3569 void bnxt_set_ring_params(struct bnxt *bp)
3575 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3580 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3581 ring_size = bp->rx_ring_size;
3582 bp->rx_agg_ring_size = 0;
3583 bp->rx_agg_nr_pages = 0;
3585 if (bp->flags & BNXT_FLAG_TPA)
3588 bp->flags &= ~BNXT_FLAG_JUMBO;
3589 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3592 bp->flags |= BNXT_FLAG_JUMBO;
3593 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3600 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3602 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3605 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3607 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3610 bp->rx_agg_ring_size = agg_ring_size;
3611 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3617 bp->rx_buf_use_size = rx_size;
3618 bp->rx_buf_size = rx_space;
3620 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3621 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3623 ring_size = bp->tx_ring_size;
3624 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3625 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3627 max_rx_cmpl = bp->rx_ring_size;
3632 if (bp->flags & BNXT_FLAG_TPA)
3633 max_rx_cmpl += bp->max_tpa;
3635 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3636 bp->cp_ring_size = ring_size;
3638 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3639 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3640 bp->cp_nr_pages = MAX_CP_PAGES;
3641 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3642 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3643 ring_size, bp->cp_ring_size);
3645 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3646 bp->cp_ring_mask = bp->cp_bit - 1;
3652 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3655 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3657 bp->dev->max_mtu =
3658 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3659 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3660 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3661 bp->rx_dir = DMA_BIDIRECTIONAL;
3662 bp->rx_skb_func = bnxt_rx_page_skb;
3664 netdev_update_features(bp->dev);
3666 bp->dev->max_mtu = bp->max_mtu;
3667 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3668 bp->rx_dir = DMA_FROM_DEVICE;
3669 bp->rx_skb_func = bnxt_rx_skb;
3674 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3678 struct pci_dev *pdev = bp->pdev;
3680 if (!bp->vnic_info)
3683 for (i = 0; i < bp->nr_vnics; i++) {
3684 vnic = &bp->vnic_info[i];
3710 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3714 struct pci_dev *pdev = bp->pdev;
3717 for (i = 0; i < bp->nr_vnics; i++) {
3718 vnic = &bp->vnic_info[i];
3745 if (bp->flags & BNXT_FLAG_CHIP_P5)
3749 max_rings = bp->rx_nr_rings;
3759 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3765 if (bp->flags & BNXT_FLAG_CHIP_P5)
3787 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3789 struct pci_dev *pdev = bp->pdev;
3791 if (bp->hwrm_cmd_resp_addr) {
3792 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3793 bp->hwrm_cmd_resp_dma_addr);
3794 bp->hwrm_cmd_resp_addr = NULL;
3797 if (bp->hwrm_cmd_kong_resp_addr) {
3799 bp->hwrm_cmd_kong_resp_addr,
3800 bp->hwrm_cmd_kong_resp_dma_addr);
3801 bp->hwrm_cmd_kong_resp_addr = NULL;
3805 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3807 struct pci_dev *pdev = bp->pdev;
3809 if (bp->hwrm_cmd_kong_resp_addr)
3812 bp->hwrm_cmd_kong_resp_addr =
3814 &bp->hwrm_cmd_kong_resp_dma_addr,
3816 if (!bp->hwrm_cmd_kong_resp_addr)
3822 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3824 struct pci_dev *pdev = bp->pdev;
3826 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3827 &bp->hwrm_cmd_resp_dma_addr,
3829 if (!bp->hwrm_cmd_resp_addr)
3835 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3837 if (bp->hwrm_short_cmd_req_addr) {
3838 struct pci_dev *pdev = bp->pdev;
3840 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3841 bp->hwrm_short_cmd_req_addr,
3842 bp->hwrm_short_cmd_req_dma_addr);
3843 bp->hwrm_short_cmd_req_addr = NULL;
3847 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3849 struct pci_dev *pdev = bp->pdev;
3851 if (bp->hwrm_short_cmd_req_addr)
3854 bp->hwrm_short_cmd_req_addr =
3855 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3856 &bp->hwrm_short_cmd_req_dma_addr,
3858 if (!bp->hwrm_short_cmd_req_addr)
3864 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
3871 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
3877 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
3880 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
3897 bnxt_free_stats_mem(bp, stats);
3917 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
3920 struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3925 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
3926 !(bp->flags & BNXT_FLAG_CHIP_P5))
3929 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
3932 mutex_lock(&bp->hwrm_cmd_lock);
3933 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3941 mutex_unlock(&bp->hwrm_cmd_lock);
3945 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
3946 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
3948 static void bnxt_init_stats(struct bnxt *bp)
3950 struct bnxt_napi *bnapi = bp->bnapi[0];
3961 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
3963 if (bp->flags & BNXT_FLAG_CHIP_P5)
3969 if (bp->flags & BNXT_FLAG_PORT_STATS) {
3970 stats = &bp->port_stats;
3979 rc = bnxt_hwrm_port_qstats(bp, flags);
3988 bnxt_hwrm_port_qstats(bp, 0);
3991 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
3992 stats = &bp->rx_port_stats_ext;
3996 stats = &bp->tx_port_stats_ext;
4002 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4014 bnxt_hwrm_port_qstats_ext(bp, 0);
4019 static void bnxt_free_port_stats(struct bnxt *bp)
4021 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4022 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4024 bnxt_free_stats_mem(bp, &bp->port_stats);
4025 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4026 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4029 static void bnxt_free_ring_stats(struct bnxt *bp)
4033 if (!bp->bnapi)
4036 for (i = 0; i < bp->cp_nr_rings; i++) {
4037 struct bnxt_napi *bnapi = bp->bnapi[i];
4040 bnxt_free_stats_mem(bp, &cpr->stats);
4044 static int bnxt_alloc_stats(struct bnxt *bp)
4049 size = bp->hw_ring_stats_size;
4051 for (i = 0; i < bp->cp_nr_rings; i++) {
4052 struct bnxt_napi *bnapi = bp->bnapi[i];
4056 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4063 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4066 if (bp->port_stats.hw_stats)
4069 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4070 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4074 bp->flags |= BNXT_FLAG_PORT_STATS;
4078 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4079 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4082 if (bp->rx_port_stats_ext.hw_stats)
4085 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4086 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4092 if (bp->tx_port_stats_ext.hw_stats)
4095 if (bp->hwrm_spec_code >= 0x10902 ||
4096 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4097 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4098 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4103 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4107 static void bnxt_clear_ring_indices(struct bnxt *bp)
4111 if (!bp->bnapi)
4114 for (i = 0; i < bp->cp_nr_rings; i++) {
4115 struct bnxt_napi *bnapi = bp->bnapi[i];
4142 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4155 head = &bp->ntp_fltr_hash_tbl[i];
4162 kfree(bp->ntp_fltr_bmap);
4163 bp->ntp_fltr_bmap = NULL;
4165 bp->ntp_fltr_count = 0;
4169 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4174 if (!(bp->flags & BNXT_FLAG_RFS))
4178 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4180 bp->ntp_fltr_count = 0;
4181 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4185 if (!bp->ntp_fltr_bmap)
4194 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4196 bnxt_free_vnic_attributes(bp);
4197 bnxt_free_tx_rings(bp);
4198 bnxt_free_rx_rings(bp);
4199 bnxt_free_cp_rings(bp);
4200 bnxt_free_ntp_fltrs(bp, irq_re_init);
4202 bnxt_free_ring_stats(bp);
4203 if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET) ||
4204 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4205 bnxt_free_port_stats(bp);
4206 bnxt_free_ring_grps(bp);
4207 bnxt_free_vnics(bp);
4208 kfree(bp->tx_ring_map);
4209 bp->tx_ring_map = NULL;
4210 kfree(bp->tx_ring);
4211 bp->tx_ring = NULL;
4212 kfree(bp->rx_ring);
4213 bp->rx_ring = NULL;
4214 kfree(bp->bnapi);
4215 bp->bnapi = NULL;
4217 bnxt_clear_ring_indices(bp);
4221 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4231 bp->cp_nr_rings);
4233 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4237 bp->bnapi = bnapi;
4239 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4240 bp->bnapi[i] = bnapi;
4241 bp->bnapi[i]->index = i;
4242 bp->bnapi[i]->bp = bp;
4243 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4245 &bp->bnapi[i]->cp_ring;
4252 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4255 if (!bp->rx_ring)
4258 for (i = 0; i < bp->rx_nr_rings; i++) {
4259 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4261 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4267 rxr->bnapi = bp->bnapi[i];
4268 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4271 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4274 if (!bp->tx_ring)
4277 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4280 if (!bp->tx_ring_map)
4283 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4286 j = bp->rx_nr_rings;
4288 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4289 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4291 if (bp->flags & BNXT_FLAG_CHIP_P5)
4294 txr->bnapi = bp->bnapi[j];
4295 bp->bnapi[j]->tx_ring = txr;
4296 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4297 if (i >= bp->tx_nr_rings_xdp) {
4298 txr->txq_index = i - bp->tx_nr_rings_xdp;
4299 bp->bnapi[j]->tx_int = bnxt_tx_int;
4301 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4302 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4306 rc = bnxt_alloc_stats(bp);
4309 bnxt_init_stats(bp);
4311 rc = bnxt_alloc_ntp_fltrs(bp);
4315 rc = bnxt_alloc_vnics(bp);
4320 bnxt_init_ring_struct(bp);
4322 rc = bnxt_alloc_rx_rings(bp);
4326 rc = bnxt_alloc_tx_rings(bp);
4330 rc = bnxt_alloc_cp_rings(bp);
4334 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4336 rc = bnxt_alloc_vnic_attributes(bp);
4342 bnxt_free_mem(bp, true);
4346 static void bnxt_disable_int(struct bnxt *bp)
4350 if (!bp->bnapi)
4353 for (i = 0; i < bp->cp_nr_rings; i++) {
4354 struct bnxt_napi *bnapi = bp->bnapi[i];
4359 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4363 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4365 struct bnxt_napi *bnapi = bp->bnapi[n];
4372 static void bnxt_disable_int_sync(struct bnxt *bp)
4376 atomic_inc(&bp->intr_sem);
4378 bnxt_disable_int(bp);
4379 for (i = 0; i < bp->cp_nr_rings; i++) {
4380 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4382 synchronize_irq(bp->irq_tbl[map_idx].vector);
4386 static void bnxt_enable_int(struct bnxt *bp)
4390 atomic_set(&bp->intr_sem, 0);
4391 for (i = 0; i < bp->cp_nr_rings; i++) {
4392 struct bnxt_napi *bnapi = bp->bnapi[i];
4395 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4399 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4407 if (bnxt_kong_hwrm_message(bp, req))
4408 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4410 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
4442 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4450 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4457 if (BNXT_NO_FW_ACCESS(bp) &&
4462 if (msg_len > bp->hwrm_max_ext_req_len ||
4463 !bp->hwrm_short_cmd_req_addr)
4467 if (bnxt_hwrm_kong_chnl(bp, req)) {
4471 resp = bp->hwrm_cmd_kong_resp_addr;
4478 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4481 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4483 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4485 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4492 max_msg_len = bp->hwrm_max_ext_req_len;
4504 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4516 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4519 writel(0, bp->bar0 + bar_offset + i);
4522 writel(1, bp->bar0 + doorbell_offset);
4524 if (!pci_is_enabled(bp->pdev))
4542 u16 seq_id = bp->hwrm_intr_seq_id;
4545 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4550 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4561 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4563 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4577 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4593 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4612 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4628 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4634 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4636 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4639 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4642 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4645 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4649 mutex_lock(&bp->hwrm_cmd_lock);
4650 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4651 mutex_unlock(&bp->hwrm_cmd_lock);
4655 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4660 mutex_lock(&bp->hwrm_cmd_lock);
4661 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4662 mutex_unlock(&bp->hwrm_cmd_lock);
4666 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4669 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4676 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4685 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4687 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4698 if (BNXT_PF(bp)) {
4719 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4728 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4745 mutex_lock(&bp->hwrm_cmd_lock);
4746 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4748 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4751 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4753 mutex_unlock(&bp->hwrm_cmd_lock);
4757 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4761 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4764 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4765 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4768 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4773 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4778 req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4779 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4782 req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4783 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4789 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4791 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4796 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4801 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4803 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4808 mutex_lock(&bp->hwrm_cmd_lock);
4809 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4811 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4818 bp->vxlan_fw_dst_port_id =
4822 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4829 mutex_unlock(&bp->hwrm_cmd_lock);
4833 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4836 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4838 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4844 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4848 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4853 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4855 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4877 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4887 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4888 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4890 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4894 vnic = &bp->vnic_info[fltr->rxq + 1];
4936 mutex_lock(&bp->hwrm_cmd_lock);
4937 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4939 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4942 mutex_unlock(&bp->hwrm_cmd_lock);
4947 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4952 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4954 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4956 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4959 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4972 mutex_lock(&bp->hwrm_cmd_lock);
4973 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4975 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4977 mutex_unlock(&bp->hwrm_cmd_lock);
4981 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4987 mutex_lock(&bp->hwrm_cmd_lock);
4989 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4994 bnxt_hwrm_cmd_hdr_init(bp, &req,
4999 rc = _hwrm_send_message(bp, &req, sizeof(req),
5004 mutex_unlock(&bp->hwrm_cmd_lock);
5009 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5011 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5018 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
5021 u16 mss = bp->dev->mtu - 40;
5052 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5054 max_aggs = bp->max_tpa;
5065 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5068 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5072 grp_info = &bp->grp_info[ring->grp_idx];
5076 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5078 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5085 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5089 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5091 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5098 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5102 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5106 if (bp->flags & BNXT_FLAG_CHIP_P5)
5111 bp->rss_indir_tbl_entries = entries;
5112 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5114 if (!bp->rss_indir_tbl)
5119 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5123 if (!bp->rx_nr_rings)
5126 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5127 max_rings = bp->rx_nr_rings - 1;
5129 max_rings = bp->rx_nr_rings;
5131 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5134 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5136 pad = bp->rss_indir_tbl_entries - max_entries;
5138 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5141 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5145 if (!bp->rss_indir_tbl)
5148 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5150 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5154 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5156 if (bp->flags & BNXT_FLAG_CHIP_P5)
5158 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5163 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5171 j = bp->rss_indir_tbl[i];
5176 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5183 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5188 j = bp->rss_indir_tbl[i];
5189 rxr = &bp->rx_ring[j];
5193 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5198 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5200 if (bp->flags & BNXT_FLAG_CHIP_P5)
5201 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5203 __bnxt_fill_hw_rss_tbl(bp, vnic);
5206 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5208 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5211 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5215 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5217 bnxt_fill_hw_rss_tbl(bp, vnic);
5218 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5225 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5228 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5230 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5235 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5238 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5241 bnxt_fill_hw_rss_tbl(bp, vnic);
5242 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5246 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5253 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5260 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5262 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5265 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
5273 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5274 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5276 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5279 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5284 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
5286 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5288 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5289 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5292 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5296 for (i = 0; i < bp->nr_vnics; i++) {
5297 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5301 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5304 bp->rsscos_nr_ctxs = 0;
5307 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5312 bp->hwrm_cmd_resp_addr;
5314 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
5317 mutex_lock(&bp->hwrm_cmd_lock);
5318 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5320 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5322 mutex_unlock(&bp->hwrm_cmd_lock);
5327 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5329 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5334 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5337 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5341 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
5343 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5344 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5349 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5363 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5371 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5383 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5384 ring = bp->rx_nr_rings - 1;
5386 grp_idx = bp->rx_ring[ring].bnapi->index;
5387 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5390 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5394 if (BNXT_VF(bp))
5395 def_vlan = bp->vf.vlan;
5397 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5399 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5400 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5402 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5405 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5407 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5410 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5412 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5414 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5415 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5419 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5423 for (i = 0; i < bp->nr_vnics; i++)
5424 bnxt_hwrm_vnic_free_one(bp, i);
5427 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5434 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5435 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5437 if (bp->flags & BNXT_FLAG_CHIP_P5)
5442 grp_idx = bp->rx_ring[i].bnapi->index;
5443 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5444 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5448 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5457 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5459 mutex_lock(&bp->hwrm_cmd_lock);
5460 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5463 mutex_unlock(&bp->hwrm_cmd_lock);
5467 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5469 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5473 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5474 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5475 if (bp->hwrm_spec_code < 0x10600)
5478 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5479 mutex_lock(&bp->hwrm_cmd_lock);
5480 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5484 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5486 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5489 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5495 (BNXT_CHIP_P5_THOR(bp) &&
5496 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5497 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5498 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5499 if (bp->max_tpa_v2) {
5500 if (BNXT_CHIP_P5_THOR(bp))
5501 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5503 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5506 mutex_unlock(&bp->hwrm_cmd_lock);
5510 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5515 if (bp->flags & BNXT_FLAG_CHIP_P5)
5518 mutex_lock(&bp->hwrm_cmd_lock);
5519 for (i = 0; i < bp->rx_nr_rings; i++) {
5522 bp->hwrm_cmd_resp_addr;
5523 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5525 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5527 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5528 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5529 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5530 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5532 rc = _hwrm_send_message(bp, &req, sizeof(req),
5537 bp->grp_info[grp_idx].fw_grp_id =
5540 mutex_unlock(&bp->hwrm_cmd_lock);
5544 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5549 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5552 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5554 mutex_lock(&bp->hwrm_cmd_lock);
5555 for (i = 0; i < bp->cp_nr_rings; i++) {
5556 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5559 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5561 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5562 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5564 mutex_unlock(&bp->hwrm_cmd_lock);
5567 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5573 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5578 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5601 grp_info = &bp->grp_info[ring->grp_idx];
5602 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5603 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
5610 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
5611 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5615 grp_info = &bp->grp_info[ring->grp_idx];
5616 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5626 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5629 grp_info = &bp->grp_info[ring->grp_idx];
5639 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5643 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5644 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5646 grp_info = &bp->grp_info[map_index];
5651 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5657 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5658 if (bp->flags & BNXT_FLAG_USING_MSIX)
5662 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5667 mutex_lock(&bp->hwrm_cmd_lock);
5668 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5671 mutex_unlock(&bp->hwrm_cmd_lock);
5674 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5682 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5686 if (BNXT_PF(bp)) {
5689 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5693 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5697 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5701 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5706 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5709 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5710 if (BNXT_PF(bp))
5711 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5713 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5731 db->doorbell = bp->bar1 + map_idx * 0x80;
5747 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5749 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5753 if (bp->flags & BNXT_FLAG_CHIP_P5)
5757 for (i = 0; i < bp->cp_nr_rings; i++) {
5758 struct bnxt_napi *bnapi = bp->bnapi[i];
5764 vector = bp->irq_tbl[map_idx].vector;
5766 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5771 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5772 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5774 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5777 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5779 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5784 for (i = 0; i < bp->tx_nr_rings; i++) {
5785 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5789 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5799 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5802 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5804 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5808 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5811 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5815 for (i = 0; i < bp->rx_nr_rings; i++) {
5816 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5821 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5824 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5827 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5828 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5829 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5837 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5840 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5842 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5848 for (i = 0; i < bp->rx_nr_rings; i++) {
5849 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5853 u32 map_idx = grp_idx + bp->rx_nr_rings;
5855 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5859 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5861 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5862 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5863 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5870 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5876 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5879 if (BNXT_NO_FW_ACCESS(bp))
5882 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5886 mutex_lock(&bp->hwrm_cmd_lock);
5887 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5889 mutex_unlock(&bp->hwrm_cmd_lock);
5892 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5899 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5904 if (!bp->bnapi)
5907 for (i = 0; i < bp->tx_nr_rings; i++) {
5908 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5912 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5914 hwrm_ring_free_send_msg(bp, ring,
5922 for (i = 0; i < bp->rx_nr_rings; i++) {
5923 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5928 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5930 hwrm_ring_free_send_msg(bp, ring,
5935 bp->grp_info[grp_idx].rx_fw_ring_id =
5940 if (bp->flags & BNXT_FLAG_CHIP_P5)
5944 for (i = 0; i < bp->rx_nr_rings; i++) {
5945 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5950 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5952 hwrm_ring_free_send_msg(bp, ring, type,
5956 bp->grp_info[grp_idx].agg_fw_ring_id =
5965 bnxt_disable_int_sync(bp);
5967 if (bp->flags & BNXT_FLAG_CHIP_P5)
5971 for (i = 0; i < bp->cp_nr_rings; i++) {
5972 struct bnxt_napi *bnapi = bp->bnapi[i];
5984 hwrm_ring_free_send_msg(bp, ring,
5992 hwrm_ring_free_send_msg(bp, ring, type,
5995 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
6000 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6003 static int bnxt_hwrm_get_rings(struct bnxt *bp)
6005 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6006 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6010 if (bp->hwrm_spec_code < 0x10601)
6013 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6015 mutex_lock(&bp->hwrm_cmd_lock);
6016 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6018 mutex_unlock(&bp->hwrm_cmd_lock);
6023 if (BNXT_NEW_RM(bp)) {
6033 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6037 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6040 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6041 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6052 mutex_unlock(&bp->hwrm_cmd_lock);
6056 /* Caller must hold bp->hwrm_cmd_lock */
6057 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6059 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6063 if (bp->hwrm_spec_code < 0x10601)
6066 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6068 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6075 static bool bnxt_rfs_supported(struct bnxt *bp);
6078 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
6084 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
6088 if (BNXT_NEW_RM(bp)) {
6091 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6107 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6116 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6117 bnxt_rfs_supported(bp))
6128 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
6135 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
6140 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6155 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6170 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6176 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6181 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6185 if (bp->hwrm_spec_code < 0x10601)
6186 bp->hw_resc.resv_tx_rings = tx_rings;
6188 return bnxt_hwrm_get_rings(bp);
6192 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6198 if (!BNXT_NEW_RM(bp)) {
6199 bp->hw_resc.resv_tx_rings = tx_rings;
6203 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6205 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6209 return bnxt_hwrm_get_rings(bp);
6212 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6215 if (BNXT_PF(bp))
6216 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6219 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6223 int bnxt_nq_rings_in_use(struct bnxt *bp)
6225 int cp = bp->cp_nr_rings;
6228 ulp_msix = bnxt_get_ulp_msix_num(bp);
6230 ulp_base = bnxt_get_ulp_msix_base(bp);
6238 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6242 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6243 return bnxt_nq_rings_in_use(bp);
6245 cp = bp->tx_nr_rings + bp->rx_nr_rings;
6249 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6251 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6252 int cp = bp->cp_nr_rings;
6257 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6258 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6266 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6268 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6271 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6272 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6273 if (!netif_is_rxfh_configured(bp->dev))
6274 bnxt_set_dflt_rss_indir_tbl(bp);
6278 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6280 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6281 int cp = bnxt_cp_rings_in_use(bp);
6282 int nq = bnxt_nq_rings_in_use(bp);
6283 int rx = bp->rx_nr_rings, stat;
6286 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6287 bp->hwrm_spec_code >= 0x10601)
6295 if (!BNXT_NEW_RM(bp)) {
6296 bnxt_check_rss_tbl_no_rmgr(bp);
6299 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6301 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6303 stat = bnxt_get_func_stat_ctxs(bp);
6307 !(bp->flags & BNXT_FLAG_CHIP_P5)))
6309 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6315 static int __bnxt_reserve_rings(struct bnxt *bp)
6317 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6318 int cp = bnxt_nq_rings_in_use(bp);
6319 int tx = bp->tx_nr_rings;
6320 int rx = bp->rx_nr_rings;
6325 if (!bnxt_need_reserve_rings(bp))
6328 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6330 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6332 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6334 grp = bp->rx_nr_rings;
6335 stat = bnxt_get_func_stat_ctxs(bp);
6337 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6342 if (BNXT_NEW_RM(bp)) {
6351 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6355 if (netif_running(bp->dev))
6358 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6359 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6360 bp->dev->hw_features &= ~NETIF_F_LRO;
6361 bp->dev->features &= ~NETIF_F_LRO;
6362 bnxt_set_ring_params(bp);
6366 cp = min_t(int, cp, bp->cp_nr_rings);
6367 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6368 stat -= bnxt_get_ulp_stat_ctxs(bp);
6370 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6371 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6374 bp->tx_nr_rings = tx;
6379 if (rx_rings != bp->rx_nr_rings) {
6380 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6381 rx_rings, bp->rx_nr_rings);
6382 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6383 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6384 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6385 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6386 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6387 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6390 bp->rx_nr_rings = rx_rings;
6391 bp->cp_nr_rings = cp;
6396 if (!netif_is_rxfh_configured(bp->dev))
6397 bnxt_set_dflt_rss_indir_tbl(bp);
6402 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6409 if (!BNXT_NEW_RM(bp))
6412 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6420 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6424 return hwrm_send_message_silent(bp, &req, sizeof(req),
6428 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6435 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6438 if (BNXT_NEW_RM(bp)) {
6443 if (bp->flags & BNXT_FLAG_CHIP_P5)
6451 return hwrm_send_message_silent(bp, &req, sizeof(req),
6455 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6459 if (bp->hwrm_spec_code < 0x10801)
6462 if (BNXT_PF(bp))
6463 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6467 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6471 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6473 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6474 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6488 if (bp->hwrm_spec_code < 0x10902)
6491 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6492 mutex_lock(&bp->hwrm_cmd_lock);
6493 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6513 mutex_unlock(&bp->hwrm_cmd_lock);
6516 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6518 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6523 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6527 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6546 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6564 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6581 /* Caller holds bp->hwrm_cmd_lock */
6582 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6587 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6594 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6600 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6604 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6607 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6616 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6624 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6627 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6629 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6631 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6635 int bnxt_hwrm_set_coal(struct bnxt *bp)
6641 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6643 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6646 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6647 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
6649 mutex_lock(&bp->hwrm_cmd_lock);
6650 for (i = 0; i < bp->cp_nr_rings; i++) {
6651 struct bnxt_napi *bnapi = bp->bnapi[i];
6657 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6660 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6664 rc = _hwrm_send_message(bp, req, sizeof(*req),
6669 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6674 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6676 rc = _hwrm_send_message(bp, req, sizeof(*req),
6682 hw_coal = &bp->rx_coal;
6684 hw_coal = &bp->tx_coal;
6685 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6687 mutex_unlock(&bp->hwrm_cmd_lock);
6691 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6697 if (!bp->bnapi)
6700 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6703 bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
6704 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6706 mutex_lock(&bp->hwrm_cmd_lock);
6707 for (i = 0; i < bp->cp_nr_rings; i++) {
6708 struct bnxt_napi *bnapi = bp->bnapi[i];
6713 if (BNXT_FW_MAJ(bp) <= 20) {
6715 _hwrm_send_message(bp, &req0, sizeof(req0),
6718 _hwrm_send_message(bp, &req, sizeof(req),
6724 mutex_unlock(&bp->hwrm_cmd_lock);
6727 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6731 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6733 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6736 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6738 req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6739 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6741 mutex_lock(&bp->hwrm_cmd_lock);
6742 for (i = 0; i < bp->cp_nr_rings; i++) {
6743 struct bnxt_napi *bnapi = bp->bnapi[i];
6748 rc = _hwrm_send_message(bp, &req, sizeof(req),
6755 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6757 mutex_unlock(&bp->hwrm_cmd_lock);
6761 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6764 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6769 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6771 mutex_lock(&bp->hwrm_cmd_lock);
6772 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6777 if (BNXT_VF(bp)) {
6778 struct bnxt_vf_info *vf = &bp->vf;
6782 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6788 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6790 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6792 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6793 bp->flags |= BNXT_FLAG_MULTI_HOST;
6795 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6801 bp->port_partition_type = resp->port_partition_type;
6804 if (bp->hwrm_spec_code < 0x10707 ||
6806 bp->br_mode = BRIDGE_MODE_VEB;
6808 bp->br_mode = BRIDGE_MODE_VEPA;
6810 bp->br_mode = BRIDGE_MODE_UNDEF;
6812 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6813 if (!bp->max_mtu)
6814 bp->max_mtu = BNXT_MAX_MTU;
6816 if (bp->db_size)
6819 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6820 if (BNXT_PF(bp))
6825 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6827 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6828 bp->db_size <= min_db_offset)
6829 bp->db_size = pci_resource_len(bp->pdev, 2);
6832 mutex_unlock(&bp->hwrm_cmd_lock);
6836 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6840 bp->hwrm_cmd_resp_addr;
6843 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6846 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6847 mutex_lock(&bp->hwrm_cmd_lock);
6848 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6893 ctx->tqm_fp_rings_count = bp->max_q;
6906 bp->ctx = ctx;
6911 mutex_unlock(&bp->hwrm_cmd_lock);
6937 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6940 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6952 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
7035 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7038 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7049 return bnxt_alloc_ring(bp, rmem);
7052 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7077 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7093 rmem->init_val = bp->ctx->ctx_kind_initializer;
7100 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7109 rmem->init_val = bp->ctx->ctx_kind_initializer;
7110 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7115 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7132 bnxt_free_ring(bp, rmem2);
7140 bnxt_free_ring(bp, rmem);
7144 static void bnxt_free_ctx_mem(struct bnxt *bp)
7146 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7154 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7159 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7160 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7161 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7162 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7163 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7164 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7165 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7169 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7181 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7183 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7187 ctx = bp->ctx;
7191 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7201 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
7208 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
7215 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
7223 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
7230 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
7235 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7246 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
7258 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
7275 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
7281 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7283 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7291 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7293 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7295 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7298 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
7301 mutex_lock(&bp->hwrm_cmd_lock);
7302 rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
7328 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7335 if (BNXT_PF(bp)) {
7336 struct bnxt_pf_info *pf = &bp->pf;
7344 mutex_unlock(&bp->hwrm_cmd_lock);
7348 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7352 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7353 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7356 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
7359 mutex_lock(&bp->hwrm_cmd_lock);
7360 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7366 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7368 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7370 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7372 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7374 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7376 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7378 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7380 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7384 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7386 bp->tx_push_thresh = 0;
7388 BNXT_FW_MAJ(bp) > 217)
7389 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7402 if (BNXT_PF(bp)) {
7403 struct bnxt_pf_info *pf = &bp->pf;
7416 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7418 bp->flags |= BNXT_FLAG_WOL_CAP;
7421 struct bnxt_vf_info *vf = &bp->vf;
7429 mutex_unlock(&bp->hwrm_cmd_lock);
7433 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7435 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7439 rc = __bnxt_hwrm_func_qcaps(bp);
7442 rc = bnxt_hwrm_queue_qportcfg(bp);
7444 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7447 if (bp->hwrm_spec_code >= 0x10803) {
7448 rc = bnxt_alloc_ctx_mem(bp);
7451 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7453 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7458 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7465 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7468 resp = bp->hwrm_cmd_resp_addr;
7469 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7471 mutex_lock(&bp->hwrm_cmd_lock);
7472 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7479 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7482 mutex_unlock(&bp->hwrm_cmd_lock);
7486 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7488 if (bp->fw_health)
7491 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7492 if (!bp->fw_health)
7498 static int bnxt_alloc_fw_health(struct bnxt *bp)
7502 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7503 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7506 rc = __bnxt_alloc_fw_health(bp);
7508 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7509 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7516 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7518 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7523 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7530 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7531 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7535 if (bp->fw_health)
7536 bp->fw_health->status_reliable = false;
7540 if (__bnxt_alloc_fw_health(bp)) {
7541 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7546 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7549 __bnxt_map_fw_health_reg(bp, status_loc);
7550 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7554 bp->fw_health->status_reliable = true;
7557 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7559 struct bnxt_fw_health *fw_health = bp->fw_health;
7578 __bnxt_map_fw_health_reg(bp, reg_base);
7582 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7584 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7585 struct bnxt_fw_health *fw_health = bp->fw_health;
7589 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7592 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7593 mutex_lock(&bp->hwrm_cmd_lock);
7594 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7599 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7636 mutex_unlock(&bp->hwrm_cmd_lock);
7638 rc = bnxt_map_fw_health_regs(bp);
7640 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7644 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7648 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7651 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7654 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7658 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7659 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7664 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7668 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
7672 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7674 mutex_lock(&bp->hwrm_cmd_lock);
7675 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7683 bp->max_tc = resp->max_configurable_queues;
7684 bp->max_lltc = resp->max_configurable_lossless_queues;
7685 if (bp->max_tc > BNXT_MAX_QUEUE)
7686 bp->max_tc = BNXT_MAX_QUEUE;
7688 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7690 for (i = 0, j = 0; i < bp->max_tc; i++) {
7691 bp->q_info[j].queue_id = *qptr;
7692 bp->q_ids[i] = *qptr++;
7693 bp->q_info[j].queue_profile = *qptr++;
7694 bp->tc_to_qidx[j] = j;
7695 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7696 (no_rdma && BNXT_PF(bp)))
7699 bp->max_q = bp->max_tc;
7700 bp->max_tc = max_t(u8, j, 1);
7703 bp->max_tc = 1;
7705 if (bp->max_lltc > bp->max_tc)
7706 bp->max_lltc = bp->max_tc;
7709 mutex_unlock(&bp->hwrm_cmd_lock);
7713 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
7718 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7723 rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
7728 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7730 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
7735 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7736 mutex_lock(&bp->hwrm_cmd_lock);
7737 rc = __bnxt_hwrm_ver_get(bp, false);
7741 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7743 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7747 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7750 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7756 if (bp->hwrm_spec_code > hwrm_ver)
7757 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7761 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7766 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
7778 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
7779 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
7783 int fw_ver_len = strlen(bp->fw_ver_str);
7785 snprintf(bp->fw_ver_str + fw_ver_len,
7788 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7791 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7792 if (!bp->hwrm_cmd_timeout)
7793 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7796 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
7797 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7799 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7800 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
7802 bp->chip_num = le16_to_cpu(resp->chip_num);
7803 bp->chip_rev = resp->chip_rev;
7804 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7806 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
7811 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
7814 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
7818 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
7822 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
7826 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
7829 mutex_unlock(&bp->hwrm_cmd_lock);
7833 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
7839 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
7840 bp->hwrm_spec_code < 0x10400)
7844 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
7851 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7892 static void bnxt_accumulate_all_stats(struct bnxt *bp)
7899 if (bp->flags & BNXT_FLAG_CHIP_P5)
7902 for (i = 0; i < bp->cp_nr_rings; i++) {
7903 struct bnxt_napi *bnapi = bp->bnapi[i];
7915 if (bp->flags & BNXT_FLAG_PORT_STATS) {
7916 struct bnxt_stats_mem *stats = &bp->port_stats;
7931 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
7932 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
7933 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
7937 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
7939 struct bnxt_pf_info *pf = &bp->pf;
7942 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
7945 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
7949 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
7951 req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
7953 req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
7954 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7957 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
7959 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
7962 struct bnxt_pf_info *pf = &bp->pf;
7966 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
7969 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
7972 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
7976 req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
7977 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
7980 req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
7981 mutex_lock(&bp->hwrm_cmd_lock);
7982 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7984 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
7985 bp->fw_tx_stats_ext_size = tx_stat_size ?
7988 bp->fw_rx_stats_ext_size = 0;
7989 bp->fw_tx_stats_ext_size = 0;
7994 if (bp->fw_tx_stats_ext_size <=
7996 mutex_unlock(&bp->hwrm_cmd_lock);
7997 bp->pri2cos_valid = 0;
8001 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
8004 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
8010 resp2 = bp->hwrm_cmd_resp_addr;
8019 bp->pri2cos_valid = false;
8022 for (j = 0; j < bp->max_q; j++) {
8023 if (bp->q_ids[j] == queue_id)
8024 bp->pri2cos_idx[i] = queue_idx;
8027 bp->pri2cos_valid = 1;
8030 mutex_unlock(&bp->hwrm_cmd_lock);
8034 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8036 if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
8038 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8039 if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
8041 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8044 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8050 tpa_flags = bp->flags & BNXT_FLAG_TPA;
8051 else if (BNXT_NO_FW_ACCESS(bp))
8053 for (i = 0; i < bp->nr_vnics; i++) {
8054 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8056 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
8064 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8068 for (i = 0; i < bp->nr_vnics; i++)
8069 bnxt_hwrm_vnic_set_rss(bp, i, false);
8072 static void bnxt_clear_vnic(struct bnxt *bp)
8074 if (!bp->vnic_info)
8077 bnxt_hwrm_clear_vnic_filter(bp);
8078 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8080 bnxt_hwrm_clear_vnic_rss(bp);
8081 bnxt_hwrm_vnic_ctx_free(bp);
8084 if (bp->flags & BNXT_FLAG_TPA)
8085 bnxt_set_tpa(bp, false);
8086 bnxt_hwrm_vnic_free(bp);
8087 if (bp->flags & BNXT_FLAG_CHIP_P5)
8088 bnxt_hwrm_vnic_ctx_free(bp);
8091 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8094 bnxt_clear_vnic(bp);
8095 bnxt_hwrm_ring_free(bp, close_path);
8096 bnxt_hwrm_ring_grp_free(bp);
8098 bnxt_hwrm_stat_ctx_free(bp);
8099 bnxt_hwrm_free_tunnel_ports(bp);
8103 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8107 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8116 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8119 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8123 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8126 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8133 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8136 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8138 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8145 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8147 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8151 bp->rsscos_nr_ctxs++;
8153 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8154 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8156 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8160 bp->rsscos_nr_ctxs++;
8165 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8167 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8173 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8175 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8180 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8181 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8183 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8192 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8196 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8198 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8200 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8204 bp->rsscos_nr_ctxs++;
8209 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8211 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8215 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8217 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8221 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8222 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8224 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8231 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8233 if (bp->flags & BNXT_FLAG_CHIP_P5)
8234 return __bnxt_setup_vnic_p5(bp, vnic_id);
8236 return __bnxt_setup_vnic(bp, vnic_id);
8239 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8244 if (bp->flags & BNXT_FLAG_CHIP_P5)
8247 for (i = 0; i < bp->rx_nr_rings; i++) {
8252 if (vnic_id >= bp->nr_vnics)
8255 vnic = &bp->vnic_info[vnic_id];
8257 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8259 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8261 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8265 rc = bnxt_setup_vnic(bp, vnic_id);
8276 static bool bnxt_promisc_ok(struct bnxt *bp)
8279 if (BNXT_VF(bp) && !bp->vf.vlan)
8285 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8289 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8291 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8296 rc = bnxt_hwrm_vnic_cfg(bp, 1);
8298 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8308 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8310 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8312 unsigned int rx_nr_rings = bp->rx_nr_rings;
8315 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8317 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8323 rc = bnxt_hwrm_ring_alloc(bp);
8325 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8329 rc = bnxt_hwrm_ring_grp_alloc(bp);
8331 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8335 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8339 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8341 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8345 if (BNXT_VF(bp))
8346 bnxt_hwrm_func_qcfg(bp);
8348 rc = bnxt_setup_vnic(bp, 0);
8352 if (bp->flags & BNXT_FLAG_RFS) {
8353 rc = bnxt_alloc_rfs_vnics(bp);
8358 if (bp->flags & BNXT_FLAG_TPA) {
8359 rc = bnxt_set_tpa(bp, true);
8364 if (BNXT_VF(bp))
8365 bnxt_update_vf_mac(bp);
8368 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8370 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8376 if (bp->dev->flags & IFF_BROADCAST)
8379 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
8382 if (bp->dev->flags & IFF_ALLMULTI) {
8388 bnxt_mc_list_updated(bp, &mask);
8392 rc = bnxt_cfg_rx_mode(bp);
8396 rc = bnxt_hwrm_set_coal(bp);
8398 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8401 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8402 rc = bnxt_setup_nitroa0_vnic(bp);
8404 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8408 if (BNXT_VF(bp)) {
8409 bnxt_hwrm_func_qcfg(bp);
8410 netdev_update_features(bp->dev);
8416 bnxt_hwrm_resource_free(bp, 0, true);
8421 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8423 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8427 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8429 bnxt_init_cp_rings(bp);
8430 bnxt_init_rx_rings(bp);
8431 bnxt_init_tx_rings(bp);
8432 bnxt_init_ring_grps(bp, irq_re_init);
8433 bnxt_init_vnics(bp);
8435 return bnxt_init_chip(bp, irq_re_init);
8438 static int bnxt_set_real_num_queues(struct bnxt *bp)
8441 struct net_device *dev = bp->dev;
8443 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8444 bp->tx_nr_rings_xdp);
8448 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8453 if (bp->flags & BNXT_FLAG_RFS)
8454 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8460 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8484 static void bnxt_setup_msix(struct bnxt *bp)
8486 const int len = sizeof(bp->irq_tbl[0].name);
8487 struct net_device *dev = bp->dev;
8495 count = bp->tx_nr_rings_per_tc;
8501 for (i = 0; i < bp->cp_nr_rings; i++) {
8502 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8505 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8507 else if (i < bp->rx_nr_rings)
8512 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8514 bp->irq_tbl[map_idx].handler = bnxt_msix;
8518 static void bnxt_setup_inta(struct bnxt *bp)
8520 const int len = sizeof(bp->irq_tbl[0].name);
8522 if (netdev_get_num_tc(bp->dev))
8523 netdev_reset_tc(bp->dev);
8525 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8527 bp->irq_tbl[0].handler = bnxt_inta;
8530 static int bnxt_init_int_mode(struct bnxt *bp);
8532 static int bnxt_setup_int_mode(struct bnxt *bp)
8536 if (!bp->irq_tbl) {
8537 rc = bnxt_init_int_mode(bp);
8538 if (rc || !bp->irq_tbl)
8542 if (bp->flags & BNXT_FLAG_USING_MSIX)
8543 bnxt_setup_msix(bp);
8545 bnxt_setup_inta(bp);
8547 rc = bnxt_set_real_num_queues(bp);
8552 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8554 return bp->hw_resc.max_rsscos_ctxs;
8557 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8559 return bp->hw_resc.max_vnics;
8563 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8565 return bp->hw_resc.max_stat_ctxs;
8568 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8570 return bp->hw_resc.max_cp_rings;
8573 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8575 unsigned int cp = bp->hw_resc.max_cp_rings;
8577 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8578 cp -= bnxt_get_ulp_msix_num(bp);
8583 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8585 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8587 if (bp->flags & BNXT_FLAG_CHIP_P5)
8593 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8595 bp->hw_resc.max_irqs = max_irqs;
8598 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8602 cp = bnxt_get_max_func_cp_rings_for_en(bp);
8603 if (bp->flags & BNXT_FLAG_CHIP_P5)
8604 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8606 return cp - bp->cp_nr_rings;
8609 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8611 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8614 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8616 int max_cp = bnxt_get_max_func_cp_rings(bp);
8617 int max_irq = bnxt_get_max_func_irqs(bp);
8618 int total_req = bp->cp_nr_rings + num;
8621 max_idx = bp->total_irqs;
8622 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8623 max_idx = min_t(int, bp->total_irqs, max_cp);
8624 avail_msix = max_idx - bp->cp_nr_rings;
8625 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8629 num = max_irq - bp->cp_nr_rings;
8636 static int bnxt_get_num_msix(struct bnxt *bp)
8638 if (!BNXT_NEW_RM(bp))
8639 return bnxt_get_max_func_irqs(bp);
8641 return bnxt_nq_rings_in_use(bp);
8644 static int bnxt_init_msix(struct bnxt *bp)
8649 total_vecs = bnxt_get_num_msix(bp);
8650 max = bnxt_get_max_func_irqs(bp);
8666 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8669 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8670 ulp_msix = bnxt_get_ulp_msix_num(bp);
8676 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8677 if (bp->irq_tbl) {
8679 bp->irq_tbl[i].vector = msix_ent[i].vector;
8681 bp->total_irqs = total_vecs;
8683 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8688 bp->cp_nr_rings = (min == 1) ?
8689 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8690 bp->tx_nr_rings + bp->rx_nr_rings;
8696 bp->flags |= BNXT_FLAG_USING_MSIX;
8701 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8702 kfree(bp->irq_tbl);
8703 bp->irq_tbl = NULL;
8704 pci_disable_msix(bp->pdev);
8709 static int bnxt_init_inta(struct bnxt *bp)
8711 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
8712 if (!bp->irq_tbl)
8715 bp->total_irqs = 1;
8716 bp->rx_nr_rings = 1;
8717 bp->tx_nr_rings = 1;
8718 bp->cp_nr_rings = 1;
8719 bp->flags |= BNXT_FLAG_SHARED_RINGS;
8720 bp->irq_tbl[0].vector = bp->pdev->irq;
8724 static int bnxt_init_int_mode(struct bnxt *bp)
8728 if (bp->flags & BNXT_FLAG_MSIX_CAP)
8729 rc = bnxt_init_msix(bp);
8731 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8733 rc = bnxt_init_inta(bp);
8738 static void bnxt_clear_int_mode(struct bnxt *bp)
8740 if (bp->flags & BNXT_FLAG_USING_MSIX)
8741 pci_disable_msix(bp->pdev);
8743 kfree(bp->irq_tbl);
8744 bp->irq_tbl = NULL;
8745 bp->flags &= ~BNXT_FLAG_USING_MSIX;
8748 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8750 int tcs = netdev_get_num_tc(bp->dev);
8754 if (!bnxt_need_reserve_rings(bp))
8757 if (irq_re_init && BNXT_NEW_RM(bp) &&
8758 bnxt_get_num_msix(bp) != bp->total_irqs) {
8759 bnxt_ulp_irq_stop(bp);
8760 bnxt_clear_int_mode(bp);
8763 rc = __bnxt_reserve_rings(bp);
8766 rc = bnxt_init_int_mode(bp);
8767 bnxt_ulp_irq_restart(bp, rc);
8770 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8773 if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
8774 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
8775 netdev_err(bp->dev, "tx ring reservation failure\n");
8776 netdev_reset_tc(bp->dev);
8777 if (bp->tx_nr_rings_xdp)
8778 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
8780 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8786 static void bnxt_free_irq(struct bnxt *bp)
8792 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8793 bp->dev->rx_cpu_rmap = NULL;
8795 if (!bp->irq_tbl || !bp->bnapi)
8798 for (i = 0; i < bp->cp_nr_rings; i++) {
8799 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8801 irq = &bp->irq_tbl[map_idx];
8808 free_irq(irq->vector, bp->bnapi[i]);
8815 static int bnxt_request_irq(struct bnxt *bp)
8823 rc = bnxt_setup_int_mode(bp);
8825 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
8830 rmap = bp->dev->rx_cpu_rmap;
8832 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
8835 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
8836 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8837 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
8840 if (rmap && bp->bnapi[i]->rx_ring) {
8843 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
8849 bp->bnapi[i]);
8856 int numa_node = dev_to_node(&bp->pdev->dev);
8863 netdev_warn(bp->dev,
8873 static void bnxt_del_napi(struct bnxt *bp)
8877 if (!bp->bnapi)
8880 for (i = 0; i < bp->cp_nr_rings; i++) {
8881 struct bnxt_napi *bnapi = bp->bnapi[i];
8891 static void bnxt_init_napi(struct bnxt *bp)
8894 unsigned int cp_nr_rings = bp->cp_nr_rings;
8897 if (bp->flags & BNXT_FLAG_USING_MSIX) {
8900 if (bp->flags & BNXT_FLAG_CHIP_P5)
8902 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8905 bnapi = bp->bnapi[i];
8906 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
8908 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8909 bnapi = bp->bnapi[cp_nr_rings];
8910 netif_napi_add(bp->dev, &bnapi->napi,
8914 bnapi = bp->bnapi[0];
8915 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
8919 static void bnxt_disable_napi(struct bnxt *bp)
8923 if (!bp->bnapi)
8926 for (i = 0; i < bp->cp_nr_rings; i++) {
8927 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8929 napi_disable(&bp->bnapi[i]->napi);
8930 if (bp->bnapi[i]->rx_ring)
8935 static void bnxt_enable_napi(struct bnxt *bp)
8939 for (i = 0; i < bp->cp_nr_rings; i++) {
8940 struct bnxt_napi *bnapi = bp->bnapi[i];
8956 void bnxt_tx_disable(struct bnxt *bp)
8961 if (bp->tx_ring) {
8962 for (i = 0; i < bp->tx_nr_rings; i++) {
8963 txr = &bp->tx_ring[i];
8970 netif_carrier_off(bp->dev);
8972 netif_tx_disable(bp->dev);
8975 void bnxt_tx_enable(struct bnxt *bp)
8980 for (i = 0; i < bp->tx_nr_rings; i++) {
8981 txr = &bp->tx_ring[i];
8986 netif_tx_wake_all_queues(bp->dev);
8987 if (bp->link_info.link_up)
8988 netif_carrier_on(bp->dev);
9015 static void bnxt_report_link(struct bnxt *bp)
9017 if (bp->link_info.link_up) {
9023 netif_carrier_on(bp->dev);
9024 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9026 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9029 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9033 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9035 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9037 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9041 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
9043 if (bp->flags & BNXT_FLAG_EEE_CAP)
9044 netdev_info(bp->dev, "EEE is %s\n",
9045 bp->eee.eee_active ? "active" :
9047 fec = bp->link_info.fec_cfg;
9049 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9051 bnxt_report_fec(&bp->link_info));
9053 netif_carrier_off(bp->dev);
9054 netdev_err(bp->dev, "NIC Link is Down\n");
9068 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9072 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9073 struct bnxt_link_info *link_info = &bp->link_info;
9075 bp->flags &= ~BNXT_FLAG_EEE_CAP;
9076 if (bp->test_info)
9077 bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
9079 if (bp->hwrm_spec_code < 0x10201)
9082 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
9084 mutex_lock(&bp->hwrm_cmd_lock);
9085 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9090 struct ethtool_eee *eee = &bp->eee;
9093 bp->flags |= BNXT_FLAG_EEE_CAP;
9095 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9097 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9101 if (bp->test_info)
9102 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
9105 if (bp->test_info)
9106 bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
9109 if (BNXT_PF(bp))
9110 bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
9113 bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET;
9115 if (bp->hwrm_spec_code >= 0x10a01) {
9118 netdev_warn(bp->dev, "Ethernet link disabled\n");
9121 netdev_info(bp->dev, "Ethernet link enabled\n");
9134 bp->port_count = resp->port_cnt;
9137 mutex_unlock(&bp->hwrm_cmd_lock);
9148 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9151 struct bnxt_link_info *link_info = &bp->link_info;
9153 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9157 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
9159 mutex_lock(&bp->hwrm_cmd_lock);
9160 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9162 mutex_unlock(&bp->hwrm_cmd_lock);
9169 if (bp->hwrm_spec_code >= 0x10800)
9204 if (bp->flags & BNXT_FLAG_EEE_CAP) {
9205 struct ethtool_eee *eee = &bp->eee;
9241 if (bp->hwrm_spec_code >= 0x10504) {
9252 bnxt_report_link(bp);
9257 mutex_unlock(&bp->hwrm_cmd_lock);
9259 if (!BNXT_PHY_CFG_ABLE(bp))
9276 bnxt_hwrm_set_link_setting(bp, true, false);
9280 static void bnxt_get_port_module_status(struct bnxt *bp)
9282 struct bnxt_link_info *link_info = &bp->link_info;
9286 if (bnxt_update_link(bp, true))
9294 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9295 bp->pf.port_id);
9296 if (bp->hwrm_spec_code >= 0x10201) {
9297 netdev_warn(bp->dev, "Module part number %s\n",
9301 netdev_warn(bp->dev, "TX is disabled\n");
9303 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9308 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9310 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9311 if (bp->hwrm_spec_code >= 0x10201)
9314 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9316 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9321 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9323 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9327 if (bp->hwrm_spec_code >= 0x10201) {
9335 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9337 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9339 if (bp->link_info.advertising) {
9341 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9343 if (bp->link_info.advertising_pam4) {
9347 cpu_to_le16(bp->link_info.advertising_pam4);
9353 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9354 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9357 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9365 int bnxt_hwrm_set_pause(struct bnxt *bp)
9370 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9371 bnxt_hwrm_set_pause_common(bp, &req);
9373 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9374 bp->link_info.force_link_chng)
9375 bnxt_hwrm_set_link_common(bp, &req);
9377 mutex_lock(&bp->hwrm_cmd_lock);
9378 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9379 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9384 bp->link_info.pause =
9385 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9386 bp->link_info.auto_pause_setting = 0;
9387 if (!bp->link_info.force_link_chng)
9388 bnxt_report_link(bp);
9390 bp->link_info.force_link_chng = false;
9391 mutex_unlock(&bp->hwrm_cmd_lock);
9395 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9398 struct ethtool_eee *eee = &bp->eee;
9418 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9422 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9424 bnxt_hwrm_set_pause_common(bp, &req);
9426 bnxt_hwrm_set_link_common(bp, &req);
9429 bnxt_hwrm_set_eee(bp, &req);
9430 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9433 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9437 if (!BNXT_SINGLE_PF(bp))
9440 if (pci_num_vf(bp->pdev))
9443 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9445 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9448 static int bnxt_fw_init_one(struct bnxt *bp);
9450 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9452 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
9454 bool fw_reset = !bp->irq_tbl;
9459 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9462 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
9465 mutex_lock(&bp->hwrm_cmd_lock);
9466 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9469 mutex_unlock(&bp->hwrm_cmd_lock);
9481 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9482 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9483 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9488 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9489 bnxt_ulp_stop(bp);
9490 bnxt_free_ctx_mem(bp);
9491 kfree(bp->ctx);
9492 bp->ctx = NULL;
9493 bnxt_dcb_free(bp);
9494 rc = bnxt_fw_init_one(bp);
9496 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9499 bnxt_clear_int_mode(bp);
9500 rc = bnxt_init_int_mode(bp);
9502 netdev_err(bp->dev, "init int mode failed\n");
9505 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9507 if (BNXT_NEW_RM(bp)) {
9508 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9510 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9519 bp->tx_nr_rings = 0;
9520 bp->rx_nr_rings = 0;
9527 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9529 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9531 struct bnxt_pf_info *pf = &bp->pf;
9534 bp->num_leds = 0;
9535 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9538 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
9540 mutex_lock(&bp->hwrm_cmd_lock);
9541 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9543 mutex_unlock(&bp->hwrm_cmd_lock);
9549 bp->num_leds = resp->num_leds;
9550 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9551 bp->num_leds);
9552 for (i = 0; i < bp->num_leds; i++) {
9553 struct bnxt_led_info *led = &bp->leds[i];
9558 bp->num_leds = 0;
9563 mutex_unlock(&bp->hwrm_cmd_lock);
9567 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9570 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
9573 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
9574 req.port_id = cpu_to_le16(bp->pf.port_id);
9577 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
9578 mutex_lock(&bp->hwrm_cmd_lock);
9579 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9581 bp->wol_filter_id = resp->wol_filter_id;
9582 mutex_unlock(&bp->hwrm_cmd_lock);
9586 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9590 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
9591 req.port_id = cpu_to_le16(bp->pf.port_id);
9593 req.wol_filter_id = bp->wol_filter_id;
9594 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9597 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9600 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9604 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
9605 req.port_id = cpu_to_le16(bp->pf.port_id);
9607 mutex_lock(&bp->hwrm_cmd_lock);
9608 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9614 bp->wol = 1;
9615 bp->wol_filter_id = resp->wol_filter_id;
9619 mutex_unlock(&bp->hwrm_cmd_lock);
9623 static void bnxt_get_wol_settings(struct bnxt *bp)
9627 bp->wol = 0;
9628 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9632 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9642 struct bnxt *bp = dev_get_drvdata(dev);
9646 resp = bp->hwrm_cmd_resp_addr;
9647 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9648 mutex_lock(&bp->hwrm_cmd_lock);
9649 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9652 mutex_unlock(&bp->hwrm_cmd_lock);
9665 static void bnxt_hwmon_close(struct bnxt *bp)
9667 if (bp->hwmon_dev) {
9668 hwmon_device_unregister(bp->hwmon_dev);
9669 bp->hwmon_dev = NULL;
9673 static void bnxt_hwmon_open(struct bnxt *bp)
9676 struct pci_dev *pdev = bp->pdev;
9679 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9680 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9682 bnxt_hwmon_close(bp);
9686 if (bp->hwmon_dev)
9689 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
9690 DRV_MODULE_NAME, bp,
9692 if (IS_ERR(bp->hwmon_dev)) {
9693 bp->hwmon_dev = NULL;
9698 static void bnxt_hwmon_close(struct bnxt *bp)
9702 static void bnxt_hwmon_open(struct bnxt *bp)
9707 static bool bnxt_eee_config_ok(struct bnxt *bp)
9709 struct ethtool_eee *eee = &bp->eee;
9710 struct bnxt_link_info *link_info = &bp->link_info;
9712 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
9731 static int bnxt_update_phy_setting(struct bnxt *bp)
9737 struct bnxt_link_info *link_info = &bp->link_info;
9739 rc = bnxt_update_link(bp, true);
9741 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
9745 if (!BNXT_SINGLE_PF(bp))
9777 if (!bp->link_info.link_up)
9780 if (!bnxt_eee_config_ok(bp))
9784 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
9786 rc = bnxt_hwrm_set_pause(bp);
9788 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
9801 static void bnxt_preset_reg_win(struct bnxt *bp)
9803 if (BNXT_PF(bp)) {
9806 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
9810 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
9812 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9816 bnxt_preset_reg_win(bp);
9817 netif_carrier_off(bp->dev);
9820 rc = bnxt_init_dflt_ring_mode(bp);
9822 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
9826 rc = bnxt_reserve_rings(bp, irq_re_init);
9829 if ((bp->flags & BNXT_FLAG_RFS) &&
9830 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
9832 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
9833 bp->flags &= ~BNXT_FLAG_RFS;
9836 rc = bnxt_alloc_mem(bp, irq_re_init);
9838 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9843 bnxt_init_napi(bp);
9844 rc = bnxt_request_irq(bp);
9846 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
9851 rc = bnxt_init_nic(bp, irq_re_init);
9853 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9857 bnxt_enable_napi(bp);
9858 bnxt_debug_dev_init(bp);
9861 mutex_lock(&bp->link_lock);
9862 rc = bnxt_update_phy_setting(bp);
9863 mutex_unlock(&bp->link_lock);
9865 netdev_warn(bp->dev, "failed to update phy settings\n");
9866 if (BNXT_SINGLE_PF(bp)) {
9867 bp->link_info.phy_retry = true;
9868 bp->link_info.phy_retry_expires =
9875 udp_tunnel_nic_reset_ntf(bp->dev);
9877 set_bit(BNXT_STATE_OPEN, &bp->state);
9878 bnxt_enable_int(bp);
9880 bnxt_tx_enable(bp);
9881 mod_timer(&bp->timer, jiffies + bp->current_interval);
9883 bnxt_get_port_module_status(bp);
9886 if (BNXT_PF(bp))
9887 bnxt_vf_reps_open(bp);
9891 bnxt_del_napi(bp);
9894 bnxt_free_skbs(bp);
9895 bnxt_free_irq(bp);
9896 bnxt_free_mem(bp, true);
9901 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9905 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
9908 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
9910 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
9911 dev_close(bp->dev);
9920 int bnxt_half_open_nic(struct bnxt *bp)
9924 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
9925 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
9930 rc = bnxt_alloc_mem(bp, false);
9932 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9935 rc = bnxt_init_nic(bp, false);
9937 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9943 bnxt_free_skbs(bp);
9944 bnxt_free_mem(bp, false);
9945 dev_close(bp->dev);
9952 void bnxt_half_close_nic(struct bnxt *bp)
9954 bnxt_hwrm_resource_free(bp, false, false);
9955 bnxt_free_skbs(bp);
9956 bnxt_free_mem(bp, false);
9959 static void bnxt_reenable_sriov(struct bnxt *bp)
9961 if (BNXT_PF(bp)) {
9962 struct bnxt_pf_info *pf = &bp->pf;
9966 bnxt_cfg_hw_sriov(bp, &n, true);
9972 struct bnxt *bp = netdev_priv(dev);
9975 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
9976 netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
9980 rc = bnxt_hwrm_if_change(bp, true);
9983 rc = __bnxt_open_nic(bp, true, true);
9985 bnxt_hwrm_if_change(bp, false);
9987 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
9988 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
9989 bnxt_ulp_start(bp, 0);
9990 bnxt_reenable_sriov(bp);
9993 bnxt_hwmon_open(bp);
9999 static bool bnxt_drv_busy(struct bnxt *bp)
10001 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10002 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10005 static void bnxt_get_ring_stats(struct bnxt *bp,
10008 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10012 if (BNXT_PF(bp))
10013 bnxt_vf_reps_close(bp);
10016 bnxt_tx_disable(bp);
10018 clear_bit(BNXT_STATE_OPEN, &bp->state);
10020 while (bnxt_drv_busy(bp))
10024 bnxt_shutdown_nic(bp, irq_re_init);
10028 bnxt_debug_dev_exit(bp);
10029 bnxt_disable_napi(bp);
10030 del_timer_sync(&bp->timer);
10031 bnxt_free_skbs(bp);
10034 if (bp->bnapi && irq_re_init)
10035 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10037 bnxt_free_irq(bp);
10038 bnxt_del_napi(bp);
10040 bnxt_free_mem(bp, irq_re_init);
10043 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10047 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10055 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10056 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10060 if (bp->sriov_cfg) {
10061 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10062 !bp->sriov_cfg,
10065 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10068 __bnxt_close_nic(bp, irq_re_init, link_re_init);
10074 struct bnxt *bp = netdev_priv(dev);
10076 bnxt_hwmon_close(bp);
10077 bnxt_close_nic(bp, true, true);
10078 bnxt_hwrm_shutdown_link(bp);
10079 bnxt_hwrm_if_change(bp, false);
10083 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10086 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
10090 if (bp->hwrm_spec_code < 0x10a00)
10093 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
10094 req.port_id = cpu_to_le16(bp->pf.port_id);
10104 mutex_lock(&bp->hwrm_cmd_lock);
10105 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10108 mutex_unlock(&bp->hwrm_cmd_lock);
10112 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10117 if (bp->hwrm_spec_code < 0x10a00)
10120 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
10121 req.port_id = cpu_to_le16(bp->pf.port_id);
10132 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10139 struct bnxt *bp = netdev_priv(dev);
10144 mdio->phy_id = bp->link_info.phy_addr;
10153 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10163 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10173 static void bnxt_get_ring_stats(struct bnxt *bp,
10178 for (i = 0; i < bp->cp_nr_rings; i++) {
10179 struct bnxt_napi *bnapi = bp->bnapi[i];
10208 static void bnxt_add_prev_stats(struct bnxt *bp,
10211 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10225 struct bnxt *bp = netdev_priv(dev);
10227 set_bit(BNXT_STATE_READ_STATS, &bp->state);
10232 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10233 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10234 *stats = bp->net_stats_prev;
10238 bnxt_get_ring_stats(bp, stats);
10239 bnxt_add_prev_stats(bp, stats);
10241 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10242 u64 *rx = bp->port_stats.sw_stats;
10243 u64 *tx = bp->port_stats.sw_stats +
10263 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10266 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10268 struct net_device *dev = bp->dev;
10269 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10300 static bool bnxt_uc_list_updated(struct bnxt *bp)
10302 struct net_device *dev = bp->dev;
10303 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10321 struct bnxt *bp = netdev_priv(dev);
10327 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10330 vnic = &bp->vnic_info[0];
10337 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
10340 uc_update = bnxt_uc_list_updated(bp);
10348 mc_update = bnxt_mc_list_updated(bp, &mask);
10354 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10355 bnxt_queue_sp_work(bp);
10359 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10361 struct net_device *dev = bp->dev;
10362 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10368 uc_update = bnxt_uc_list_updated(bp);
10374 mutex_lock(&bp->hwrm_cmd_lock);
10378 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
10383 rc = _hwrm_send_message(bp, &req, sizeof(req),
10386 mutex_unlock(&bp->hwrm_cmd_lock);
10403 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10405 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10413 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10415 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10419 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10422 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10428 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10431 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10432 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10440 if (!netif_running(bp->dev))
10448 static bool bnxt_rfs_supported(struct bnxt *bp)
10450 if (bp->flags & BNXT_FLAG_CHIP_P5) {
10451 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10456 if (BNXT_FW_MAJ(bp) == 212)
10458 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10460 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10466 static bool bnxt_rfs_capable(struct bnxt *bp)
10471 if (bp->flags & BNXT_FLAG_CHIP_P5)
10472 return bnxt_rfs_supported(bp);
10473 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
10476 vnics = 1 + bp->rx_nr_rings;
10477 max_vnics = bnxt_get_max_func_vnics(bp);
10478 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10481 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10484 if (bp->rx_nr_rings > 1)
10485 netdev_warn(bp->dev,
10491 if (!BNXT_NEW_RM(bp))
10494 if (vnics == bp->hw_resc.resv_vnics)
10497 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
10498 if (vnics <= bp->hw_resc.resv_vnics)
10501 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
10502 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
10512 struct bnxt *bp = netdev_priv(dev);
10515 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
10518 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10538 if (BNXT_VF(bp) && bp->vf.vlan)
10546 struct bnxt *bp = netdev_priv(dev);
10547 u32 flags = bp->flags;
10559 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10568 changes = flags ^ bp->flags;
10571 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
10573 (bp->flags & BNXT_FLAG_CHIP_P5))
10580 if (flags != bp->flags) {
10581 u32 old_flags = bp->flags;
10583 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10584 bp->flags = flags;
10586 bnxt_set_ring_params(bp);
10591 bnxt_close_nic(bp, false, false);
10592 bp->flags = flags;
10594 bnxt_set_ring_params(bp);
10596 return bnxt_open_nic(bp, false, false);
10599 bp->flags = flags;
10600 rc = bnxt_set_tpa(bp,
10604 bp->flags = old_flags;
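
bnxt_set_features above computes a candidate flags word, XORs it against the current one, and only bounces the interface when the change requires re-deriving ring parameters; if applying the change fails, the previous flags are restored. A minimal sketch of that diff / apply / roll-back shape, with invented names and a stub in place of the real reconfiguration.

#include <stdbool.h>
#include <stdint.h>

/* Stub standing in for close + reconfigure + reopen; returns false on failure. */
static bool apply_flags(uint32_t flags)
{
	(void)flags;
	return true;
}

static int set_flags(uint32_t *cur_flags, uint32_t new_flags, bool netdev_open)
{
	uint32_t old = *cur_flags;

	if (new_flags == old)
		return 0;		/* nothing changed */

	*cur_flags = new_flags;
	if (!netdev_open)
		return 0;		/* picked up on the next open */

	if (!apply_flags(new_flags)) {
		*cur_flags = old;	/* roll back on failure */
		return -1;
	}
	return 0;
}
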
10610 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
10613 struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
10619 dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
10623 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
10627 mutex_lock(&bp->hwrm_cmd_lock);
10628 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10637 mutex_unlock(&bp->hwrm_cmd_lock);
10638 dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
10642 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
10645 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
10649 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
10652 mutex_lock(&bp->hwrm_cmd_lock);
10653 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10658 mutex_unlock(&bp->hwrm_cmd_lock);
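
Both debug helpers above use the same firmware-message discipline: build a request header, take the command mutex, send, and consume the result from the one shared response buffer before dropping the mutex. A userspace model of that shared-response-under-a-lock pattern; the transport is a stub and all names are illustrative.

#include <pthread.h>
#include <stdint.h>
#include <string.h>

struct resp { uint32_t words[4]; };		/* illustrative response layout */

static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;
static struct resp shared_resp;			/* single response buffer, like the DMA buffer */

/* Stub transport: a real driver would DMA the request and wait for completion. */
static int send_message(const void *req, size_t len)
{
	(void)req; (void)len;
	shared_resp.words[0] = 0x1234;		/* pretend firmware filled it in */
	return 0;
}

static int read_reg_words(uint32_t reg, uint32_t *out, int n)
{
	uint32_t req[2] = { reg, (uint32_t)n };
	int rc;

	if (n > 4)
		n = 4;
	pthread_mutex_lock(&cmd_lock);		/* response buffer is shared */
	rc = send_message(req, sizeof(req));
	if (!rc)
		memcpy(out, shared_resp.words, n * sizeof(*out));
	pthread_mutex_unlock(&cmd_lock);	/* copy out before unlocking */
	return rc;
}
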
10670 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
10683 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
10694 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
10698 static void bnxt_dbg_dump_states(struct bnxt *bp)
10703 for (i = 0; i < bp->cp_nr_rings; i++) {
10704 bnapi = bp->bnapi[i];
10705 if (netif_msg_drv(bp)) {
10713 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
10715 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
10723 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1);
10725 req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
10726 return hwrm_send_message_silent(bp, &req, sizeof(req),
10730 static void bnxt_reset_task(struct bnxt *bp, bool silent)
10733 bnxt_dbg_dump_states(bp);
10734 if (netif_running(bp->dev)) {
10738 bnxt_close_nic(bp, false, false);
10739 bnxt_open_nic(bp, false, false);
10741 bnxt_ulp_stop(bp);
10742 bnxt_close_nic(bp, true, false);
10743 rc = bnxt_open_nic(bp, true, false);
10744 bnxt_ulp_start(bp, rc);
10751 struct bnxt *bp = netdev_priv(dev);
10753 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
10754 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
10755 bnxt_queue_sp_work(bp);
10758 static void bnxt_fw_health_check(struct bnxt *bp)
10760 struct bnxt_fw_health *fw_health = bp->fw_health;
10763 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10773 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10779 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10787 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
10788 bnxt_queue_sp_work(bp);
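
The health check above reads the firmware heartbeat and reset-count registers and escalates to the firmware-exception path when they stop behaving as expected. The sketch below models one simple policy, a counter of consecutive unchanged heartbeats; the threshold and names are illustrative, not the driver's exact rules.

#include <stdbool.h>
#include <stdint.h>

#define MAX_MISSED_BEATS 3	/* illustrative tolerance */

struct health_state {
	uint32_t last_heartbeat;
	int missed;
};

/* Call periodically (e.g. from a timer); returns true when recovery should start. */
static bool heartbeat_stalled(struct health_state *hs, uint32_t heartbeat_now)
{
	if (heartbeat_now != hs->last_heartbeat) {
		hs->last_heartbeat = heartbeat_now;
		hs->missed = 0;
		return false;		/* firmware is still ticking */
	}
	return ++hs->missed >= MAX_MISSED_BEATS;
}
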
10793 struct bnxt *bp = from_timer(bp, t, timer);
10794 struct net_device *dev = bp->dev;
10796 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
10799 if (atomic_read(&bp->intr_sem) != 0)
10802 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
10803 bnxt_fw_health_check(bp);
10805 if (bp->link_info.link_up && bp->stats_coal_ticks) {
10806 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
10807 bnxt_queue_sp_work(bp);
10810 if (bnxt_tc_flower_enabled(bp)) {
10811 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
10812 bnxt_queue_sp_work(bp);
10816 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
10817 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
10818 bnxt_queue_sp_work(bp);
10822 if (bp->link_info.phy_retry) {
10823 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
10824 bp->link_info.phy_retry = false;
10825 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
10827 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
10828 bnxt_queue_sp_work(bp);
10832 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
10834 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
10835 bnxt_queue_sp_work(bp);
10838 mod_timer(&bp->timer, jiffies + bp->current_interval);
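
bnxt_timer above is a periodic service timer: it returns early when the device is not running, raises event bits and queues the slow-path worker for anything that needs process context, and re-arms itself for the next interval. A compact userspace model of that flag-work / defer / re-arm loop; the bit names and helpers are illustrative stand-ins.

#include <stdbool.h>

enum sp_event { EV_STATS = 0, EV_FLOW_STATS, EV_NTP_FLTR };

/* Hypothetical helpers standing in for set_bit()/queue_work()/mod_timer(). */
static void set_event(unsigned long *events, int bit) { *events |= 1UL << bit; }
static void queue_sp_work(void) { }
static void rearm_timer(unsigned long interval) { (void)interval; }

static void service_timer(unsigned long *events, bool dev_open,
			  bool stats_enabled, bool flows_enabled,
			  unsigned long interval)
{
	if (!dev_open)
		return;			/* closed: let the timer lapse */

	if (stats_enabled) {
		set_event(events, EV_STATS);
		queue_sp_work();	/* heavy work runs in process context */
	}
	if (flows_enabled) {
		set_event(events, EV_FLOW_STATS);
		queue_sp_work();
	}
	rearm_timer(interval);		/* analogue of mod_timer(..., jiffies + interval) */
}
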
10841 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
10848 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10852 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
10854 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10859 static void bnxt_reset(struct bnxt *bp, bool silent)
10861 bnxt_rtnl_lock_sp(bp);
10862 if (test_bit(BNXT_STATE_OPEN, &bp->state))
10863 bnxt_reset_task(bp, silent);
10864 bnxt_rtnl_unlock_sp(bp);
10868 static void bnxt_rx_ring_reset(struct bnxt *bp)
10872 bnxt_rtnl_lock_sp(bp);
10873 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10874 bnxt_rtnl_unlock_sp(bp);
10878 if (bp->flags & BNXT_FLAG_TPA)
10879 bnxt_set_tpa(bp, false);
10880 for (i = 0; i < bp->rx_nr_rings; i++) {
10881 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
10888 rc = bnxt_hwrm_rx_ring_reset(bp, i);
10891 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
10893 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
10895 bnxt_reset_task(bp, true);
10898 bnxt_free_one_rx_ring_skbs(bp, i);
10904 bnxt_alloc_one_rx_ring(bp, i);
10907 if (bp->flags & BNXT_FLAG_AGG_RINGS)
10908 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
10909 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
10911 if (bp->flags & BNXT_FLAG_TPA)
10912 bnxt_set_tpa(bp, true);
10913 bnxt_rtnl_unlock_sp(bp);
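
The RX ring reset above is the surgical recovery path: aggregation (TPA) is quiesced, each ring is reset through a firmware command, and a failure or lack of firmware support falls back to a full device reset; otherwise the ring's buffers are freed, re-posted, and its doorbells rewritten before TPA is restored. A hedged sketch of the loop-with-fallback shape, with stub helpers in place of the real steps.

#include <stdbool.h>

/* Stubs standing in for the firmware command and the free/refill/doorbell steps. */
static int fw_reset_ring(int ring) { (void)ring; return 0; }
static void refill_ring(int ring) { (void)ring; }
static void ring_doorbells(int ring) { (void)ring; }
static void full_reset(void) { }

static void reset_rx_rings(int nr_rings, bool *tpa_enabled)
{
	bool had_tpa = *tpa_enabled;
	int i;

	*tpa_enabled = false;			/* quiesce aggregation first */
	for (i = 0; i < nr_rings; i++) {
		if (fw_reset_ring(i)) {
			/* unsupported or failed: abandon the per-ring path */
			full_reset();
			return;
		}
		refill_ring(i);			/* free old buffers, post fresh ones */
		ring_doorbells(i);		/* publish the new producer index */
	}
	*tpa_enabled = had_tpa;			/* restore aggregation */
}
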
10916 static void bnxt_fw_reset_close(struct bnxt *bp)
10918 bnxt_ulp_stop(bp);
10922 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
10923 pci_disable_device(bp->pdev);
10924 __bnxt_close_nic(bp, true, false);
10925 bnxt_clear_int_mode(bp);
10926 bnxt_hwrm_func_drv_unrgtr(bp);
10927 if (pci_is_enabled(bp->pdev))
10928 pci_disable_device(bp->pdev);
10929 bnxt_free_ctx_mem(bp);
10930 kfree(bp->ctx);
10931 bp->ctx = NULL;
10934 static bool is_bnxt_fw_ok(struct bnxt *bp)
10936 struct bnxt_fw_health *fw_health = bp->fw_health;
10940 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10944 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10955 static void bnxt_force_fw_reset(struct bnxt *bp)
10957 struct bnxt_fw_health *fw_health = bp->fw_health;
10960 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
10961 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10964 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10965 bnxt_fw_reset_close(bp);
10970 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
10972 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
10974 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10977 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
10978 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
10979 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10982 void bnxt_fw_exception(struct bnxt *bp)
10984 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
10985 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
10986 bnxt_rtnl_lock_sp(bp);
10987 bnxt_force_fw_reset(bp);
10988 bnxt_rtnl_unlock_sp(bp);
10994 static int bnxt_get_registered_vfs(struct bnxt *bp)
10999 if (!BNXT_PF(bp))
11002 rc = bnxt_hwrm_func_qcfg(bp);
11004 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11007 if (bp->pf.registered_vfs)
11008 return bp->pf.registered_vfs;
11009 if (bp->sriov_cfg)
11015 void bnxt_fw_reset(struct bnxt *bp)
11017 bnxt_rtnl_lock_sp(bp);
11018 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11019 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11022 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11023 if (bp->pf.active_vfs &&
11024 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11025 n = bnxt_get_registered_vfs(bp);
11027 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11029 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11030 dev_close(bp->dev);
11035 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11036 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11037 bp->fw_reset_state =
11039 bnxt_queue_fw_reset_work(bp, HZ / 10);
11042 bnxt_fw_reset_close(bp);
11043 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11044 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11047 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11048 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11050 bnxt_queue_fw_reset_work(bp, tmo);
11053 bnxt_rtnl_unlock_sp(bp);
11056 static void bnxt_chk_missed_irq(struct bnxt *bp)
11060 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11063 for (i = 0; i < bp->cp_nr_rings; i++) {
11064 struct bnxt_napi *bnapi = bp->bnapi[i];
11078 !bnxt_has_work(bp, cpr2))
11086 bnxt_dbg_hwrm_ring_info_get(bp,
11096 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11098 struct bnxt_link_info *link_info = &bp->link_info;
11102 if (bp->hwrm_spec_code >= 0x10201) {
11130 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11132 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11134 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11135 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11139 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11140 bnxt_cfg_rx_mode(bp);
11142 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11143 bnxt_cfg_ntp_filters(bp);
11144 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11145 bnxt_hwrm_exec_fwd_req(bp);
11146 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
11147 netdev_info(bp->dev, "Receive PF driver unload event!\n");
11148 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11149 bnxt_hwrm_port_qstats(bp, 0);
11150 bnxt_hwrm_port_qstats_ext(bp, 0);
11151 bnxt_accumulate_all_stats(bp);
11154 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
11157 mutex_lock(&bp->link_lock);
11159 &bp->sp_event))
11160 bnxt_hwrm_phy_qcaps(bp);
11162 rc = bnxt_update_link(bp, true);
11164 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11168 &bp->sp_event))
11169 bnxt_init_ethtool_link_settings(bp);
11170 mutex_unlock(&bp->link_lock);
11172 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11175 mutex_lock(&bp->link_lock);
11176 rc = bnxt_update_phy_setting(bp);
11177 mutex_unlock(&bp->link_lock);
11179 netdev_warn(bp->dev, "update phy settings retry failed\n");
11181 bp->link_info.phy_retry = false;
11182 netdev_info(bp->dev, "update phy settings retry succeeded\n");
11185 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
11186 mutex_lock(&bp->link_lock);
11187 bnxt_get_port_module_status(bp);
11188 mutex_unlock(&bp->link_lock);
11191 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11192 bnxt_tc_flow_stats_work(bp);
11194 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11195 bnxt_chk_missed_irq(bp);
11200 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11201 bnxt_reset(bp, false);
11203 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11204 bnxt_reset(bp, true);
11206 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11207 bnxt_rx_ring_reset(bp);
11209 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11210 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11212 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11213 if (!is_bnxt_fw_ok(bp))
11214 bnxt_devlink_health_report(bp,
11219 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
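
bnxt_sp_task above is the slow-path dispatcher: it marks itself in progress, exits early if the device is not open, then consumes each pending event with test_and_clear_bit so producers (timer, interrupts, ndo callbacks) can keep raising bits concurrently. A minimal userspace model of the atomic bit dispatch using C11 atomics; the event names are illustrative only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { EV_RX_MASK = 0, EV_LINK_CHNG = 1, EV_RESET = 2 };

/* Clear one event bit atomically and report whether it was set, the moral
 * equivalent of test_and_clear_bit() in the listing. */
static bool test_and_clear(atomic_ulong *events, int bit)
{
	unsigned long mask = 1UL << bit;

	return atomic_fetch_and(events, ~mask) & mask;
}

static void sp_task(atomic_ulong *events, bool dev_open)
{
	if (!dev_open)
		return;			/* mirrors the STATE_OPEN early exit */

	if (test_and_clear(events, EV_RX_MASK))
		puts("reprogram rx mode");
	if (test_and_clear(events, EV_LINK_CHNG))
		puts("refresh link state");
	if (test_and_clear(events, EV_RESET))
		puts("run reset task");
}

int main(void)
{
	atomic_ulong events = (1UL << EV_RX_MASK) | (1UL << EV_RESET);

	sp_task(&events, true);
	return 0;
}
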
11223 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11234 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11246 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
11249 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11253 if (BNXT_NEW_RM(bp)) {
11254 cp += bnxt_get_ulp_msix_num(bp);
11255 stats += bnxt_get_ulp_stat_ctxs(bp);
11257 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
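
bnxt_check_rings above expands a requested TX/RX/TC configuration into concrete hardware needs before committing: TX rings scale with the number of traffic classes plus any XDP rings, RX doubles when aggregation rings are in use, and completion rings are either shared (the larger of TX and RX) or their sum. A worked model of that arithmetic; the names are illustrative and the exact formula is my reading of the pattern, not a quote of the function.

/* Illustrative-only ring math: expand a request before asking firmware
 * whether the resources exist. */
struct ring_need { int tx, rx, cp; };

static struct ring_need rings_needed(int tx, int rx, int tcs, int tx_xdp,
				     int shared, int agg_rings)
{
	struct ring_need n;
	int tx_sets = tcs ? tcs : 1;

	n.tx = tx * tx_sets + tx_xdp;			/* one TX set per traffic class */
	n.cp = shared ? (tx > rx ? tx : rx) : tx + rx;
	n.rx = agg_rings ? rx * 2 : rx;			/* one aggregation ring per RX ring */
	return n;
}
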
11261 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11263 if (bp->bar2) {
11264 pci_iounmap(pdev, bp->bar2);
11265 bp->bar2 = NULL;
11268 if (bp->bar1) {
11269 pci_iounmap(pdev, bp->bar1);
11270 bp->bar1 = NULL;
11273 if (bp->bar0) {
11274 pci_iounmap(pdev, bp->bar0);
11275 bp->bar0 = NULL;
11279 static void bnxt_cleanup_pci(struct bnxt *bp)
11281 bnxt_unmap_bars(bp, bp->pdev);
11282 pci_release_regions(bp->pdev);
11283 if (pci_is_enabled(bp->pdev))
11284 pci_disable_device(bp->pdev);
11287 static void bnxt_init_dflt_coal(struct bnxt *bp)
11294 coal = &bp->rx_coal;
11303 coal = &bp->tx_coal;
11310 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
11313 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
11319 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
11323 netdev_err(bp->dev, "OP-TEE not supported\n");
11328 static int bnxt_fw_init_one_p1(struct bnxt *bp)
11332 bp->fw_cap = 0;
11333 rc = bnxt_hwrm_ver_get(bp);
11339 bnxt_try_map_fw_health_reg(bp);
11341 if (bp->fw_health && bp->fw_health->status_reliable) {
11342 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11344 netdev_err(bp->dev,
11348 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
11349 rc = bnxt_fw_reset_via_optee(bp);
11351 rc = bnxt_hwrm_ver_get(bp);
11358 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
11359 rc = bnxt_alloc_kong_hwrm_resources(bp);
11361 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
11364 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
11365 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
11366 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
11370 bnxt_nvm_cfg_ver_get(bp);
11372 rc = bnxt_hwrm_func_reset(bp);
11376 bnxt_hwrm_fw_set_time(bp);
11380 static int bnxt_fw_init_one_p2(struct bnxt *bp)
11385 rc = bnxt_hwrm_func_qcaps(bp);
11387 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11392 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11394 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11397 if (bnxt_alloc_fw_health(bp)) {
11398 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11400 rc = bnxt_hwrm_error_recovery_qcfg(bp);
11402 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11406 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
11410 bnxt_hwrm_func_qcfg(bp);
11411 bnxt_hwrm_vnic_qcaps(bp);
11412 bnxt_hwrm_port_led_qcaps(bp);
11413 bnxt_ethtool_init(bp);
11414 bnxt_dcb_init(bp);
11418 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11420 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11421 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11425 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
11426 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11427 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11432 static void bnxt_set_dflt_rfs(struct bnxt *bp)
11434 struct net_device *dev = bp->dev;
11438 bp->flags &= ~BNXT_FLAG_RFS;
11439 if (bnxt_rfs_supported(bp)) {
11441 if (bnxt_rfs_capable(bp)) {
11442 bp->flags |= BNXT_FLAG_RFS;
11448 static void bnxt_fw_init_one_p3(struct bnxt *bp)
11450 struct pci_dev *pdev = bp->pdev;
11452 bnxt_set_dflt_rss_hash_type(bp);
11453 bnxt_set_dflt_rfs(bp);
11455 bnxt_get_wol_settings(bp);
11456 if (bp->flags & BNXT_FLAG_WOL_CAP)
11457 device_set_wakeup_enable(&pdev->dev, bp->wol);
11461 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11462 bnxt_hwrm_coal_params_qcaps(bp);
11465 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
11467 static int bnxt_fw_init_one(struct bnxt *bp)
11471 rc = bnxt_fw_init_one_p1(bp);
11473 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11476 rc = bnxt_fw_init_one_p2(bp);
11478 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
11481 rc = bnxt_probe_phy(bp, false);
11484 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
11491 bnxt_dl_fw_reporters_destroy(bp, false);
11492 bnxt_dl_fw_reporters_create(bp);
11493 bnxt_fw_init_one_p3(bp);
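
bnxt_fw_init_one above stages bring-up: phase 1 establishes the firmware channel and basic capabilities, phase 2 queries the richer capability set and registers the driver, phase 3 applies defaults (RSS, RFS, WoL, coalescing), and any failing phase aborts the sequence. A tiny sketch of that staged-init-with-early-out structure, with stub phases.

#include <stdio.h>

/* Stub phases; the real phases talk to firmware. */
static int init_p1(void) { return 0; }	/* channel, version, base capabilities */
static int init_p2(void) { return 0; }	/* extended caps, driver registration */
static void init_p3(void) { }		/* defaults applied last */

static int fw_init_one(void)
{
	int rc;

	rc = init_p1();
	if (rc) {
		fprintf(stderr, "init phase 1 failed\n");
		return rc;
	}
	rc = init_p2();
	if (rc) {
		fprintf(stderr, "init phase 2 failed\n");
		return rc;
	}
	init_p3();
	return 0;
}
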
11497 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
11499 struct bnxt_fw_health *fw_health = bp->fw_health;
11509 pci_write_config_dword(bp->pdev, reg_off, val);
11513 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
11517 writel(val, bp->bar0 + reg_off);
11520 writel(val, bp->bar1 + reg_off);
11524 pci_read_config_dword(bp->pdev, 0, &val);
11529 static void bnxt_reset_all(struct bnxt *bp)
11531 struct bnxt_fw_health *fw_health = bp->fw_health;
11534 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11535 bnxt_fw_reset_via_optee(bp);
11536 bp->fw_reset_timestamp = jiffies;
11542 bnxt_fw_reset_writel(bp, i);
11546 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
11547 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
11551 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11553 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
11555 bp->fw_reset_timestamp = jiffies;
11560 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
11563 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11564 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
11568 switch (bp->fw_reset_state) {
11570 int n = bnxt_get_registered_vfs(bp);
11574 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
11576 bp->fw_reset_timestamp));
11579 if (time_after(jiffies, bp->fw_reset_timestamp +
11580 (bp->fw_reset_max_dsecs * HZ / 10))) {
11581 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11582 bp->fw_reset_state = 0;
11583 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
11587 bnxt_queue_fw_reset_work(bp, HZ / 10);
11590 bp->fw_reset_timestamp = jiffies;
11592 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
11596 bnxt_fw_reset_close(bp);
11597 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11598 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11601 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11602 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11605 bnxt_queue_fw_reset_work(bp, tmo);
11611 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11613 !time_after(jiffies, bp->fw_reset_timestamp +
11614 (bp->fw_reset_max_dsecs * HZ / 10))) {
11615 bnxt_queue_fw_reset_work(bp, HZ / 5);
11619 if (!bp->fw_health->master) {
11620 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
11622 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11623 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11626 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11630 bnxt_reset_all(bp);
11631 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11632 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
11635 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11638 val = bnxt_fw_health_readl(bp,
11641 netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
11644 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11645 if (pci_enable_device(bp->pdev)) {
11646 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
11649 pci_set_master(bp->pdev);
11650 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
11653 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
11654 rc = __bnxt_hwrm_ver_get(bp, true);
11656 if (time_after(jiffies, bp->fw_reset_timestamp +
11657 (bp->fw_reset_max_dsecs * HZ / 10))) {
11658 netdev_err(bp->dev, "Firmware reset aborted\n");
11661 bnxt_queue_fw_reset_work(bp, HZ / 5);
11664 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
11665 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
11669 bnxt_queue_fw_reset_work(bp, HZ / 10);
11672 rc = bnxt_open(bp->dev);
11674 netdev_err(bp->dev, "bnxt_open_nic() failed\n");
11675 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11676 dev_close(bp->dev);
11679 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
11680 bp->fw_health->enabled) {
11681 bp->fw_health->last_fw_reset_cnt =
11682 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11684 bp->fw_reset_state = 0;
11687 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11688 bnxt_ulp_start(bp, rc);
11690 bnxt_reenable_sriov(bp);
11691 bnxt_dl_health_recovery_done(bp);
11692 bnxt_dl_health_status_update(bp, true);
11699 if (bp->fw_health->status_reliable ||
11700 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
11701 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11703 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
11706 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11707 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
11708 bnxt_dl_health_status_update(bp, false);
11709 bp->fw_reset_state = 0;
11711 dev_close(bp->dev);
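
The reset task above is a delayed-work state machine: each pass either completes its step and advances the state, or re-queues itself to poll again, with firmware-supplied deadlines given in deciseconds and converted to jiffies as dsecs * HZ / 10. A userspace sketch of one step function with a poll timeout; the states and helpers are invented and deliberately much simpler than the real sequence.

#include <stdbool.h>

enum fw_reset_state { RESET_FW, ENABLE_DEV, POLL_FW, OPENING, DONE, ABORTED };

struct fw_reset {
	enum fw_reset_state state;
	unsigned long deadline;		/* absolute ticks, e.g. now + max_dsecs * hz / 10 */
};

static bool fw_is_healthy(void) { return true; }	/* stub register read */
static int reopen_device(void) { return 0; }		/* stub open path */

/* One pass of the work item: returns the delay in ticks before the next
 * pass, or 0 when the machine has finished or aborted. */
static unsigned long fw_reset_step(struct fw_reset *fr, unsigned long now,
				   unsigned long hz)
{
	switch (fr->state) {
	case RESET_FW:
		fr->state = ENABLE_DEV;		/* reset issued; wait the minimum time */
		return hz / 10;
	case ENABLE_DEV:
		fr->state = POLL_FW;
		return hz / 10;
	case POLL_FW:
		if (!fw_is_healthy()) {
			if (now > fr->deadline) {
				fr->state = ABORTED;	/* give up after the maximum wait */
				return 0;
			}
			return hz / 5;			/* poll again shortly */
		}
		fr->state = OPENING;
		return hz / 10;
	case OPENING:
		fr->state = reopen_device() ? ABORTED : DONE;
		return 0;
	default:
		return 0;
	}
}
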
11718 struct bnxt *bp = netdev_priv(dev);
11751 bp->dev = dev;
11752 bp->pdev = pdev;
11754 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
11757 bp->bar0 = pci_ioremap_bar(pdev, 0);
11758 if (!bp->bar0) {
11764 bp->bar2 = pci_ioremap_bar(pdev, 4);
11765 if (!bp->bar2) {
11773 INIT_WORK(&bp->sp_task, bnxt_sp_task);
11774 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
11776 spin_lock_init(&bp->ntp_fltr_lock);
11778 spin_lock_init(&bp->db_lock);
11781 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
11782 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
11784 bnxt_init_dflt_coal(bp);
11786 timer_setup(&bp->timer, bnxt_timer, 0);
11787 bp->current_interval = BNXT_TIMER_INTERVAL;
11789 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
11790 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
11792 clear_bit(BNXT_STATE_OPEN, &bp->state);
11796 bnxt_unmap_bars(bp, pdev);
11810 struct bnxt *bp = netdev_priv(dev);
11819 rc = bnxt_approve_mac(bp, addr->sa_data, true);
11825 bnxt_close_nic(bp, false, false);
11826 rc = bnxt_open_nic(bp, false, false);
11835 struct bnxt *bp = netdev_priv(dev);
11838 bnxt_close_nic(bp, true, false);
11841 bnxt_set_ring_params(bp);
11844 return bnxt_open_nic(bp, true, false);
11851 struct bnxt *bp = netdev_priv(dev);
11855 if (tc > bp->max_tc) {
11857 tc, bp->max_tc);
11864 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11867 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
11868 sh, tc, bp->tx_nr_rings_xdp);
11873 if (netif_running(bp->dev))
11874 bnxt_close_nic(bp, true, false);
11877 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
11880 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11883 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
11884 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
11885 bp->tx_nr_rings + bp->rx_nr_rings;
11887 if (netif_running(bp->dev))
11888 return bnxt_open_nic(bp, true, false);
11896 struct bnxt *bp = cb_priv;
11898 if (!bnxt_tc_flower_enabled(bp) ||
11899 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
11904 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
11915 struct bnxt *bp = netdev_priv(dev);
11922 bp, bp, true);
11970 struct bnxt *bp = netdev_priv(dev);
11979 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12012 bp->hwrm_spec_code < 0x10601) {
12018 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12027 head = &bp->ntp_fltr_hash_tbl[idx];
12038 spin_lock_bh(&bp->ntp_fltr_lock);
12039 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12042 spin_unlock_bh(&bp->ntp_fltr_lock);
12052 bp->ntp_fltr_count++;
12053 spin_unlock_bh(&bp->ntp_fltr_lock);
12055 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12056 bnxt_queue_sp_work(bp);
12065 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12075 head = &bp->ntp_fltr_hash_tbl[i];
12080 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12083 bnxt_hwrm_cfa_ntuple_filter_free(bp,
12088 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12097 spin_lock_bh(&bp->ntp_fltr_lock);
12099 bp->ntp_fltr_count--;
12100 spin_unlock_bh(&bp->ntp_fltr_lock);
12102 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12111 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
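
The flow-steering pair above shows the aRFS filter lifecycle: bnxt_rx_flow_steer allocates a filter id from a bitmap under the ntp_fltr spinlock, links the filter into a hash bucket, and defers the firmware programming to the slow-path task, while bnxt_cfg_ntp_filters later ages out filters the stack no longer wants and returns their ids. A small userspace model of just the id allocate/free bookkeeping; sizes and names are illustrative.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_FLTR 128				/* illustrative table size */

static pthread_mutex_t fltr_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t fltr_bmap[MAX_FLTR / 64];
static int fltr_count;

/* Find a free slot, mark it used, and return its id (or -1 if full). */
static int fltr_alloc_id(void)
{
	int id = -1, i;

	pthread_mutex_lock(&fltr_lock);
	for (i = 0; i < MAX_FLTR; i++) {
		if (!(fltr_bmap[i / 64] & (1ULL << (i % 64)))) {
			fltr_bmap[i / 64] |= 1ULL << (i % 64);
			fltr_count++;
			id = i;
			break;
		}
	}
	pthread_mutex_unlock(&fltr_lock);
	return id;
}

static void fltr_free_id(int id)
{
	if (id < 0)
		return;
	pthread_mutex_lock(&fltr_lock);
	fltr_bmap[id / 64] &= ~(1ULL << (id % 64));
	fltr_count--;
	pthread_mutex_unlock(&fltr_lock);
}

int main(void)
{
	int id = fltr_alloc_id();

	printf("allocated filter id %d (in use: %d)\n", id, fltr_count);
	fltr_free_id(id);
	return 0;
}
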
12120 struct bnxt *bp = netdev_priv(netdev);
12128 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
12134 struct bnxt *bp = netdev_priv(netdev);
12142 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
12160 struct bnxt *bp = netdev_priv(dev);
12162 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12169 struct bnxt *bp = netdev_priv(dev);
12173 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12190 if (mode == bp->br_mode)
12193 rc = bnxt_hwrm_set_br_mode(bp, mode);
12195 bp->br_mode = mode;
12204 struct bnxt *bp = netdev_priv(dev);
12206 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12210 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
12213 ppid->id_len = sizeof(bp->dsn);
12214 memcpy(ppid->id, bp->dsn, ppid->id_len);
12221 struct bnxt *bp = netdev_priv(dev);
12223 return &bp->dl_port;
12264 struct bnxt *bp = netdev_priv(dev);
12266 if (BNXT_PF(bp))
12267 bnxt_sriov_disable(bp);
12269 if (BNXT_PF(bp))
12270 devlink_port_type_clear(&bp->dl_port);
12273 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12275 cancel_work_sync(&bp->sp_task);
12276 cancel_delayed_work_sync(&bp->fw_reset_task);
12277 bp->sp_event = 0;
12279 bnxt_dl_fw_reporters_destroy(bp, true);
12280 bnxt_dl_unregister(bp);
12281 bnxt_shutdown_tc(bp);
12283 bnxt_clear_int_mode(bp);
12284 bnxt_hwrm_func_drv_unrgtr(bp);
12285 bnxt_free_hwrm_resources(bp);
12286 bnxt_free_hwrm_short_cmd_req(bp);
12287 bnxt_ethtool_free(bp);
12288 bnxt_dcb_free(bp);
12289 kfree(bp->edev);
12290 bp->edev = NULL;
12291 kfree(bp->fw_health);
12292 bp->fw_health = NULL;
12293 bnxt_cleanup_pci(bp);
12294 bnxt_free_ctx_mem(bp);
12295 kfree(bp->ctx);
12296 bp->ctx = NULL;
12297 kfree(bp->rss_indir_tbl);
12298 bp->rss_indir_tbl = NULL;
12299 bnxt_free_port_stats(bp);
12303 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
12306 struct bnxt_link_info *link_info = &bp->link_info;
12308 rc = bnxt_hwrm_phy_qcaps(bp);
12310 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
12317 rc = bnxt_update_link(bp, false);
12319 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
12330 bnxt_init_ethtool_link_settings(bp);
12345 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12348 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12353 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12354 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12355 bnxt_get_ulp_msix_num(bp),
12356 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
12357 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12360 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12364 if (bp->flags & BNXT_FLAG_AGG_RINGS)
12366 if (bp->flags & BNXT_FLAG_CHIP_P5) {
12367 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
12374 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12378 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
12384 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12387 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12392 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12393 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12395 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12396 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12399 bp->flags |= BNXT_FLAG_AGG_RINGS;
12402 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
12403 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12404 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12405 bnxt_set_ring_params(bp);
12408 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12412 max_cp = bnxt_get_max_func_cp_rings(bp);
12413 max_stat = bnxt_get_max_func_stat_ctxs(bp);
12414 max_irq = bnxt_get_max_func_irqs(bp);
12425 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
12435 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
12437 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
12438 bp->rx_nr_rings = bp->cp_nr_rings;
12439 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
12440 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12443 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
12447 if (!bnxt_can_reserve_rings(bp))
12451 bp->flags |= BNXT_FLAG_SHARED_RINGS;
12456 if (bp->port_count > 1) {
12458 max_t(int, num_online_cpus() / bp->port_count, 1);
12462 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
12465 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
12466 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
12468 bnxt_trim_dflt_sh_rings(bp);
12470 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
12471 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12473 rc = __bnxt_reserve_rings(bp);
12475 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
12476 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12478 bnxt_trim_dflt_sh_rings(bp);
12481 if (bnxt_need_reserve_rings(bp)) {
12482 rc = __bnxt_reserve_rings(bp);
12484 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
12485 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12487 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
12488 bp->rx_nr_rings++;
12489 bp->cp_nr_rings++;
12492 bp->tx_nr_rings = 0;
12493 bp->rx_nr_rings = 0;
12498 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
12502 if (bp->tx_nr_rings)
12505 bnxt_ulp_irq_stop(bp);
12506 bnxt_clear_int_mode(bp);
12507 rc = bnxt_set_dflt_rings(bp, true);
12509 netdev_err(bp->dev, "Not enough rings available.\n");
12512 rc = bnxt_init_int_mode(bp);
12516 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12518 bnxt_set_dflt_rfs(bp);
12521 bnxt_ulp_irq_restart(bp, rc);
12525 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
12530 bnxt_hwrm_func_qcaps(bp);
12532 if (netif_running(bp->dev))
12533 __bnxt_close_nic(bp, true, false);
12535 bnxt_ulp_irq_stop(bp);
12536 bnxt_clear_int_mode(bp);
12537 rc = bnxt_init_int_mode(bp);
12538 bnxt_ulp_irq_restart(bp, rc);
12540 if (netif_running(bp->dev)) {
12542 dev_close(bp->dev);
12544 rc = bnxt_open_nic(bp, true, false);
12550 static int bnxt_init_mac_addr(struct bnxt *bp)
12554 if (BNXT_PF(bp)) {
12555 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
12558 struct bnxt_vf_info *vf = &bp->vf;
12563 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
12569 eth_hw_addr_random(bp->dev);
12571 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
12578 static void bnxt_vpd_read_info(struct bnxt *bp)
12580 struct pci_dev *pdev = bp->pdev;
12591 netdev_err(bp->dev, "Unable to read VPD\n");
12597 netdev_err(bp->dev, "VPD READ-Only not found\n");
12617 memcpy(bp->board_partno, &vpd_data[pos], size);
12631 memcpy(bp->board_serialno, &vpd_data[pos], size);
12636 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
12638 struct pci_dev *pdev = bp->pdev;
12643 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
12649 bp->flags |= BNXT_FLAG_DSN_VALID;
12653 static int bnxt_map_db_bar(struct bnxt *bp)
12655 if (!bp->db_size)
12657 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
12658 if (!bp->bar1)
12666 struct bnxt *bp;
12681 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
12685 bp = netdev_priv(dev);
12686 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
12687 bnxt_set_max_func_irqs(bp, max_irqs);
12690 bp->flags |= BNXT_FLAG_VF;
12693 bp->flags |= BNXT_FLAG_MSIX_CAP;
12704 if (BNXT_PF(bp))
12705 bnxt_vpd_read_info(bp);
12707 rc = bnxt_alloc_hwrm_resources(bp);
12711 mutex_init(&bp->hwrm_cmd_lock);
12712 mutex_init(&bp->link_lock);
12714 rc = bnxt_fw_init_one_p1(bp);
12718 if (BNXT_CHIP_P5(bp)) {
12719 bp->flags |= BNXT_FLAG_CHIP_P5;
12720 if (BNXT_CHIP_SR2(bp))
12721 bp->flags |= BNXT_FLAG_CHIP_SR2;
12724 rc = bnxt_alloc_rss_indir_tbl(bp);
12728 rc = bnxt_fw_init_one_p2(bp);
12732 rc = bnxt_map_db_bar(bp);
12747 if (BNXT_SUPPORTS_TPA(bp))
12761 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
12763 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
12765 if (BNXT_SUPPORTS_TPA(bp))
12773 init_waitqueue_head(&bp->sriov_cfg_wait);
12774 mutex_init(&bp->sriov_lock);
12776 if (BNXT_SUPPORTS_TPA(bp)) {
12777 bp->gro_func = bnxt_gro_func_5730x;
12778 if (BNXT_CHIP_P4(bp))
12779 bp->gro_func = bnxt_gro_func_5731x;
12780 else if (BNXT_CHIP_P5(bp))
12781 bp->gro_func = bnxt_gro_func_5750x;
12783 if (!BNXT_CHIP_P4_PLUS(bp))
12784 bp->flags |= BNXT_FLAG_DOUBLE_DB;
12786 bp->ulp_probe = bnxt_ulp_probe;
12788 rc = bnxt_init_mac_addr(bp);
12795 if (BNXT_PF(bp)) {
12797 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
12802 dev->max_mtu = bp->max_mtu;
12804 rc = bnxt_probe_phy(bp, true);
12808 bnxt_set_rx_skb_mode(bp, false);
12809 bnxt_set_tpa_flags(bp);
12810 bnxt_set_ring_params(bp);
12811 rc = bnxt_set_dflt_rings(bp, true);
12813 netdev_err(bp->dev, "Not enough rings available.\n");
12818 bnxt_fw_init_one_p3(bp);
12821 bp->flags |= BNXT_FLAG_STRIP_VLAN;
12823 rc = bnxt_init_int_mode(bp);
12830 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12832 if (BNXT_PF(bp)) {
12842 rc = bnxt_init_tc(bp);
12848 bnxt_dl_register(bp);
12854 if (BNXT_PF(bp))
12855 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
12856 bnxt_dl_fw_reporters_create(bp);
12867 bnxt_dl_unregister(bp);
12868 bnxt_shutdown_tc(bp);
12869 bnxt_clear_int_mode(bp);
12872 bnxt_hwrm_func_drv_unrgtr(bp);
12873 bnxt_free_hwrm_short_cmd_req(bp);
12874 bnxt_free_hwrm_resources(bp);
12875 bnxt_ethtool_free(bp);
12876 kfree(bp->fw_health);
12877 bp->fw_health = NULL;
12878 bnxt_cleanup_pci(bp);
12879 bnxt_free_ctx_mem(bp);
12880 kfree(bp->ctx);
12881 bp->ctx = NULL;
12882 kfree(bp->rss_indir_tbl);
12883 bp->rss_indir_tbl = NULL;
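
The tail of the probe fragment above (the cleanup calls after device registration) is the error-unwind path; in this style of driver it is organized as reverse-order teardown reached through goto labels (the labels themselves carry no reference to bp, so they are not part of this cross-reference). A compact sketch of the idiom with stub setup/teardown steps.

#include <stdio.h>

static int setup_bars(void)     { return 0; }
static void teardown_bars(void) { }
static int setup_irqs(void)     { return 0; }
static void teardown_irqs(void) { }
static int register_dev(void)   { return 0; }	/* stub for the final registration */

/* Each failing step jumps to a label that releases everything the earlier
 * steps set up, in reverse order. */
static int probe_one(void)
{
	int rc;

	rc = setup_bars();
	if (rc)
		goto out;
	rc = setup_irqs();
	if (rc)
		goto err_bars;
	rc = register_dev();
	if (rc)
		goto err_irqs;
	return 0;

err_irqs:
	teardown_irqs();
err_bars:
	teardown_bars();
out:
	fprintf(stderr, "probe failed: %d\n", rc);
	return rc;
}
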
12893 struct bnxt *bp;
12899 bp = netdev_priv(dev);
12900 if (!bp)
12906 bnxt_ulp_shutdown(bp);
12907 bnxt_clear_int_mode(bp);
12911 pci_wake_from_d3(pdev, bp->wol);
12923 struct bnxt *bp = netdev_priv(dev);
12927 bnxt_ulp_stop(bp);
12932 bnxt_hwrm_func_drv_unrgtr(bp);
12933 pci_disable_device(bp->pdev);
12934 bnxt_free_ctx_mem(bp);
12935 kfree(bp->ctx);
12936 bp->ctx = NULL;
12944 struct bnxt *bp = netdev_priv(dev);
12948 rc = pci_enable_device(bp->pdev);
12954 pci_set_master(bp->pdev);
12955 if (bnxt_hwrm_ver_get(bp)) {
12959 rc = bnxt_hwrm_func_reset(bp);
12965 rc = bnxt_hwrm_func_qcaps(bp);
12969 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
12974 bnxt_get_wol_settings(bp);
12982 bnxt_ulp_start(bp, rc);
12984 bnxt_reenable_sriov(bp);
13010 struct bnxt *bp = netdev_priv(netdev);
13017 bnxt_ulp_stop(bp);
13025 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13032 bnxt_free_ctx_mem(bp);
13033 kfree(bp->ctx);
13034 bp->ctx = NULL;
13054 struct bnxt *bp = netdev_priv(netdev);
13057 netdev_info(bp->dev, "PCI Slot Reset\n");
13075 &bp->state)) {
13078 pci_write_config_dword(bp->pdev, off, 0);
13083 err = bnxt_hwrm_func_reset(bp);
13103 struct bnxt *bp = netdev_priv(netdev);
13106 netdev_info(bp->dev, "PCI Slot Resume\n");
13109 err = bnxt_hwrm_func_qcaps(bp);
13113 bnxt_ulp_start(bp, err);
13115 bnxt_reenable_sriov(bp);