Lines matching defs:ring (drivers/net/ethernet/broadcom/bnxt/bnxt.c)
342 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_pkts:%d cons:%u prod:%u i:%d)",
421 "bnxt: ring busy w/ flush pending!\n");
1781 * -EBUSY - completion ring does not have all the agg buffers yet
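
The fragment at 1781 documents a return convention in the RX path: -EBUSY means a completion arrived before all of its aggregation buffers were posted, so the caller leaves the entry and retries later. A minimal userspace sketch of that convention; the mock_* names and the retry shape are illustrative, not the driver's code:

        #include <errno.h>
        #include <stdio.h>

        /* Hypothetical: a completion records how many agg buffers it needs. */
        struct mock_cmpl { int agg_bufs_needed; int agg_bufs_avail; };

        /* Mirrors the documented convention: return -EBUSY while the
         * completion ring does not yet have all the agg buffers. */
        static int mock_rx_pkt(struct mock_cmpl *c)
        {
                if (c->agg_bufs_avail < c->agg_bufs_needed)
                        return -EBUSY;  /* caller retries on the next poll */
                return 0;               /* full packet can be consumed */
        }

        int main(void)
        {
                struct mock_cmpl c = { .agg_bufs_needed = 3, .agg_bufs_avail = 2 };

                if (mock_rx_pkt(&c) == -EBUSY)
                        printf("agg buffers incomplete, retry later\n");
                c.agg_bufs_avail = 3;
                printf("second pass rc: %d\n", mock_rx_pkt(&c));
                return 0;
        }
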
2059 /* In netpoll mode, if we are using a combined completion ring, we need to
2334 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2341 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2486 /* disable ring IRQ */
2612 /* ACK completion ring before freeing tx ring and producing new
2614 * ring.
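
The comment at 2612-2614 captures an ordering rule: acknowledge consumed completion-ring entries before freeing TX buffers or reposting RX/agg buffers, so newly produced work cannot generate completions into slots the hardware still considers occupied. A hedged sketch of that ordering; every mock_* helper is a stand-in:

        #include <stdio.h>

        struct mock_ring { unsigned int cons; unsigned int size; };

        /* Hypothetical doorbell write telling hardware which completion
         * entries the driver has consumed. */
        static void mock_db_ack(struct mock_ring *cp, unsigned int n)
        {
                cp->cons = (cp->cons + n) % cp->size;
                printf("acked %u completions, cons now %u\n", n, cp->cons);
        }

        static void mock_free_tx_bufs(void)  { printf("tx buffers freed\n"); }
        static void mock_refill_rx_agg(void) { printf("rx/agg buffers reposted\n"); }

        /* 1. ACK the completion ring, 2. free tx entries, 3. produce new
         * rx/agg buffers -- the order the comment at 2612 requires. */
        static void mock_poll_done(struct mock_ring *cp, unsigned int consumed)
        {
                mock_db_ack(cp, consumed);
                mock_free_tx_bufs();
                mock_refill_rx_agg();
        }

        int main(void)
        {
                struct mock_ring cp = { .cons = 0, .size = 256 };

                mock_poll_done(&cp, 16);
                return 0;
        }
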
2674 "Invalid completion received on special ring\n");
3181 struct bnxt_ring_struct *ring;
3195 ring = &rxr->rx_ring_struct;
3196 bnxt_free_ring(bp, &ring->ring_mem);
3198 ring = &rxr->rx_agg_ring_struct;
3199 bnxt_free_ring(bp, &ring->ring_mem);
3242 struct bnxt_ring_struct *ring;
3244 ring = &rxr->rx_ring_struct;
3262 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3266 ring->grp_idx = i;
3270 ring = &rxr->rx_agg_ring_struct;
3271 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3275 ring->grp_idx = i;
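
Lines 3195-3199 and 3244-3275 show a symmetric lifecycle: each RX ring info embeds two ring structs (the normal RX BD ring and the agg BD ring), each allocated and freed through its embedded ring_mem, and tagged with grp_idx = i so later HWRM calls can locate the ring group. A compact userspace model of that pairing; the mock_* types and helpers are stand-ins, and error unwind is omitted (the driver frees via the common free path):

        #include <stdio.h>
        #include <stdlib.h>

        struct mock_ring_mem { void *pages; };
        struct mock_ring { struct mock_ring_mem ring_mem; unsigned int grp_idx; };
        struct mock_rx_ring_info {
                struct mock_ring rx_ring_struct;      /* normal RX BD ring */
                struct mock_ring rx_agg_ring_struct;  /* aggregation BD ring */
        };

        static int mock_alloc_ring(struct mock_ring_mem *rmem)
        {
                rmem->pages = malloc(4096); /* real code allocates DMA pages */
                return rmem->pages ? 0 : -1;
        }

        static void mock_free_ring(struct mock_ring_mem *rmem)
        {
                free(rmem->pages);
                rmem->pages = NULL;
        }

        /* Mirrors 3244-3275: allocate both rings, record the group index. */
        static int mock_alloc_rx_rings(struct mock_rx_ring_info *rxr, unsigned int i)
        {
                if (mock_alloc_ring(&rxr->rx_ring_struct.ring_mem))
                        return -1;
                rxr->rx_ring_struct.grp_idx = i;
                if (mock_alloc_ring(&rxr->rx_agg_ring_struct.ring_mem))
                        return -1;
                rxr->rx_agg_ring_struct.grp_idx = i;
                return 0;
        }

        /* Mirrors 3195-3199: the free path walks the same two rings. */
        static void mock_free_rx_rings(struct mock_rx_ring_info *rxr)
        {
                mock_free_ring(&rxr->rx_ring_struct.ring_mem);
                mock_free_ring(&rxr->rx_agg_ring_struct.ring_mem);
        }

        int main(void)
        {
                struct mock_rx_ring_info rxr = { 0 };

                if (!mock_alloc_rx_rings(&rxr, 0))
                        printf("rx + agg rings allocated for group 0\n");
                mock_free_rx_rings(&rxr);
                return 0;
        }
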
3298 struct bnxt_ring_struct *ring;
3306 ring = &txr->tx_ring_struct;
3308 bnxt_free_ring(bp, &ring->ring_mem);
3334 struct bnxt_ring_struct *ring;
3337 ring = &txr->tx_ring_struct;
3339 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3343 ring->grp_idx = txr->bnapi->index;
3363 ring->queue_id = bp->q_info[qidx].queue_id;
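
The TX fragments at 3337-3363 record two associations at allocation time: grp_idx comes from the ring's NAPI index (txr->bnapi->index), and queue_id is looked up in bp->q_info[] so the firmware queue servicing the ring is fixed per ring. A hedged sketch; the q_info layout here is a simplified stand-in:

        #include <stdio.h>

        #define MOCK_MAX_Q 8

        struct mock_q_info { unsigned int queue_id; };
        struct mock_ring { unsigned int grp_idx; unsigned int queue_id; };

        /* Mirrors 3343 and 3363: tag the tx ring with its NAPI group index
         * and translate the software queue index (e.g. a traffic class)
         * to a firmware queue id through q_info[]. */
        static void mock_init_tx_ring(struct mock_ring *ring, unsigned int napi_idx,
                                      const struct mock_q_info *q_info,
                                      unsigned int qidx)
        {
                ring->grp_idx = napi_idx;
                ring->queue_id = q_info[qidx].queue_id;
        }

        int main(void)
        {
                struct mock_q_info q_info[MOCK_MAX_Q] = {
                        { .queue_id = 0 }, { .queue_id = 2 }, /* ids from firmware */
                };
                struct mock_ring txr;

                mock_init_tx_ring(&txr, 3, q_info, 1);
                printf("tx ring: grp_idx=%u queue_id=%u\n", txr.grp_idx, txr.queue_id);
                return 0;
        }
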
3375 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3379 ring->ring_mem.pg_arr = NULL;
3382 ring->ring_mem.dma_arr = NULL;
3439 struct bnxt_ring_struct *ring;
3446 ring = &cpr->cp_ring_struct;
3448 bnxt_free_ring(bp, &ring->ring_mem);
3454 ring = &cpr2->cp_ring_struct;
3455 bnxt_free_ring(bp, &ring->ring_mem);
3467 struct bnxt_ring_struct *ring;
3481 ring = &cpr->cp_ring_struct;
3482 rmem = &ring->ring_mem;
3508 struct bnxt_ring_struct *ring;
3515 ring = &cpr->cp_ring_struct;
3517 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3522 ring->map_idx = i + ulp_msix;
3524 ring->map_idx = i;
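
Lines 3515-3524 allocate each completion ring and choose its MSI-X mapping: when vectors are reserved ahead of the L2 rings (e.g. for ULP/RDMA), ring i maps to vector i + ulp_msix, otherwise straight to vector i. The exact guarding condition is not in the fragment, so the flag below is a placeholder:

        #include <stdbool.h>
        #include <stdio.h>

        /* Mirrors 3522/3524: completion ring i uses MSI-X vector i, shifted
         * past any vectors reserved ahead of the L2 rings when present. */
        static unsigned int mock_cp_map_idx(unsigned int i, unsigned int ulp_msix,
                                            bool ulp_first)
        {
                return ulp_first ? i + ulp_msix : i;
        }

        int main(void)
        {
                for (unsigned int i = 0; i < 4; i++)
                        printf("cp ring %u -> vector %u (ulp) / %u (no ulp)\n",
                               i, mock_cp_map_idx(i, 2, true),
                               mock_cp_map_idx(i, 2, false));
                return 0;
        }
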
3562 struct bnxt_ring_struct *ring;
3568 ring = &cpr->cp_ring_struct;
3569 rmem = &ring->ring_mem;
3580 ring = &rxr->rx_ring_struct;
3581 rmem = &ring->ring_mem;
3589 ring = &rxr->rx_agg_ring_struct;
3590 rmem = &ring->ring_mem;
3603 ring = &txr->tx_ring_struct;
3604 rmem = &ring->ring_mem;
3614 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3620 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3621 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
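
bnxt_init_rxbd_pages (3614-3621) iterates over every descriptor page in ring->ring_mem.pg_arr, stamping each RX buffer descriptor with the BD type while carrying a ring-wide producer count across pages. A simplified userspace version; MOCK_DESC_PER_PAGE stands in for the real per-page descriptor count and the BD layout is reduced to two fields:

        #include <stdint.h>
        #include <stdio.h>
        #include <stdlib.h>

        #define MOCK_NR_PAGES 2
        #define MOCK_DESC_PER_PAGE 4

        struct mock_rx_bd { uint32_t flags_type; uint32_t opaque; };

        /* Modeled on bnxt_init_rxbd_pages: for each descriptor page, set the
         * BD type bits and record the ring-wide producer index in each BD. */
        static void mock_init_rxbd_pages(struct mock_rx_bd **pg_arr, uint32_t type)
        {
                uint32_t prod = 0;

                for (int i = 0; i < MOCK_NR_PAGES; i++) {
                        struct mock_rx_bd *bd = pg_arr[i];

                        for (int j = 0; j < MOCK_DESC_PER_PAGE; j++, prod++) {
                                bd[j].flags_type = type;
                                bd[j].opaque = prod; /* completion finds the slot */
                        }
                }
        }

        int main(void)
        {
                struct mock_rx_bd *pg_arr[MOCK_NR_PAGES];

                for (int i = 0; i < MOCK_NR_PAGES; i++) {
                        pg_arr[i] = calloc(MOCK_DESC_PER_PAGE, sizeof(**pg_arr));
                        if (!pg_arr[i])
                                return 1;
                }
                mock_init_rxbd_pages(pg_arr, 0x1);
                printf("last opaque: %u\n",
                       pg_arr[MOCK_NR_PAGES - 1][MOCK_DESC_PER_PAGE - 1].opaque);
                for (int i = 0; i < MOCK_NR_PAGES; i++)
                        free(pg_arr[i]);
                return 0;
        }
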
3646 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3660 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3688 struct bnxt_ring_struct *ring;
3698 ring = &rxr->rx_ring_struct;
3699 bnxt_init_rxbd_pages(ring, type);
3705 ring->fw_ring_id = INVALID_HW_RING_ID;
3707 ring = &rxr->rx_agg_ring_struct;
3708 ring->fw_ring_id = INVALID_HW_RING_ID;
3714 bnxt_init_rxbd_pages(ring, type);
3726 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3728 ring->fw_ring_id = INVALID_HW_RING_ID;
3737 ring = &cpr2->cp_ring_struct;
3738 ring->fw_ring_id = INVALID_HW_RING_ID;
3775 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3777 ring->fw_ring_id = INVALID_HW_RING_ID;
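
The init fragments at 3705-3777 all apply one idiom: every ring starts with fw_ring_id = INVALID_HW_RING_ID, and only a successful RING_ALLOC response (5817) replaces the sentinel with a firmware-assigned id. The free path later keys off the same sentinel (see 6059 onward). A minimal sketch of the sentinel lifecycle, defining INVALID_HW_RING_ID as an all-ones u16 to match its role as a sentinel:

        #include <stdint.h>
        #include <stdio.h>

        #define INVALID_HW_RING_ID ((uint16_t)-1)

        struct mock_ring { uint16_t fw_ring_id; };

        /* Software init: the ring exists in host memory but firmware has
         * not been told about it, so its fw id is the sentinel. */
        static void mock_init_ring(struct mock_ring *ring)
        {
                ring->fw_ring_id = INVALID_HW_RING_ID;
        }

        /* Only a successful RING_ALLOC response replaces the sentinel. */
        static void mock_on_ring_alloc_resp(struct mock_ring *ring, uint16_t fw_id)
        {
                ring->fw_ring_id = fw_id;
        }

        int main(void)
        {
                struct mock_ring r;

                mock_init_ring(&r);
                printf("before alloc: %u\n", r.fw_ring_id); /* 65535 = sentinel */
                mock_on_ring_alloc_resp(&r, 7);
                printf("after alloc:  %u\n", r.fw_ring_id);
                return 0;
        }
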
3927 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
3940 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3971 * the RX ring size.
3983 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
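
The warnings at 3927-3983 come from ring-parameter setup: requested sizes are clamped to hardware limits and to the completion-ring budget derived from the RX ring size (3971), with a warning noting each reduction. A generic clamp-and-warn sketch; the limit here is made up:

        #include <stdio.h>

        /* Hypothetical limit; the driver derives real maxima from hardware
         * capabilities and from the rings feeding one completion ring. */
        #define MOCK_MAX_CP_ENTRIES 8192

        static unsigned int mock_clamp_ring(const char *name, unsigned int req,
                                            unsigned int max)
        {
                if (req > max) {
                        fprintf(stderr, "%s ring size %u reduced to %u\n",
                                name, req, max);
                        return max;
                }
                return req;
        }

        int main(void)
        {
                unsigned int cp = mock_clamp_ring("completion", 16384,
                                                  MOCK_MAX_CP_ENTRIES);

                printf("using %u completion entries\n", cp);
                return 0;
        }
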
4660 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4662 if (ring->fw_ring_id != INVALID_HW_RING_ID)
5144 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5148 grp_info = &bp->grp_info[ring->grp_idx];
5244 /* Fill the RSS indirection table with ring group ids */
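
bnxt_cp_ring_from_grp (5144-5148) resolves a ring to its group, and the comment at 5244 says the RSS indirection table is filled with ring group ids, so hash results index the table to select an RX ring group. A hedged sketch of a round-robin fill; the table size and the source of group ids are illustrative:

        #include <stdint.h>
        #include <stdio.h>

        #define MOCK_RSS_TABLE_SIZE 128 /* stand-in for the HW table size */

        /* Fill the indirection table by cycling through the active RX ring
         * groups, as the comment at 5244 describes. */
        static void mock_fill_rss_table(uint16_t *tbl, const uint16_t *grp_ids,
                                        unsigned int nr_grps)
        {
                for (unsigned int i = 0; i < MOCK_RSS_TABLE_SIZE; i++)
                        tbl[i] = grp_ids[i % nr_grps];
        }

        int main(void)
        {
                uint16_t grp_ids[] = { 10, 11, 12, 13 }; /* fw ring group ids */
                uint16_t tbl[MOCK_RSS_TABLE_SIZE];

                mock_fill_rss_table(tbl, grp_ids, 4);
                printf("tbl[0]=%u tbl[5]=%u\n", tbl[0], tbl[5]);
                return 0;
        }
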
5460 unsigned int ring = 0, grp_idx;
5505 ring = 0;
5507 ring = vnic_id - 1;
5509 ring = bp->rx_nr_rings - 1;
5511 grp_idx = bp->rx_ring[ring].bnapi->index;
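
Lines 5505-5511 pick the RX ring backing a VNIC: the default VNIC uses ring 0, others use vnic_id - 1, and one remaining branch selects the last RX ring; the ring's bnapi->index then becomes the group index (5511). The guarding conditions are elided in the fragment, so the boolean below is a placeholder:

        #include <stdbool.h>
        #include <stdio.h>

        /* Modeled loosely on 5505-5511; the real conditions distinguishing
         * the second and third branches are not in the fragment. */
        static unsigned int mock_vnic_to_ring(unsigned int vnic_id, bool per_vnic,
                                              unsigned int rx_nr_rings)
        {
                if (!vnic_id)
                        return 0;               /* default vnic -> ring 0 */
                if (per_vnic)
                        return vnic_id - 1;     /* one rx ring per vnic */
                return rx_nr_rings - 1;         /* remaining case: last ring */
        }

        int main(void)
        {
                for (unsigned int v = 0; v < 4; v++)
                        printf("vnic %u -> rx ring %u\n",
                               v, mock_vnic_to_ring(v, true, 3));
                return 0;
        }
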
5571 /* map ring groups to this vnic */
5575 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5704 struct bnxt_ring_struct *ring,
5709 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5728 /* Association of ring index with doorbell index and MSIX number */
5735 txr = container_of(ring, struct bnxt_tx_ring_info,
5738 /* Association of transmit ring with completion ring */
5739 grp_info = &bp->grp_info[ring->grp_idx];
5743 req->queue_id = cpu_to_le16(ring->queue_id);
5752 /* Association of rx ring with stats context */
5753 grp_info = &bp->grp_info[ring->grp_idx];
5766 /* Association of agg ring with rx ring */
5767 grp_info = &bp->grp_info[ring->grp_idx];
5783 /* Association of cp ring with nq */
5786 req->cq_handle = cpu_to_le64(ring->handle);
5800 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5817 ring->fw_ring_id = ring_id;
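
hwrm_ring_alloc_send_msg (5704-5817) builds one RING_ALLOC request per ring type, and the fragments show the cross-links it encodes: a TX ring names its completion ring and queue_id through its group (5739-5743), an RX ring its stats context (5753), an agg ring its parent RX ring (5767), and a cp ring its NQ handle (5786); the firmware-assigned id then lands in ring->fw_ring_id (5817). A schematic of that per-type dispatch; the request fields and group layout are simplified stand-ins, not the HWRM ABI:

        #include <stdint.h>
        #include <stdio.h>

        enum mock_ring_type { MOCK_TX, MOCK_RX, MOCK_AGG, MOCK_CP };

        /* Simplified stand-in for the per-group info: fw ids and stats ctx. */
        struct mock_grp_info { uint16_t cp_fw_ring_id, rx_fw_ring_id, stats_ctx; };

        struct mock_req { uint16_t cmpl_ring_id, queue_id, stat_ctx, rx_ring_id; };

        /* Sketch of the associations visible at 5739-5786. */
        static void mock_build_ring_alloc(struct mock_req *req,
                                          enum mock_ring_type type,
                                          const struct mock_grp_info *grp,
                                          uint16_t queue_id)
        {
                switch (type) {
                case MOCK_TX:   /* tx ring completes into the group's cp ring */
                        req->cmpl_ring_id = grp->cp_fw_ring_id;
                        req->queue_id = queue_id;
                        break;
                case MOCK_RX:   /* rx ring accounts into the group's stats ctx */
                        req->stat_ctx = grp->stats_ctx;
                        break;
                case MOCK_AGG:  /* agg ring is tied to its parent rx ring */
                        req->rx_ring_id = grp->rx_fw_ring_id;
                        break;
                case MOCK_CP:   /* cp ring would carry its NQ handle (elided) */
                        break;
                }
        }

        int main(void)
        {
                struct mock_grp_info grp = { .cp_fw_ring_id = 5,
                                             .rx_fw_ring_id = 9, .stats_ctx = 2 };
                struct mock_req req = { 0 };

                mock_build_ring_alloc(&req, MOCK_TX, &grp, 3);
                printf("tx -> cp %u, queue %u\n", req.cmpl_ring_id, req.queue_id);
                return 0;
        }
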
5904 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5905 u32 map_idx = ring->map_idx;
5910 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5915 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5918 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5921 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5923 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5930 struct bnxt_ring_struct *ring;
5940 ring = &cpr2->cp_ring_struct;
5941 ring->handle = BNXT_TX_HDL;
5943 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5947 ring->fw_ring_id);
5950 ring = &txr->tx_ring_struct;
5952 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5955 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5961 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5965 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5968 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5972 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5979 ring = &cpr2->cp_ring_struct;
5980 ring->handle = BNXT_RX_HDL;
5981 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5985 ring->fw_ring_id);
5994 struct bnxt_ring_struct *ring =
5996 u32 grp_idx = ring->grp_idx;
5999 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6004 ring->fw_ring_id);
6007 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
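
bnxt_hwrm_ring_alloc (5904-6007) allocates in dependency order: completion/NQ rings first (ring 0 additionally becomes the async event completion ring, 5921-5923), then TX, RX, and agg rings, each followed by bnxt_set_db to program the doorbell and by recording the fw id in grp_info. A compressed sketch of just the ordering; all helpers are stubs:

        #include <stdio.h>

        static int mock_alloc_cp_rings(void)  { puts("cp/nq rings + async CR"); return 0; }
        static int mock_alloc_tx_rings(void)  { puts("tx rings -> cp rings");   return 0; }
        static int mock_alloc_rx_rings(void)  { puts("rx rings -> grp_info");   return 0; }
        static int mock_alloc_agg_rings(void) { puts("agg rings -> rx rings");  return 0; }

        /* Order mirrors the fragments at 5904-6007: completion/NQ rings come
         * first so every later ring can name a live cp ring and doorbell. */
        static int mock_hwrm_ring_alloc_all(void)
        {
                if (mock_alloc_cp_rings())
                        return -1;
                if (mock_alloc_tx_rings())
                        return -1;
                if (mock_alloc_rx_rings())
                        return -1;
                return mock_alloc_agg_rings();
        }

        int main(void)
        {
                return mock_hwrm_ring_alloc_all();
        }
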
6015 struct bnxt_ring_struct *ring,
6032 req->ring_id = cpu_to_le16(ring->fw_ring_id);
6057 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
6059 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6062 hwrm_ring_free_send_msg(bp, ring,
6066 ring->fw_ring_id = INVALID_HW_RING_ID;
6072 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
6075 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6078 hwrm_ring_free_send_msg(bp, ring,
6082 ring->fw_ring_id = INVALID_HW_RING_ID;
6094 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
6097 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6100 hwrm_ring_free_send_msg(bp, ring, type,
6103 ring->fw_ring_id = INVALID_HW_RING_ID;
6122 struct bnxt_ring_struct *ring;
6129 ring = &cpr2->cp_ring_struct;
6130 if (ring->fw_ring_id == INVALID_HW_RING_ID)
6132 hwrm_ring_free_send_msg(bp, ring,
6135 ring->fw_ring_id = INVALID_HW_RING_ID;
6138 ring = &cpr->cp_ring_struct;
6139 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6140 hwrm_ring_free_send_msg(bp, ring, type,
6142 ring->fw_ring_id = INVALID_HW_RING_ID;
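
The free fragments (6057-6142) share one guard: send RING_FREE only when fw_ring_id is live, then reset it to INVALID_HW_RING_ID so repeated teardown is a no-op; completion rings go last because the TX/RX/agg rings reference them. Sketch of that pattern with mock types:

        #include <stdint.h>
        #include <stdio.h>

        #define INVALID_HW_RING_ID ((uint16_t)-1)

        struct mock_ring { uint16_t fw_ring_id; };

        /* Mirrors 6130-6142: issue RING_FREE only for live rings, then mark
         * the software state invalid so a second teardown is harmless. */
        static void mock_ring_free_one(struct mock_ring *r, const char *what)
        {
                if (r->fw_ring_id == INVALID_HW_RING_ID)
                        return;
                printf("RING_FREE %s (fw id %u)\n", what, r->fw_ring_id);
                r->fw_ring_id = INVALID_HW_RING_ID;
        }

        int main(void)
        {
                struct mock_ring tx = { 3 }, rx = { 4 }, agg = { 5 }, cp = { 1 };

                /* tx/rx/agg first; the cp ring they reference is freed last. */
                mock_ring_free_one(&tx, "tx");
                mock_ring_free_one(&rx, "rx");
                mock_ring_free_one(&agg, "agg");
                mock_ring_free_one(&cp, "cp");
                return 0;
        }
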
6458 /* Old firmware does not need RX ring reservations but we still
6460 * we go through RX ring reservations first and then set up the
8643 /* configure default vnic, ring grp */
8804 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
9258 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
9263 netdev_err(bp->dev, "tx ring reservation failure\n");
9331 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
10712 /* Save ring stats before shutdown */
11769 /* Disable and flush TPA before resetting the RX ring */
11783 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11785 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
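
Lines 11769-11785 outline the RX ring reset flow: TPA is disabled and flushed first, a ring-level firmware reset is attempted, and an unsupported or failed reset is logged and escalated to a global reset. A sketch of that escalation; the exact error codes firmware returns are placeholders:

        #include <errno.h>
        #include <stdio.h>

        static int mock_fw_rx_ring_reset(int supported)
        {
                return supported ? 0 : -EOPNOTSUPP; /* placeholder fw response */
        }

        static void mock_global_reset(void) { puts("falling back to global reset"); }

        /* Mirrors the flow around 11769-11785: flush TPA, try the targeted
         * reset, and escalate when the firmware cannot do it. */
        static void mock_rx_ring_reset(int fw_supports_it)
        {
                int rc;

                puts("TPA disabled and flushed");
                rc = mock_fw_rx_ring_reset(fw_supports_it);
                if (!rc) {
                        puts("RX ring reset done");
                        return;
                }
                if (rc == -EOPNOTSUPP)
                        puts("RX ring reset not supported by firmware");
                else
                        printf("RX ring reset failed, rc = %d\n", rc);
                mock_global_reset();
        }

        int main(void)
        {
                mock_rx_ring_reset(0);
                return 0;
        }
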
13385 /* In initial default shared ring setting, each shared ring must have a
13386 * RX/TX ring pair.
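
The closing comment (13385-13386) gives the default sizing rule for shared completion rings: each shared ring must carry one RX/TX pair, so the default count is bounded by the smaller of the RX and TX budgets. A tiny sketch of that constraint; the availability inputs are illustrative:

        #include <stdio.h>

        static unsigned int min_u(unsigned int a, unsigned int b)
        {
                return a < b ? a : b;
        }

        /* In the default shared layout each shared ring needs one RX and one
         * TX ring, so the count is bounded by the smaller of the two pools. */
        static unsigned int mock_dflt_shared_rings(unsigned int rx_avail,
                                                   unsigned int tx_avail)
        {
                return min_u(rx_avail, tx_avail);
        }

        int main(void)
        {
                printf("shared rings: %u\n", mock_dflt_shared_rings(8, 6));
                return 0;
        }
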