Lines Matching refs:vnic
3844 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3847 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3849 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3851 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3855 get_random_bytes(vnic->rss_hash_key,
3858 memcpy(vnic->rss_hash_key,
4028 struct bnxt_vnic_info *vnic;
4035 vnic = &bp->vnic_info[i];
4037 kfree(vnic->fw_grp_ids);
4038 vnic->fw_grp_ids = NULL;
4040 kfree(vnic->uc_list);
4041 vnic->uc_list = NULL;
4043 if (vnic->mc_list) {
4044 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4045 vnic->mc_list, vnic->mc_list_mapping);
4046 vnic->mc_list = NULL;
4049 if (vnic->rss_table) {
4050 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4051 vnic->rss_table,
4052 vnic->rss_table_dma_addr);
4053 vnic->rss_table = NULL;
4056 vnic->rss_hash_key = NULL;
4057 vnic->flags = 0;
4064 struct bnxt_vnic_info *vnic;
4069 vnic = &bp->vnic_info[i];
4071 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4075 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4076 if (!vnic->uc_list) {
4083 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4084 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4085 vnic->mc_list =
4087 vnic->mc_list_size,
4088 &vnic->mc_list_mapping,
4090 if (!vnic->mc_list) {
4099 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4104 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4105 if (!vnic->fw_grp_ids) {
4111 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4119 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
4120 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
4121 vnic->rss_table_size,
4122 &vnic->rss_table_dma_addr,
4124 if (!vnic->rss_table) {
4129 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4130 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4899 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4906 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4907 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
4908 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4909 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4911 req->mask = cpu_to_le32(vnic->rx_mask);
4955 struct bnxt_vnic_info *vnic;
4969 vnic = &bp->vnic_info[fltr->rxq + 1];
4970 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
5060 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
5069 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5071 for (j = 0; j < vnic->uc_filter_count; j++) {
5072 req->l2_filter_id = vnic->fw_l2_filter_id[j];
5076 vnic->uc_filter_count = 0;
5084 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5089 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5139 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5239 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5241 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5248 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5253 struct bnxt_vnic_info *vnic)
5255 __le16 *ring_tbl = vnic->rss_table;
5276 struct bnxt_vnic_info *vnic)
5279 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5281 bnxt_fill_hw_rss_tbl(bp, vnic);
5293 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5294 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5299 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5304 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5312 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
5313 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5319 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5329 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5333 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
5334 ring_tbl_map = vnic->rss_table_dma_addr;
5341 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5354 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5361 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5363 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5374 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5395 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5419 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5422 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5458 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5482 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5483 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5486 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5497 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5498 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5504 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5506 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5517 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5559 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5571 /* map ring groups to this vnic */
5579 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5584 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5591 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
6381 int cp, int stat, int vnic)
6385 vnic);
6388 vnic);
6452 int vnic = 1, grp = rx;
6468 vnic = rx + 1;
6473 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6490 int vnic = 1, stat;
6499 vnic = rx + 1;
6505 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6514 vnic = hw_resc->resv_vnics;
6561 if (!tx || !rx || !cp || !grp || !vnic || !stat)
8524 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
8547 /* clear all RSS settings before freeing the vnic ctx */
8551 /* before freeing the vnic, undo the vnic tpa settings */
8617 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8620 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8623 /* allocate context for vnic */
8626 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8635 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8643 /* configure default vnic, ring grp */
8646 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8651 /* Enable RSS hashing on vnic */
8654 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8662 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8679 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8690 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8696 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8703 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8727 struct bnxt_vnic_info *vnic;
8734 vnic = &bp->vnic_info[vnic_id];
8735 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8737 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8740 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8770 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8777 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8789 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8817 /* default vnic 0 */
8820 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8848 /* Filter for default vnic 0 */
8854 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8857 vnic->uc_filter_count = 1;
8859 vnic->rx_mask = 0;
8864 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8867 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8870 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8871 vnic->mc_list_count = 0;
8876 vnic->rx_mask |= mask;
8892 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
10998 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11008 vnic->mc_list_count = 0;
11012 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
11013 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
11022 if (mc_count != vnic->mc_list_count) {
11023 vnic->mc_list_count = mc_count;
11032 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11036 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
11040 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
11051 struct bnxt_vnic_info *vnic;
11059 vnic = &bp->vnic_info[0];
11060 mask = vnic->rx_mask;
11075 vnic->mc_list_count = 0;
11080 if (mask != vnic->rx_mask || uc_update || mc_update) {
11081 vnic->rx_mask = mask;
11090 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11107 for (i = 1; i < vnic->uc_filter_count; i++) {
11108 req->l2_filter_id = vnic->fw_l2_filter_id[i];
11114 vnic->uc_filter_count = 1;
11118 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11121 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
11123 vnic->uc_filter_count++;
11128 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
11129 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
11138 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11140 vnic->uc_filter_count = i;
11148 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
11150 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11152 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
11155 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
11156 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11157 vnic->mc_list_count = 0;
12938 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12942 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12944 vnic->uc_list + off)) {
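
The fragments around lines 4119-4130 (allocation) and 4049-4056 (teardown) show the driver carving the RSS hash key out of the same DMA-coherent buffer as the RSS indirection table, so a single mapping yields both the table's and the key's DMA addresses. A minimal sketch of that pattern follows; the struct, the function names, and MY_HASH_KEY_SIZE are hypothetical stand-ins for illustration, not the driver's own definitions.

#include <linux/dma-mapping.h>
#include <linux/types.h>

#define MY_HASH_KEY_SIZE	40	/* stand-in for the driver's HW_HASH_KEY_SIZE */

struct my_vnic {
	__le16		*rss_table;
	dma_addr_t	rss_table_dma_addr;
	int		rss_table_size;
	void		*rss_hash_key;
	dma_addr_t	rss_hash_key_dma_addr;
};

static int my_vnic_alloc_rss(struct device *dev, struct my_vnic *vnic,
			     size_t tbl_entries)
{
	size_t size = tbl_entries * sizeof(__le16);

	/* one coherent buffer: [ indirection table | hash key ] */
	vnic->rss_table_size = size + MY_HASH_KEY_SIZE;
	vnic->rss_table = dma_alloc_coherent(dev, vnic->rss_table_size,
					     &vnic->rss_table_dma_addr,
					     GFP_KERNEL);
	if (!vnic->rss_table)
		return -ENOMEM;

	/* the key sits right after the table, in both CPU and DMA space */
	vnic->rss_hash_key = (void *)vnic->rss_table + size;
	vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	return 0;
}

static void my_vnic_free_rss(struct device *dev, struct my_vnic *vnic)
{
	/* mirrors the teardown around lines 4049-4056: one free covers both */
	if (vnic->rss_table) {
		dma_free_coherent(dev, vnic->rss_table_size, vnic->rss_table,
				  vnic->rss_table_dma_addr);
		vnic->rss_table = NULL;
	}
	vnic->rss_hash_key = NULL;
}

Keeping the table and key in one allocation means the two firmware-visible addresses handed out in the RSS setup path (ring_grp_tbl_addr and hash_key_tbl_addr, lines 5293-5294) come from a single dma_alloc_coherent() call, which is why the free path only releases rss_table and simply NULLs rss_hash_key.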