Lines matching defs:rdev
81 static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);
85 static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev);
86 static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
88 static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
90 static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
100 res = &rdev->qplib_res;
101 en_dev = rdev->en_dev;
102 cctx = rdev->chip_ctx;
105 rc = bnxt_re_hwrm_qcfg(rdev, &l2db_len, &offset);
107 dev_info(rdev_to_dev(rdev),
122 dev_info(rdev_to_dev(rdev), "Low latency framework is enabled\n");
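The lines above are from bnxt_re_set_db_offset(): the driver queries firmware (bnxt_re_hwrm_qcfg) for the L2 doorbell region length and offset and places the RoCE doorbell pages accordingly, falling back to a fixed default when the query reports nothing. A minimal userspace sketch of that selection, assuming an illustrative default offset (DEFAULT_PF_DB_OFFSET is a stand-in, not the driver's macro):

    /* Hypothetical model of the doorbell-offset choice; runnable userspace C. */
    #include <stdio.h>

    #define DEFAULT_PF_DB_OFFSET 0x10000u /* stand-in for the driver default */

    /* If firmware reported a valid L2 doorbell size, RoCE doorbells start
     * at the offset firmware returned; otherwise use the fixed default. */
    static unsigned roce_db_offset(unsigned l2db_len, unsigned fw_offset)
    {
        return l2db_len ? fw_offset : DEFAULT_PF_DB_OFFSET;
    }

    int main(void)
    {
        printf("fw-sized: 0x%x\n", roce_db_offset(0x8000, 0x8000));
        printf("fallback: 0x%x\n", roce_db_offset(0, 0));
        return 0;
    }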
126 static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
130 cctx = rdev->chip_ctx;
131 cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
133 if (bnxt_re_hwrm_qcaps(rdev))
134 dev_err(rdev_to_dev(rdev),
138 static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
142 if (!rdev->chip_ctx)
144 chip_ctx = rdev->chip_ctx;
145 rdev->chip_ctx = NULL;
146 rdev->rcfw.res = NULL;
147 rdev->qplib_res.cctx = NULL;
148 rdev->qplib_res.pdev = NULL;
149 rdev->qplib_res.netdev = NULL;
153 static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
159 en_dev = rdev->en_dev;
167 rdev->chip_ctx = chip_ctx;
170 rdev->qplib_res.cctx = rdev->chip_ctx;
171 rdev->rcfw.res = &rdev->qplib_res;
172 rdev->qplib_res.dattr = &rdev->dev_attr;
173 rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
175 bnxt_re_set_drv_mode(rdev, wqe_mode);
177 bnxt_re_set_db_offset(rdev);
178 rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
183 ibdev_info(&rdev->ibdev,
190 static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
192 if (BNXT_EN_VF(rdev->en_dev))
193 rdev->is_virtfn = 1;
201 static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
207 attr = &rdev->dev_attr;
208 ctx = &rdev->qplib_ctx;
218 if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
220 rdev->qplib_ctx.tqm_ctx.qcount[i] =
221 rdev->dev_attr.tqm_alloc_reqs[i];
260 static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
264 memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
265 bnxt_re_limit_pf_res(rdev);
267 num_vfs = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
268 BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
270 bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
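bnxt_re_set_resource_limits() above zeroes the VF pool and repartitions it: Gen P5 chips divide by the fixed BNXT_RE_GEN_P5_MAX_VF rather than the probed VF count. A sketch of the per-VF division, assuming illustrative totals and divisor value:

    /* Hypothetical model of PF/VF resource partitioning; all counts are
     * illustrative, not taken from the driver. */
    #include <stdio.h>

    #define GEN_P5_MAX_VF 64u /* assumed value of the fixed P5 divisor */

    static unsigned per_vf_share(unsigned total, unsigned pf_reserved,
                                 unsigned num_vfs, int is_gen_p5)
    {
        unsigned vfs = is_gen_p5 ? GEN_P5_MAX_VF : num_vfs;

        if (!vfs || total <= pf_reserved)
            return 0;
        return (total - pf_reserved) / vfs;
    }

    int main(void)
    {
        printf("p5:  %u per VF\n", per_vf_share(65536, 8192, 16, 1));
        printf("!p5: %u per VF\n", per_vf_share(65536, 8192, 16, 0));
        return 0;
    }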
273 static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
276 if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
278 rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev);
279 if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
280 bnxt_re_set_resource_limits(rdev);
281 bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
282 &rdev->qplib_ctx);
288 struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
290 if (!rdev)
292 ib_unregister_device(&rdev->ibdev);
293 bnxt_re_dev_uninit(rdev);
298 struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
299 struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
303 for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
304 nq = &rdev->nq[indx - 1];
313 struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
314 struct bnxt_msix_entry *msix_ent = rdev->en_dev->msix_entries;
315 struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
325 ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n");
332 for (indx = 0; indx < rdev->num_msix; indx++)
333 rdev->en_dev->msix_entries[indx].vector = ent[indx].vector;
338 ibdev_warn(&rdev->ibdev, "Failed to reinit CREQ\n");
341 for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
342 nq = &rdev->nq[indx - 1];
346 ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
360 static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
365 en_dev = rdev->en_dev;
367 rc = bnxt_register_dev(en_dev, &bnxt_re_ulp_ops, rdev);
369 rdev->qplib_res.pdev = rdev->en_dev->pdev;
392 static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
395 struct bnxt_en_dev *en_dev = rdev->en_dev;
414 int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
416 struct bnxt_en_dev *en_dev = rdev->en_dev;
423 cctx = rdev->chip_ctx;
440 static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev)
444 struct bnxt_en_dev *en_dev = rdev->en_dev;
449 cctx = rdev->chip_ctx;
467 static void bnxt_re_set_default_pacing_data(struct bnxt_re_dev *rdev)
469 struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
471 pacing_data->do_pacing = rdev->pacing.dbr_def_do_pacing;
472 pacing_data->pacing_th = rdev->pacing.pacing_algo_th;
477 static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev)
485 read_val = readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
493 if (fifo_occup < rdev->qplib_res.pacing_data->pacing_th)
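The wait loop above polls the doorbell FIFO status register until occupancy drops below the pacing threshold. The register encodes remaining room, so occupancy is the maximum depth minus the masked, shifted room field. A sketch of that extraction, with assumed numeric values for the mask, shift, and depth:

    /* Hypothetical model of the FIFO occupancy read; the constants are
     * assumptions for illustration, not the driver's macros. */
    #include <stdio.h>

    #define FIFO_MAX_DEPTH  0x2c00u
    #define FIFO_ROOM_MASK  0x1fff8000u
    #define FIFO_ROOM_SHIFT 15u

    static unsigned fifo_occupancy(unsigned reg)
    {
        return FIFO_MAX_DEPTH - ((reg & FIFO_ROOM_MASK) >> FIFO_ROOM_SHIFT);
    }

    int main(void)
    {
        unsigned empty = FIFO_MAX_DEPTH << FIFO_ROOM_SHIFT; /* full room */

        printf("occupancy when empty: %u\n", fifo_occupancy(empty));
        printf("occupancy at half:    %u\n",
               fifo_occupancy((FIFO_MAX_DEPTH / 2) << FIFO_ROOM_SHIFT));
        return 0;
    }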
500 struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
505 if (!mutex_trylock(&rdev->pacing.dbq_lock))
507 pacing_data = rdev->qplib_res.pacing_data;
508 pacing_save = rdev->pacing.do_pacing_save;
509 __wait_for_fifo_occupancy_below_th(rdev);
510 cancel_delayed_work_sync(&rdev->dbq_pacing_work);
511 if (pacing_save > rdev->pacing.dbr_def_do_pacing) {
523 pacing_data->pacing_th = rdev->pacing.pacing_algo_th * 4;
530 rdev->pacing.do_pacing_save = pacing_data->do_pacing;
533 schedule_delayed_work(&rdev->dbq_pacing_work,
534 msecs_to_jiffies(rdev->pacing.dbq_pacing_time));
535 rdev->stats.pacing.alerts++;
536 mutex_unlock(&rdev->pacing.dbq_lock);
541 struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
546 if (!mutex_trylock(&rdev->pacing.dbq_lock))
549 pacing_data = rdev->qplib_res.pacing_data;
550 read_val = readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
563 pacing_data->do_pacing = max_t(u32, rdev->pacing.dbr_def_do_pacing, pacing_data->do_pacing);
564 if (pacing_data->do_pacing <= rdev->pacing.dbr_def_do_pacing) {
565 bnxt_re_set_default_pacing_data(rdev);
566 rdev->stats.pacing.complete++;
571 schedule_delayed_work(&rdev->dbq_pacing_work,
572 msecs_to_jiffies(rdev->pacing.dbq_pacing_time));
573 rdev->stats.pacing.resched++;
575 rdev->pacing.do_pacing_save = pacing_data->do_pacing;
576 mutex_unlock(&rdev->pacing.dbq_lock);
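bnxt_re_pacing_timer_exp() above steps do_pacing back toward the no-congestion default once the FIFO drains, clamping with max_t() and then either restoring defaults (stats.pacing.complete) or rescheduling itself (stats.pacing.resched). A sketch of that decay loop, assuming an illustrative halving step and default:

    /* Hypothetical model of the pacing step-down: each tick halves the
     * pacing factor until it reaches the no-congestion default, at which
     * point pacing ends. Only the clamp and the complete/resched split
     * mirror the listing; the step and values are assumptions. */
    #include <stdio.h>

    #define DEF_DO_PACING 1u /* stand-in for the no-congestion default */

    int main(void)
    {
        unsigned do_pacing = 64; /* assumed post-alert value */
        int ticks = 0;

        while (do_pacing > DEF_DO_PACING) { /* "resched" path */
            do_pacing >>= 1;                /* assumed decay step */
            if (do_pacing < DEF_DO_PACING)
                do_pacing = DEF_DO_PACING;  /* clamp, as in max_t() above */
            ticks++;
        }
        printf("pacing complete after %d ticks\n", ticks); /* "complete" path */
        return 0;
    }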
579 void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev)
583 if (!rdev->pacing.dbr_pacing)
585 mutex_lock(&rdev->pacing.dbq_lock);
586 pacing_data = rdev->qplib_res.pacing_data;
594 cancel_work_sync(&rdev->dbq_fifo_check_work);
595 schedule_work(&rdev->dbq_fifo_check_work);
596 mutex_unlock(&rdev->pacing.dbq_lock);
599 static int bnxt_re_initialize_dbr_pacing(struct bnxt_re_dev *rdev)
601 if (bnxt_re_hwrm_dbr_pacing_qcfg(rdev))
605 rdev->pacing.dbr_page = (void *)__get_free_page(GFP_KERNEL);
606 if (!rdev->pacing.dbr_page)
609 memset((u8 *)rdev->pacing.dbr_page, 0, PAGE_SIZE);
610 rdev->qplib_res.pacing_data = (struct bnxt_qplib_db_pacing_data *)rdev->pacing.dbr_page;
613 writel(rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_BASE_MASK,
614 rdev->en_dev->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
615 rdev->pacing.dbr_db_fifo_reg_off =
616 (rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_OFFSET_MASK) +
618 rdev->pacing.dbr_bar_addr =
619 pci_resource_start(rdev->qplib_res.pdev, 0) + rdev->pacing.dbr_db_fifo_reg_off;
621 rdev->pacing.pacing_algo_th = BNXT_RE_PACING_ALGO_THRESHOLD;
622 rdev->pacing.dbq_pacing_time = BNXT_RE_DBR_PACING_TIME;
623 rdev->pacing.dbr_def_do_pacing = BNXT_RE_DBR_DO_PACING_NO_CONGESTION;
624 rdev->pacing.do_pacing_save = rdev->pacing.dbr_def_do_pacing;
625 rdev->qplib_res.pacing_data->fifo_max_depth = BNXT_RE_MAX_FIFO_DEPTH;
626 rdev->qplib_res.pacing_data->fifo_room_mask = BNXT_RE_DB_FIFO_ROOM_MASK;
627 rdev->qplib_res.pacing_data->fifo_room_shift = BNXT_RE_DB_FIFO_ROOM_SHIFT;
628 rdev->qplib_res.pacing_data->grc_reg_offset = rdev->pacing.dbr_db_fifo_reg_off;
629 bnxt_re_set_default_pacing_data(rdev);
631 INIT_WORK(&rdev->dbq_fifo_check_work, bnxt_re_db_fifo_check);
632 INIT_DELAYED_WORK(&rdev->dbq_pacing_work, bnxt_re_pacing_timer_exp);
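bnxt_re_initialize_dbr_pacing() above reaches the FIFO status register through a GRC window: the page-aligned part of the GRC address is programmed into BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4, and subsequent reads go to the window's BAR0 offset plus the low-order bits. A sketch of that address split, with assumed mask and window values:

    /* Hypothetical model of the GRC window address split; the masks, the
     * window start, and the sample address are assumptions. */
    #include <stdio.h>

    #define GRC_BASE_MASK   0xfffff000u /* page-aligned part of the GRC addr */
    #define GRC_OFFSET_MASK 0x00000ffcu /* dword offset within that page */
    #define WINDOW_START    0x2000u     /* BAR0 offset where the window decodes */

    int main(void)
    {
        unsigned grc_addr = 0x0012a034;                  /* sample register */
        unsigned window_base = grc_addr & GRC_BASE_MASK; /* -> window register */
        unsigned bar_off = (grc_addr & GRC_OFFSET_MASK) + WINDOW_START;

        printf("program window base 0x%x, read at BAR0+0x%x\n",
               window_base, bar_off);
        return 0;
    }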
636 static void bnxt_re_deinitialize_dbr_pacing(struct bnxt_re_dev *rdev)
638 cancel_work_sync(&rdev->dbq_fifo_check_work);
639 cancel_delayed_work_sync(&rdev->dbq_pacing_work);
640 if (rdev->pacing.dbr_page)
641 free_page((unsigned long)rdev->pacing.dbr_page);
643 rdev->pacing.dbr_page = NULL;
644 rdev->pacing.dbr_pacing = false;
647 static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
656 if (!rdev)
659 en_dev = rdev->en_dev;
664 if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
674 ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
679 static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
683 struct bnxt_en_dev *en_dev = rdev->en_dev;
715 static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
718 struct bnxt_en_dev *en_dev = rdev->en_dev;
727 if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
736 ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
742 static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
746 struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
749 struct bnxt_en_dev *en_dev = rdev->en_dev;
791 struct bnxt_re_dev *rdev =
794 return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->vendor);
801 struct bnxt_re_dev *rdev =
804 return sysfs_emit(buf, "%s\n", rdev->ibdev.node_desc);
875 static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
877 struct ib_device *ibdev = &rdev->ibdev;
886 addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr);
888 ibdev->num_comp_vectors = rdev->num_msix - 1;
889 ibdev->dev.parent = &rdev->en_dev->pdev->dev;
896 ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
900 dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX);
902 return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
908 struct bnxt_re_dev *rdev;
911 rdev = ib_alloc_device(bnxt_re_dev, ibdev);
912 if (!rdev) {
918 rdev->nb.notifier_call = NULL;
919 rdev->netdev = en_dev->net;
920 rdev->en_dev = en_dev;
921 rdev->id = rdev->en_dev->pdev->devfn;
922 INIT_LIST_HEAD(&rdev->qp_list);
923 mutex_init(&rdev->qp_lock);
924 mutex_init(&rdev->pacing.dbq_lock);
925 atomic_set(&rdev->stats.res.qp_count, 0);
926 atomic_set(&rdev->stats.res.cq_count, 0);
927 atomic_set(&rdev->stats.res.srq_count, 0);
928 atomic_set(&rdev->stats.res.mr_count, 0);
929 atomic_set(&rdev->stats.res.mw_count, 0);
930 atomic_set(&rdev->stats.res.ah_count, 0);
931 atomic_set(&rdev->stats.res.pd_count, 0);
932 rdev->cosq[0] = 0xFFFF;
933 rdev->cosq[1] = 0xFFFF;
935 return rdev;
984 event.device = &qp->rdev->ibdev;
1041 ib_event.device = &srq->rdev->ibdev;
1072 static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
1074 return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
1075 (rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
1077 rdev->en_dev->msix_entries[indx].db_offset;
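bnxt_re_get_nqdb_offset() above selects the notification-queue doorbell offset by chip generation: Gen P5 parts use a fixed VF or PF offset, while older parts take the per-vector db_offset from the MSI-X table. A sketch with stand-in constants:

    /* Hypothetical model of the NQ doorbell-offset selection; the two
     * fixed offsets are stand-ins, not the driver's values. */
    #include <stdio.h>

    #define GEN_P5_VF_NQ_DB 0x4000u
    #define GEN_P5_PF_NQ_DB 0x1000u

    static unsigned nqdb_offset(int gen_p5, int is_vf, unsigned msix_db_offset)
    {
        if (gen_p5)
            return is_vf ? GEN_P5_VF_NQ_DB : GEN_P5_PF_NQ_DB;
        return msix_db_offset; /* pre-P5: per-vector MSI-X db_offset */
    }

    int main(void)
    {
        printf("p5 pf: 0x%x\n", nqdb_offset(1, 0, 0));
        printf("!p5:   0x%x\n", nqdb_offset(0, 0, 0x80));
        return 0;
    }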
1080 static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
1084 for (i = 1; i < rdev->num_msix; i++)
1085 bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
1087 if (rdev->qplib_res.rcfw)
1088 bnxt_qplib_cleanup_res(&rdev->qplib_res);
1091 static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
1097 bnxt_qplib_init_res(&rdev->qplib_res);
1099 for (i = 1; i < rdev->num_msix; i++) {
1100 db_offt = bnxt_re_get_nqdb_offset(rdev, i);
1101 rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
1102 i - 1, rdev->en_dev->msix_entries[i].vector,
1106 ibdev_err(&rdev->ibdev,
1115 bnxt_qplib_disable_nq(&rdev->nq[i]);
1119 static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
1124 for (i = 0; i < rdev->num_msix - 1; i++) {
1125 type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
1126 bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
1127 bnxt_qplib_free_nq(&rdev->nq[i]);
1128 rdev->nq[i].res = NULL;
1132 static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
1134 bnxt_re_free_nq_res(rdev);
1136 if (rdev->qplib_res.dpi_tbl.max) {
1137 bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
1138 &rdev->dpi_privileged);
1140 if (rdev->qplib_res.rcfw) {
1141 bnxt_qplib_free_res(&rdev->qplib_res);
1142 rdev->qplib_res.rcfw = NULL;
1146 static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
1154 rdev->qplib_res.rcfw = &rdev->rcfw;
1155 rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
1159 rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
1160 rdev->netdev, &rdev->dev_attr);
1164 rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res,
1165 &rdev->dpi_privileged,
1166 rdev, BNXT_QPLIB_DPI_TYPE_KERNEL);
1170 for (i = 0; i < rdev->num_msix - 1; i++) {
1173 nq = &rdev->nq[i];
1175 rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
1177 ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
1181 type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
1183 rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
1187 rattr.lrid = rdev->en_dev->msix_entries[i + 1].ring_idx;
1188 rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
1190 ibdev_err(&rdev->ibdev,
1193 bnxt_qplib_free_nq(&rdev->nq[i]);
1201 type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
1202 bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
1203 bnxt_qplib_free_nq(&rdev->nq[i]);
1205 bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
1206 &rdev->dpi_privileged);
1208 bnxt_qplib_free_res(&rdev->qplib_res);
1211 rdev->qplib_res.rcfw = NULL;
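The failure path above (free the rings already allocated, release the privileged DPI, then the resource table) is the kernel's goto-unwind idiom: each error jumps to the label that undoes everything allocated so far, and the loop unwinds only indices below the failing one. A generic, runnable sketch of the pattern (names are not the driver's):

    /* Minimal sketch of goto-based unwind for a partially failed
     * allocation loop, mirroring the structure of the error path above. */
    #include <stdlib.h>

    #define N 4

    static int alloc_all(void **base, void *items[N])
    {
        int i;

        *base = malloc(64);
        if (!*base)
            return -1;
        for (i = 0; i < N; i++) {
            items[i] = malloc(32);
            if (!items[i])
                goto free_items; /* unwind only what the loop got */
        }
        return 0;

    free_items:
        while (--i >= 0)
            free(items[i]);
        free(*base);
        *base = NULL;
        return -1;
    }

    int main(void)
    {
        void *base = NULL, *items[N];

        if (alloc_all(&base, items))
            return 1;
        for (int i = 0; i < N; i++)
            free(items[i]);
        free(base);
        return 0;
    }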
1234 static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
1238 (qp == rdev->gsi_ctx.gsi_sqp);
1241 static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
1248 mutex_lock(&rdev->qp_lock);
1249 list_for_each_entry(qp, &rdev->qp_list, list) {
1251 if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
1256 bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
1263 mutex_unlock(&rdev->qp_lock);
1266 static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
1268 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
1273 if (!ib_device_try_get(&rdev->ibdev))
1291 rdev->qplib_res.netdev->dev_addr);
1294 ib_device_put(&rdev->ibdev);
1298 static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
1304 netdev = rdev->netdev;
1319 static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
1324 prio_map = bnxt_re_get_priority_mask(rdev);
1326 if (prio_map == rdev->cur_prio_map)
1328 rdev->cur_prio_map = prio_map;
1332 if ((prio_map == 0 && rdev->qplib_res.prio) ||
1333 (prio_map != 0 && !rdev->qplib_res.prio)) {
1334 rdev->qplib_res.prio = prio_map;
1335 bnxt_re_update_gid(rdev);
1341 static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
1343 struct bnxt_en_dev *en_dev = rdev->en_dev;
1358 ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
1363 cctx = rdev->chip_ctx;
1376 static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
1382 rc = bnxt_re_register_ib(rdev);
1387 dev_info(rdev_to_dev(rdev), "Device registered with IB successfully");
1388 set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
1390 event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ?
1393 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, event);
1398 static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
1403 if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
1404 cancel_delayed_work_sync(&rdev->worker);
1407 &rdev->flags))
1408 bnxt_re_cleanup_res(rdev);
1409 if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
1410 bnxt_re_free_res(rdev);
1412 if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
1413 rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
1415 ibdev_warn(&rdev->ibdev,
1417 bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
1418 bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
1419 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
1420 type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
1421 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
1422 bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
1425 rdev->num_msix = 0;
1427 if (rdev->pacing.dbr_pacing)
1428 bnxt_re_deinitialize_dbr_pacing(rdev);
1430 bnxt_re_destroy_chip_ctx(rdev);
1431 if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
1432 bnxt_unregister_dev(rdev->en_dev);
1438 struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
1441 bnxt_re_setup_qos(rdev);
1442 schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
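bnxt_re_worker() above re-arms itself with schedule_delayed_work() every 30 seconds after re-running QoS setup, so at most one instance is ever pending. A userspace stand-in for that self-rearming loop (the interval and tick bound are illustrative):

    /* Hypothetical model of a self-rearming periodic worker; the driver
     * uses a delayed workqueue item instead of a sleep loop. */
    #include <stdio.h>
    #include <unistd.h>

    static void do_periodic_job(int tick)
    {
        printf("qos re-check, tick %d\n", tick); /* stands in for bnxt_re_setup_qos() */
    }

    int main(void)
    {
        for (int tick = 0; tick < 3; tick++) { /* bounded for the example */
            do_periodic_job(tick);
            sleep(1); /* driver re-arms with msecs_to_jiffies(30000) */
        }
        return 0;
    }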
1445 static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
1455 rc = bnxt_re_register_netdev(rdev);
1457 ibdev_err(&rdev->ibdev,
1461 set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
1463 rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
1465 bnxt_unregister_dev(rdev->en_dev);
1466 clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
1467 ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
1472 bnxt_re_get_sriov_func_type(rdev);
1474 if (!rdev->en_dev->ulp_tbl->msix_requested) {
1475 ibdev_err(&rdev->ibdev,
1480 ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
1481 rdev->en_dev->ulp_tbl->msix_requested);
1482 rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested;
1484 bnxt_re_query_hwrm_intf_version(rdev);
1489 rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
1490 &rdev->qplib_ctx,
1493 ibdev_err(&rdev->ibdev,
1498 type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
1499 creq = &rdev->rcfw.creq;
1505 rattr.lrid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
1506 rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
1508 ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
1511 db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
1512 vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector;
1513 rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
1517 ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
1522 if (bnxt_qplib_dbr_pacing_en(rdev->chip_ctx)) {
1523 rc = bnxt_re_initialize_dbr_pacing(rdev);
1525 rdev->pacing.dbr_pacing = true;
1527 ibdev_err(&rdev->ibdev,
1529 rdev->pacing.dbr_pacing = false;
1532 rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
1536 bnxt_re_set_resource_limits(rdev);
1538 rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
1539 bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx));
1541 ibdev_err(&rdev->ibdev,
1545 rc = bnxt_re_net_stats_ctx_alloc(rdev,
1546 rdev->qplib_ctx.stats.dma_map,
1547 &rdev->qplib_ctx.stats.fw_id);
1549 ibdev_err(&rdev->ibdev,
1554 rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
1555 rdev->is_virtfn);
1557 ibdev_err(&rdev->ibdev,
1561 set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);
1564 rc = bnxt_re_alloc_res(rdev);
1566 ibdev_err(&rdev->ibdev,
1570 set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
1571 rc = bnxt_re_init_res(rdev);
1573 ibdev_err(&rdev->ibdev,
1578 set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);
1580 if (!rdev->is_virtfn) {
1581 rc = bnxt_re_setup_qos(rdev);
1583 ibdev_info(&rdev->ibdev,
1586 INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
1587 set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
1588 schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
1593 bnxt_re_vf_res_config(rdev);
1598 bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
1600 bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
1602 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
1604 type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
1605 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
1607 bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
1609 bnxt_re_dev_uninit(rdev);
1619 struct bnxt_re_dev *rdev;
1625 rdev = bnxt_re_dev_add(aux_priv, en_dev);
1626 if (!rdev || !rdev_to_dev(rdev)) {
1631 rc = bnxt_re_dev_init(rdev, wqe_mode);
1635 rc = bnxt_re_ib_init(rdev);
1641 auxiliary_set_drvdata(adev, rdev);
1646 bnxt_re_dev_uninit(rdev);
1648 ib_dealloc_device(&rdev->ibdev);
1653 static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
1658 if (rdev->is_virtfn)
1662 if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
1674 if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param))
1675 ibdev_err(&rdev->ibdev, "Failed to setup CC enable = %d\n", enable);
1696 struct bnxt_re_dev *rdev;
1705 rdev = bnxt_re_from_netdev(real_dev);
1706 if (!rdev)
1714 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
1722 ib_device_put(&rdev->ibdev);
1731 struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
1733 if (!rdev)
1737 if (rdev->nb.notifier_call) {
1738 unregister_netdevice_notifier(&rdev->nb);
1739 rdev->nb.notifier_call = NULL;
1746 bnxt_re_setup_cc(rdev, false);
1747 ib_unregister_device(&rdev->ibdev);
1748 bnxt_re_dev_uninit(rdev);
1749 ib_dealloc_device(&rdev->ibdev);
1757 struct bnxt_re_dev *rdev;
1767 rdev = auxiliary_get_drvdata(adev);
1769 rdev->nb.notifier_call = bnxt_re_netdev_event;
1770 rc = register_netdevice_notifier(&rdev->nb);
1772 rdev->nb.notifier_call = NULL;
1778 bnxt_re_setup_cc(rdev, true);
1791 struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
1793 if (!rdev)
1804 ibdev_info(&rdev->ibdev, "Handle device suspend call");
1810 if (test_bit(BNXT_STATE_FW_FATAL_COND, &rdev->en_dev->en_state))
1811 set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
1813 bnxt_re_dev_stop(rdev);
1814 bnxt_re_stop_irq(rdev);
1818 set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
1819 set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
1820 wake_up_all(&rdev->rcfw.cmdq.waitq);
1828 struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
1830 if (!rdev)
1841 ibdev_info(&rdev->ibdev, "Handle device resume call");