Lines matching "rdev" (search query defs:rdev) in drivers/infiniband/hw/bnxt_re/main.c, the Broadcom NetXtreme-E RoCE driver. Each line below is prefixed with its line number in that file.
81 static void bnxt_re_remove_device(struct bnxt_re_dev *rdev);
85 static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
89 cctx = rdev->chip_ctx;
90 cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
94 static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
98 if (!rdev->chip_ctx)
100 chip_ctx = rdev->chip_ctx;
101 rdev->chip_ctx = NULL;
102 rdev->rcfw.res = NULL;
103 rdev->qplib_res.cctx = NULL;
104 rdev->qplib_res.pdev = NULL;
105 rdev->qplib_res.netdev = NULL;
109 static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
115 en_dev = rdev->en_dev;
124 rdev->chip_ctx = chip_ctx;
127 rdev->qplib_res.cctx = rdev->chip_ctx;
128 rdev->rcfw.res = &rdev->qplib_res;
130 bnxt_re_set_drv_mode(rdev, wqe_mode);
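The two functions above are a matched pair: bnxt_re_setup_chip_ctx() (line 109) allocates the chip context and wires back-pointers into the qplib and rcfw layers, while bnxt_re_destroy_chip_ctx() (line 94) clears each of those pointers before dropping the context, so no stale reference survives. A minimal user-space model of that pairing, with illustrative stand-in types rather than the driver's real ones:

#include <stdlib.h>

struct chip_ctx { int chip_num; };
struct qplib_res { struct chip_ctx *cctx; };
struct rcfw { struct qplib_res *res; };

struct re_dev {
	struct chip_ctx *chip_ctx;
	struct qplib_res qplib_res;
	struct rcfw rcfw;
};

static int setup_chip_ctx(struct re_dev *rdev)
{
	struct chip_ctx *cctx = calloc(1, sizeof(*cctx));

	if (!cctx)
		return -1;
	rdev->chip_ctx = cctx;
	/* Wire the back-pointers the rest of the stack dereferences. */
	rdev->qplib_res.cctx = rdev->chip_ctx;
	rdev->rcfw.res = &rdev->qplib_res;
	return 0;
}

static void destroy_chip_ctx(struct re_dev *rdev)
{
	if (!rdev->chip_ctx)
		return;
	/* Clear every pointer wired in setup, then drop the context. */
	rdev->rcfw.res = NULL;
	rdev->qplib_res.cctx = NULL;
	free(rdev->chip_ctx);
	rdev->chip_ctx = NULL;
}

int main(void)
{
	struct re_dev rdev = { 0 };

	if (setup_chip_ctx(&rdev))
		return 1;
	destroy_chip_ctx(&rdev);
	return 0;
}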
136 static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
140 bp = netdev_priv(rdev->en_dev->net);
142 rdev->is_virtfn = 1;
150 static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
156 attr = &rdev->dev_attr;
157 ctx = &rdev->qplib_ctx;
167 if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
169 rdev->qplib_ctx.tqm_ctx.qcount[i] =
170 rdev->dev_attr.tqm_alloc_reqs[i];
209 static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
213 memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
214 bnxt_re_limit_pf_res(rdev);
216 num_vfs = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
217 BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
219 bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
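Lines 209-219 zero the VF budget, reserve the PF's share, then size the remainder for num_vfs functions (capped at BNXT_RE_GEN_P5_MAX_VF on Gen P5 chips). The split below is only a plausible equal-share scheme; the actual bnxt_re_limit_vf_res() policy is not visible in this listing, and all names here are stand-ins:

struct vf_res { unsigned int max_qp, max_mrw, max_cq; };

static void limit_vf_res(struct vf_res *vf, unsigned int free_qp,
			 unsigned int free_mrw, unsigned int free_cq,
			 unsigned int num_vfs)
{
	if (!num_vfs)
		return;
	vf->max_qp  = free_qp / num_vfs;    /* equal share per VF */
	vf->max_mrw = free_mrw / num_vfs;
	vf->max_cq  = free_cq / num_vfs;
}

int main(void)
{
	struct vf_res vf = { 0 };

	limit_vf_res(&vf, 64000, 256000, 32000, 64);
	return vf.max_qp == 1000 ? 0 : 1;
}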
233 struct bnxt_re_dev *rdev = p;
235 if (!rdev)
238 rdev->num_vfs = num_vfs;
239 if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
240 bnxt_re_set_resource_limits(rdev);
241 bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
242 &rdev->qplib_ctx);
248 struct bnxt_re_dev *rdev = p;
250 if (!rdev)
254 bnxt_re_stop_irq(rdev);
255 ib_unregister_device_queued(&rdev->ibdev);
260 struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
261 struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
265 for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
266 nq = &rdev->nq[indx - 1];
275 struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
276 struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
277 struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
287 ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n");
294 for (indx = 0; indx < rdev->num_msix; indx++)
295 rdev->msix_entries[indx].vector = ent[indx].vector;
300 ibdev_warn(&rdev->ibdev, "Failed to reinit CREQ\n");
303 for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) {
304 nq = &rdev->nq[indx - 1];
308 ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
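The stop/start pair around lines 248-308 quiesces the notification queues, adopts the fresh MSI-X vector table handed down by the L2 driver after a reset, then re-arms the CREQ before each NQ. A plain-C stand-in for that revector-then-restart sequence (hypothetical types, not the driver's):

#include <stdio.h>

#define NUM_QUEUES 4

struct queue { int vector; int live; };

static void stop_irqs(struct queue *q, int n)
{
	for (int i = 0; i < n; i++)
		q[i].live = 0;                 /* quiesce everything first */
}

static void start_irqs(struct queue *q, int n, const int *fresh)
{
	for (int i = 0; i < n; i++)
		q[i].vector = fresh[i];        /* adopt the new vector table */
	q[0].live = 1;                         /* CREQ comes back first... */
	for (int i = 1; i < n; i++)
		q[i].live = 1;                 /* ...then each NQ */
}

int main(void)
{
	struct queue q[NUM_QUEUES] = { 0 };
	int fresh[NUM_QUEUES] = { 32, 33, 34, 35 };

	stop_irqs(q, NUM_QUEUES);              /* reset begins */
	start_irqs(q, NUM_QUEUES, fresh);      /* reset done, revector */
	printf("creq vector=%d, nq0 vector=%d\n", q[0].vector, q[1].vector);
	return 0;
}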
330 static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
335 if (!rdev)
338 en_dev = rdev->en_dev;
340 rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
345 static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
350 if (!rdev)
353 en_dev = rdev->en_dev;
356 &bnxt_re_ulp_ops, rdev);
357 rdev->qplib_res.pdev = rdev->en_dev->pdev;
361 static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
366 if (!rdev)
369 en_dev = rdev->en_dev;
372 rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);
377 static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
382 if (!rdev)
385 en_dev = rdev->en_dev;
390 rdev->msix_entries,
397 ibdev_warn(&rdev->ibdev,
401 rdev->num_msix = num_msix_got;
406 static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
425 static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
428 struct bnxt_en_dev *en_dev = rdev->en_dev;
439 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
446 ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
451 static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
455 struct bnxt_en_dev *en_dev = rdev->en_dev;
465 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
488 static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
491 struct bnxt_en_dev *en_dev = rdev->en_dev;
501 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
507 ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
513 static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
517 struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
520 struct bnxt_en_dev *en_dev = rdev->en_dev;
531 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
616 struct bnxt_re_dev *rdev =
619 return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
626 struct bnxt_re_dev *rdev =
629 return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
695 static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
697 struct ib_device *ibdev = &rdev->ibdev;
706 bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid);
708 ibdev->num_comp_vectors = rdev->num_msix - 1;
709 ibdev->dev.parent = &rdev->en_dev->pdev->dev;
743 ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
747 dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX);
748 return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
751 static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
753 dev_put(rdev->netdev);
754 rdev->netdev = NULL;
756 list_del_rcu(&rdev->list);
765 struct bnxt_re_dev *rdev;
768 rdev = ib_alloc_device(bnxt_re_dev, ibdev);
769 if (!rdev) {
775 rdev->netdev = netdev;
776 dev_hold(rdev->netdev);
777 rdev->en_dev = en_dev;
778 rdev->id = rdev->en_dev->pdev->devfn;
779 INIT_LIST_HEAD(&rdev->qp_list);
780 mutex_init(&rdev->qp_lock);
781 atomic_set(&rdev->qp_count, 0);
782 atomic_set(&rdev->cq_count, 0);
783 atomic_set(&rdev->srq_count, 0);
784 atomic_set(&rdev->mr_count, 0);
785 atomic_set(&rdev->mw_count, 0);
786 rdev->cosq[0] = 0xFFFF;
787 rdev->cosq[1] = 0xFFFF;
790 list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
792 return rdev;
842 event.device = &qp->rdev->ibdev;
906 ib_event.device = &srq->rdev->ibdev;
943 static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
945 return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
946 (rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
948 rdev->msix_entries[indx].db_offset;
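The nested ternary at lines 945-948 folds three outcomes into one expression. Unrolled into if/else it reads more plainly; the sketch below uses stand-in types and values, and the PF doorbell constant is assumed since that branch does not appear in the listing:

#include <stdint.h>

#define GEN_P5_VF_NQ_DB 0x4000u        /* stand-in value */
#define GEN_P5_PF_NQ_DB 0x10000u       /* assumed PF constant, stand-in */

struct msix_entry { uint32_t db_offset; };

struct re_dev {
	int is_gen_p5;
	int is_virtfn;
	struct msix_entry msix_entries[8];
};

static uint32_t get_nqdb_offset(const struct re_dev *rdev, uint16_t indx)
{
	if (!rdev->is_gen_p5)
		return rdev->msix_entries[indx].db_offset;  /* legacy path */
	/* Gen P5: fixed doorbell offsets, VF and PF differ. */
	return rdev->is_virtfn ? GEN_P5_VF_NQ_DB : GEN_P5_PF_NQ_DB;
}

int main(void)
{
	struct re_dev rdev = { .is_gen_p5 = 1, .is_virtfn = 0 };

	return get_nqdb_offset(&rdev, 0) == GEN_P5_PF_NQ_DB ? 0 : 1;
}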
951 static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
955 for (i = 1; i < rdev->num_msix; i++)
956 bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
958 if (rdev->qplib_res.rcfw)
959 bnxt_qplib_cleanup_res(&rdev->qplib_res);
962 static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
968 bnxt_qplib_init_res(&rdev->qplib_res);
970 for (i = 1; i < rdev->num_msix ; i++) {
971 db_offt = bnxt_re_get_nqdb_offset(rdev, i);
972 rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
973 i - 1, rdev->msix_entries[i].vector,
977 ibdev_err(&rdev->ibdev,
986 bnxt_qplib_disable_nq(&rdev->nq[i]);
990 static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
995 for (i = 0; i < rdev->num_msix - 1; i++) {
996 type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
997 bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
998 bnxt_qplib_free_nq(&rdev->nq[i]);
999 rdev->nq[i].res = NULL;
1003 static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
1005 bnxt_re_free_nq_res(rdev);
1007 if (rdev->qplib_res.dpi_tbl.max) {
1008 bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
1009 &rdev->qplib_res.dpi_tbl,
1010 &rdev->dpi_privileged);
1012 if (rdev->qplib_res.rcfw) {
1013 bnxt_qplib_free_res(&rdev->qplib_res);
1014 rdev->qplib_res.rcfw = NULL;
1018 static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
1026 rdev->qplib_res.rcfw = &rdev->rcfw;
1027 rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
1028 rdev->is_virtfn);
1032 rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
1033 rdev->netdev, &rdev->dev_attr);
1037 rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
1038 &rdev->dpi_privileged,
1039 rdev);
1043 for (i = 0; i < rdev->num_msix - 1; i++) {
1046 nq = &rdev->nq[i];
1048 rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
1050 ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
1054 type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
1056 rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
1060 rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
1061 rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
1063 ibdev_err(&rdev->ibdev,
1066 bnxt_qplib_free_nq(&rdev->nq[i]);
1074 type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
1075 bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
1076 bnxt_qplib_free_nq(&rdev->nq[i]);
1078 bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
1079 &rdev->qplib_res.dpi_tbl,
1080 &rdev->dpi_privileged);
1082 bnxt_qplib_free_res(&rdev->qplib_res);
1085 rdev->qplib_res.rcfw = NULL;
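The unwind tail at lines 1074-1085 follows the kernel's usual reverse-order error handling: every allocation that succeeded is released, last first, once a later step fails. A compact user-space model of the idiom:

#include <stdlib.h>

struct res { void *a, *b, *c; };

static int alloc_all(struct res *r)
{
	r->a = malloc(64);
	if (!r->a)
		goto fail;
	r->b = malloc(64);
	if (!r->b)
		goto free_a;
	r->c = malloc(64);
	if (!r->c)
		goto free_b;
	return 0;

free_b:
	free(r->b);
	r->b = NULL;
free_a:
	free(r->a);
	r->a = NULL;
fail:
	return -1;
}

int main(void)
{
	struct res r = { 0 };

	if (alloc_all(&r))
		return 1;
	free(r.c);
	free(r.b);
	free(r.a);
	return 0;
}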
1109 static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
1113 struct bnxt *bp = netdev_priv(rdev->netdev);
1115 struct bnxt_en_dev *en_dev = rdev->en_dev;
1125 bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
1139 ibdev_warn(&rdev->ibdev,
1141 ibdev_warn(&rdev->ibdev,
1152 static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
1156 (qp == rdev->gsi_ctx.gsi_sqp);
1159 static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
1166 mutex_lock(&rdev->qp_lock);
1167 list_for_each_entry(qp, &rdev->qp_list, list) {
1169 if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
1174 bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
1181 mutex_unlock(&rdev->qp_lock);
1184 static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
1186 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
1191 if (!ib_device_try_get(&rdev->ibdev))
1209 rdev->qplib_res.netdev->dev_addr);
1212 ib_device_put(&rdev->ibdev);
1216 static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
1222 netdev = rdev->netdev;
1253 static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
1260 prio_map = bnxt_re_get_priority_mask(rdev);
1262 if (prio_map == rdev->cur_prio_map)
1264 rdev->cur_prio_map = prio_map;
1266 rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
1268 ibdev_warn(&rdev->ibdev, "no cos for p_mask %x\n", prio_map);
1272 bnxt_re_parse_cid_map(prio_map, (u8 *)&cid_map, rdev->cosq);
1275 rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
1277 ibdev_warn(&rdev->ibdev, "no tc for cos{%x, %x}\n",
1278 rdev->cosq[0], rdev->cosq[1]);
1285 if ((prio_map == 0 && rdev->qplib_res.prio) ||
1286 (prio_map != 0 && !rdev->qplib_res.prio)) {
1287 rdev->qplib_res.prio = prio_map ? true : false;
1289 bnxt_re_update_gid(rdev);
1295 static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
1297 struct bnxt_en_dev *en_dev = rdev->en_dev;
1304 bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
1313 ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
1317 rdev->qplib_ctx.hwrm_intf_ver =
1324 static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
1330 rc = bnxt_re_register_ib(rdev);
1335 dev_info(rdev_to_dev(rdev), "Device registered successfully");
1336 ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
1337 &rdev->active_width);
1338 set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
1340 event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ?
1343 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, event);
1348 static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
1353 if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
1354 cancel_delayed_work_sync(&rdev->worker);
1357 &rdev->flags))
1358 bnxt_re_cleanup_res(rdev);
1359 if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
1360 bnxt_re_free_res(rdev);
1362 if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
1363 rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
1365 ibdev_warn(&rdev->ibdev,
1367 bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
1368 bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
1369 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
1370 type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
1371 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
1372 bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
1374 if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
1375 rc = bnxt_re_free_msix(rdev);
1377 ibdev_warn(&rdev->ibdev,
1381 bnxt_re_destroy_chip_ctx(rdev);
1382 if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
1383 rc = bnxt_re_unregister_netdev(rdev);
1385 ibdev_warn(&rdev->ibdev,
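bnxt_re_dev_uninit() (lines 1348-1385) mirrors dev_init stage by stage: each teardown step runs only if its BNXT_RE_FLAG_* bit is still set, and test_and_clear_bit() clears it as it goes, so a device that failed partway through init unwinds exactly the stages it completed. A minimal model with a plain flag word instead of atomic bitops:

#include <stdio.h>

enum {
	FLAG_NETDEV_REGISTERED = 1 << 0,
	FLAG_GOT_MSIX          = 1 << 1,
	FLAG_RCFW_CHANNEL_EN   = 1 << 2,
	FLAG_RES_ALLOCATED     = 1 << 3,
};

static int test_and_clear(unsigned long *flags, unsigned long bit)
{
	int was_set = !!(*flags & bit);

	*flags &= ~bit;
	return was_set;
}

static void dev_uninit(unsigned long *flags)
{
	/* Strict reverse order of init; skip stages never reached. */
	if (test_and_clear(flags, FLAG_RES_ALLOCATED))
		puts("free resources");
	if (test_and_clear(flags, FLAG_RCFW_CHANNEL_EN))
		puts("disable rcfw channel");
	if (test_and_clear(flags, FLAG_GOT_MSIX))
		puts("free msix");
	if (test_and_clear(flags, FLAG_NETDEV_REGISTERED))
		puts("unregister netdev");
}

int main(void)
{
	unsigned long flags = FLAG_NETDEV_REGISTERED | FLAG_GOT_MSIX;

	dev_uninit(&flags);  /* init failed after MSI-X: two stages undone */
	return 0;
}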
1393 struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
1396 bnxt_re_setup_qos(rdev);
1397 schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
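bnxt_re_worker (lines 1393-1397) re-queues itself every 30 seconds to poll the QoS state; dev_init seeds the first run (lines 1530-1532) and dev_uninit cancels it (lines 1353-1354). A single-threaded model of that self-rearming pattern, with the period shortened for the demo:

#include <stdio.h>
#include <unistd.h>

static void setup_qos(void)
{
	puts("re-check priority->cos mapping");
}

static void worker_loop(unsigned int period_s, int iterations)
{
	while (iterations--) {
		setup_qos();        /* body of the work item */
		sleep(period_s);    /* models schedule_delayed_work(30 s) */
	}
}

int main(void)
{
	worker_loop(1, 3);          /* 1 s period instead of 30 s */
	return 0;
}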
1400 static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
1411 rc = bnxt_re_register_netdev(rdev);
1413 ibdev_err(&rdev->ibdev,
1417 set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
1419 rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
1421 ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
1426 bnxt_re_get_sriov_func_type(rdev);
1428 rc = bnxt_re_request_msix(rdev);
1430 ibdev_err(&rdev->ibdev,
1435 set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);
1437 bnxt_re_query_hwrm_intf_version(rdev);
1442 rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
1443 &rdev->qplib_ctx,
1446 ibdev_err(&rdev->ibdev,
1451 type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
1452 creq = &rdev->rcfw.creq;
1458 rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
1459 rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
1461 ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
1464 db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
1465 vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
1466 rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
1467 vid, db_offt, rdev->is_virtfn,
1470 ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
1475 rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
1476 rdev->is_virtfn);
1480 bnxt_re_set_resource_limits(rdev);
1482 rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
1483 bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx));
1485 ibdev_err(&rdev->ibdev,
1489 rc = bnxt_re_net_stats_ctx_alloc(rdev,
1490 rdev->qplib_ctx.stats.dma_map,
1491 &rdev->qplib_ctx.stats.fw_id);
1493 ibdev_err(&rdev->ibdev,
1498 rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
1499 rdev->is_virtfn);
1501 ibdev_err(&rdev->ibdev,
1505 set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);
1508 rc = bnxt_re_alloc_res(rdev);
1510 ibdev_err(&rdev->ibdev,
1514 set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
1515 rc = bnxt_re_init_res(rdev);
1517 ibdev_err(&rdev->ibdev,
1522 set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);
1524 if (!rdev->is_virtfn) {
1525 rc = bnxt_re_setup_qos(rdev);
1527 ibdev_info(&rdev->ibdev,
1530 INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
1531 set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
1532 schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
1537 bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
1539 bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
1541 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
1543 type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
1544 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
1546 bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
1548 bnxt_re_dev_uninit(rdev);
1553 static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
1555 struct bnxt_en_dev *en_dev = rdev->en_dev;
1556 struct net_device *netdev = rdev->netdev;
1558 bnxt_re_dev_remove(rdev);
1564 static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
1575 ibdev_err(&(*rdev)->ibdev, "%s: Failed to probe\n",
1580 *rdev = bnxt_re_dev_add(netdev, en_dev);
1581 if (!*rdev) {
1590 static void bnxt_re_remove_device(struct bnxt_re_dev *rdev)
1592 bnxt_re_dev_uninit(rdev);
1593 pci_dev_put(rdev->en_dev->pdev);
1594 bnxt_re_dev_unreg(rdev);
1597 static int bnxt_re_add_device(struct bnxt_re_dev **rdev,
1602 rc = bnxt_re_dev_reg(rdev, netdev);
1611 pci_dev_get((*rdev)->en_dev->pdev);
1612 rc = bnxt_re_dev_init(*rdev, wqe_mode);
1614 pci_dev_put((*rdev)->en_dev->pdev);
1615 bnxt_re_dev_unreg(*rdev);
1623 struct bnxt_re_dev *rdev =
1626 dev_info(rdev_to_dev(rdev), "Unregistering Device");
1629 bnxt_re_remove_device(rdev);
1637 struct bnxt_re_dev *rdev;
1641 rdev = re_work->rdev;
1644 rc = bnxt_re_ib_init(rdev);
1646 ibdev_err(&rdev->ibdev,
1649 bnxt_re_remove_device(rdev);
1656 if (!ib_device_try_get(&rdev->ibdev))
1661 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
1665 bnxt_re_dev_stop(rdev);
1668 if (!netif_carrier_ok(rdev->netdev))
1669 bnxt_re_dev_stop(rdev);
1670 else if (netif_carrier_ok(rdev->netdev))
1671 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
1673 ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
1674 &rdev->active_width);
1679 ib_device_put(&rdev->ibdev);
1681 put_device(&rdev->ibdev.dev);
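bnxt_re_task holds a device reference across its whole run: the netdev notifier takes get_device() before queuing the work (line 1743) and the task drops it on exit (line 1681), so the ibdev cannot disappear while deferred work is in flight. A small refcount model of that handoff, with stand-in helpers:

#include <stdio.h>

struct dev { int refs; };

static void get_dev(struct dev *d)
{
	d->refs++;
}

static void put_dev(struct dev *d)
{
	if (--d->refs == 0)
		puts("last ref dropped: device may now be freed");
}

static void task(struct dev *d)
{
	puts("async work runs while a reference is held");
	put_dev(d);    /* matches the get taken at queue time */
}

int main(void)
{
	struct dev d = { .refs = 1 };

	get_dev(&d);   /* taken before queuing the work */
	task(&d);      /* work executes later, then drops its ref */
	put_dev(&d);   /* original owner's reference */
	return 0;
}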
1704 struct bnxt_re_dev *rdev;
1713 rdev = bnxt_re_from_netdev(real_dev);
1714 if (!rdev && event != NETDEV_REGISTER)
1722 if (rdev)
1724 rc = bnxt_re_add_device(&rdev, real_dev,
1732 ib_unregister_device_queued(&rdev->ibdev);
1743 get_device(&rdev->ibdev.dev);
1744 re_work->rdev = rdev;
1754 if (rdev && release)
1755 ib_device_put(&rdev->ibdev);
1791 struct bnxt_re_dev *rdev;
1796 list_for_each_entry(rdev, &bnxt_re_dev_list, list) {
1802 if (rdev->is_virtfn)
1803 ib_unregister_device(&rdev->ibdev);