Lines matching defs:ibdev
89 dev = mpi->ibdev;
116 static int get_port_state(struct ib_device *ibdev,
124 ret = ibdev->ops.query_port(ibdev, port_num, &attr);
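get_port_state() above reads the port attributes straight through the device's ops table. A minimal sketch of the whole helper, reconstructed around the two matched lines (the body between them is assumed from mainline):

    static int get_port_state(struct ib_device *ibdev,
                              u8 port_num,
                              enum ib_port_state *state)
    {
            struct ib_port_attr attr;
            int ret;

            /* Let the driver's own query_port op fill the attributes */
            memset(&attr, 0, sizeof(attr));
            ret = ibdev->ops.query_port(ibdev, port_num, &attr);
            if (!ret)
                    *state = attr.state;
            return ret;
    }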
165 struct mlx5_ib_dev *ibdev;
167 ibdev = roce->dev;
168 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
175 if (ibdev->is_rep)
202 if (ibdev->is_rep)
203 roce = mlx5_get_rep_roce(ibdev, ndev, &port_num);
207 && ibdev->ib_active) {
211 if (get_port_state(&ibdev->ib_dev, port_num,
219 ibev.device = &ibdev->ib_dev;
237 mlx5_ib_put_native_port_mdev(ibdev, port_num);
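The matches at 207-237 are the NETDEV_CHANGE arm of the RoCE netdev notifier. A hedged sketch of the elided middle, assuming mainline behavior: the queried port state is translated into an IB port event and dispatched.

        if (get_port_state(&ibdev->ib_dev, port_num, &port_state))
                goto done;
        if (roce->last_port_state == port_state)
                goto done;
        roce->last_port_state = port_state;

        /* Map the link-layer state onto the matching IB event */
        ibev.device = &ibdev->ib_dev;
        if (port_state == IB_PORT_DOWN)
                ibev.event = IB_EVENT_PORT_ERR;
        else if (port_state == IB_PORT_ACTIVE)
                ibev.event = IB_EVENT_PORT_ACTIVE;
        else
                goto done;

        ibev.element.port_num = port_num;
        ib_dispatch_event(&ibev);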
244 struct mlx5_ib_dev *ibdev = to_mdev(device);
248 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
258 read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
259 ndev = ibdev->port[port_num - 1].roce.netdev;
262 read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
265 mlx5_ib_put_native_port_mdev(ibdev, port_num);
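mlx5_ib_get_netdev() (lines 244-265) holds roce.netdev_lock only long enough to take a reference. A sketch of the critical section, with the dev_hold() that the matches elide assumed from mainline:

        read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
        ndev = ibdev->port[port_num - 1].roce.netdev;
        if (ndev)
                dev_hold(ndev); /* caller must dev_put() when done */
        read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);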
269 struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
273 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
279 if (!mlx5_core_mp_enabled(ibdev->mdev) ||
283 return ibdev->mdev;
289 port = &ibdev->port[ib_port_num - 1];
291 mpi = ibdev->port[ib_port_num - 1].mp.mpi;
305 void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
307 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
312 if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
315 port = &ibdev->port[port_num - 1];
318 mpi = ibdev->port[port_num - 1].mp.mpi;
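The get/put pair above borrows the mlx5_core_dev that owns a given IB port (the native port in a multiport configuration). A usage sketch; do_something_with() is a hypothetical placeholder, not a kernel symbol:

    static int example_use_native_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
    {
            struct mlx5_core_dev *mdev;
            int err;

            mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
            if (!mdev)
                    return -ENODEV;

            err = do_something_with(mdev);  /* hypothetical */

            /* Every successful get must be paired with a put */
            mlx5_ib_put_native_port_mdev(ibdev, port_num);
            return err;
    }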
637 static int mlx5_get_vport_access_method(struct ib_device *ibdev)
639 if (mlx5_use_mad_ifc(to_mdev(ibdev)))
642 if (mlx5_ib_port_link_layer(ibdev, 1) ==
679 static int mlx5_query_system_image_guid(struct ib_device *ibdev,
682 struct mlx5_ib_dev *dev = to_mdev(ibdev);
687 switch (mlx5_get_vport_access_method(ibdev)) {
689 return mlx5_query_mad_ifc_system_image_guid(ibdev,
711 static int mlx5_query_max_pkeys(struct ib_device *ibdev,
714 struct mlx5_ib_dev *dev = to_mdev(ibdev);
717 switch (mlx5_get_vport_access_method(ibdev)) {
719 return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
732 static int mlx5_query_vendor_id(struct ib_device *ibdev,
735 struct mlx5_ib_dev *dev = to_mdev(ibdev);
737 switch (mlx5_get_vport_access_method(ibdev)) {
739 return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
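mlx5_query_system_image_guid(), mlx5_query_max_pkeys() and mlx5_query_vendor_id() all share one dispatch shape: pick the MAD, HCA-vport, or NIC-vport path from mlx5_get_vport_access_method(). A sketch of the first one, reconstructed around the matched lines with mainline enum and helper names assumed:

    static int mlx5_query_system_image_guid(struct ib_device *ibdev,
                                            __be64 *sys_image_guid)
    {
            struct mlx5_ib_dev *dev = to_mdev(ibdev);
            u64 tmp;
            int err;

            switch (mlx5_get_vport_access_method(ibdev)) {
            case MLX5_VPORT_ACCESS_METHOD_MAD:
                    /* IB link layer: go through the MAD interface */
                    return mlx5_query_mad_ifc_system_image_guid(ibdev,
                                                                sys_image_guid);
            case MLX5_VPORT_ACCESS_METHOD_HCA:
                    err = mlx5_query_hca_vport_system_image_guid(dev->mdev, &tmp);
                    break;
            case MLX5_VPORT_ACCESS_METHOD_NIC:
                    err = mlx5_query_nic_vport_system_image_guid(dev->mdev, &tmp);
                    break;
            default:
                    return -EINVAL;
            }

            if (!err)
                    *sys_image_guid = cpu_to_be64(tmp);
            return err;
    }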
796 static int mlx5_ib_query_device(struct ib_device *ibdev,
801 struct mlx5_ib_dev *dev = to_mdev(ibdev);
823 err = mlx5_query_system_image_guid(ibdev,
828 err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
832 err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
1026 if (mlx5_ib_port_link_layer(ibdev, 1) ==
1196 static void translate_active_width(struct ib_device *ibdev, u16 active_width,
1199 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1254 static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
1281 static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
1284 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1328 translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
1342 err = translate_max_vl_num(ibdev, vl_hw_cap,
1349 int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
1355 switch (mlx5_get_vport_access_method(ibdev)) {
1357 ret = mlx5_query_mad_ifc_port(ibdev, port, props);
1361 ret = mlx5_query_hca_port(ibdev, port, props);
1365 ret = mlx5_query_port_roce(ibdev, port, props);
1373 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1394 static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
1402 ret = mlx5_query_port_roce(ibdev, port, props);
1412 static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
1415 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1418 switch (mlx5_get_vport_access_method(ibdev)) {
1420 return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
1431 static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
1434 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1458 static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1461 switch (mlx5_get_vport_access_method(ibdev)) {
1463 return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
1467 return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
1473 static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
1476 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1497 memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1536 static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
1539 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1545 bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
1562 err = ib_query_port(ibdev, port, &attr);
1751 struct ib_device *ibdev = uctx->device;
1752 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1805 if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
1837 struct ib_device *ibdev = uctx->device;
1838 struct mlx5_ib_dev *dev = to_mdev(ibdev);
2456 struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
2471 mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
2474 err = check_dm_type_support(to_mdev(ibdev), type);
2552 struct ib_device *ibdev = ibpd->device;
2564 err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
2573 mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
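PD allocation at 2552-2573 is a single firmware command: the pdn comes back in the command output, and the dealloc on line 2573 is the unwind path taken if the later copy to user space fails. A condensed sketch assuming the mainline MLX5_SET/MLX5_GET accessors; DEVX uid and udata handling omitted:

    static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
    {
            struct mlx5_ib_pd *pd = to_mpd(ibpd);
            struct ib_device *ibdev = ibpd->device;
            u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
            u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
            int err;

            MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
            err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
            if (err)
                    return err;

            /* Firmware hands back the protection domain number */
            pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
            return 0;
    }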
2714 static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
2726 /* Go over the qp list residing on that ibdev, syncing with create/destroy qp. */
2727 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
2728 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
2770 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
2792 static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
2799 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
2801 schedule_work(&ibdev->delay_drop.delay_drop_work);
2808 static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
2822 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
2836 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
2857 struct mlx5_ib_dev *ibdev;
2862 ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
2863 if (!ibdev)
2866 ibdev = work->dev;
2872 mlx5_ib_handle_internal_error(ibdev);
2877 if (handle_port_change(ibdev, work->param, &ibev))
2881 handle_general_event(ibdev, work->param, &ibev);
2887 ibev.device = &ibdev->ib_dev;
2889 if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
2890 mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num);
2894 if (ibdev->ib_active)
2898 ibdev->ib_active = false;
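Lines 2887-2898 are the tail of the async-event work handler. The elided middle, assumed from mainline: dispatch only while the device is active, and clear ib_active once a fatal error has been reported.

        if (ibdev->ib_active)
                ib_dispatch_event(&ibev);

        /* A fatal event stops any further dispatching */
        if (fatal)
                ibdev->ib_active = false;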
3046 struct ib_device *ibdev;
3051 ibdev = &dev->ib_dev;
3058 devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd);
3062 devr->p0->device = ibdev;
3070 devr->c0 = rdma_zalloc_drv_obj(ibdev, ib_cq);
3097 devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq);
3119 devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq);
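The devr matches at 3046-3119 repeat one bootstrap pattern per built-in resource (p0, c0, s0, s1): rdma_zalloc_drv_obj() allocates the core object with room for the driver's private part, the core fields are wired up by hand, then the driver's own create path runs. A fragment sketch for the PD; error unwinding omitted and the label is hypothetical:

        devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd);
        if (!devr->p0)
                return -ENOMEM;

        devr->p0->device = ibdev;
        ret = mlx5_ib_alloc_pd(devr->p0, NULL);
        if (ret)
                goto error0;    /* hypothetical unwind label */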
3185 static u32 get_core_cap_flags(struct ib_device *ibdev,
3188 struct mlx5_ib_dev *dev = to_mdev(ibdev);
3189 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
3219 static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
3223 struct mlx5_ib_dev *dev = to_mdev(ibdev);
3224 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
3228 err = ib_query_port(ibdev, port_num, &attr);
3241 immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
3247 static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
3255 err = ib_query_port(ibdev, port_num, &attr);
3266 static void get_dev_fw_str(struct ib_device *ibdev, char *str)
3269 container_of(ibdev, struct mlx5_ib_dev, ib_dev);
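get_dev_fw_str() at 3266-3269 and the to_mdev() calls throughout rely on the same container_of() trick: ib_dev is embedded in mlx5_ib_dev, so the core's pointer can be mapped back to the driver struct. The helper as defined in mlx5_ib.h (reproduced from memory; verify against the tree):

    static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
    {
            /* ib_dev is a member of mlx5_ib_dev, so no allocation or
             * lookup is needed -- just pointer arithmetic. */
            return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
    }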
3416 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
3420 struct mlx5_ib_port *port = &ibdev->port[port_num];
3427 mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
3430 if (!mpi->ibdev) {
3435 mpi->ibdev = NULL;
3441 mlx5_remove_netdev_notifier(ibdev, port_num);
3463 mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
3468 mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
3471 ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
3474 static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
3482 spin_lock(&ibdev->port[port_num].mp.mpi_lock);
3483 if (ibdev->port[port_num].mp.mpi) {
3484 mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
3486 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
3490 ibdev->port[port_num].mp.mpi = mpi;
3491 mpi->ibdev = ibdev;
3493 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
3495 err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
3499 err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
3503 err = mlx5_add_netdev_notifier(ibdev, port_num);
3505 mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
3513 mlx5_ib_init_cong_debugfs(ibdev, port_num);
3518 mlx5_ib_unbind_slave_port(ibdev, mpi);
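mlx5_ib_bind_slave_port() (3474-3518) claims the port slot under mp.mpi_lock before touching firmware; any later failure falls through to mlx5_ib_unbind_slave_port() (3416) to roll back. The lock-protected claim, stitched from the matched lines:

        spin_lock(&ibdev->port[port_num].mp.mpi_lock);
        if (ibdev->port[port_num].mp.mpi) {
                mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
                            port_num + 1);
                spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
                return false;
        }

        /* Publish the affiliation before dropping the lock */
        ibdev->port[port_num].mp.mpi = mpi;
        mpi->ibdev = ibdev;
        spin_unlock(&ibdev->port[port_num].mp.mpi_lock);

        err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
        if (err)
                goto unbind;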
3560 mpi->ibdev = dev;
4774 mpi->ibdev->ib_active = true;
4846 if (mpi->ibdev)
4847 mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);