Lines Matching defs:ibdev
87 dev = mpi->ibdev;
114 static int get_port_state(struct ib_device *ibdev,
122 ret = ibdev->ops.query_port(ibdev, port_num, &attr);
171 struct mlx5_ib_dev *ibdev;
173 ibdev = roce->dev;
174 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
181 if (ibdev->is_rep)
208 if (ibdev->is_rep)
209 roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num);
213 ((!upper || ibdev->is_rep) && ndev == roce->netdev)) &&
214 ibdev->ib_active) {
218 if (get_port_state(&ibdev->ib_dev, port_num,
226 ibev.device = &ibdev->ib_dev;
244 mlx5_ib_put_native_port_mdev(ibdev, port_num);
251 struct mlx5_ib_dev *ibdev = to_mdev(device);
255 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
265 read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
266 ndev = ibdev->port[port_num - 1].roce.netdev;
269 read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
272 mlx5_ib_put_native_port_mdev(ibdev, port_num);
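Lines 251-272 are the body of the driver's get-netdev path: the RoCE netdev pointer is sampled under the roce.netdev_lock read lock, and the native-port mdev reference taken at 255 is dropped at 272. A minimal sketch of that pattern, assuming the field layout shown above (the dev_hold() on the sampled pointer follows the usual kernel convention and is visible in the driver, but treat the exact body as illustrative):

	struct net_device *ndev = NULL;

	read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
	ndev = ibdev->port[port_num - 1].roce.netdev;
	if (ndev)
		dev_hold(ndev);	/* hold before the lock drops */
	read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);

	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return ndev;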
276 struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
280 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
286 if (!mlx5_core_mp_enabled(ibdev->mdev) ||
290 return ibdev->mdev;
296 port = &ibdev->port[ib_port_num - 1];
298 mpi = ibdev->port[ib_port_num - 1].mp.mpi;
312 void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num)
314 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
319 if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
322 port = &ibdev->port[port_num - 1];
325 mpi = ibdev->port[port_num - 1].mp.mpi;
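mlx5_ib_get_native_port_mdev() (276) and mlx5_ib_put_native_port_mdev() (312) bracket every access to a slave port's core device; both short-circuit when multiport is disabled or the link layer is not Ethernet (286, 319). A hedged sketch of the pairing as a caller would use it (example_use_native_mdev is a hypothetical wrapper, not a driver function):

	static int example_use_native_mdev(struct mlx5_ib_dev *ibdev, u32 port_num)
	{
		struct mlx5_core_dev *mdev;

		mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
		if (!mdev)
			return -ENODEV;	/* port not affiliated yet */

		/* ... issue vport queries or commands against mdev here ... */

		mlx5_ib_put_native_port_mdev(ibdev, port_num);
		return 0;
	}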
658 static int mlx5_get_vport_access_method(struct ib_device *ibdev)
660 if (mlx5_use_mad_ifc(to_mdev(ibdev)))
663 if (mlx5_ib_port_link_layer(ibdev, 1) ==
700 static int mlx5_query_system_image_guid(struct ib_device *ibdev,
703 struct mlx5_ib_dev *dev = to_mdev(ibdev);
708 switch (mlx5_get_vport_access_method(ibdev)) {
710 return mlx5_query_mad_ifc_system_image_guid(ibdev,
732 static int mlx5_query_max_pkeys(struct ib_device *ibdev,
735 struct mlx5_ib_dev *dev = to_mdev(ibdev);
738 switch (mlx5_get_vport_access_method(ibdev)) {
740 return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
753 static int mlx5_query_vendor_id(struct ib_device *ibdev,
756 struct mlx5_ib_dev *dev = to_mdev(ibdev);
758 switch (mlx5_get_vport_access_method(ibdev)) {
760 return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
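Lines 658-760 (and again at 1377, 1438, and 1481 below) repeat one dispatch idiom: each query helper switches on mlx5_get_vport_access_method() to choose the MAD, HCA-vport, or NIC-vport path. A condensed sketch of that shape, using the driver's access-method enum (example_query and its three callees are hypothetical stand-ins for the per-attribute helpers):

	static int example_query(struct ib_device *ibdev, u64 *val)
	{
		switch (mlx5_get_vport_access_method(ibdev)) {
		case MLX5_VPORT_ACCESS_METHOD_MAD:
			return example_query_mad_ifc(ibdev, val);
		case MLX5_VPORT_ACCESS_METHOD_HCA:
			return example_query_hca_vport(ibdev, val);
		case MLX5_VPORT_ACCESS_METHOD_NIC:
			return example_query_nic_vport(ibdev, val);
		default:
			return -EINVAL;
		}
	}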
817 static int mlx5_ib_query_device(struct ib_device *ibdev,
822 struct mlx5_ib_dev *dev = to_mdev(ibdev);
844 err = mlx5_query_system_image_guid(ibdev,
851 err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
1038 if (mlx5_ib_port_link_layer(ibdev, 1) ==
1218 static void translate_active_width(struct ib_device *ibdev, u16 active_width,
1221 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1276 static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
1303 static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port,
1306 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1350 translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
1364 err = translate_max_vl_num(ibdev, vl_hw_cap,
1371 int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
1377 switch (mlx5_get_vport_access_method(ibdev)) {
1379 ret = mlx5_query_mad_ifc_port(ibdev, port, props);
1383 ret = mlx5_query_hca_port(ibdev, port, props);
1387 ret = mlx5_query_port_roce(ibdev, port, props);
1395 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1416 static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u32 port,
1419 return mlx5_query_port_roce(ibdev, port, props);
1422 static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1432 static int mlx5_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
1435 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1438 switch (mlx5_get_vport_access_method(ibdev)) {
1440 return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
1451 static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u32 port,
1454 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1478 static int mlx5_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1481 switch (mlx5_get_vport_access_method(ibdev)) {
1483 return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
1487 return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
1493 static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
1496 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1517 memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1556 static int mlx5_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
1559 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1565 bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
1582 err = ib_query_port(ibdev, port, &attr);
1774 struct ib_device *ibdev = uctx->device;
1775 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1807 if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
1852 struct ib_device *ibdev = uctx->device;
1853 struct mlx5_ib_dev *dev = to_mdev(ibdev);
2355 struct ib_device *ibdev = ibpd->device;
2367 err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
2376 mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
2524 static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
2536 /* Go over the qp list residing on that ibdev, syncing with create/destroy qp. */
2537 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
2538 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
2580 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
2602 static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
2609 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
2611 schedule_work(&ibdev->delay_drop.delay_drop_work);
2618 static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
2632 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
2646 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
2667 struct mlx5_ib_dev *ibdev;
2672 ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
2673 if (!ibdev)
2676 ibdev = work->dev;
2682 mlx5_ib_handle_internal_error(ibdev);
2687 if (handle_port_change(ibdev, work->param, &ibev))
2691 handle_general_event(ibdev, work->param, &ibev);
2697 ibev.device = &ibdev->ib_dev;
2699 if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
2700 mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num);
2704 if (ibdev->ib_active)
2708 ibdev->ib_active = false;
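The tail of the event work handler (2697-2708) stamps the device into the ib_event, rejects invalid port numbers, dispatches only while ib_active is set, and latches ib_active off after a fatal error. A sketch of that control flow, assuming locals ibev, fatal, and work as the surrounding handler suggests (2667-2682):

	ibev.device = &ibdev->ib_dev;
	if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
		mlx5_ib_warn(ibdev, "warning: event on port %d\n",
			     ibev.element.port_num);
		goto out;
	}

	if (ibdev->ib_active)
		ib_dispatch_event(&ibev);	/* deliver to registered clients */

	if (fatal)
		ibdev->ib_active = false;	/* no events after a fatal error */
out:
	kfree(work);	/* assumption: the work item is heap-allocated by the notifier */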
2803 struct ib_device *ibdev;
2808 ibdev = &dev->ib_dev;
2813 devr->p0 = ib_alloc_pd(ibdev, 0);
2817 devr->c0 = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
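Lines 2803-2817 allocate the driver-internal PD and CQ through the regular verbs entry points; both ib_alloc_pd() and ib_create_cq() return ERR_PTR values rather than NULL on failure. A sketch of the checks such a setup path needs (cq_attr.cqe = 1 and the unwind label are illustrative assumptions):

	devr->p0 = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(devr->p0))
		return PTR_ERR(devr->p0);

	cq_attr.cqe = 1;	/* assumption: minimal CQ for internal use */
	devr->c0 = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto err_dealloc_pd;	/* illustrative unwind label */
	}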
2895 static u32 get_core_cap_flags(struct ib_device *ibdev,
2898 struct mlx5_ib_dev *dev = to_mdev(ibdev);
2899 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
2929 static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num,
2933 struct mlx5_ib_dev *dev = to_mdev(ibdev);
2934 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
2938 err = ib_query_port(ibdev, port_num, &attr);
2951 immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
2957 static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num,
2965 err = ib_query_port(ibdev, port_num, &attr);
2976 static void get_dev_fw_str(struct ib_device *ibdev, char *str)
2979 container_of(ibdev, struct mlx5_ib_dev, ib_dev);
3168 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
3172 struct mlx5_ib_port *port = &ibdev->port[port_num];
3179 mlx5_core_mp_event_replay(ibdev->mdev,
3186 mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
3189 if (!mpi->ibdev) {
3194 mpi->ibdev = NULL;
3200 mlx5_mdev_netdev_untrack(ibdev, port_num);
3222 mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1);
3227 mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
3230 ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
3233 static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
3242 spin_lock(&ibdev->port[port_num].mp.mpi_lock);
3243 if (ibdev->port[port_num].mp.mpi) {
3244 mlx5_ib_dbg(ibdev, "port %u already affiliated.\n",
3246 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
3250 ibdev->port[port_num].mp.mpi = mpi;
3251 mpi->ibdev = ibdev;
3253 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
3255 err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
3259 mlx5_mdev_netdev_track(ibdev, port_num);
3264 mlx5_ib_init_cong_debugfs(ibdev, port_num);
3270 mlx5_core_mp_event_replay(ibdev->mdev,
3277 mlx5_ib_unbind_slave_port(ibdev, mpi);
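The bind path (3233-3277) publishes the mpi pointer under mp.mpi_lock, then performs firmware affiliation, netdev tracking, and congestion debugfs setup outside the lock, unwinding through mlx5_ib_unbind_slave_port() on failure (3277). A condensed sketch of that ordering, with port_num zero-based as the "+ 1" in the log messages at 3222 implies:

	spin_lock(&ibdev->port[port_num].mp.mpi_lock);
	if (ibdev->port[port_num].mp.mpi) {
		mlx5_ib_dbg(ibdev, "port %u already affiliated.\n",
			    port_num + 1);
		spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
		return false;
	}

	ibdev->port[port_num].mp.mpi = mpi;	/* publish under the lock */
	mpi->ibdev = ibdev;
	spin_unlock(&ibdev->port[port_num].mp.mpi_lock);

	/* Heavier work runs unlocked; any failure unwinds the bind. */
	err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
	if (err)
		goto unbind;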
3319 mpi->ibdev = dev;
4353 mpi->ibdev->ib_active = true;
4375 if (mpi->ibdev)
4376 mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);