Lines Matching defs:ibdev

82 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
129 struct mlx4_ib_dev *ibdev = to_mdev(device);
134 if (dev->dev.parent != ibdev->ib_dev.dev.parent ||
138 if (mlx4_is_bonded(ibdev->dev)) {
161 struct mlx4_ib_dev *ibdev,
166 struct mlx4_dev *dev = ibdev->dev;
194 struct mlx4_ib_dev *ibdev,
199 struct mlx4_dev *dev = ibdev->dev;
239 struct mlx4_ib_dev *ibdev,
242 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
243 return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);
245 return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
257 struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
258 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
333 ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
349 struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
350 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
393 ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
399 int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
402 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
413 if (mlx4_is_bonded(ibdev->dev))
416 if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
435 static int mlx4_ib_query_device(struct ib_device *ibdev,
439 struct mlx4_ib_dev *dev = to_mdev(ibdev);
474 err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
563 if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
564 mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) {
622 ((mlx4_ib_port_link_layer(ibdev, 1) ==
624 (mlx4_ib_port_link_layer(ibdev, 2) ==
655 static int ib_link_query_port(struct ib_device *ibdev, u32 port,
673 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
676 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
692 props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
693 props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
694 props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
725 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
751 static int eth_link_query_port(struct ib_device *ibdev, u32 port,
755 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
812 int __mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
819 err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
820 ib_link_query_port(ibdev, port, props, netw_view) :
821 eth_link_query_port(ibdev, port, props);
826 static int mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
830 return __mlx4_ib_query_port(ibdev, port, props, 0);
833 int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
839 struct mlx4_ib_dev *dev = to_mdev(ibdev);
889 static int mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
892 if (rdma_protocol_ib(ibdev, port))
893 return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
897 static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port,
907 if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
921 if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
924 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
958 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
975 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
978 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
991 static int mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
994 return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
997 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
1009 if (mlx4_is_slave(to_mdev(ibdev)->dev))
1012 spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
1013 memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1014 spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
1020 mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
1025 mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
1028 mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
1059 static int mlx4_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
1062 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
1077 err = ib_query_port(ibdev, port, &attr);
1089 mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
1096 struct ib_device *ibdev = uctx->device;
1097 struct mlx4_ib_dev *dev = to_mdev(ibdev);
1106 if (ibdev->ops.uverbs_abi_ver ==
1119 err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
1129 if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
1135 mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
1201 struct ib_device *ibdev = ibpd->device;
1204 err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
1209 mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
1288 static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
1297 mlx4_counter_free(ibdev->dev, counter->index);
2097 mlx4_ib_alloc_hw_device_stats(struct ib_device *ibdev)
2099 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2110 mlx4_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
2112 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2122 static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
2126 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2147 static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
2155 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
2179 static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
2191 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
2217 static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
2219 struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
2222 bool per_port = !!(ibdev->dev->caps.flags2 &
2225 if (mlx4_is_slave(ibdev->dev))
2234 ib_set_device_ops(&ibdev->ib_dev,
2240 ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs,
2246 mlx4_ib_fill_diag_counters(ibdev, diag[i].descs,
2250 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);
2263 static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
2268 kfree(ibdev->diag_counters[i].offset);
2269 kfree(ibdev->diag_counters[i].descs);
2274 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
2283 atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2286 if (!mlx4_is_mfunc(ibdev->dev))
2289 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2290 qp = ibdev->qp1_proxy[port - 1];
2301 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2307 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
2322 mlx4_unregister_mac(ibdev->dev, port, release_mac);
2325 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
2328 static void mlx4_ib_scan_netdev(struct mlx4_ib_dev *ibdev,
2333 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
2337 if (dev->dev.parent != ibdev->ib_dev.dev.parent)
2348 if (ib_get_cached_port_state(&ibdev->ib_dev, dev->dev_port + 1,
2362 ibev.device = &ibdev->ib_dev;
2374 mlx4_ib_update_qps(ibdev, dev, dev->dev_port + 1);
2381 struct mlx4_ib_dev *ibdev;
2386 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
2387 mlx4_ib_scan_netdev(ibdev, dev, event);
2392 static void init_pkeys(struct mlx4_ib_dev *ibdev)
2398 if (mlx4_is_master(ibdev->dev)) {
2399 for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2401 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2403 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2405 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2407 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2408 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2409 mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2410 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2415 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2417 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2419 ibdev->pkeys.phys_pkey_cache[port-1][i] =
2425 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2429 ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2430 sizeof(ibdev->eq_table[0]), GFP_KERNEL);
2431 if (!ibdev->eq_table)
2439 ibdev->eq_table[eq] = total_eqs;
2441 &ibdev->eq_table[eq]))
2444 ibdev->eq_table[eq] = -1;
2449 ibdev->eq_table[i++] = -1)
2453 ibdev->ib_dev.num_comp_vectors = eq;
2456 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2459 int total_eqs = ibdev->ib_dev.num_comp_vectors;
2462 if (!ibdev->eq_table)
2466 ibdev->ib_dev.num_comp_vectors = 0;
2469 mlx4_release_eq(dev, ibdev->eq_table[i]);
2471 kfree(ibdev->eq_table);
2472 ibdev->eq_table = NULL;
2475 static int mlx4_port_immutable(struct ib_device *ibdev, u32 port_num,
2479 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
2482 if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
2497 err = ib_query_port(ibdev, port_num, &attr);
2616 struct mlx4_ib_dev *ibdev;
2637 ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
2638 if (!ibdev) {
2644 iboe = &ibdev->iboe;
2646 err = mlx4_pd_alloc(dev, &ibdev->priv_pdn);
2650 err = mlx4_uar_alloc(dev, &ibdev->priv_uar);
2654 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2656 if (!ibdev->uar_map) {
2660 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
2662 ibdev->dev = dev;
2663 ibdev->bond_next_port = 0;
2665 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
2666 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
2667 ibdev->num_ports = num_ports;
2668 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
2669 1 : ibdev->num_ports;
2670 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
2671 ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev;
2673 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
2676 ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
2678 (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
2680 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
2684 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
2687 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
2691 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
2692 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
2696 ibdev->ib_dev.ops.uverbs_abi_ver =
2699 mlx4_ib_alloc_eqs(dev, ibdev);
2703 err = init_node_data(ibdev);
2706 mlx4_init_sl2vl_tbl(ibdev);
2708 for (i = 0; i < ibdev->num_ports; ++i) {
2709 mutex_init(&ibdev->counters_table[i].mutex);
2710 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
2714 num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2716 mutex_init(&ibdev->qp1_proxy_lock[i]);
2718 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2720 err = mlx4_counter_alloc(ibdev->dev, &counter_index,
2738 mlx4_counter_free(ibdev->dev, counter_index);
2744 &ibdev->counters_table[i].counters_list);
2745 ibdev->counters_table[i].default_counter = counter_index;
2750 for (i = 1; i < ibdev->num_ports ; ++i) {
2761 &ibdev->counters_table[i].counters_list);
2762 ibdev->counters_table[i].default_counter =
2769 spin_lock_init(&ibdev->sm_lock);
2770 mutex_init(&ibdev->cap_mask_mutex);
2771 INIT_LIST_HEAD(&ibdev->qp_list);
2772 spin_lock_init(&ibdev->reset_flow_resource_lock);
2774 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2776 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2777 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2779 &ibdev->steer_qpn_base, 0,
2784 ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count,
2786 if (!ibdev->ib_uc_qpns_bitmap) {
2792 bitmap_zero(ibdev->ib_uc_qpns_bitmap,
2793 ibdev->steer_qpn_count);
2795 dev, ibdev->steer_qpn_base,
2796 ibdev->steer_qpn_base +
2797 ibdev->steer_qpn_count - 1);
2801 bitmap_fill(ibdev->ib_uc_qpns_bitmap,
2802 ibdev->steer_qpn_count);
2806 for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2807 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2809 err = mlx4_ib_alloc_diag_counters(ibdev);
2813 err = ib_register_device(&ibdev->ib_dev, "mlx4_%d",
2818 err = mlx4_ib_mad_init(ibdev);
2822 err = mlx4_ib_init_sriov(ibdev);
2840 ibdev->ib_active = true;
2843 &ibdev->ib_dev);
2845 if (mlx4_is_mfunc(ibdev->dev))
2846 init_pkeys(ibdev);
2849 if (mlx4_is_master(ibdev->dev)) {
2851 if (j == mlx4_master_func_num(ibdev->dev))
2853 if (mlx4_is_slave_active(ibdev->dev, j))
2854 do_slave_init(ibdev, j, 1);
2859 ibdev->mlx_nb.notifier_call = mlx4_ib_event;
2860 err = mlx4_register_event_notifier(dev, &ibdev->mlx_nb);
2863 auxiliary_set_drvdata(adev, ibdev);
2867 if (ibdev->iboe.nb.notifier_call) {
2868 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2870 ibdev->iboe.nb.notifier_call = NULL;
2874 mlx4_ib_close_sriov(ibdev);
2877 mlx4_ib_mad_cleanup(ibdev);
2880 ib_unregister_device(&ibdev->ib_dev);
2883 mlx4_ib_diag_cleanup(ibdev);
2886 bitmap_free(ibdev->ib_uc_qpns_bitmap);
2889 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2890 ibdev->steer_qpn_count);
2892 for (i = 0; i < ibdev->num_ports; ++i)
2893 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
2896 mlx4_ib_free_eqs(dev, ibdev);
2897 iounmap(ibdev->uar_map);
2900 mlx4_uar_free(dev, &ibdev->priv_uar);
2903 mlx4_pd_free(dev, ibdev->priv_pdn);
2906 ib_dealloc_device(&ibdev->ib_dev);
2979 struct mlx4_ib_dev *ibdev = auxiliary_get_drvdata(adev);
2983 mlx4_unregister_event_notifier(dev, &ibdev->mlx_nb);
2987 ibdev->ib_active = false;
2990 if (ibdev->iboe.nb.notifier_call) {
2991 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2993 ibdev->iboe.nb.notifier_call = NULL;
2996 mlx4_ib_close_sriov(ibdev);
2997 mlx4_ib_mad_cleanup(ibdev);
2998 ib_unregister_device(&ibdev->ib_dev);
2999 mlx4_ib_diag_cleanup(ibdev);
3001 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
3002 ibdev->steer_qpn_count);
3003 bitmap_free(ibdev->ib_uc_qpns_bitmap);
3005 iounmap(ibdev->uar_map);
3006 for (p = 0; p < ibdev->num_ports; ++p)
3007 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
3012 mlx4_ib_free_eqs(dev, ibdev);
3014 mlx4_uar_free(dev, &ibdev->priv_uar);
3015 mlx4_pd_free(dev, ibdev->priv_pdn);
3016 ib_dealloc_device(&ibdev->ib_dev);
3019 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
3022 struct mlx4_dev *dev = ibdev->dev;
3051 dm[i]->dev = ibdev;
3054 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
3055 if (!ibdev->sriov.is_going_down) {
3057 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
3058 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3060 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3069 static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
3082 /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/
3083 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
3085 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
3126 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
3134 struct mlx4_ib_dev *ibdev = ew->ib_dev;
3140 spin_lock_bh(&ibdev->iboe.lock);
3142 struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
3156 spin_unlock_bh(&ibdev->iboe.lock);
3158 ibev.device = &ibdev->ib_dev;
3191 void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
3200 ew->ib_dev = ibdev;
3208 struct mlx4_ib_dev *ibdev =
3210 struct mlx4_dev *dev = ibdev->dev;
3223 ew->ib_dev = ibdev;
3241 if (p > ibdev->num_ports)
3244 rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
3247 mlx4_ib_invalidate_all_guid_record(ibdev, p);
3248 if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
3249 !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
3250 mlx4_sched_ib_sl2vl_update_work(ibdev, p);
3256 if (p > ibdev->num_ports)
3262 ibdev->ib_active = false;
3264 mlx4_ib_handle_catas_error(ibdev);
3274 ew->ib_dev = ibdev;
3284 do_slave_init(ibdev, p, 1);
3288 for (i = 1; i <= ibdev->num_ports; i++) {
3289 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3291 mlx4_ib_slave_alias_guid_event(ibdev,
3302 for (i = 1; i <= ibdev->num_ports; i++) {
3303 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3305 mlx4_ib_slave_alias_guid_event(ibdev,
3311 do_slave_init(ibdev, p, 0);
3318 ibev.device = &ibdev->ib_dev;
3319 ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
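
The dominant pattern across these matches is the conversion between the generic struct ib_device * that the RDMA core passes into each verbs callback and the driver-private struct mlx4_ib_dev that embeds it, performed by to_mdev(). Below is a minimal userspace sketch of that container_of()-based embedding, assuming simplified stand-in structure layouts rather than the real kernel definitions (which live in include/rdma/ib_verbs.h and drivers/infiniband/hw/mlx4/mlx4_ib.h).

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures, kept only to show the
 * embedding relationship; field sets are illustrative, not the real ones. */
struct ib_device {
	const char *name;
};

struct mlx4_ib_dev {
	struct ib_device ib_dev;   /* generic device embedded in the private one */
	int num_ports;             /* driver-private state */
};

/* container_of() as used in the kernel: recover the wrapping structure
 * from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Mirrors the to_mdev() helper seen throughout the matches above. */
static struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
}

int main(void)
{
	struct mlx4_ib_dev mdev = { .ib_dev = { .name = "mlx4_0" }, .num_ports = 2 };
	struct ib_device *ibdev = &mdev.ib_dev;   /* what a verbs callback receives */

	/* Callbacks such as mlx4_ib_query_device() start by recovering the
	 * driver-private device from the generic ibdev pointer. */
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	printf("%s has %d port(s)\n", dev->ib_dev.name, dev->num_ports);
	return 0;
}

This is why nearly every function in the listing either takes struct mlx4_ib_dev *ibdev directly (internal helpers) or takes struct ib_device *ibdev and calls to_mdev() first (verbs entry points).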