Lines matching refs:dev (references to the identifier dev in the mlx5_ib driver; each entry is the source line number followed by the matched line)
62 struct mlx5_ib_dev *dev;
84 struct mlx5_ib_dev *dev;
87 dev = mpi->ibdev;
89 return dev;
108 struct mlx5_ib_dev *dev = to_mdev(device);
109 int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
128 static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
137 for (i = 0; i < dev->num_ports; i++) {
138 port = &dev->port[i];
173 ibdev = roce->dev;
184 if (ndev->dev.parent == mdev->device)
475 struct mlx5_ib_dev *dev = to_mdev(device);
486 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
492 mdev = dev->mdev;
501 if (dev->is_rep)
518 if (!dev->is_rep && dev->mdev->roce.roce_en) {
523 props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
529 props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
542 if (dev->lag_active) {
565 mlx5_ib_put_native_port_mdev(dev, port_num);
569 int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
600 mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
603 return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
636 __be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
642 return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
645 static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
647 if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
648 return !MLX5_CAP_GEN(dev->mdev, ib_virt);
670 static void get_atomic_caps(struct mlx5_ib_dev *dev,
675 u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
677 MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
692 static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
695 u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
697 get_atomic_caps(dev, atomic_size_qp, props);
703 struct mlx5_ib_dev *dev = to_mdev(ibdev);
704 struct mlx5_core_dev *mdev = dev->mdev;
735 struct mlx5_ib_dev *dev = to_mdev(ibdev);
736 struct mlx5_core_dev *mdev = dev->mdev;
756 struct mlx5_ib_dev *dev = to_mdev(ibdev);
764 return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
771 static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
777 switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
779 return mlx5_query_mad_ifc_node_guid(dev, node_guid);
782 err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
786 err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
803 static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
807 if (mlx5_use_mad_ifc(dev))
808 return mlx5_query_mad_ifc_node_desc(dev, node_desc);
812 return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
822 struct mlx5_ib_dev *dev = to_mdev(ibdev);
823 struct mlx5_core_dev *mdev = dev->mdev;
849 props->max_pkeys = dev->pkey_table_len;
855 props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
856 (fw_rev_min(dev->mdev) << 16) |
857 fw_rev_sub(dev->mdev);
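
The three matched lines above pack the firmware revision into props->fw_ver: major in the top 32 bits, minor in the next 16, sub in the low 16. A minimal standalone C illustration of that bit layout, using a made-up revision 16.35.2000 (not a value taken from the listing):

    #include <stdio.h>

    int main(void)
    {
        /* Illustration only: pack firmware revision 16.35.2000 the same way. */
        unsigned long long fw_ver = ((unsigned long long)16 << 32) |
                                    (35u << 16) |
                                    2000u;

        /* major in bits 63..32, minor in bits 31..16, sub in bits 15..0 */
        printf("0x%016llx\n", fw_ver);  /* prints 0x00000010002307d0 */
        return 0;
    }
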
879 if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
893 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
900 if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
941 if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
942 MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
950 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
951 MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
963 if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
1002 get_atomic_caps_qp(dev, props);
1013 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
1015 props->odp_caps = dev->odp_caps;
1041 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
1043 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
1046 1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
1063 if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
1073 if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
1075 MLX5_CAP_GEN(dev->mdev,
1082 if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
1164 if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
1221 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1234 mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
1306 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1307 struct mlx5_core_dev *mdev = dev->mdev;
1395 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1399 mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
1404 mdev = dev->mdev;
1410 mlx5_ib_put_native_port_mdev(dev, port);
1435 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1436 struct mlx5_core_dev *mdev = dev->mdev;
1454 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1460 mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
1466 mdev = dev->mdev;
1473 mlx5_ib_put_native_port_mdev(dev, port);
1496 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1512 err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
1522 static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u32 port_num, u32 mask,
1530 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
1539 mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
1551 mlx5_ib_put_native_port_mdev(dev, port_num);
1559 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1574 if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
1577 return set_port_caps_atomic(dev, port, change_mask, value);
1580 mutex_lock(&dev->cap_mask_mutex);
1589 err = mlx5_set_port_caps(dev->mdev, port, tmp);
1592 mutex_unlock(&dev->cap_mask_mutex);
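
set_port_caps_atomic() and mlx5_ib_modify_port(), matched above, reject capability bits the port does not allow changing (the "trying to change bitmask ... but change supported ..." warning) and apply the rest under cap_mask_mutex. A small hypothetical sketch of that mask check and read-modify-write; the helper name and arguments here are invented for illustration, not the driver's real signature:

    #include <errno.h>

    /* Hypothetical stand-in for the check behind the
     * "trying to change bitmask ... but change supported ..." warning:
     * refuse bits the port cannot change, then read-modify-write the rest.
     */
    static int apply_cap_change(unsigned int *cur_caps, unsigned int change_mask,
                                unsigned int value, unsigned int supported)
    {
        if (change_mask & ~supported)
            return -EOPNOTSUPP;

        *cur_caps = (*cur_caps & ~change_mask) | (value & change_mask);
        return 0;
    }
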
1596 static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
1598 mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
1611 static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
1628 uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
1640 mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
1641 MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
1649 static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
1657 err = mlx5_cmd_uar_alloc(dev->mdev, &bfregi->sys_pages[i],
1662 mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
1672 if (mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
1674 mlx5_ib_warn(dev, "failed to free uar %d\n", i);
1679 static void deallocate_uars(struct mlx5_ib_dev *dev,
1689 mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
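
allocate_uars() and deallocate_uars(), whose dev->mdev uses are matched above, follow the usual allocate-N-or-unwind shape: if a mid-loop allocation fails, every UAR already obtained is released before returning. A self-contained sketch of that pattern with hypothetical alloc_one()/free_one() helpers standing in for mlx5_cmd_uar_alloc()/mlx5_cmd_uar_dealloc():

    /* Hypothetical per-item helpers, not the real mlx5_cmd_uar_* calls. */
    static int alloc_one(int i, unsigned int *out) { *out = 100u + i; return 0; }
    static void free_one(unsigned int idx) { (void)idx; }

    static int alloc_all(unsigned int *idxs, int n)
    {
        int i, err;

        for (i = 0; i < n; i++) {
            err = alloc_one(i, &idxs[i]);
            if (err)
                goto unwind;    /* release what was already allocated */
        }
        return 0;

    unwind:
        while (--i >= 0)
            free_one(idxs[i]);
        return err;
    }
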
1693 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1697 mutex_lock(&dev->lb.mutex);
1699 dev->lb.user_td++;
1701 dev->lb.qps++;
1703 if (dev->lb.user_td == 2 ||
1704 dev->lb.qps == 1) {
1705 if (!dev->lb.enabled) {
1706 err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
1707 dev->lb.enabled = true;
1711 mutex_unlock(&dev->lb.mutex);
1716 void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1718 mutex_lock(&dev->lb.mutex);
1720 dev->lb.user_td--;
1722 dev->lb.qps--;
1724 if (dev->lb.user_td == 1 &&
1725 dev->lb.qps == 0) {
1726 if (dev->lb.enabled) {
1727 mlx5_nic_vport_update_local_lb(dev->mdev, false);
1728 dev->lb.enabled = false;
1732 mutex_unlock(&dev->lb.mutex);
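
The mlx5_ib_enable_lb()/mlx5_ib_disable_lb() hits above keep two counters (user_td and qps) under dev->lb.mutex and only toggle loopback on the threshold crossings visible in the listing (enable at user_td == 2 or qps == 1, disable at user_td == 1 and qps == 0). A simplified userspace sketch of that counted enable/disable idea, with a pthread mutex and a stub in place of mlx5_nic_vport_update_local_lb():

    #include <pthread.h>
    #include <stdbool.h>

    struct lb_state {
        pthread_mutex_t mutex;
        int user_td;
        int qps;
        bool enabled;
    };

    static void hw_set_loopback(bool on) { (void)on; } /* stand-in for the vport update */

    static void lb_get(struct lb_state *lb, bool td, bool qp)
    {
        pthread_mutex_lock(&lb->mutex);
        if (td)
            lb->user_td++;
        if (qp)
            lb->qps++;
        /* Enable once a second TD user or the first QP shows up. */
        if (!lb->enabled && (lb->user_td == 2 || lb->qps == 1)) {
            hw_set_loopback(true);
            lb->enabled = true;
        }
        pthread_mutex_unlock(&lb->mutex);
    }

    static void lb_put(struct lb_state *lb, bool td, bool qp)
    {
        pthread_mutex_lock(&lb->mutex);
        if (td)
            lb->user_td--;
        if (qp)
            lb->qps--;
        /* Disable once only one TD user and no QPs remain. */
        if (lb->enabled && lb->user_td == 1 && lb->qps == 0) {
            hw_set_loopback(false);
            lb->enabled = false;
        }
        pthread_mutex_unlock(&lb->mutex);
    }
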
1735 static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
1740 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1743 err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
1747 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1748 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1749 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1752 return mlx5_ib_enable_lb(dev, true, false);
1755 static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
1758 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1761 mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
1763 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1764 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1765 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1768 mlx5_ib_disable_lb(dev, true, false);
1775 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1779 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1780 resp->dump_fill_mkey = dev->mkeys.dump_fill_mkey;
1785 resp->qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
1786 if (dev->wc_support)
1787 resp->bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev,
1790 resp->max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
1791 resp->max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
1792 resp->max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1793 resp->max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1794 resp->max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
1796 resp->log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1798 resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1799 MLX5_CAP_GEN(dev->mdev,
1803 resp->num_ports = dev->num_ports;
1808 mlx5_query_min_inline(dev->mdev, &resp->eth_min_inline);
1812 if (dev->mdev->clock_info)
1829 if (MLX5_CAP_GEN(dev->mdev, ece_support))
1832 if (rt_supported(MLX5_CAP_GEN(dev->mdev, sq_ts_format)) &&
1833 rt_supported(MLX5_CAP_GEN(dev->mdev, rq_ts_format)) &&
1834 rt_supported(MLX5_CAP_ROCE(dev->mdev, qp_ts_format)))
1840 if (MLX5_CAP_GEN(dev->mdev, drain_sigerr))
1853 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1865 if (!dev->ib_active)
1891 err = mlx5_ib_devx_create(dev, true);
1907 err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
1928 err = allocate_uars(dev, context);
1933 err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
1942 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
1957 print_lib_caps(dev, context->lib_caps);
1959 if (mlx5_ib_lag_should_assign_affinity(dev)) {
1960 u32 port = mlx5_core_native_port_num(dev->mdev) - 1;
1964 1, &dev->port[port].roce.tx_port_affinity));
1970 mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
1973 deallocate_uars(dev, context);
1983 mlx5_ib_devx_destroy(dev, context->devx_uid);
2015 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2019 mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
2021 deallocate_uars(dev, context);
2026 mlx5_ib_devx_destroy(dev, context->devx_uid);
2029 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
2034 fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
2036 return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
2039 static u64 uar_index2paddress(struct mlx5_ib_dev *dev,
2044 fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
2047 return (dev->mdev->bar_addr + (uar_idx / fw_uars_per_page) * PAGE_SIZE);
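
uar_index2pfn() and uar_index2paddress() above map a UAR index to a page frame number / physical address by dividing the index by the number of firmware UARs per system page and offsetting from bar_addr. A quick standalone illustration with made-up values (4 KiB pages, 4 UARs per page, an arbitrary BAR address):

    #include <stdio.h>

    int main(void)
    {
        /* Made-up values: 4 KiB pages, 4 firmware UARs per system page. */
        unsigned long long bar_addr = 0x10000000ULL;
        unsigned int page_shift = 12, fw_uars_per_page = 4, uar_idx = 10;

        unsigned long long pfn   = (bar_addr >> page_shift) +
                                   uar_idx / fw_uars_per_page;
        unsigned long long paddr = bar_addr +
                                   (uar_idx / fw_uars_per_page) * (1ULL << page_shift);

        /* UAR 10 sits in the third 4 KiB page of the BAR. */
        printf("pfn=0x%llx paddr=0x%llx\n", pfn, paddr); /* 0x10002, 0x10002000 */
        return 0;
    }
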
2092 static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
2107 if (!dev->mdev->clock_info)
2111 virt_to_page(dev->mdev->clock_info));
2117 struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
2118 struct mlx5_var_table *var_table = &dev->var_table;
2124 mlx5_ib_dm_mmap_free(dev, mentry);
2134 mlx5_cmd_uar_dealloc(dev->mdev, mentry->page_idx,
2143 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
2170 mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
2192 uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
2195 mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
2205 mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
2213 err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index,
2216 mlx5_ib_warn(dev, "UAR alloc failed\n");
2223 pfn = uar_index2pfn(dev, uar_index);
2224 mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
2229 mlx5_ib_err(dev,
2243 mlx5_cmd_uar_dealloc(dev->mdev, idx, context->devx_uid);
2246 mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
2262 static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
2305 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2313 if (!dev->wc_support)
2318 return uar_mmap(dev, command, vma, context);
2335 pfn = (dev->mdev->iseg_base +
2343 return mlx5_ib_mmap_clock_info_page(dev, vma, context);
2346 return mlx5_ib_mmap_offset(dev, vma, ibcontext);
2394 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2403 mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
2407 err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
2409 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
2417 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2423 err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
2425 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
2431 static int init_node_data(struct mlx5_ib_dev *dev)
2435 err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
2439 dev->mdev->rev_id = dev->mdev->pdev->revision;
2441 return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
2447 struct mlx5_ib_dev *dev =
2450 return sysfs_emit(buf, "%d\n", dev->mdev->priv.fw_pages);
2457 struct mlx5_ib_dev *dev =
2460 return sysfs_emit(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
2467 struct mlx5_ib_dev *dev =
2470 return sysfs_emit(buf, "MT%d\n", dev->mdev->pdev->device);
2477 struct mlx5_ib_dev *dev =
2480 return sysfs_emit(buf, "%x\n", dev->mdev->rev_id);
2487 struct mlx5_ib_dev *dev =
2491 dev->mdev->board_id);
2593 err = mlx5_core_set_delay_drop(delay_drop->dev, delay_drop->timeout);
2595 mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
2676 ibdev = work->dev;
2723 work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
2752 static int set_has_smi_cap(struct mlx5_ib_dev *dev)
2758 if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
2761 for (port = 1; port <= dev->num_ports; port++) {
2762 if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
2763 dev->port_caps[port - 1].has_smi = true;
2766 err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
2769 mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
2773 dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
2779 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
2783 rdma_for_each_port (&dev->ib_dev, port)
2784 mlx5_query_ext_port_caps(dev, port);
2799 static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
2801 struct mlx5_ib_resources *devr = &dev->devr;
2808 ibdev = &dev->ib_dev;
2810 if (!MLX5_CAP_GEN(dev->mdev, xrc))
2823 ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
2827 ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
2863 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
2865 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
2873 static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
2875 struct mlx5_ib_resources *devr = &dev->devr;
2889 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
2890 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
2898 struct mlx5_ib_dev *dev = to_mdev(ibdev);
2900 u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
2901 u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
2902 bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
2933 struct mlx5_ib_dev *dev = to_mdev(ibdev);
2943 err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
2978 struct mlx5_ib_dev *dev =
2981 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
2982 fw_rev_sub(dev->mdev));
2985 static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
2987 struct mlx5_core_dev *mdev = dev->mdev;
3006 dev->flow_db->lag_demux_ft = ft;
3007 dev->lag_ports = mlx5_lag_get_num_ports(mdev);
3008 dev->lag_active = true;
3016 static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
3018 struct mlx5_core_dev *mdev = dev->mdev;
3020 if (dev->lag_active) {
3021 dev->lag_active = false;
3023 mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
3024 dev->flow_db->lag_demux_ft = NULL;
3072 static void mlx5_mdev_netdev_track(struct mlx5_ib_dev *dev, u32 port_num)
3074 struct mlx5_roce *roce = &dev->port[port_num].roce;
3077 mlx5_blocking_notifier_register(dev->mdev, &roce->mdev_nb);
3078 mlx5_core_uplink_netdev_event_replay(dev->mdev);
3081 static void mlx5_mdev_netdev_untrack(struct mlx5_ib_dev *dev, u32 port_num)
3083 struct mlx5_roce *roce = &dev->port[port_num].roce;
3085 mlx5_blocking_notifier_unregister(dev->mdev, &roce->mdev_nb);
3089 static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
3093 if (!dev->is_rep && dev->profile != &raw_eth_profile) {
3094 err = mlx5_nic_vport_enable_roce(dev->mdev);
3099 err = mlx5_eth_lag_init(dev);
3106 if (!dev->is_rep && dev->profile != &raw_eth_profile)
3107 mlx5_nic_vport_disable_roce(dev->mdev);
3112 static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
3114 mlx5_eth_lag_cleanup(dev);
3115 if (!dev->is_rep && dev->profile != &raw_eth_profile)
3116 mlx5_nic_vport_disable_roce(dev->mdev);
3153 mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
3281 static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
3283 u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3284 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
3290 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
3293 err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
3294 &dev->sys_image_guid);
3298 err = mlx5_nic_vport_enable_roce(dev->mdev);
3303 for (i = 0; i < dev->num_ports; i++) {
3311 mlx5_nic_vport_disable_roce(dev->mdev);
3316 mpi->mdev = dev->mdev;
3317 mpi->sys_image_guid = dev->sys_image_guid;
3318 dev->port[i].mp.mpi = mpi;
3319 mpi->ibdev = dev;
3326 if (dev->sys_image_guid == mpi->sys_image_guid &&
3328 bound = mlx5_ib_bind_slave_port(dev, mpi);
3334 mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
3340 mlx5_ib_dbg(dev, "no free port found for port %d\n",
3344 list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
3349 static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
3351 u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3352 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
3356 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
3360 for (i = 0; i < dev->num_ports; i++) {
3361 if (dev->port[i].mp.mpi) {
3364 kfree(dev->port[i].mp.mpi);
3365 dev->port[i].mp.mpi = NULL;
3367 mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
3369 list_add_tail(&dev->port[i].mp.mpi->list,
3371 mlx5_ib_unbind_slave_port(dev,
3372 dev->port[i].mp.mpi);
3377 mlx5_ib_dbg(dev, "removing from devlist\n");
3378 list_del(&dev->ib_dev_list);
3381 mlx5_nic_vport_disable_roce(dev->mdev);
3519 struct mlx5_ib_dev *dev = to_mdev(device);
3521 return (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3530 struct mlx5_ib_dev *dev;
3538 dev = to_mdev(c->ibucontext.device);
3539 err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid);
3544 entry->address = uar_index2paddress(dev, uar_index);
3557 mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid);
3671 static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
3673 mlx5_ib_cleanup_multiport_master(dev);
3674 WARN_ON(!xa_empty(&dev->odp_mkeys));
3675 mutex_destroy(&dev->cap_mask_mutex);
3676 WARN_ON(!xa_empty(&dev->sig_mrs));
3677 WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
3678 mlx5r_macsec_dealloc_gids(dev);
3681 static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
3683 struct mlx5_core_dev *mdev = dev->mdev;
3686 dev->ib_dev.node_type = RDMA_NODE_IB_CA;
3687 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
3688 dev->ib_dev.phys_port_cnt = dev->num_ports;
3689 dev->ib_dev.dev.parent = mdev->device;
3690 dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
3692 for (i = 0; i < dev->num_ports; i++) {
3693 spin_lock_init(&dev->port[i].mp.mpi_lock);
3694 rwlock_init(&dev->port[i].roce.netdev_lock);
3695 dev->port[i].roce.dev = dev;
3696 dev->port[i].roce.native_port_num = i + 1;
3697 dev->port[i].roce.last_port_state = IB_PORT_DOWN;
3700 err = mlx5r_cmd_query_special_mkeys(dev);
3704 err = mlx5r_macsec_init_gids_and_devlist(dev);
3708 err = mlx5_ib_init_multiport_master(dev);
3712 err = set_has_smi_cap(dev);
3716 err = mlx5_query_max_pkeys(&dev->ib_dev, &dev->pkey_table_len);
3720 if (mlx5_use_mad_ifc(dev))
3721 get_ext_port_caps(dev);
3723 dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_max(mdev);
3725 mutex_init(&dev->cap_mask_mutex);
3726 INIT_LIST_HEAD(&dev->qp_list);
3727 spin_lock_init(&dev->reset_flow_resource_lock);
3728 xa_init(&dev->odp_mkeys);
3729 xa_init(&dev->sig_mrs);
3730 atomic_set(&dev->mkey_var, 0);
3732 spin_lock_init(&dev->dm.lock);
3733 dev->dm.dev = mdev;
3736 mlx5r_macsec_dealloc_gids(dev);
3738 mlx5_ib_cleanup_multiport_master(dev);
3742 static int mlx5_ib_enable_driver(struct ib_device *dev)
3744 struct mlx5_ib_dev *mdev = to_mdev(dev);
3850 static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
3852 struct mlx5_core_dev *mdev = dev->mdev;
3853 struct mlx5_var_table *var_table = &dev->var_table;
3862 var_table->hw_start_addr = dev->mdev->bar_addr +
3875 static void mlx5_ib_stage_caps_cleanup(struct mlx5_ib_dev *dev)
3877 bitmap_free(dev->var_table.bitmap);
3880 static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
3882 struct mlx5_core_dev *mdev = dev->mdev;
3887 ib_set_device_ops(&dev->ib_dev,
3891 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
3893 dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
3896 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
3899 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
3902 MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3904 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
3906 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
3909 dev->ib_dev.driver_def = mlx5_ib_defs;
3911 err = init_node_data(dev);
3915 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
3916 (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
3917 MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
3918 mutex_init(&dev->lb.mutex);
3920 if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3922 err = mlx5_ib_init_var_table(dev);
3927 dev->ib_dev.use_cq_dim = true;
3937 static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
3939 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
3949 static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
3951 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
3967 static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
3969 struct mlx5_core_dev *mdev = dev->mdev;
3979 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
3981 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3984 mlx5_mdev_netdev_track(dev, port_num);
3986 err = mlx5_enable_eth(dev);
3993 mlx5_mdev_netdev_untrack(dev, port_num);
3997 static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev)
3999 struct mlx5_core_dev *mdev = dev->mdev;
4008 mlx5_disable_eth(dev);
4010 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4011 mlx5_mdev_netdev_untrack(dev, port_num);
4015 static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
4017 mlx5_ib_init_cong_debugfs(dev,
4018 mlx5_core_native_port_num(dev->mdev) - 1);
4022 static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
4024 mlx5_ib_cleanup_cong_debugfs(dev,
4025 mlx5_core_native_port_num(dev->mdev) - 1);
4028 static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
4030 dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
4031 return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
4034 static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
4036 mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
4039 static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
4043 err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
4047 err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
4049 mlx5_free_bfreg(dev->mdev, &dev->bfreg);
4054 static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
4056 mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
4057 mlx5_free_bfreg(dev->mdev, &dev->bfreg);
4060 static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
4064 if (!mlx5_lag_is_active(dev->mdev))
4068 return ib_register_device(&dev->ib_dev, name, &dev->mdev->pdev->dev);
4071 static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
4073 mlx5_mkey_cache_cleanup(dev);
4074 mlx5r_umr_resource_cleanup(dev);
4077 static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
4079 ib_unregister_device(&dev->ib_dev);
4082 static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
4086 ret = mlx5r_umr_resource_init(dev);
4090 ret = mlx5_mkey_cache_init(dev);
4092 mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
4096 static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
4100 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
4103 mutex_init(&dev->delay_drop.lock);
4104 dev->delay_drop.dev = dev;
4105 dev->delay_drop.activate = false;
4106 dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
4107 INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
4108 atomic_set(&dev->delay_drop.rqs_cnt, 0);
4109 atomic_set(&dev->delay_drop.events_cnt, 0);
4114 root = debugfs_create_dir("delay_drop", mlx5_debugfs_get_dev_root(dev->mdev));
4115 dev->delay_drop.dir_debugfs = root;
4118 &dev->delay_drop.events_cnt);
4120 &dev->delay_drop.rqs_cnt);
4121 debugfs_create_file("timeout", 0600, root, &dev->delay_drop,
4126 static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
4128 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
4131 cancel_work_sync(&dev->delay_drop.delay_drop_work);
4132 if (!dev->delay_drop.dir_debugfs)
4135 debugfs_remove_recursive(dev->delay_drop.dir_debugfs);
4136 dev->delay_drop.dir_debugfs = NULL;
4139 static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
4141 dev->mdev_events.notifier_call = mlx5_ib_event;
4142 mlx5_notifier_register(dev->mdev, &dev->mdev_events);
4144 mlx5r_macsec_event_register(dev);
4149 static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
4151 mlx5r_macsec_event_unregister(dev);
4152 mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
4155 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
4159 dev->ib_active = false;
4165 profile->stage[stage].cleanup(dev);
4168 kfree(dev->port);
4169 ib_dealloc_device(&dev->ib_dev);
4172 int __mlx5_ib_add(struct mlx5_ib_dev *dev,
4178 dev->profile = profile;
4182 err = profile->stage[i].init(dev);
4188 dev->ib_active = true;
4196 profile->stage[i].cleanup(dev);
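
__mlx5_ib_add() and __mlx5_ib_remove(), matched above, drive a profile's stage table: init callbacks run in order, and the cleanup callbacks of already-initialized stages run in reverse on failure or removal. A self-contained sketch of that staged-profile pattern; the struct and function names here are invented, not the real mlx5_ib_profile types:

    #include <stddef.h>

    struct stage {
        int  (*init)(void *ctx);
        void (*cleanup)(void *ctx);
    };

    /* Run stages 0..n-1; on error, unwind the ones that already initialized. */
    static int profile_add(const struct stage *stages, size_t n, void *ctx)
    {
        size_t i;
        int err;

        for (i = 0; i < n; i++) {
            if (stages[i].init) {
                err = stages[i].init(ctx);
                if (err)
                    goto unwind;
            }
        }
        return 0;

    unwind:
        while (i-- > 0)
            if (stages[i].cleanup)
                stages[i].cleanup(ctx);
        return err;
    }

    /* Tear down every stage below 'max', highest first. */
    static void profile_remove(const struct stage *stages, size_t max, void *ctx)
    {
        while (max-- > 0)
            if (stages[max].cleanup)
                stages[max].cleanup(ctx);
    }
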
4330 struct mlx5_ib_dev *dev;
4347 list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
4348 if (dev->sys_image_guid == mpi->sys_image_guid)
4349 bound = mlx5_ib_bind_slave_port(dev, mpi);
4352 rdma_roce_rescan_device(&dev->ib_dev);
4391 struct mlx5_ib_dev *dev;
4398 dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
4399 if (!dev)
4401 dev->port = kcalloc(num_ports, sizeof(*dev->port),
4403 if (!dev->port) {
4404 ib_dealloc_device(&dev->ib_dev);
4408 dev->mdev = mdev;
4409 dev->num_ports = num_ports;
4416 ret = __mlx5_ib_add(dev, profile);
4418 kfree(dev->port);
4419 ib_dealloc_device(&dev->ib_dev);
4423 auxiliary_set_drvdata(adev, dev);
4429 struct mlx5_ib_dev *dev;
4431 dev = auxiliary_get_drvdata(adev);
4432 __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);