Lines Matching defs:mdev
109 int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
170 struct mlx5_core_dev *mdev;
174 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
175 if (!mdev)
184 if (ndev->dev.parent == mdev->device)
200 struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
253 struct mlx5_core_dev *mdev;
255 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
256 if (!mdev)
259 ndev = mlx5_lag_get_roce_netdev(mdev);
282 struct mlx5_core_dev *mdev = NULL;
286 if (!mlx5_core_mp_enabled(ibdev->mdev) ||
290 return ibdev->mdev;
300 mdev = mpi->mdev;
309 return mdev;
319 if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
477 struct mlx5_core_dev *mdev;
486 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
487 if (!mdev) {
492 mdev = dev->mdev;
502 err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
505 err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
518 if (!dev->is_rep && dev->mdev->roce.roce_en) {
523 props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
525 mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
529 props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
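The hits at 486-492 above (and again at 1460-1466 and 1530-1534 further down) show the recurring multiport idiom: resolve the mlx5_core_dev that natively owns the IB port, and fall back to the device's own mdev when no slave port is affiliated. A minimal caller-side sketch of that idiom, assuming the put helper that pairs with mlx5_ib_get_native_port_mdev in this driver; the put_mdev flag and surrounding scaffolding are illustrative:

    struct mlx5_core_dev *mdev;
    bool put_mdev = true;   /* illustrative: only "put" what we actually "got" */
    u32 mdev_port_num;

    mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
    if (!mdev) {
            /* No affiliated slave port: fall back to our own core device
             * and address it as its port 1 (as in hits 486-492).
             */
            mdev = dev->mdev;
            mdev_port_num = 1;
            put_mdev = false;
    }

    /* ... query or modify the port through mdev / mdev_port_num ... */

    if (put_mdev)
            mlx5_ib_put_native_port_mdev(dev, port_num);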
603 return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
642 return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
647 if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
648 return !MLX5_CAP_GEN(dev->mdev, ib_virt);
675 u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
677 MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
695 u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
704 struct mlx5_core_dev *mdev = dev->mdev;
714 err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
718 err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
736 struct mlx5_core_dev *mdev = dev->mdev;
744 *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
764 return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
782 err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
786 err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
812 return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
823 struct mlx5_core_dev *mdev = dev->mdev;
828 u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
829 bool raw_support = !mlx5_core_mp_enabled(mdev);
855 props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
856 (fw_rev_min(dev->mdev) << 16) |
857 fw_rev_sub(dev->mdev);
863 if (MLX5_CAP_GEN(mdev, pkv))
865 if (MLX5_CAP_GEN(mdev, qkv))
867 if (MLX5_CAP_GEN(mdev, apm))
869 if (MLX5_CAP_GEN(mdev, xrc))
871 if (MLX5_CAP_GEN(mdev, imaicl)) {
874 props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
879 if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
881 if (MLX5_CAP_GEN(mdev, sho)) {
890 if (MLX5_CAP_GEN(mdev, block_lb_mc))
893 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
894 if (MLX5_CAP_ETH(mdev, csum_cap)) {
900 if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
905 max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
936 if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
941 if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
942 MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
946 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
947 MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
950 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
951 MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
958 if (MLX5_CAP_DEV_MEM(mdev, memic)) {
960 MLX5_CAP_DEV_MEM(mdev, max_memic_size);
963 if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
966 if (MLX5_CAP_GEN(mdev, end_pad))
969 props->vendor_part_id = mdev->pdev->device;
970 props->hw_ver = mdev->pdev->revision;
974 props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
975 props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
976 max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
978 max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
985 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
986 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
987 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
988 props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
989 props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
990 props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
991 props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
992 props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
993 props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
997 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
1001 MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
1004 props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
1005 props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
1009 props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
1035 if (mlx5_core_is_vf(mdev))
1041 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
1043 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
1046 1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
1049 if (MLX5_CAP_GEN(mdev, tag_matching)) {
1051 (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
1053 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
1057 if (MLX5_CAP_GEN(mdev, tag_matching) &&
1058 MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
1063 if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
1073 if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
1075 MLX5_CAP_GEN(dev->mdev,
1082 if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
1090 if (MLX5_CAP_QOS(mdev, packet_pacing) &&
1091 MLX5_CAP_GEN(mdev, qos)) {
1093 MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
1095 MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
1098 if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
1099 MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
1108 if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
1112 if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
1123 if (MLX5_CAP_GEN(mdev, cqe_compression_128))
1127 if (MLX5_CAP_GEN(mdev, cqe_128_always))
1129 if (MLX5_CAP_GEN(mdev, qp_packet_based))
1138 if (MLX5_CAP_ETH(mdev, swp)) {
1142 if (MLX5_CAP_ETH(mdev, swp_csum))
1146 if (MLX5_CAP_ETH(mdev, swp_lso))
1159 if (MLX5_CAP_GEN(mdev, striding_rq)) {
1164 if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
1181 if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
1184 if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
1187 if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
1190 if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
1193 if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
1202 MLX5_CAP_GEN(mdev, log_max_dci_stream_channels);
1205 MLX5_CAP_GEN(mdev, log_max_dci_errored_streams);
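Most of the hits from 823 through 1205 read firmware capability fields into ib_device_attr inside the device-query path. Two conventions recur throughout: log2-encoded limits are expanded with a shift, and boolean capability bits gate optional features. A small sketch of both conventions, reusing fields that appear in the hits above (log_max_qp, log_max_cq_sz, cqe_compression); the wrapper function name is illustrative:

    static void example_fill_limits(struct mlx5_core_dev *mdev,
                                    struct ib_device_attr *props)
    {
            /* log2-encoded limit: the capability stores log2(max), so expand it */
            props->max_qp  = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
            /* same idea, minus one when the limit counts usable entries */
            props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;

            /* boolean capability bit gating an optional feature */
            if (MLX5_CAP_GEN(mdev, cqe_compression)) {
                    /* advertise/size the corresponding response fields here */
            }
    }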
1307 struct mlx5_core_dev *mdev = dev->mdev;
1323 err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
1334 props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
1335 props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
1336 props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
1345 err = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper,
1352 mlx5_query_port_max_mtu(mdev, &max_mtu, port);
1356 mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
1360 err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
1396 struct mlx5_core_dev *mdev;
1399 mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
1400 if (!mdev) {
1404 mdev = dev->mdev;
1408 count = mlx5_core_reserved_gids_count(mdev);
1436 struct mlx5_core_dev *mdev = dev->mdev;
1443 return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
1455 struct mlx5_core_dev *mdev;
1460 mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
1461 if (!mdev) {
1466 mdev = dev->mdev;
1470 err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
1512 err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
1526 struct mlx5_core_dev *mdev;
1530 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
1531 if (!mdev)
1534 err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
1547 err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
1574 if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
1589 err = mlx5_set_port_caps(dev->mdev, port, tmp);
1641 MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
1657 err = mlx5_cmd_uar_alloc(dev->mdev, &bfregi->sys_pages[i],
1672 if (mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
1689 mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
1706 err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
1727 mlx5_nic_vport_update_local_lb(dev->mdev, false);
1740 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1743 err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
1747 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1748 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1749 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1758 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1761 mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
1763 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1764 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1765 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1779 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1785 resp->qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
1787 resp->bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev,
1790 resp->max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
1791 resp->max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
1792 resp->max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1793 resp->max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1794 resp->max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
1796 resp->log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1798 resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1799 MLX5_CAP_GEN(dev->mdev,
1808 mlx5_query_min_inline(dev->mdev, &resp->eth_min_inline);
1812 if (dev->mdev->clock_info)
1829 if (MLX5_CAP_GEN(dev->mdev, ece_support))
1832 if (rt_supported(MLX5_CAP_GEN(dev->mdev, sq_ts_format)) &&
1833 rt_supported(MLX5_CAP_GEN(dev->mdev, rq_ts_format)) &&
1834 rt_supported(MLX5_CAP_ROCE(dev->mdev, qp_ts_format)))
1840 if (MLX5_CAP_GEN(dev->mdev, drain_sigerr))
1942 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
1960 u32 port = mlx5_core_native_port_num(dev->mdev) - 1;
2034 fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
2036 return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
2044 fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
2047 return (dev->mdev->bar_addr + (uar_idx / fw_uars_per_page) * PAGE_SIZE);
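The hits at 2034-2047 convert a UAR index into a page frame number or a physical address. The detail worth calling out is that on devices with the uar_4k capability, MLX5_UARS_IN_PAGE firmware UARs are packed into a single system page, so the index is divided by the per-page count before it is scaled. A restated sketch of that arithmetic; the wrapper name is illustrative, while the fields and macros come from the hits above:

    static u64 example_uar_index2paddr(struct mlx5_ib_dev *dev, int uar_idx)
    {
            /* With 4K UARs, MLX5_UARS_IN_PAGE UARs share one system page;
             * otherwise each UAR index owns a full page of the BAR.
             */
            int fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
                                   MLX5_UARS_IN_PAGE : 1;

            return dev->mdev->bar_addr + (uar_idx / fw_uars_per_page) * PAGE_SIZE;
    }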
2107 if (!dev->mdev->clock_info)
2111 virt_to_page(dev->mdev->clock_info));
2134 mlx5_cmd_uar_dealloc(dev->mdev, mentry->page_idx,
2213 err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index,
2243 mlx5_cmd_uar_dealloc(dev->mdev, idx, context->devx_uid);
2335 pfn = (dev->mdev->iseg_base +
2367 err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
2376 mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
2386 struct mlx5_ib_dev *mdev = to_mdev(pd->device);
2389 return mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
2407 err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
2423 err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
2439 dev->mdev->rev_id = dev->mdev->pdev->revision;
2450 return sysfs_emit(buf, "%d\n", dev->mdev->priv.fw_pages);
2460 return sysfs_emit(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
2470 return sysfs_emit(buf, "MT%d\n", dev->mdev->pdev->device);
2480 return sysfs_emit(buf, "%x\n", dev->mdev->rev_id);
2491 dev->mdev->board_id);
2758 if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
2762 if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
2766 err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
2810 if (!MLX5_CAP_GEN(dev->mdev, xrc))
2823 ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
2827 ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
2863 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
2865 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
2889 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
2890 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
2900 u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
2901 u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
2902 bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
2943 err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
2981 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
2982 fw_rev_sub(dev->mdev));
2987 struct mlx5_core_dev *mdev = dev->mdev;
2988 struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
2993 if (!ns || !mlx5_lag_is_active(mdev))
2996 err = mlx5_cmd_create_vport_lag(mdev);
3007 dev->lag_ports = mlx5_lag_get_num_ports(mdev);
3012 mlx5_cmd_destroy_vport_lag(mdev);
3018 struct mlx5_core_dev *mdev = dev->mdev;
3026 mlx5_cmd_destroy_vport_lag(mdev);
3077 mlx5_blocking_notifier_register(dev->mdev, &roce->mdev_nb);
3078 mlx5_core_uplink_netdev_event_replay(dev->mdev);
3085 mlx5_blocking_notifier_unregister(dev->mdev, &roce->mdev_nb);
3094 err = mlx5_nic_vport_enable_roce(dev->mdev);
3107 mlx5_nic_vport_disable_roce(dev->mdev);
3116 mlx5_nic_vport_disable_roce(dev->mdev);
3126 return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
3171 u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
3179 mlx5_core_mp_event_replay(ibdev->mdev,
3182 mlx5_core_mp_event_replay(mpi->mdev,
3198 mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
3220 err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
3236 u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
3255 err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
3262 mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
3266 key = mpi->mdev->priv.adev_idx;
3267 mlx5_core_mp_event_replay(mpi->mdev,
3270 mlx5_core_mp_event_replay(ibdev->mdev,
3283 u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3290 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
3293 err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
3298 err = mlx5_nic_vport_enable_roce(dev->mdev);
3311 mlx5_nic_vport_disable_roce(dev->mdev);
3316 mpi->mdev = dev->mdev;
3327 (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
3332 dev_dbg(mpi->mdev->device,
3351 u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3356 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
3381 mlx5_nic_vport_disable_roce(dev->mdev);
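The hits between 3171 and 3381 cover multiport affiliation: a slave port's core device (mpi->mdev) is affiliated to the master IB device, the slave registers an event notifier, and both sides replay multiport events keyed by the slave's adev_idx. A condensed sketch of the bind side assembled from those hits; locking, error unwinding, and the replay-call arguments are elided, and the function name is illustrative:

    static int example_bind_slave_port(struct mlx5_ib_dev *ibdev,
                                       struct mlx5_ib_multiport_info *mpi)
    {
            int err;

            /* Affiliate the slave vport with the master's core device (hit 3255). */
            err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
            if (err)
                    return err;

            /* Subscribe to events coming from the slave mdev (hit 3262). */
            mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);

            /* Replay multiport events on both devices, keyed by the slave's
             * adev index (hits 3266-3270); arguments elided here.
             */
            /* key = mpi->mdev->priv.adev_idx;              */
            /* mlx5_core_mp_event_replay(mpi->mdev,  ...);  */
            /* mlx5_core_mp_event_replay(ibdev->mdev, ...); */

            return 0;
    }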
3521 return (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3539 err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid);
3557 mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid);
3683 struct mlx5_core_dev *mdev = dev->mdev;
3689 dev->ib_dev.dev.parent = mdev->device;
3723 dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_max(mdev);
3733 dev->dm.dev = mdev;
3744 struct mlx5_ib_dev *mdev = to_mdev(dev);
3747 ret = mlx5_ib_test_wc(mdev);
3748 mlx5_ib_dbg(mdev, "Write-Combining %s",
3749 mdev->wc_support ? "supported" : "not supported");
3852 struct mlx5_core_dev *mdev = dev->mdev;
3858 log_doorbell_bar_size = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
3860 log_doorbell_stride = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
3862 var_table->hw_start_addr = dev->mdev->bar_addr +
3863 MLX5_CAP64_DEV_VDPA_EMULATION(mdev,
3882 struct mlx5_core_dev *mdev = dev->mdev;
3885 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
3890 if (mlx5_core_is_pf(mdev))
3893 dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
3895 if (MLX5_CAP_GEN(mdev, imaicl))
3898 if (MLX5_CAP_GEN(mdev, xrc))
3901 if (MLX5_CAP_DEV_MEM(mdev, memic) ||
3902 MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3915 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
3916 (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
3917 MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
3920 if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3969 struct mlx5_core_dev *mdev = dev->mdev;
3975 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
3981 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3999 struct mlx5_core_dev *mdev = dev->mdev;
4004 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
4010 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4018 mlx5_core_native_port_num(dev->mdev) - 1);
4025 mlx5_core_native_port_num(dev->mdev) - 1);
4030 dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
4031 return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
4036 mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
4043 err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
4047 err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
4049 mlx5_free_bfreg(dev->mdev, &dev->bfreg);
4056 mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
4057 mlx5_free_bfreg(dev->mdev, &dev->bfreg);
4064 if (!mlx5_lag_is_active(dev->mdev))
4068 return ib_register_device(&dev->ib_dev, name, &dev->mdev->pdev->dev);
4114 root = debugfs_create_dir("delay_drop", mlx5_debugfs_get_dev_root(dev->mdev));
4142 mlx5_notifier_register(dev->mdev, &dev->mdev_events);
4152 mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
4328 struct mlx5_core_dev *mdev = idev->mdev;
4338 mpi->mdev = mdev;
4339 err = mlx5_query_nic_vport_system_image_guid(mdev,
4360 dev_dbg(mdev->device,
4387 struct mlx5_core_dev *mdev = idev->mdev;
4393 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
4396 num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
4397 MLX5_CAP_GEN(mdev, num_vhca_ports));
4408 dev->mdev = mdev;
4411 if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
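The final group of hits (4328-4411) is the probe path: the port_type capability picks the RDMA link layer, the port count covers both physical and vhca ports, and an Ethernet device with RoCE disabled is steered away from the full RoCE profile. A condensed sketch of that decision, assuming the link-layer conversion helper used elsewhere in this file; the remaining names are taken from the hits above:

    int port_type_cap = MLX5_CAP_GEN(mdev, port_type);
    enum rdma_link_layer ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
    u32 num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
                        MLX5_CAP_GEN(mdev, num_vhca_ports));

    if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev)) {
            /* RoCE is administratively disabled on an Ethernet device:
             * fall back to a reduced profile rather than the full RoCE one.
             */
    }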