Lines Matching defs:dev
29 dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
33 dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
37 dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
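The three lines above are continuation lines of the driver's logging wrappers; only the dev_dbg/dev_err/dev_warn halves match. A minimal sketch of the full shape, assuming the enclosing macros are the mlx5_ib_dbg/err/warn family and that the cut-off continuation supplies __LINE__, current->pid and the variadic arguments:

        /* Sketch only: the macro name and trailing arguments are reconstructed assumptions. */
        #define mlx5_ib_dbg(_dev, format, arg...)                                 \
                dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
                        __LINE__, current->pid, ##arg)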
178 struct mlx5_ib_dev *dev;
607 struct mlx5_ib_dev *dev;
701 struct mlx5_ib_dev *dev;
756 struct mlx5_ib_dev *dev;
770 struct mlx5_ib_dev *dev;
809 struct mlx5_ib_dev *dev;
845 int (*init)(struct mlx5_ib_dev *dev);
846 void (*cleanup)(struct mlx5_ib_dev *dev);
876 struct mlx5_ib_dev *dev;
887 struct mlx5_core_dev *dev;
944 struct mlx5_ib_dev *dev;
1148 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
1149 void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
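mlx5_ib_enable_lb() and mlx5_ib_disable_lb() take the same (dev, td, qp) arguments, which suggests an acquire/release pairing. A hedged sketch of that pairing; the wrapper functions and the meaning of the two booleans are assumptions, not taken from the matches:

        /* Illustrative pairing only; the surrounding helpers are hypothetical. */
        static int example_enable_self_loopback(struct mlx5_ib_dev *dev)
        {
                /* td=true, qp=false: which flag a given caller sets is assumed here. */
                return mlx5_ib_enable_lb(dev, true, false);
        }

        static void example_disable_self_loopback(struct mlx5_ib_dev *dev)
        {
                /* Mirror the flags that were passed to the enable call. */
                mlx5_ib_disable_lb(dev, true, false);
        }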
1214 int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
1223 int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
1224 int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
1237 void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
1240 void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
1244 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
1245 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
1247 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
1249 void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
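The MR cache entry points pair an init with a cleanup and an alloc with a free. A minimal lifecycle sketch under that assumption; the setup/teardown wrappers are hypothetical, and mlx5_mr_cache_alloc() is left out because its remaining parameters are cut off in the match:

        /* Hypothetical wrappers showing the expected init/cleanup pairing. */
        static int example_setup_mr_cache(struct mlx5_ib_dev *dev)
        {
                return mlx5_mr_cache_init(dev);
        }

        static void example_teardown_mr_cache(struct mlx5_ib_dev *dev)
        {
                /* Returns an int per the declaration above; ignored in this sketch. */
                mlx5_mr_cache_cleanup(dev);
        }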
1274 void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
1288 static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
1317 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
1320 void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
1335 __be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
1338 void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
1339 void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
1358 void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
1361 struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
1364 void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
1447 static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
1449 return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1453 static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
1456 return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
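The two inline helpers above appear almost in full; only the second branch of the ternary in get_uars_per_sys_page() is cut off. A hedged reconstruction, where MLX5_UARS_IN_PAGE as the taken branch is an assumption:

        static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
        {
                /* 4K UARs pack several UARs into one system page; otherwise 1:1. */
                return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
                               MLX5_UARS_IN_PAGE : 1;
        }

        static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
                                              struct mlx5_bfreg_info *bfregi)
        {
                /* Statically reserved UARs = UARs per system page * static pages. */
                return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
                       bfregi->num_static_sys_pages;
        }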
1462 int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
1466 static inline bool mlx5_ib_can_load_pas_with_umr(struct mlx5_ib_dev *dev,
1477 if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
1484 if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
1495 static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev,
1502 MLX5_CAP_GEN(dev->mdev, atomic) &&
1503 MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
1507 MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
1508 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
1512 MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
1513 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
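Only the MLX5_CAP_GEN() checks from the bodies of mlx5_ib_can_load_pas_with_umr() and mlx5_ib_can_reconfig_with_umr() match here. A hedged caller-side sketch; the length and access-flag parameters and the wrapper itself are assumptions, not taken from the matches:

        /* Hypothetical caller: decide whether an MR update can go through UMR. */
        static bool example_can_update_mr_via_umr(struct mlx5_ib_dev *dev,
                                                  size_t length,
                                                  unsigned int cur_access,
                                                  unsigned int new_access)
        {
                /* Pushing a new page list (PAS) via UMR is gated on the
                 * umr_modify_entity_size / extended_translation_offset caps. */
                if (!mlx5_ib_can_load_pas_with_umr(dev, length))
                        return false;

                /* Changing atomic or relaxed-ordering access is gated on the
                 * umr_modify_atomic / relaxed_ordering_*_umr caps above. */
                return mlx5_ib_can_reconfig_with_umr(dev, cur_access, new_access);
        }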
1519 int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
1521 static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
1523 return dev->lag_active ||
1524 (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
1525 MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
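The last four lines are essentially the whole of mlx5_ib_lag_should_assign_affinity(); reassembled below, with only the closing brace and the comment supplied here:

        static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
        {
                /* Assign TX port affinity when LAG is already active, or when the
                 * device exposes multiple LAG ports with TX-affinity support. */
                return dev->lag_active ||
                       (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
                        MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
        }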