Lines Matching defs:mdev

326 if (!MLX5_CAP_GEN(dev->mdev, qpc_extension) ||
327 !MLX5_CAP_GEN(dev->mdev, qp_error_syndrome))
440 if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
475 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
478 MLX5_CAP_GEN(dev->mdev,
605 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
607 wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
617 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
621 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
644 if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
646 desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
658 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
661 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
1094 mlx5_db_free(dev->mdev, &qp->db);
1096 mlx5_frag_buf_free(dev->mdev, &qp->buf);
1118 qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2;
1131 err = mlx5_frag_buf_alloc_node(dev->mdev, base->ubuffer.buf_size,
1132 &qp->buf, dev->mdev->priv.numa_node);
1164 MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
1178 err = mlx5_db_alloc(dev->mdev, &qp->db);
1209 mlx5_db_free(dev->mdev, &qp->db);
1215 mlx5_frag_buf_free(dev->mdev, &qp->buf);
1241 mlx5_lag_is_lacp_owner(dev->mdev))
1246 return mlx5_core_create_tis(dev->mdev, in, &sq->tisn);
1252 mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid);
1293 u8 ts_cap = MLX5_CAP_GEN(dev->mdev, rq_ts_format);
1301 u8 ts_cap = MLX5_CAP_GEN(dev->mdev, sq_ts_format);
1310 u8 ts_cap = MLX5_CAP_ROCE(dev->mdev, qp_ts_format);
1382 if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
1390 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
1391 MLX5_CAP_ETH(dev->mdev, swp))
1516 mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid);
1556 err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out);
1630 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
1631 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2)) {
1699 mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
1876 err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out);
1883 mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
1893 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
1894 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2)) {
1941 MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe))
1967 u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
1968 u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic);
1976 atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
1978 atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
2000 struct mlx5_core_dev *mdev = dev->mdev;
2029 MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
2039 if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
2057 if (MLX5_CAP_GEN(mdev, ece_support))
2079 struct mlx5_core_dev *mdev = dev->mdev;
2113 if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
2127 if (MLX5_CAP_GEN(mdev, ece_support))
2184 if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
2202 if (MLX5_CAP_GEN(mdev, ece_support))
2240 struct mlx5_core_dev *mdev = dev->mdev;
2280 if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
2298 if (MLX5_CAP_GEN(mdev, ece_support))
2379 if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
2405 if (MLX5_CAP_GEN(mdev, ece_support))
2441 struct mlx5_core_dev *mdev = dev->mdev;
2522 if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
2530 MLX5_CAP_GEN(mdev, go_back_n))
2738 if (mlx5_lag_is_active(dev->mdev) && !MLX5_CAP_GEN(dev->mdev, lag_dct))
2752 if (MLX5_CAP_GEN(dev->mdev, ece_support))
2769 if (attr->qp_type == IB_QPT_DRIVER && !MLX5_CAP_GEN(dev->mdev, dct))
2775 if (!MLX5_CAP_GEN(dev->mdev, xrc))
2875 struct mlx5_core_dev *mdev = dev->mdev;
2904 MLX5_CAP_GEN(mdev, log_max_dci_stream_channels),
2909 MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
2911 MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
2914 cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
2915 MLX5_CAP_ETH(mdev, tunnel_stateless_gre) ||
2916 MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx);
2930 MLX5_CAP_GEN(mdev, qp_packet_based), qp);
2977 struct mlx5_core_dev *mdev = dev->mdev;
2988 mlx5_get_flow_namespace(dev->mdev,
2993 MLX5_CAP_GEN(mdev, sho), qp);
2996 MLX5_CAP_GEN(mdev, block_lb_mc), qp);
2998 MLX5_CAP_GEN(mdev, cd), qp);
3000 MLX5_CAP_GEN(mdev, cd), qp);
3002 MLX5_CAP_GEN(mdev, cd), qp);
3007 MLX5_CAP_GEN(mdev, ipoib_basic_offloads),
3009 cond = MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_IB;
3015 cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
3016 MLX5_CAP_ETH(mdev, scatter_fcs);
3020 cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
3021 MLX5_CAP_ETH(mdev, vlan_cap);
3028 MLX5_CAP_GEN(mdev, end_pad), qp);
3442 stat_rate_support = MLX5_CAP_GEN(dev->mdev, stat_rate_support);
3592 return modify_raw_packet_eth_prio(dev->mdev,
3801 if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
3811 err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in);
3954 err = modify_raw_packet_tx_affinity(dev->mdev, sq,
3966 err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
3990 u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3999 (dev->lag_active ? dev->lag_ports : MLX5_CAP_GEN(dev->mdev, num_lag_ports)) + 1;
4034 mlx5_lag_get_slave_port(dev->mdev, attr->xmit_slave);
4049 struct mlx5_core_dev *mdev)
4069 return mlx5_core_modify_rq(mdev, rq->base.mqp.qpn, in);
4088 return __mlx5_ib_qp_set_raw_qp_counter(mqp, set_id, dev->mdev);
4099 return mlx5_cmd_exec_in(dev->mdev, rts2rts_qp, in);
4195 MLX5_CAP_GEN(dev->mdev, init2_lag_tx_port_affinity))
4215 MLX5_CAP_GEN(dev->mdev, log_max_msg));
4354 MLX5_CAP_QOS(dev->mdev, packet_pacing_burst_bound)) {
4365 MLX5_CAP_QOS(dev->mdev, packet_pacing_typical_size)) {
4382 MLX5_CAP_GEN(dev->mdev, ece_support) ?
4509 if (MLX5_CAP_GEN(dev->mdev, ece_support) && ucmd->ece_options)
4546 if (mlx5_lag_is_active(dev->mdev))
4584 err = mlx5_cmd_check(dev->mdev, err, qp->dct.in, out);
4588 if (MLX5_CAP_GEN(dev->mdev, ece_support))
4627 log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
4629 log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
4632 log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
4634 log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
4825 err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state);
4848 err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
5137 if (!MLX5_CAP_GEN(dev->mdev, xrc))
5140 return mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
5148 return mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
5232 if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
5264 has_net_offloads = MLX5_CAP_GEN(dev->mdev, eth_net_offloads);
5266 if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
5275 if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, scatter_fcs))) {
5315 if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
5337 if (!MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) &&
5377 if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) {
5397 MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) ?
5525 MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
5528 MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
5553 err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
5571 mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
5580 return mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
5631 if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
5632 MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
5654 if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
5664 err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in);
5692 struct mlx5_core_dev *mdev = dev->mdev;
5700 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
5752 struct mlx5_core_dev *mdev = dev->mdev;
5755 if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
5781 struct mlx5_core_dev *mdev = dev->mdev;
5784 if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
5818 if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {