Lines matching refs:attr

494 static int sq_overhead(struct ib_qp_init_attr *attr)
498 switch (attr->qp_type) {
523 if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
546 static int calc_send_wqe(struct ib_qp_init_attr *attr)
551 size = sq_overhead(attr);
555 if (attr->cap.max_inline_data) {
557 attr->cap.max_inline_data;
560 size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
561 if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN &&
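
The sq_overhead() and calc_send_wqe() hits above (494-561) size a send WQE: a fixed per-QP-type segment overhead, plus the larger of the inline-data layout and the scatter/gather layout, rounded up to the 64-byte basic block. A minimal userspace sketch of that arithmetic; OVERHEAD, INL_SEG_SZ and DATA_SEG_SZ are illustrative stand-ins, not the real struct mlx5_wqe_* sizes.

/* Sketch of the send-WQE sizing; constants are illustrative stand-ins. */
#include <stdio.h>

#define SEND_WQE_BB  64   /* basic block size, as in MLX5_SEND_WQE_BB */
#define ALIGN_BB(x)  (((x) + SEND_WQE_BB - 1) & ~(SEND_WQE_BB - 1))
#define OVERHEAD     32   /* stand-in for sq_overhead(attr) */
#define INL_SEG_SZ   4    /* stand-in inline segment header */
#define DATA_SEG_SZ  16   /* stand-in for sizeof(struct mlx5_wqe_data_seg) */

static int calc_send_wqe(int max_inline_data, int max_send_sge)
{
	int inl_size = 0;
	int size = OVERHEAD;

	if (max_inline_data)
		inl_size = size + INL_SEG_SZ + max_inline_data;

	size += max_send_sge * DATA_SEG_SZ;
	/* the WQE must hold whichever layout is larger */
	return ALIGN_BB(inl_size > size ? inl_size : size);
}

int main(void)
{
	printf("wqe size: %d bytes\n", calc_send_wqe(64, 4));
	return 0;
}
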
568 static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
572 if (attr->qp_type == IB_QPT_RC)
577 else if (attr->qp_type == IB_QPT_XRC_INI)
584 max_sge = (wqe_size - sq_overhead(attr)) /
587 return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
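
get_send_sge() at 568 inverts the relationship for the common case: given a WQE size, the SGE count is the space left after the fixed overhead divided by the data-segment size (the RC and XRC_INI branches at 572 and 577 account for additional fixed segments first). A sketch of the plain branch, with the same stand-in constants:

/* Sketch of the common get_send_sge() branch; stand-in constants. */
#include <stdio.h>

#define OVERHEAD     32   /* stand-in for sq_overhead(attr) */
#define DATA_SEG_SZ  16   /* stand-in for sizeof(struct mlx5_wqe_data_seg) */

static int get_send_sge(int wqe_size)
{
	return (wqe_size - OVERHEAD) / DATA_SEG_SZ;
}

int main(void)
{
	printf("max sge in a 128-byte wqe: %d\n", get_send_sge(128));
	return 0;
}
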
591 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
597 if (!attr->cap.max_send_wr)
600 wqe_size = calc_send_wqe(attr);
611 qp->max_inline_data = wqe_size - sq_overhead(attr) -
613 attr->cap.max_inline_data = qp->max_inline_data;
615 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
619 attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
625 qp->sq.max_gs = get_send_sge(attr, wqe_size);
626 if (qp->sq.max_gs < attr->cap.max_send_sge)
629 attr->cap.max_send_sge = qp->sq.max_gs;
631 attr->cap.max_send_wr = qp->sq.max_post;
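
calc_sq_size() at 591 turns the per-WQE size into queue geometry: total SQ bytes are rounded up to a power of two (615), the WQE count is expressed in 64-byte basic blocks, and max_post (631) is how many full WQEs fit. A runnable model of that sizing, assuming the 64-byte MLX5_SEND_WQE_BB:

/* Runnable model of the SQ geometry math; 64 matches MLX5_SEND_WQE_BB. */
#include <stdio.h>

#define SEND_WQE_BB 64

static unsigned int roundup_pow_of_two(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int max_send_wr = 100, wqe_size = 128;
	unsigned int wq_size = roundup_pow_of_two(max_send_wr * wqe_size);
	unsigned int wqe_cnt = wq_size / SEND_WQE_BB;   /* basic blocks */
	unsigned int max_post = wq_size / wqe_size;     /* full WQEs */

	printf("wq_size=%u wqe_cnt=%u max_post=%u\n",
	       wq_size, wqe_cnt, max_post);
	return 0;
}
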
640 struct ib_qp_init_attr *attr)
665 if (attr->qp_type == IB_QPT_RAW_PACKET ||
677 static int qp_has_rq(struct ib_qp_init_attr *attr)
679 if (attr->qp_type == IB_QPT_XRC_INI ||
680 attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
681 attr->qp_type == MLX5_IB_QPT_REG_UMR ||
682 !attr->cap.max_recv_wr)
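
qp_has_rq() at 677 is a pure predicate: a QP has no receive queue when it is an XRC initiator or target, sits on an SRQ, is the internal REG_UMR QP, or requested zero receive WRs. Restated as a standalone sketch with local stand-in enum values:

/* Standalone sketch of the qp_has_rq() predicate; local stand-ins. */
#include <stdbool.h>
#include <stdio.h>

enum qp_type { QPT_RC, QPT_XRC_INI, QPT_XRC_TGT, QPT_REG_UMR };

static bool qp_has_rq(enum qp_type type, bool has_srq, int max_recv_wr)
{
	if (type == QPT_XRC_INI || type == QPT_XRC_TGT || has_srq ||
	    type == QPT_REG_UMR || !max_recv_wr)
		return false;
	return true;
}

int main(void)
{
	printf("RC, 16 recv WRs: %d\n", qp_has_rq(QPT_RC, false, 16));
	printf("RC on an SRQ:    %d\n", qp_has_rq(QPT_RC, true, 16));
	return 0;
}
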
943 struct ib_qp_init_attr *attr, u32 **in,
997 err = set_user_buf_size(dev, qp, ucmd, base, attr);
1030 uid = (attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
1219 static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
1221 if (attr->srq || (qp->type == IB_QPT_XRC_TGT) ||
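
get_rx_type() at 1219 picks the RQ flavour from that predicate: SRQ-backed (and XRC-target-like) QPs report an SRQ RQ, QPs without a receive queue a zero-length RQ, and everything else a regular RQ. A simplified sketch that folds the type checks into one has_srq flag; the names are local stand-ins for the MLX5_*_RQ values:

/* Simplified sketch of the three-way RQ-type choice; stand-in names. */
#include <stdio.h>

enum rx_type { SRQ_RQ, ZERO_LEN_RQ, NON_ZERO_RQ };

static enum rx_type get_rx_type(int has_srq, int has_rq)
{
	if (has_srq)
		return SRQ_RQ;
	if (!has_rq)
		return ZERO_LEN_RQ;
	return NON_ZERO_RQ;
}

int main(void)
{
	printf("srq=%d  no-rq=%d  normal=%d\n",
	       get_rx_type(1, 0), get_rx_type(0, 0), get_rx_type(0, 1));
	return 0;
}
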
1710 struct ib_qp_init_attr *attr;
1719 struct ib_qp_init_attr *init_attr = params->attr;
1995 struct ib_qp_init_attr *attr = params->attr;
2007 if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
2035 MLX5_SET(qpc, qpc, xrcd, to_mxrcd(attr->xrcd)->xrcdn);
2064 qp->trans_qp.xrcdn = to_mxrcd(attr->xrcd)->xrcdn;
2072 struct ib_qp_init_attr *init_attr = params->attr;
2233 struct ib_qp_init_attr *init_attr = params->attr;
2436 struct ib_qp_init_attr *attr = params->attr;
2458 if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
2463 qp->has_rq = qp_has_rq(attr);
2464 err = set_rq_size(dev, &attr->cap, qp->has_rq, qp, NULL);
2470 err = _create_kernel_qp(dev, attr, qp, &in, &inlen, base);
2474 if (is_sqp(attr->qp_type))
2475 qp->port = attr->port_num;
2482 if (attr->qp_type != MLX5_IB_QPT_REG_UMR)
2496 MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, attr));
2503 if (attr->srq) {
2506 to_msrq(attr->srq)->msrq.srqn);
2513 if (attr->send_cq)
2514 MLX5_SET(qpc, qpc, cqn_snd, to_mcq(attr->send_cq)->mcq.cqn);
2516 if (attr->recv_cq)
2517 MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(attr->recv_cq)->mcq.cqn);
2541 get_cqs(qp->type, attr->send_cq, attr->recv_cq,
2733 struct ib_qp_init_attr *attr = params->attr;
2748 MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
2749 MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
2756 int rcqe_sz = mlx5_ib_get_cqe_size(attr->recv_cq);
2766 static int check_qp_type(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
2769 if (attr->qp_type == IB_QPT_DRIVER && !MLX5_CAP_GEN(dev->mdev, dct))
2772 switch (attr->qp_type) {
2792 *type = attr->qp_type;
2796 mlx5_ib_dbg(dev, "Unsupported QP type %d\n", attr->qp_type);
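
check_qp_type() at 2766 is a whitelist: IB_QPT_DRIVER additionally requires DCT support in the device caps (2769), known types are passed through to *type (2792), and anything else is logged and rejected (2796). A stub model of that shape; the enum values and the -EOPNOTSUPP choice are assumptions of the sketch:

/* Stub model of the type whitelist; enum and errno use are assumptions. */
#include <errno.h>
#include <stdio.h>

enum qp_type { QPT_RC, QPT_UD, QPT_DRIVER, QPT_UNKNOWN };

static int check_qp_type(enum qp_type in, int have_dct, enum qp_type *out)
{
	if (in == QPT_DRIVER && !have_dct)
		return -EOPNOTSUPP;

	switch (in) {
	case QPT_RC:
	case QPT_UD:
	case QPT_DRIVER:
		*out = in;
		return 0;
	default:
		fprintf(stderr, "Unsupported QP type %d\n", in);
		return -EOPNOTSUPP;
	}
}

int main(void)
{
	enum qp_type t;

	printf("driver qp without dct: %d\n",
	       check_qp_type(QPT_DRIVER, 0, &t));
	return 0;
}
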
2801 struct ib_qp_init_attr *attr,
2809 if (attr->rwq_ind_tbl)
2812 switch (attr->qp_type) {
2822 if (attr->qp_type == IB_QPT_RAW_PACKET && !ucontext->cqe_version) {
2828 if (attr->qp_type != IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) {
2831 attr->qp_type);
2839 WARN_ONCE(!pd && attr->qp_type != IB_QPT_XRC_TGT,
2873 void *ucmd, struct ib_qp_init_attr *attr)
2879 if (attr->rwq_ind_tbl)
2938 if (attr->rwq_ind_tbl && cond) {
2974 struct ib_qp_init_attr *attr)
2978 int create_flags = attr->create_flags;
2984 if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
3109 err = mlx5_ib_create_gsi(pd, qp, params->attr);
3138 params->attr->recv_cq ? to_mcq(params->attr->recv_cq)->mcq.cqn :
3140 params->attr->send_cq ? to_mcq(params->attr->send_cq)->mcq.cqn :
3148 struct ib_qp_init_attr *attr)
3154 ret = (!attr->srq || !attr->recv_cq) ? -EINVAL : 0;
3157 ret = (attr->cap.max_recv_wr || attr->cap.max_recv_sge) ?
3162 ret = (attr->rwq_ind_tbl && attr->send_cq) ? -EINVAL : 0;
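
check_qp_attr() at 3148 applies per-flavour sanity checks: one flavour insists on both an SRQ and a receive CQ (3154), one must not request any receive resources (3157), and an RSS raw QP may not also name a send CQ (3162). A hedged sketch of those three predicates over a local stand-in attr struct:

/* Hedged sketch of the three per-flavour checks; local stand-in struct. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct init_attr {
	bool srq, recv_cq, send_cq, rwq_ind_tbl;
	int max_recv_wr, max_recv_sge;
};

/* a DCT-style target needs both an SRQ and a receive CQ */
static int check_needs_srq_cq(const struct init_attr *a)
{
	return (!a->srq || !a->recv_cq) ? -EINVAL : 0;
}

/* flavours without a receive queue must not ask for recv resources */
static int check_no_recv(const struct init_attr *a)
{
	return (a->max_recv_wr || a->max_recv_sge) ? -EINVAL : 0;
}

/* an RSS raw QP must not also name a send CQ */
static int check_rss(const struct init_attr *a)
{
	return (a->rwq_ind_tbl && a->send_cq) ? -EINVAL : 0;
}

int main(void)
{
	struct init_attr a = { .srq = true, .recv_cq = true };

	printf("%d %d %d\n", check_needs_srq_cq(&a), check_no_recv(&a),
	       check_rss(&a));
	return 0;
}
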
3240 int mlx5_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
3250 err = check_qp_type(dev, attr, &type);
3254 err = check_valid_flow(dev, pd, attr, udata);
3260 params.attr = attr;
3261 params.is_rss_raw = !!attr->rwq_ind_tbl;
3284 err = process_vendor_flags(dev, qp, params.ucmd, attr);
3292 err = process_create_flags(dev, qp, attr);
3296 err = check_qp_attr(dev, qp, attr);
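
mlx5_ib_create_qp() at 3240 chains the validation steps in a fixed order, aborting on the first error: check_qp_type (3250), check_valid_flow (3254), then, after the RSS and ucmd setup, process_vendor_flags (3284), process_create_flags (3292) and check_qp_attr (3296). A stub model of that early-exit pipeline:

/* Stub model of the early-exit validation pipeline. */
#include <stdio.h>

static int check_qp_type(void)        { return 0; }
static int check_valid_flow(void)     { return 0; }
static int process_vendor_flags(void) { return 0; }
static int process_create_flags(void) { return 0; }
static int check_qp_attr(void)        { return 0; }

static int create_qp_checks(void)
{
	int err;

	if ((err = check_qp_type()))
		return err;
	if ((err = check_valid_flow()))
		return err;
	if ((err = process_vendor_flags()))
		return err;
	if ((err = process_create_flags()))
		return err;
	return check_qp_attr();
}

int main(void)
{
	printf("checks: %d\n", create_qp_checks());
	return 0;
}
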
3352 const struct ib_qp_attr *attr, int attr_mask,
3360 dest_rd_atomic = attr->max_dest_rd_atomic;
3365 access_flags = attr->qp_access_flags;
3519 const struct ib_qp_attr *attr, bool alt)
3529 alt ? attr->alt_pkey_index : attr->pkey_index);
3557 attr->dest_qp_num);
3589 alt ? attr->alt_timeout : attr->timeout);
4013 const struct ib_qp_attr *attr,
4032 else if ((attr_mask & IB_QP_AV) && attr->xmit_slave)
4034 mlx5_lag_get_slave_port(dev->mdev, attr->xmit_slave);
4103 const struct ib_qp_attr *attr, int attr_mask,
4176 switch (attr->path_mig_state) {
4189 tx_affinity = get_tx_affinity(ibqp, attr, attr_mask,
4207 if (attr->path_mtu < IB_MTU_256 ||
4208 attr->path_mtu > IB_MTU_4096) {
4209 mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
4213 MLX5_SET(qpc, qpc, mtu, attr->path_mtu);
4219 MLX5_SET(qpc, qpc, remote_qpn, attr->dest_qp_num);
4225 MLX5_SET(ads, pri_path, pkey_index, attr->pkey_index);
4233 MLX5_SET(ads, pri_path, vhca_port_num, attr->port_num);
4236 err = mlx5_set_path(dev, qp, &attr->ah_attr, pri_path,
4237 attr_mask & IB_QP_PORT ? attr->port_num :
4239 attr_mask, 0, attr, false);
4245 MLX5_SET(ads, pri_path, ack_timeout, attr->timeout);
4248 err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, alt_path,
4249 attr->alt_port_num,
4252 0, attr, true);
4269 MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
4272 MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
4274 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic)
4275 MLX5_SET(qpc, qpc, log_sra_max, ilog2(attr->max_rd_atomic));
4278 MLX5_SET(qpc, qpc, next_send_psn, attr->sq_psn);
4280 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic)
4282 ilog2(attr->max_dest_rd_atomic));
4285 err = set_qpc_atomic_flags(qp, attr, attr_mask, qpc);
4291 MLX5_SET(qpc, qpc, min_rnr_nak, attr->min_rnr_timer);
4294 MLX5_SET(qpc, qpc, next_rcv_psn, attr->rq_psn);
4297 MLX5_SET(qpc, qpc, q_key, attr->qkey);
4303 u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
4347 raw_qp_param.port = attr->port_num;
4350 raw_qp_param.rl.rate = attr->rate_limit;
4353 if (attr->rate_limit &&
4364 if (attr->rate_limit &&
4396 qp->trans_qp.atomic_rd_en = attr->qp_access_flags;
4398 qp->trans_qp.resp_depth = attr->max_dest_rd_atomic;
4400 qp->port = attr->port_num;
4402 qp->trans_qp.alt_port = attr->alt_port_num;
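
Within __mlx5_ib_modify_qp() (4103-4402), two details are easy to miss: path_mtu is range-checked against IB_MTU_256..IB_MTU_4096 before being written (4207-4213), and the RDMA-read/atomic depths are stored log2-encoded, and only when nonzero (4274-4282). A sketch of the log encoding with a local ilog2 stand-in:

/* Sketch of the log2 encoding of the rd_atomic depths; local ilog2. */
#include <stdio.h>

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned int max_rd_atomic = 16;

	if (max_rd_atomic)   /* only nonzero values are encoded */
		printf("log_sra_max = %u\n", ilog2_u(max_rd_atomic));
	return 0;
}
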
4491 static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
4506 new_state = attr->qp_state;
4525 if (attr->port_num == 0 ||
4526 attr->port_num > dev->num_ports) {
4528 attr->port_num, dev->num_ports);
4531 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
4533 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
4535 if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
4545 MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
4550 MLX5_SET(dctc, dctc, port, attr->port_num);
4552 set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
4572 MLX5_SET(dctc, dctc, min_rnr_nak, attr->min_rnr_timer);
4573 MLX5_SET(dctc, dctc, tclass, attr->ah_attr.grh.traffic_class);
4574 MLX5_SET(dctc, dctc, flow_label, attr->ah_attr.grh.flow_label);
4575 MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
4576 MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
4577 MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
4578 if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
4579 MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);
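
mlx5_ib_modify_dct() (4491-4579) translates the generic remote-access flags into individual DCT capability bits (4531-4535); in the real driver the atomic case additionally depends on device support. A sketch of that mapping with stand-in flag values and field names:

/* Sketch of the flag-to-capability mapping; values/fields are stand-ins. */
#include <stdio.h>

#define ACCESS_REMOTE_READ   (1 << 0)
#define ACCESS_REMOTE_WRITE  (1 << 1)
#define ACCESS_REMOTE_ATOMIC (1 << 2)

struct dct_caps { int rd_en, wr_en, atomic_en; };

static void set_dct_access(struct dct_caps *c, int qp_access_flags)
{
	if (qp_access_flags & ACCESS_REMOTE_READ)
		c->rd_en = 1;
	if (qp_access_flags & ACCESS_REMOTE_WRITE)
		c->wr_en = 1;
	if (qp_access_flags & ACCESS_REMOTE_ATOMIC)
		c->atomic_en = 1;   /* also gated on device caps in the driver */
}

int main(void)
{
	struct dct_caps c = {0};

	set_dct_access(&c, ACCESS_REMOTE_READ | ACCESS_REMOTE_WRITE);
	printf("rd=%d wr=%d atomic=%d\n", c.rd_en, c.wr_en, c.atomic_en);
	return 0;
}
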
4620 static int validate_rd_atomic(struct mlx5_ib_dev *dev, struct ib_qp_attr *attr,
4639 attr->max_rd_atomic > log_max_ra_res) {
4641 attr->max_rd_atomic);
4646 attr->max_dest_rd_atomic > log_max_ra_req) {
4648 attr->max_dest_rd_atomic);
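
validate_rd_atomic() at 4620 compares the requested initiator and responder depths against device limits; judging from the variable names at 4639 and 4646, those limits derive from log2-encoded caps, so the sketch below assumes a bound of 1 << log_max_*:

/* Sketch assuming log2-encoded device limits; cap values illustrative. */
#include <stdbool.h>
#include <stdio.h>

static bool validate_rd_atomic(unsigned int max_rd_atomic,
			       unsigned int max_dest_rd_atomic,
			       unsigned int log_max_ra_res,
			       unsigned int log_max_ra_req)
{
	if (max_rd_atomic > (1u << log_max_ra_res))
		return false;
	if (max_dest_rd_atomic > (1u << log_max_ra_req))
		return false;
	return true;
}

int main(void)
{
	printf("16 within 2^4 caps: %d\n", validate_rd_atomic(16, 16, 4, 4));
	printf("32 within 2^4 caps: %d\n", validate_rd_atomic(32, 16, 4, 4));
	return 0;
}
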
4654 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
4695 return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
4700 return mlx5_ib_modify_dct(ibqp, attr, attr_mask, &ucmd, udata);
4704 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
4705 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
4728 (attr->port_num == 0 ||
4729 attr->port_num > dev->num_ports)) {
4731 attr->port_num, dev->num_ports);
4736 attr->pkey_index >= dev->pkey_table_len) {
4737 mlx5_ib_dbg(dev, "invalid pkey index %d\n", attr->pkey_index);
4741 if (!validate_rd_atomic(dev, attr, attr_mask, qp_type))
4749 err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
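
mlx5_ib_modify_qp() at 4654 first dispatches GSI and DCT QPs to their own handlers (4695, 4700), then resolves the current and target states from attr_mask (4704-4705) before validating the port and pkey inputs (4728-4737). A sketch of the state resolution:

/* Sketch of the cur/new state resolution; masks are stand-in values. */
#include <stdio.h>

#define QP_CUR_STATE (1 << 0)   /* stand-in for IB_QP_CUR_STATE */
#define QP_STATE     (1 << 1)   /* stand-in for IB_QP_STATE */

static void resolve_states(int attr_mask, int attr_cur, int attr_new,
			   int qp_state, int *cur_state, int *new_state)
{
	*cur_state = (attr_mask & QP_CUR_STATE) ? attr_cur : qp_state;
	*new_state = (attr_mask & QP_STATE) ? attr_new : *cur_state;
}

int main(void)
{
	int cur, new_state;

	/* only IB_QP_STATE set: cur comes from the QP itself */
	resolve_states(QP_STATE, 0, 3, 2, &cur, &new_state);
	printf("cur=%d new=%d\n", cur, new_state);
	return 0;
}
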
5740 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
5754 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
5775 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
5783 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
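
The final hits (5740-5783) are the drain helpers: both build an ib_qp_attr with qp_state = IB_QPS_ERR and push it through ib_modify_qp so outstanding work completes flushed in the error state. A stub model of that pattern; ib_modify_qp_stub is a local stand-in, not the kernel symbol:

/* Stub model of draining via a modify-to-error; local stand-ins only. */
#include <stdio.h>

enum qps { QPS_RTS = 3, QPS_ERR = 6 };
struct qp { enum qps state; };
struct qp_attr { enum qps qp_state; };
#define QP_STATE_MASK (1 << 1)

static int ib_modify_qp_stub(struct qp *qp, const struct qp_attr *attr,
			     int attr_mask)
{
	if (attr_mask & QP_STATE_MASK)
		qp->state = attr->qp_state;  /* hw would then flush WRs */
	return 0;
}

int main(void)
{
	struct qp qp = { QPS_RTS };
	struct qp_attr attr = { .qp_state = QPS_ERR };
	int ret = ib_modify_qp_stub(&qp, &attr, QP_STATE_MASK);

	printf("ret=%d state=%d\n", ret, qp.state);
	return 0;
}
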