Lines Matching refs:attr_mask

3036 const struct ib_qp_attr *attr, int attr_mask,
3043 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3048 if (attr_mask & IB_QP_ACCESS_FLAGS)
3200 int attr_mask, u32 path_flags,
3209 if (attr_mask & IB_QP_PKEY_INDEX)
3234 (attr_mask & IB_QP_DEST_QPN))
3267 if (attr_mask & IB_QP_TIMEOUT)
3683 int attr_mask, u8 init,
3701 else if ((attr_mask & IB_QP_AV) && attr->xmit_slave)
3772 const struct ib_qp_attr *attr, int attr_mask,
3841 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
3857 tx_affinity = get_tx_affinity(ibqp, attr, attr_mask,
3874 } else if (attr_mask & IB_QP_PATH_MTU) {
3886 if (attr_mask & IB_QP_DEST_QPN)
3892 if (attr_mask & IB_QP_PKEY_INDEX)
3900 if (attr_mask & IB_QP_PORT)
3903 if (attr_mask & IB_QP_AV) {
3905 attr_mask & IB_QP_PORT ? attr->port_num :
3907 attr_mask, 0, attr, false);
3912 if (attr_mask & IB_QP_TIMEOUT)
3915 if (attr_mask & IB_QP_ALT_PATH) {
3918 attr_mask | IB_QP_PKEY_INDEX |
3936 if (attr_mask & IB_QP_RNR_RETRY)
3939 if (attr_mask & IB_QP_RETRY_CNT)
3942 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic)
3945 if (attr_mask & IB_QP_SQ_PSN)
3948 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic)
3952 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
3953 err = set_qpc_atomic_flags(qp, attr, attr_mask, qpc);
3958 if (attr_mask & IB_QP_MIN_RNR_TIMER)
3961 if (attr_mask & IB_QP_RQ_PSN)
3964 if (attr_mask & IB_QP_QKEY)
3971 u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
4001 optpar |= ib_mask_to_mlx5_opt(attr_mask);
4014 if (attr_mask & IB_QP_PORT)
4017 if (attr_mask & IB_QP_RATE_LIMIT) {
4063 if (attr_mask & IB_QP_ACCESS_FLAGS)
4065 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4067 if (attr_mask & IB_QP_PORT)
4069 if (attr_mask & IB_QP_ALT_PATH)
4121 enum ib_qp_attr_mask attr_mask)
4127 return is_valid_mask(attr_mask, req, opt);
4130 return is_valid_mask(attr_mask, req, opt);
4133 return is_valid_mask(attr_mask, req, opt);
4137 return is_valid_mask(attr_mask, req, opt);
4142 return is_valid_mask(attr_mask, req, opt);
4145 return is_valid_mask(attr_mask, req, opt);
4147 return is_valid_mask(attr_mask, req, opt);
4160 int attr_mask, struct mlx5_ib_modify_qp *ucmd,
4170 if (!(attr_mask & IB_QP_STATE))
4190 if (!is_valid_mask(attr_mask, required, 0))
4238 if (!is_valid_mask(attr_mask, required, 0))
4289 int attr_mask, enum ib_qp_type qp_type)
4306 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
4313 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
4323 int attr_mask, struct ib_udata *udata)
4361 return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
4367 return mlx5_ib_modify_dct(ibqp, attr, attr_mask, &ucmd, udata);
4371 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
4372 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
4375 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
4379 if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
4380 mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
4381 attr_mask);
4387 attr_mask)) {
4388 mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
4389 cur_state, new_state, ibqp->qp_type, attr_mask);
4392 !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
4393 mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
4394 cur_state, new_state, qp_type, attr_mask);
4398 if ((attr_mask & IB_QP_PORT) &&
4406 if (attr_mask & IB_QP_PKEY_INDEX) {
4407 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
4416 if (!validate_rd_atomic(dev, attr, attr_mask, qp_type))
4424 err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
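The hits above all follow the same modify-QP idiom: attr_mask is a bitmask of IB_QP_* flags telling the driver which fields of struct ib_qp_attr the caller actually filled in, so each field is read only after its bit is tested (often with a fallback such as "attr_mask & IB_QP_PORT ? attr->port_num : qp->port"), and a transition is rejected when the mask is missing required bits or carries bits that are not allowed, which is what the is_valid_mask(attr_mask, req, opt) calls appear to check. The sketch below is a minimal userspace illustration of that pattern, not the driver's code; the flag values, struct qp_attr layout, and mask_is_valid() helper are hypothetical stand-ins invented for the example.

    /*
     * Minimal sketch of the attr_mask idiom seen in the listing above.
     * All names here (QP_PORT, struct qp_attr, mask_is_valid, ...) are
     * hypothetical stand-ins, not the kernel's IB_QP_* definitions.
     */
    #include <stdbool.h>
    #include <stdio.h>

    enum {
    	QP_STATE      = 1 << 0,
    	QP_PKEY_INDEX = 1 << 1,
    	QP_PORT       = 1 << 2,
    	QP_RQ_PSN     = 1 << 3,
    };

    struct qp_attr {
    	unsigned int pkey_index;
    	unsigned int port_num;
    	unsigned int rq_psn;
    };

    /* Same shape as the is_valid_mask(attr_mask, req, opt) calls in the
     * listing: every required bit must be set, and nothing outside
     * req | opt may be set. */
    static bool mask_is_valid(int mask, int req, int opt)
    {
    	return (mask & req) == req && !(mask & ~(req | opt));
    }

    static int modify_qp_sketch(const struct qp_attr *attr, int attr_mask,
    			    unsigned int cur_port)
    {
    	/* Example transition: a PKey index is required, a port change
    	 * and an RQ PSN are optional. */
    	if (!mask_is_valid(attr_mask, QP_STATE | QP_PKEY_INDEX,
    			   QP_PORT | QP_RQ_PSN))
    		return -1;

    	/* Fallback idiom: use the new port only if its bit is set. */
    	unsigned int port = attr_mask & QP_PORT ? attr->port_num : cur_port;

    	/* A field is consumed only when its bit is set in attr_mask. */
    	if (attr_mask & QP_PKEY_INDEX)
    		printf("port %u: pkey index %u\n", port, attr->pkey_index);
    	if (attr_mask & QP_RQ_PSN)
    		printf("rq psn 0x%x\n", attr->rq_psn);
    	return 0;
    }

    int main(void)
    {
    	struct qp_attr attr = { .pkey_index = 3, .rq_psn = 0x100 };

    	return modify_qp_sketch(&attr,
    				QP_STATE | QP_PKEY_INDEX | QP_RQ_PSN, 1);
    }

In this sketch the call succeeds and keeps the current port, because QP_PORT is not set in the mask; adding an unexpected flag (or dropping QP_PKEY_INDEX) makes mask_is_valid() fail, mirroring how the listed code rejects an invalid attr_mask for a given state transition.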