Lines matching references to "wr" in the mlx5 InfiniBand driver's work-request file (drivers/infiniband/hw/mlx5/wr.c); the number leading each entry is the line number within that file.

9 #include "wr.h"
104 static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
111 if (wr->send_flags & IB_SEND_IP_CSUM)
115 if (wr->opcode == IB_WR_LSO) {
116 struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
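
set_eth_seg() builds the Ethernet segment for offloaded sends: IB_SEND_IP_CSUM turns into the hardware checksum bits, and IB_WR_LSO copies the packet headers and MSS out of the caller's ib_ud_wr. A minimal caller-side sketch of what feeds it, assuming an already-created LSO-capable UD QP, an address handle, and a registered payload SGE (post_lso_send and its parameters are hypothetical):

#include <rdma/ib_verbs.h>

static int post_lso_send(struct ib_qp *qp, struct ib_ah *ah, u32 remote_qpn,
			 u32 remote_qkey, void *hdr, int hlen,
			 struct ib_sge *payload)
{
	const struct ib_send_wr *bad_wr;
	struct ib_ud_wr wr = {};

	wr.wr.opcode	 = IB_WR_LSO;		/* large-send offload */
	wr.wr.send_flags = IB_SEND_IP_CSUM;	/* hw IP/L4 checksum */
	wr.wr.sg_list	 = payload;
	wr.wr.num_sge	 = 1;
	wr.ah		 = ah;
	wr.remote_qpn	 = remote_qpn;
	wr.remote_qkey	 = remote_qkey;
	wr.header	 = hdr;			/* headers copied into the WQE */
	wr.hlen		 = hlen;
	wr.mss		 = 1460;		/* example MSS */

	return ib_post_send(qp, &wr.wr, &bad_wr);
}
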
153 const struct ib_send_wr *wr)
155 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
157 cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
158 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
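
set_datagram_seg() consumes exactly three things from the caller's ib_ud_wr: the address vector held by the AH (copied wholesale into dseg->av), the remote QPN (tagged with MLX5_EXTENDED_UD_AV), and the qkey. A sketch of the producing side, assuming pd and a usable UD QP already exist; the LID value and helper name are illustrative:

#include <rdma/ib_verbs.h>

static int post_ud_send(struct ib_pd *pd, struct ib_qp *qp, struct ib_sge *sge,
			u32 remote_qpn, u32 remote_qkey)
{
	const struct ib_send_wr *bad_wr;
	struct rdma_ah_attr ah_attr = {};
	struct ib_ud_wr wr = {};
	struct ib_ah *ah;

	ah_attr.type = RDMA_AH_ATTR_TYPE_IB;
	rdma_ah_set_dlid(&ah_attr, 0x42);	/* example destination LID */
	rdma_ah_set_port_num(&ah_attr, 1);

	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	wr.wr.opcode   = IB_WR_SEND;
	wr.wr.sg_list  = sge;
	wr.wr.num_sge  = 1;
	wr.ah	       = ah;		/* av memcpy'd into the datagram seg */
	wr.remote_qpn  = remote_qpn;
	wr.remote_qkey = remote_qkey;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}
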
321 const struct ib_send_wr *wr)
323 const struct mlx5_umr_wr *umrwr = umr_wr(wr);
328 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
337 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
344 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
346 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
353 if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
355 if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
358 if (!wr->num_sge)
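
The MLX5_IB_SEND_UMR_* values are driver-internal send_flags (declared in the driver's mlx5_ib.h, not part of the uverbs ABI) that tell set_reg_umr_segment() which mkey fields this UMR WQE may update. A self-contained analogue of that translation pattern; the constants, struct layout, and bit positions below are purely illustrative, not the real hardware format:

#include <stdint.h>

#define UMR_UPDATE_XLT		(1u << 0)	/* illustrative values only */
#define UMR_UPDATE_TRANSLATION	(1u << 1)
#define UMR_UPDATE_PD_ACCESS	(1u << 2)
#define UMR_ENABLE_MR		(1u << 3)
#define UMR_DISABLE_MR		(1u << 4)

struct umr_ctrl {			/* stand-in for mlx5_wqe_umr_ctrl_seg */
	uint64_t mkey_mask;		/* which mkey fields to update */
};

static void fill_umr_ctrl(struct umr_ctrl *ctrl, unsigned int send_flags)
{
	if (send_flags & UMR_UPDATE_TRANSLATION)
		ctrl->mkey_mask |= 1ull << 0;	/* address/length */
	if (send_flags & UMR_UPDATE_PD_ACCESS)
		ctrl->mkey_mask |= 1ull << 1;	/* pd + access rights */
	if (send_flags & (UMR_ENABLE_MR | UMR_DISABLE_MR))
		ctrl->mkey_mask |= 1ull << 2;	/* free/in-use state */
}
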
403 const struct ib_send_wr *wr)
405 const struct mlx5_umr_wr *umrwr = umr_wr(wr);
408 if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
427 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
449 static __be32 send_ieth(const struct ib_send_wr *wr)
451 switch (wr->opcode) {
454 return wr->ex.imm_data;
457 return cpu_to_be32(wr->ex.invalidate_rkey);
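
send_ieth() selects the value for the control segment's immediate/invalidate dword. From the matched returns, the elided case labels are almost certainly the immediate and send-with-invalidate opcodes, giving roughly:

static __be32 send_ieth(const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		/* ex.imm_data is already big-endian (__be32) */
		return wr->ex.imm_data;
	case IB_WR_SEND_WITH_INV:
		/* the rkey is host-order and needs swapping */
		return cpu_to_be32(wr->ex.invalidate_rkey);
	default:
		return 0;
	}
}

The asymmetry is deliberate: ib_send_wr carries imm_data as __be32 straight from the user, while invalidate_rkey is a host-order u32.
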
481 static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
493 for (i = 0; i < wr->num_sge; i++) {
494 size_t len = wr->sg_list[i].length;
495 void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);
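
set_data_inl_seg() memcpy's each scatter entry's buffer straight into the send queue, which is what IB_SEND_INLINE means: no lkey lookup, and the buffer is reusable the moment posting returns. The total length must fit the QP's cap.max_inline_data. A minimal caller-side sketch, assuming the QP was created with a nonzero inline cap (post_inline_send is hypothetical):

#include <rdma/ib_verbs.h>

static int post_inline_send(struct ib_qp *qp, void *buf, u32 len)
{
	const struct ib_send_wr *bad_wr;
	struct ib_sge sge = {
		.addr	= (u64)(unsigned long)buf,
		.length	= len,
		/* .lkey is ignored for inline sends */
	};
	struct ib_send_wr wr = {
		.opcode	    = IB_WR_SEND,
		.send_flags = IB_SEND_INLINE | IB_SEND_SIGNALED,
		.sg_list    = &sge,
		.num_sge    = 1,
	};

	return ib_post_send(qp, &wr, &bad_wr);
}
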
782 const struct ib_reg_wr *wr = reg_wr(send_wr);
783 struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr);
791 unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) ||
814 set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len,
820 ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size,
857 const struct ib_reg_wr *wr,
861 struct mlx5_ib_mr *mr = to_mmr(wr->mr);
866 bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
870 if (!mlx5_ib_can_reconfig_with_umr(dev, 0, wr->access)) {
877 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
893 set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
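
set_reg_wr() consumes an ib_reg_wr: note the guard at line 870 (the registration must be achievable without falling back to a heavier UMR) and the explicit rejection of IB_SEND_INLINE at line 877. A sketch of the producing side, the usual fast-registration sequence, assuming pd/qp and a DMA-mapped scatterlist; error unwinding is elided:

#include <rdma/ib_verbs.h>

static int fast_reg(struct ib_pd *pd, struct ib_qp *qp,
		    struct scatterlist *sg, int sg_nents)
{
	const struct ib_send_wr *bad_wr;
	struct ib_reg_wr wr = {};
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents)
		return -EINVAL;		/* mr cleanup elided for brevity */

	wr.wr.opcode = IB_WR_REG_MR;
	wr.mr	     = mr;
	wr.key	     = mr->lkey;
	wr.access    = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}
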
944 const struct ib_send_wr *wr, unsigned int *idx,
955 (*ctrl)->imm = send_ieth(wr);
969 const struct ib_send_wr *wr, unsigned int *idx, int *size,
972 return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
973 wr->send_flags & IB_SEND_SIGNALED,
974 wr->send_flags & IB_SEND_SOLICITED);
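
begin_wqe() is a thin wrapper that derives the WQE's completion and solicited-event bits from the per-WR IB_SEND_SIGNALED / IB_SEND_SOLICITED flags; __begin_wqe() takes them as explicit booleans so internal multi-WQE paths (such as handle_psv() below) can override them. This is what makes selective signaling work. A sketch, assuming a QP created with sq_sig_type = IB_SIGNAL_REQ_WR:

#include <rdma/ib_verbs.h>

static int post_batch(struct ib_qp *qp, struct ib_sge *sge, unsigned int n)
{
	const struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode	 = IB_WR_SEND,
		.sg_list = sge,
		.num_sge = 1,
	};
	unsigned int i;
	int err;

	for (i = 0; i < n; i++) {
		wr.wr_id = i;
		/* one CQE per 32 sends keeps SQ accounting cheap */
		wr.send_flags = (i % 32 == 31) ? IB_SEND_SIGNALED : 0;
		err = ib_post_send(qp, &wr, &bad_wr);
		if (err)
			return err;
	}
	return 0;
}
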
1008 static void handle_rdma_op(const struct ib_send_wr *wr, void **seg, int *size)
1010 set_raddr_seg(*seg, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey);
1015 static void handle_local_inv(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
1020 (*ctrl)->imm = cpu_to_be32(wr->ex.invalidate_rkey);
1024 static int handle_reg_mr(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
1029 (*ctrl)->imm = cpu_to_be32(reg_wr(wr)->key);
1030 return set_reg_wr(qp, reg_wr(wr), seg, size, cur_edge, true);
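
These three handlers cover the simple RC opcodes: handle_rdma_op() stamps the remote address segment from rdma_wr(wr), while handle_local_inv() and handle_reg_mr() put the rkey being invalidated or registered into the control segment's immediate field. A caller-side sketch of the first two chained together, assuming qp, a registered local sge, and a peer address/rkey learned out of band:

#include <rdma/ib_verbs.h>

static int write_then_invalidate(struct ib_qp *qp, struct ib_sge *sge,
				 u64 remote_addr, u32 rkey, u32 old_rkey)
{
	const struct ib_send_wr *bad_wr;
	struct ib_send_wr inv = {
		.opcode		    = IB_WR_LOCAL_INV,
		.send_flags	    = IB_SEND_SIGNALED,
		.ex.invalidate_rkey = old_rkey,	/* rkey being retired */
	};
	struct ib_rdma_wr write = {
		.wr = {
			.next	 = &inv,	/* two WRs, one post */
			.opcode	 = IB_WR_RDMA_WRITE,
			.sg_list = sge,
			.num_sge = 1,
		},
		.remote_addr = remote_addr,	/* feeds set_raddr_seg() */
		.rkey	     = rkey,
	};

	return ib_post_send(qp, &write.wr, &bad_wr);
}
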
1034 const struct ib_send_wr *wr,
1045 err = __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
1057 finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, nreq,
1066 const struct ib_send_wr *wr,
1081 mr = to_mmr(reg_wr(wr)->mr);
1089 reg_pi_wr.access = reg_wr(wr)->access;
1098 finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id,
1101 err = begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq);
1125 err = set_pi_umr_wr(wr, qp, seg, size, cur_edge);
1130 finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, nreq,
1134 err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
1140 err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
1153 const struct ib_send_wr *wr,
1160 switch (wr->opcode) {
1164 handle_rdma_op(wr, seg, size);
1175 handle_local_inv(qp, wr, ctrl, seg, size, cur_edge, *idx);
1180 err = handle_reg_mr(qp, wr, ctrl, seg, size, cur_edge, *idx);
1187 err = handle_reg_mr_integrity(dev, qp, wr, ctrl, seg, size,
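
handle_qpt_rc() is the per-opcode dispatcher for RC QPs; compare handle_qpt_uc() just below, which only routes write-type opcodes, since UC transports cannot carry reads or atomics. A sketch of an RC-only operation, an RDMA READ pulling data from the peer into a registered local buffer:

#include <rdma/ib_verbs.h>

static int post_rdma_read(struct ib_qp *qp, struct ib_sge *sge,
			  u64 remote_addr, u32 rkey)
{
	const struct ib_send_wr *bad_wr;
	struct ib_rdma_wr wr = {
		.wr = {
			.opcode	    = IB_WR_RDMA_READ,
			.send_flags = IB_SEND_SIGNALED,
			.sg_list    = sge,	/* local destination */
			.num_sge    = 1,
		},
		.remote_addr = remote_addr,	/* remote source */
		.rkey	     = rkey,
	};

	return ib_post_send(qp, &wr.wr, &bad_wr);
}
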
1203 static void handle_qpt_uc(const struct ib_send_wr *wr, void **seg, int *size)
1205 switch (wr->opcode) {
1208 handle_rdma_op(wr, seg, size);
1216 const struct ib_send_wr *wr, void **seg,
1219 set_datagram_seg(*seg, wr);
1225 static void handle_qpt_ud(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
1228 set_datagram_seg(*seg, wr);
1241 set_eth_seg(wr, qp, seg, size, cur_edge);
1247 const struct ib_send_wr *wr,
1253 if (unlikely(wr->opcode != MLX5_IB_WR_UMR)) {
1255 mlx5_ib_warn(dev, "bad opcode %d\n", wr->opcode);
1260 (*ctrl)->imm = cpu_to_be32(umr_wr(wr)->mkey);
1261 err = set_reg_umr_segment(dev, *seg, wr);
1267 set_reg_mkey_segment(dev, *seg, wr);
1275 int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1298 *bad_wr = wr;
1303 return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
1310 for (nreq = 0; wr; nreq++, wr = wr->next) {
1311 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
1314 *bad_wr = wr;
1318 num_sge = wr->num_sge;
1322 *bad_wr = wr;
1326 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge,
1331 *bad_wr = wr;
1335 if (wr->opcode == IB_WR_REG_MR ||
1336 wr->opcode == IB_WR_REG_MR_INTEGRITY) {
1340 if (wr->send_flags & IB_SEND_FENCE) {
1357 err = handle_qpt_rc(dev, qp, wr, &ctrl, &seg, &size,
1361 *bad_wr = wr;
1363 } else if (wr->opcode == IB_WR_REG_MR_INTEGRITY) {
1369 handle_qpt_uc(wr, &seg, &size);
1375 *bad_wr = wr;
1380 handle_qpt_hw_gsi(qp, wr, &seg, &size, &cur_edge);
1383 handle_qpt_ud(qp, wr, &seg, &size, &cur_edge);
1386 err = handle_qpt_reg_umr(dev, qp, wr, &ctrl, &seg,
1396 if (wr->send_flags & IB_SEND_INLINE && num_sge) {
1397 err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge);
1400 *bad_wr = wr;
1407 if (unlikely(!wr->sg_list[i].length))
1412 wr->sg_list + i);
1419 finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq,
1420 fence, mlx5_ib_opcode[wr->opcode]);
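
mlx5_ib_post_send() walks the wr->next chain, validating the opcode and num_sge, carving a WQE with begin_wqe(), dispatching on QP type, and stamping either inline data or data-pointer segments. Every failure path sets *bad_wr before bailing out, which is the verbs contract callers rely on: WRs ahead of *bad_wr were consumed, the rest were not. A sketch of honoring that contract (post_chain is hypothetical):

#include <rdma/ib_verbs.h>

static int post_chain(struct ib_qp *qp, struct ib_send_wr *chain)
{
	const struct ib_send_wr *bad_wr = NULL;
	int err = ib_post_send(qp, chain, &bad_wr);

	if (err) {
		/* WRs before bad_wr were posted and will still complete;
		 * bad_wr and everything after it must be handled again. */
		pr_err("post_send failed at wr_id %llu: %d\n",
		       bad_wr->wr_id, err);
	}
	return err;
}
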
1459 int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
1475 *bad_wr = wr;
1480 return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
1486 for (nreq = 0; wr; nreq++, wr = wr->next) {
1489 *bad_wr = wr;
1493 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
1495 *bad_wr = wr;
1503 for (i = 0; i < wr->num_sge; i++)
1504 set_data_ptr_seg(scat + i, wr->sg_list + i);
1517 qp->rq.wrid[ind] = wr->wr_id;
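
The receive side is symmetric but simpler: each WR's num_sge is checked against rq.max_gs, one data-pointer segment is written per SGE, and wr_id is stashed so the CQE can name the buffer that landed. A sketch of keeping the RQ stocked, assuming one registered SGE per slot (refill_rq is hypothetical):

#include <rdma/ib_verbs.h>

static int refill_rq(struct ib_qp *qp, struct ib_sge *sges, unsigned int n)
{
	const struct ib_recv_wr *bad_wr;
	unsigned int i;
	int err;

	for (i = 0; i < n; i++) {
		struct ib_recv_wr wr = {
			.wr_id	 = i,		/* echoed back in the CQE */
			.sg_list = &sges[i],
			.num_sge = 1,		/* must not exceed rq max_gs */
		};

		err = ib_post_recv(qp, &wr, &bad_wr);
		if (err)
			return err;
	}
	return 0;
}
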