Lines matching references to "wr" (the ib_send_wr / ib_recv_wr work requests) in the mlx5 work-request code:
9 #include "wr.h"
54 static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
61 if (wr->send_flags & IB_SEND_IP_CSUM)
65 if (wr->opcode == IB_WR_LSO) {
66 struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
103 const struct ib_send_wr *wr)
105 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
107 cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
108 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
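
Lines 103-108 fill the UD datagram segment from the address handle. A sketch reconstructed from those matches; the enclosing declaration and the to_mah() conversion are assumed from context.

static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
                             const struct ib_send_wr *wr)
{
        /* The mlx5 AH already caches a device-format address vector. */
        memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
        /* Destination QPN, marked as an extended UD address vector. */
        dseg->av.dqp_dct =
                cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
        /* Q_Key the remote UD QP checks on receive. */
        dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
}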
228 static __be32 send_ieth(const struct ib_send_wr *wr)
230 switch (wr->opcode) {
233 return wr->ex.imm_data;
236 return cpu_to_be32(wr->ex.invalidate_rkey);
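
The send_ieth() matches at lines 228-236 pick the 32-bit "immediate" dword for the WQE control segment. A completed sketch; the case labels beyond the two visible returns are an assumption based on the standard verbs opcodes.

static __be32 send_ieth(const struct ib_send_wr *wr)
{
        switch (wr->opcode) {
        case IB_WR_SEND_WITH_IMM:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                /* Immediate data is already big-endian in the WR. */
                return wr->ex.imm_data;
        case IB_WR_SEND_WITH_INV:
                /* Send-with-invalidate carries the rkey to invalidate. */
                return cpu_to_be32(wr->ex.invalidate_rkey);
        default:
                return 0;
        }
}

Note the asymmetry visible in the matches: ex.imm_data is declared __be32 and passes through untouched, while ex.invalidate_rkey is a host-order u32 and needs cpu_to_be32().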
260 static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
272 for (i = 0; i < wr->num_sge; i++) {
273 size_t len = wr->sg_list[i].length;
274 void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);
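
Lines 260-274 are the inline-data path: instead of posting scatter/gather pointers, the SGE payloads are copied straight into the WQE. A simplified sketch, assuming an mlx5_wqe_inline_seg header whose byte_count is tagged with MLX5_INLINE_SEG; the real loop copies through a wrap-aware helper because a WQE can cross the end of the send-queue buffer.

static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
                            void **wqe, int *wqe_sz, void **cur_edge)
{
        struct mlx5_wqe_inline_seg *seg = *wqe;
        size_t offset = sizeof(*seg);   /* payload follows the header */
        int inl = 0;
        int i;

        for (i = 0; i < wr->num_sge; i++) {
                size_t len = wr->sg_list[i].length;
                void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);

                inl += len;
                if (unlikely(inl > qp->max_inline_data))
                        return -ENOMEM;

                /* Simplification: the driver copies via a helper that
                 * respects *cur_edge (the end of the mapped SQ fragment). */
                memcpy(*wqe + offset, addr, len);
                offset += len;
        }

        /* Tag the total length as inline data. */
        seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
        /* WQE size is accounted in 16-byte units. */
        *wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
        return 0;
}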
561 const struct ib_reg_wr *wr = reg_wr(send_wr);
562 struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr);
570 unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) ||
593 set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len,
599 ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size,
636 const struct ib_reg_wr *wr,
640 struct mlx5_ib_mr *mr = to_mmr(wr->mr);
645 bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
652 if (!mlx5r_umr_can_reconfig(dev, 0, wr->access)) {
659 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
675 set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
750 const struct ib_send_wr *wr, unsigned int *idx, int *size,
754 send_ieth(wr), wr->send_flags & IB_SEND_SIGNALED,
755 wr->send_flags & IB_SEND_SOLICITED);
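
Lines 750-755 show begin_wqe(): per the matches it is a thin wrapper that derives the control-segment inputs from the WR and hands off to a shared WQE-allocation helper. The helper name mlx5r_begin_wqe() is inferred from the mlx5r_finish_wqe() matches and should be treated as an assumption.

static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
                     struct mlx5_wqe_ctrl_seg **ctrl,
                     const struct ib_send_wr *wr, unsigned int *idx,
                     int *size, void **cur_edge, int nreq)
{
        /* Reserve the next WQE slot and emit the control segment; the
         * immediate dword, signalling and solicited-event bits all come
         * from the posted WR. */
        return mlx5r_begin_wqe(qp, seg, ctrl, idx, size, cur_edge, nreq,
                               send_ieth(wr),
                               wr->send_flags & IB_SEND_SIGNALED,
                               wr->send_flags & IB_SEND_SOLICITED);
}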
787 static void handle_rdma_op(const struct ib_send_wr *wr, void **seg, int *size)
789 set_raddr_seg(*seg, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey);
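
Line 789 is essentially the whole RDMA-opcode handler; below it is filled out with the segment advance that the *seg/*size convention in the other matches implies.

static void handle_rdma_op(const struct ib_send_wr *wr, void **seg, int *size)
{
        /* RDMA READ/WRITE place the remote address and rkey right after
         * the control segment. */
        set_raddr_seg(*seg, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey);
        *seg += sizeof(struct mlx5_wqe_raddr_seg);
        *size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
}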
794 static void handle_local_inv(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
799 (*ctrl)->imm = cpu_to_be32(wr->ex.invalidate_rkey);
803 static int handle_reg_mr(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
808 (*ctrl)->imm = cpu_to_be32(reg_wr(wr)->key);
809 return set_reg_wr(qp, reg_wr(wr), seg, size, cur_edge, true);
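
Lines 794-809 are the local-invalidate and fast-registration handlers: both record the opcode in per-WQE bookkeeping and put the relevant key into the control segment's immediate field. Sketch below; the qp->sq.wr_data bookkeeping and the set_linv_wr() helper are assumptions from context.

static void handle_local_inv(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
                             struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
                             int *size, void **cur_edge, unsigned int idx)
{
        qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
        /* The rkey to invalidate rides in the control segment. */
        (*ctrl)->imm = cpu_to_be32(wr->ex.invalidate_rkey);
        set_linv_wr(qp, seg, size, cur_edge);
}

static int handle_reg_mr(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
                         struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
                         int *size, void **cur_edge, unsigned int idx)
{
        qp->sq.wr_data[idx] = IB_WR_REG_MR;
        /* The key being registered rides in the control segment. */
        (*ctrl)->imm = cpu_to_be32(reg_wr(wr)->key);
        return set_reg_wr(qp, reg_wr(wr), seg, size, cur_edge, true);
}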
813 const struct ib_send_wr *wr,
825 send_ieth(wr), false, true);
836 mlx5r_finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id,
845 const struct ib_send_wr *wr,
860 mr = to_mmr(reg_wr(wr)->mr);
868 reg_pi_wr.access = reg_wr(wr)->access;
878 wr->wr_id, nreq, fence, MLX5_OPCODE_UMR);
880 err = begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq);
904 err = set_pi_umr_wr(wr, qp, seg, size, cur_edge);
909 mlx5r_finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id,
913 err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
919 err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
932 const struct ib_send_wr *wr,
939 switch (wr->opcode) {
943 handle_rdma_op(wr, seg, size);
954 handle_local_inv(qp, wr, ctrl, seg, size, cur_edge, *idx);
959 err = handle_reg_mr(qp, wr, ctrl, seg, size, cur_edge, *idx);
966 err = handle_reg_mr_integrity(dev, qp, wr, ctrl, seg, size,
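
Lines 932-966 show the RC opcode dispatch. A condensed sketch of that switch; the full parameter list is reconstructed from the call at line 1129, and the atomic-opcode rejection plus the num_sge reset for the registration/invalidate opcodes are inferences, not shown by the matches.

static int handle_qpt_rc(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                         const struct ib_send_wr *wr,
                         struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
                         void **cur_edge, unsigned int *idx, int nreq, u8 fence,
                         u8 next_fence, int *num_sge)
{
        int err = 0;

        switch (wr->opcode) {
        case IB_WR_RDMA_READ:
        case IB_WR_RDMA_WRITE:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                handle_rdma_op(wr, seg, size);
                break;

        case IB_WR_ATOMIC_CMP_AND_SWP:
        case IB_WR_ATOMIC_FETCH_AND_ADD:
        case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
                /* Assumption: atomics are rejected on this path. */
                err = -EOPNOTSUPP;
                break;

        case IB_WR_LOCAL_INV:
                handle_local_inv(qp, wr, ctrl, seg, size, cur_edge, *idx);
                *num_sge = 0;   /* no gather entries for an invalidate */
                break;

        case IB_WR_REG_MR:
                err = handle_reg_mr(qp, wr, ctrl, seg, size, cur_edge, *idx);
                *num_sge = 0;   /* the WQE is fully built by the handler */
                break;

        case IB_WR_REG_MR_INTEGRITY:
                err = handle_reg_mr_integrity(dev, qp, wr, ctrl, seg, size,
                                              cur_edge, idx, nreq, fence,
                                              next_fence, num_sge);
                break;

        default:
                break;
        }

        return err;
}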
982 static void handle_qpt_uc(const struct ib_send_wr *wr, void **seg, int *size)
984 switch (wr->opcode) {
987 handle_rdma_op(wr, seg, size);
995 const struct ib_send_wr *wr, void **seg,
998 set_datagram_seg(*seg, wr);
1004 static void handle_qpt_ud(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
1007 set_datagram_seg(*seg, wr);
1020 set_eth_seg(wr, qp, seg, size, cur_edge);
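
Lines 995-1020 cover the UD and hardware-GSI paths: both start with the datagram segment from lines 103-108, and the plain UD path additionally emits the Ethernet segment for QPs created with IPoIB UD LSO offload. Hedged sketch; the flag check and the omission of the small pad segment are assumptions from context.

static void handle_qpt_ud(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
                          void **seg, int *size, void **cur_edge)
{
        set_datagram_seg(*seg, wr);
        *seg += sizeof(struct mlx5_wqe_datagram_seg);
        *size += sizeof(struct mlx5_wqe_datagram_seg) / 16;

        /* QPs created for IPoIB UD LSO offload also need the Ethernet
         * segment so the device can do checksum/LSO; the flag name is
         * assumed, and the pad segment preceding it is elided. */
        if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO)
                set_eth_seg(wr, qp, seg, size, cur_edge);
}

The hardware-GSI variant (lines 995-998) stops after the datagram segment.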
1051 int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1073 *bad_wr = wr;
1078 return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
1082 for (nreq = 0; wr; nreq++, wr = wr->next) {
1083 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
1086 *bad_wr = wr;
1090 num_sge = wr->num_sge;
1094 *bad_wr = wr;
1098 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge,
1103 *bad_wr = wr;
1107 if (wr->opcode == IB_WR_REG_MR ||
1108 wr->opcode == IB_WR_REG_MR_INTEGRITY) {
1112 if (wr->send_flags & IB_SEND_FENCE) {
1129 err = handle_qpt_rc(dev, qp, wr, &ctrl, &seg, &size,
1133 *bad_wr = wr;
1135 } else if (wr->opcode == IB_WR_REG_MR_INTEGRITY) {
1141 handle_qpt_uc(wr, &seg, &size);
1147 *bad_wr = wr;
1152 handle_qpt_hw_gsi(qp, wr, &seg, &size, &cur_edge);
1155 handle_qpt_ud(qp, wr, &seg, &size, &cur_edge);
1162 if (wr->send_flags & IB_SEND_INLINE && num_sge) {
1163 err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge);
1166 *bad_wr = wr;
1173 if (unlikely(!wr->sg_list[i].length))
1178 wr->sg_list + i);
1185 mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id,
1186 nreq, fence, mlx5_ib_opcode[wr->opcode]);
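
Taken together, the mlx5_ib_post_send() matches at lines 1051-1186 outline the per-WR posting loop: validate the opcode and SGE count, reserve a WQE, emit QP-type-specific segments, then either inline the data or emit one data-pointer segment per SGE, and finally stamp the control segment. A condensed skeleton of that loop body; surrounding declarations, locking, fencing, the Raw Packet/GSI branches, the edge handling and the doorbell are elided.

        for (nreq = 0; wr; nreq++, wr = wr->next) {
                if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
                        err = -EINVAL;
                        *bad_wr = wr;   /* report the failing WR to the caller */
                        goto out;
                }

                num_sge = wr->num_sge;
                /* ... num_sge is checked against the SQ's max_gs ... */

                err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge,
                                nreq);
                if (err) {
                        err = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                /* ... fence selection and the per-QP-type segment writers
                 * (handle_qpt_rc/uc/ud/hw_gsi) run here; REG_MR and
                 * LOCAL_INV consume num_sge ... */

                if (wr->send_flags & IB_SEND_INLINE && num_sge) {
                        err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge);
                        if (unlikely(err)) {
                                *bad_wr = wr;
                                goto out;
                        }
                } else {
                        for (i = 0; i < num_sge; i++) {
                                /* zero-length SGEs are skipped */
                                if (unlikely(!wr->sg_list[i].length))
                                        continue;
                                set_data_ptr_seg(seg, wr->sg_list + i);
                                seg += sizeof(struct mlx5_wqe_data_seg);
                                size += sizeof(struct mlx5_wqe_data_seg) / 16;
                        }
                }

                mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id,
                                 nreq, fence, mlx5_ib_opcode[wr->opcode]);
        }
        /* ... the doorbell record update and doorbell ring then hand the new
         * WQEs to the device ... */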
1206 int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
1222 *bad_wr = wr;
1227 return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
1233 for (nreq = 0; wr; nreq++, wr = wr->next) {
1236 *bad_wr = wr;
1240 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
1242 *bad_wr = wr;
1250 for (i = 0; i < wr->num_sge; i++)
1251 set_data_ptr_seg(scat + i, wr->sg_list + i);
1264 qp->rq.wrid[ind] = wr->wr_id;
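
The mlx5_ib_post_recv() matches at lines 1206-1264 follow the same chain-walking pattern on the receive side: each WR's sg_list is turned into data-pointer segments in the next RQ WQE, and wr_id is stored in qp->rq.wrid so the completion can be matched back to the WR. For reference, a sketch of the scatter-entry writer those lines call; the mlx5_wqe_data_seg layout (byte_count, lkey, addr) is standard mlx5 but should be treated as an assumption here.

static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
        /* One data segment per SGE: length, lkey and address, big-endian. */
        dseg->byte_count = cpu_to_be32(sg->length);
        dseg->lkey       = cpu_to_be32(sg->lkey);
        dseg->addr       = cpu_to_be64(sg->addr);
}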