Lines Matching refs:wqe

14 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
18 struct rxe_send_wqe *wqe, int npsn)
23 int to_send = (wqe->dma.resid > qp->mtu) ?
24 qp->mtu : wqe->dma.resid;
26 qp->req.opcode = next_opcode(qp, wqe,
27 wqe->wr.opcode);
29 if (wqe->wr.send_flags & IB_SEND_INLINE) {
30 wqe->dma.resid -= to_send;
31 wqe->dma.sge_offset += to_send;
33 advance_dma_data(&wqe->dma, to_send);
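Lines 23-33 show how a partially acknowledged first send/write is rewound on retry: each already-acked packet accounted for in npsn consumes min(resid, mtu) bytes, either by bumping the inline counters directly or via advance_dma_data() for the SGE case. A minimal user-space sketch of that per-packet advance, assuming the surrounding loop (not shown in this listing, since its header does not contain "wqe") runs npsn times; struct dma_info and skip_acked_packets are illustrative names, not driver symbols.

    #include <stdio.h>

    /* Simplified stand-ins for the wqe DMA bookkeeping fields shown above. */
    struct dma_info {
        unsigned int length;      /* total transfer length */
        unsigned int resid;       /* bytes not yet sent */
        unsigned int sge_offset;  /* offset into the inline data */
    };

    /* Skip the bytes covered by npsn already-acknowledged packets. */
    static void skip_acked_packets(struct dma_info *dma, unsigned int mtu,
                                   unsigned int npsn)
    {
        for (unsigned int i = 0; i < npsn; i++) {
            unsigned int to_send = (dma->resid > mtu) ? mtu : dma->resid;

            /* models only the IB_SEND_INLINE branch; the SGE case
             * would go through advance_dma_data() instead */
            dma->resid -= to_send;
            dma->sge_offset += to_send;
        }
    }

    int main(void)
    {
        struct dma_info dma = { .length = 10000, .resid = 10000, .sge_offset = 0 };

        skip_acked_packets(&dma, 4096, 2);   /* two packets already acked */
        printf("resid=%u sge_offset=%u\n", dma.resid, dma.sge_offset);
        return 0;
    }
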
40 struct rxe_send_wqe *wqe;
58 wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
59 mask = wr_opcode_mask(wqe->wr.opcode, qp);
61 if (wqe->state == wqe_state_posted)
64 if (wqe->state == wqe_state_done)
67 wqe->iova = (mask & WR_ATOMIC_MASK) ?
68 wqe->wr.wr.atomic.remote_addr :
70 wqe->wr.wr.rdma.remote_addr :
74 wqe->dma.resid = wqe->dma.length;
75 wqe->dma.cur_sge = 0;
76 wqe->dma.sge_offset = 0;
83 npsn = (qp->comp.psn - wqe->first_psn) &
85 retry_first_write_send(qp, wqe, npsn);
89 npsn = (wqe->dma.length - wqe->dma.resid) /
91 wqe->iova += npsn * qp->mtu;
95 wqe->state = wqe_state_posted;
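Lines 67-95 are the retry path resetting each unfinished wqe: iova is re-derived from the work request's remote address, the DMA state is rewound to the start, and for the first wqe the number of already-acknowledged packets (npsn) is recovered either from the PSN delta (send/write, lines 83-85) or from the bytes already consumed (read, lines 89-91), then used to fast-forward iova. A small sketch of that npsn arithmetic, assuming the mask truncated on line 83 is the usual 24-bit BTH_PSN_MASK.

    #include <stdio.h>
    #include <stdint.h>

    #define BTH_PSN_MASK 0x00ffffff   /* PSNs are 24 bits wide and wrap */

    /* Packets already acked on a send/write: distance from first_psn to comp.psn. */
    static uint32_t acked_pkts_by_psn(uint32_t comp_psn, uint32_t first_psn)
    {
        return (comp_psn - first_psn) & BTH_PSN_MASK;
    }

    /* Packets already consumed on a read: derived from bytes transferred so far. */
    static uint32_t acked_pkts_by_len(uint32_t length, uint32_t resid, uint32_t mtu)
    {
        return (length - resid) / mtu;
    }

    int main(void)
    {
        /* PSN wraparound: comp.psn wrapped past 0 while first_psn sits near the top */
        printf("npsn=%u\n", acked_pkts_by_psn(0x000002, 0xfffffe));  /* prints 4 */

        uint64_t iova = 0x10000;
        uint32_t npsn = acked_pkts_by_len(16384, 8192, 4096);        /* 2 packets */
        iova += (uint64_t)npsn * 4096;                               /* fast-forward */
        printf("npsn=%u iova=0x%llx\n", npsn, (unsigned long long)iova);
        return 0;
    }
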
121 struct rxe_send_wqe *wqe;
129 wqe = queue_addr_from_index(q, cons);
139 if (wqe && ((index != cons) ||
140 (wqe->state != wqe_state_posted)))
177 struct rxe_send_wqe *wqe;
182 wqe = __req_next_wqe(qp);
183 if (wqe == NULL)
188 (wqe->state != wqe_state_processing))) {
194 wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
195 return wqe;
199 * rxe_wqe_is_fenced - check if next wqe is fenced
201 * @wqe: the next wqe
203 * Returns: 1 if wqe needs to wait
204 * 0 if wqe is ready to go
206 static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
212 if (wqe->wr.opcode == IB_WR_LOCAL_INV)
220 return (wqe->wr.send_flags & IB_SEND_FENCE) &&
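rxe_wqe_is_fenced() (lines 199-220) gates the next wqe: a local invalidate is treated specially (line 212), and otherwise the wqe waits only when IB_SEND_FENCE is set and earlier read/atomic work is still outstanding. The second operand of the && on line 220 is truncated in this listing, so the sketch below reduces that outstanding-work test to a boolean parameter; the flag value is a stand-in, the real one lives in <rdma/ib_verbs.h>.

    #include <stdbool.h>
    #include <stdio.h>

    #define IB_SEND_FENCE 1   /* placeholder value for the verbs send flag */

    /*
     * Returns true when the work request must wait: it asked for a fence and
     * prior read/atomic operations have not all completed yet.
     */
    static bool wqe_is_fenced(unsigned int send_flags, bool rd_atomic_outstanding)
    {
        return (send_flags & IB_SEND_FENCE) && rd_atomic_outstanding;
    }

    int main(void)
    {
        printf("%d\n", wqe_is_fenced(IB_SEND_FENCE, true));   /* 1: must wait */
        printf("%d\n", wqe_is_fenced(IB_SEND_FENCE, false));  /* 0: ready to go */
        printf("%d\n", wqe_is_fenced(0, true));               /* 0: no fence requested */
        return 0;
    }
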
354 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
357 int fits = (wqe->dma.resid <= qp->mtu);
384 static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
388 if (wqe->has_rd_atomic)
396 wqe->has_rd_atomic = 1;
416 struct rxe_send_wqe *wqe,
422 struct rxe_send_wr *ibwr = &wqe->wr;
462 reth_set_va(pkt, wqe->iova);
463 reth_set_len(pkt, wqe->dma.resid);
477 atmeth_set_va(pkt, wqe->iova);
499 struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt,
509 if (wqe->wr.send_flags & IB_SEND_INLINE) {
510 u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];
514 wqe->dma.resid -= payload;
515 wqe->dma.sge_offset += payload;
517 err = copy_data(qp->pd, 0, &wqe->dma,
530 wqe->dma.resid = 0;
534 memcpy(payload_addr(pkt), wqe->dma.atomic_wr, payload);
535 wqe->dma.resid -= payload;
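Lines 499-535 fill the outgoing payload from the wqe: inline sends memcpy straight out of wqe->dma.inline_data at sge_offset and advance resid/sge_offset, non-inline sends go through copy_data() against the SGE list, RDMA reads consume the whole resid (line 530), and atomic writes copy their operand from dma.atomic_wr. A minimal sketch of the inline branch only, assuming a flat destination buffer in place of the skb payload; struct dma_state and fill_inline_payload are illustrative names.

    #include <stdio.h>
    #include <string.h>

    struct dma_state {
        unsigned char inline_data[64]; /* inline payload staged at post-send time */
        unsigned int resid;            /* bytes still to be sent */
        unsigned int sge_offset;       /* read cursor into inline_data */
    };

    /* Copy the next 'payload' bytes of inline data into the outgoing packet. */
    static void fill_inline_payload(void *pkt_payload, struct dma_state *dma,
                                    unsigned int payload)
    {
        memcpy(pkt_payload, &dma->inline_data[dma->sge_offset], payload);
        dma->resid -= payload;
        dma->sge_offset += payload;
    }

    int main(void)
    {
        struct dma_state dma = { .resid = 12, .sge_offset = 0 };
        memcpy(dma.inline_data, "hello, world", 12);

        char pkt[8] = {0};
        fill_inline_payload(pkt, &dma, 5);   /* first packet carries "hello" */
        printf("%.5s resid=%u off=%u\n", pkt, dma.resid, dma.sge_offset);
        return 0;
    }
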
542 struct rxe_send_wqe *wqe,
547 wqe->state = wqe_state_pending;
549 wqe->state = wqe_state_processing;
554 struct rxe_send_wqe *wqe,
559 int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;
566 wqe->first_psn = qp->req.psn;
567 wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
571 qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
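update_wqe_psn() (lines 554-571) is plain PSN bookkeeping: the number of packets a wqe needs is the ceiling of (resid + payload) / mtu, the wqe records its first and last PSN, and for a read the queue pair's request PSN jumps past the whole range, all modulo the 24-bit PSN space. A self-contained version of that arithmetic, assuming the usual 24-bit BTH_PSN_MASK; the zero-length special case and the non-read advance are not visible in this listing and are omitted.

    #include <stdio.h>
    #include <stdint.h>

    #define BTH_PSN_MASK 0x00ffffff   /* 24-bit packet sequence numbers */

    struct psn_span {
        uint32_t first_psn;
        uint32_t last_psn;
        uint32_t next_req_psn;   /* qp->req.psn after a read request */
    };

    /*
     * Reserve a PSN range for a wqe: one PSN per MTU-sized packet still to send,
     * counting the current packet's payload (mirrors lines 559-571 above).
     */
    static struct psn_span reserve_psns(uint32_t req_psn, uint32_t resid,
                                        uint32_t payload, uint32_t mtu)
    {
        uint32_t num_pkt = (resid + payload + mtu - 1) / mtu;
        struct psn_span s;

        s.first_psn = req_psn;
        s.last_psn = (req_psn + num_pkt - 1) & BTH_PSN_MASK;
        s.next_req_psn = (s.first_psn + num_pkt) & BTH_PSN_MASK;
        return s;
    }

    int main(void)
    {
        /* 12 KiB read at 4 KiB MTU, starting near the top of the PSN space */
        struct psn_span s = reserve_psns(0x00fffffe, 12288, 0, 4096);

        printf("first=%#x last=%#x next=%#x\n", s.first_psn, s.last_psn, s.next_req_psn);
        return 0;
    }
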
576 static void save_state(struct rxe_send_wqe *wqe,
581 rollback_wqe->state = wqe->state;
582 rollback_wqe->first_psn = wqe->first_psn;
583 rollback_wqe->last_psn = wqe->last_psn;
584 rollback_wqe->dma = wqe->dma;
588 static void rollback_state(struct rxe_send_wqe *wqe,
593 wqe->state = rollback_wqe->state;
594 wqe->first_psn = rollback_wqe->first_psn;
595 wqe->last_psn = rollback_wqe->last_psn;
596 wqe->dma = rollback_wqe->dma;
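save_state()/rollback_state() (lines 576-596) implement a simple transactional pattern: the wqe fields that packet construction mutates (state, first_psn, last_psn, and the dma descriptor) are copied into a stack-local shadow before the packet is built (line 803), and copied back if the packet is dropped (line 851); the queue pair's request PSN is saved and restored alongside them as rollback_psn. A stripped-down illustration of that shape, copying the fields wholesale for brevity where the driver copies them individually:

    #include <stdio.h>
    #include <stdint.h>

    /* Only the fields that building a packet is allowed to touch. */
    struct wqe_volatile {
        int state;
        uint32_t first_psn;
        uint32_t last_psn;
        struct { uint32_t resid, sge_offset; } dma;
    };

    static void save_state(const struct wqe_volatile *wqe, struct wqe_volatile *shadow)
    {
        *shadow = *wqe;                 /* snapshot, as in lines 581-584 */
    }

    static void rollback_state(struct wqe_volatile *wqe, const struct wqe_volatile *shadow)
    {
        *wqe = *shadow;                 /* undo, as in lines 593-596 */
    }

    int main(void)
    {
        struct wqe_volatile wqe = { .state = 0, .first_psn = 10, .last_psn = 12,
                                    .dma = { .resid = 8192, .sge_offset = 0 } };
        struct wqe_volatile shadow;

        save_state(&wqe, &shadow);
        wqe.dma.resid -= 4096;          /* pretend we built and "sent" a packet */
        wqe.dma.sge_offset += 4096;

        rollback_state(&wqe, &shadow);  /* packet dropped: restore the snapshot */
        printf("resid=%u off=%u\n", wqe.dma.resid, wqe.dma.sge_offset);
        return 0;
    }
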
615 static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
617 u8 opcode = wqe->wr.opcode;
623 rkey = wqe->wr.ex.invalidate_rkey;
630 wqe->status = IB_WC_LOC_QP_OP_ERR;
635 ret = rxe_reg_fast_mr(qp, wqe);
637 wqe->status = IB_WC_LOC_QP_OP_ERR;
642 ret = rxe_bind_mw(qp, wqe);
644 wqe->status = IB_WC_MW_BIND_ERR;
649 rxe_dbg_qp(qp, "Unexpected send wqe opcode %d\n", opcode);
650 wqe->status = IB_WC_LOC_QP_OP_ERR;
654 wqe->state = wqe_state_done;
655 wqe->status = IB_WC_SUCCESS;
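rxe_do_local_ops() (lines 615-655) dispatches the work requests that never go on the wire: IB_WR_LOCAL_INV invalidates an rkey, the fast-register opcode calls rxe_reg_fast_mr(), the bind opcode calls rxe_bind_mw(), anything else is flagged as a QP operation error, and on success the wqe is marked done with IB_WC_SUCCESS. A skeleton of that dispatch shape; the enums and the *_stub() handlers below are hypothetical stand-ins, not driver symbols.

    #include <stdio.h>

    enum wr_opcode { WR_LOCAL_INV, WR_REG_MR, WR_BIND_MW, WR_OTHER };
    enum wc_status { WC_SUCCESS, WC_LOC_QP_OP_ERR, WC_MW_BIND_ERR };
    enum wqe_state { WQE_STATE_PROCESSING, WQE_STATE_DONE };

    struct local_wqe {
        enum wr_opcode opcode;
        enum wqe_state state;
        enum wc_status status;
    };

    static int invalidate_rkey_stub(void) { return 0; }  /* hypothetical handlers */
    static int reg_fast_mr_stub(void)     { return 0; }
    static int bind_mw_stub(void)         { return 0; }

    static int do_local_ops(struct local_wqe *wqe)
    {
        int ret;

        switch (wqe->opcode) {
        case WR_LOCAL_INV:
            ret = invalidate_rkey_stub();
            if (ret) { wqe->status = WC_LOC_QP_OP_ERR; return ret; }
            break;
        case WR_REG_MR:
            ret = reg_fast_mr_stub();
            if (ret) { wqe->status = WC_LOC_QP_OP_ERR; return ret; }
            break;
        case WR_BIND_MW:
            ret = bind_mw_stub();
            if (ret) { wqe->status = WC_MW_BIND_ERR; return ret; }
            break;
        default:
            wqe->status = WC_LOC_QP_OP_ERR;
            return -1;
        }

        wqe->state = WQE_STATE_DONE;    /* completes locally, no packet is sent */
        wqe->status = WC_SUCCESS;
        return 0;
    }

    int main(void)
    {
        struct local_wqe wqe = { .opcode = WR_REG_MR, .state = WQE_STATE_PROCESSING };
        int ret = do_local_ops(&wqe);

        printf("ret=%d state=%d status=%d\n", ret, wqe.state, wqe.status);
        return 0;
    }
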
672 struct rxe_send_wqe *wqe;
693 wqe = __req_next_wqe(qp);
695 if (wqe)
725 wqe = req_next_wqe(qp);
726 if (unlikely(!wqe))
729 if (rxe_wqe_is_fenced(qp, wqe)) {
734 if (wqe->mask & WR_LOCAL_OP_MASK) {
735 err = rxe_do_local_ops(qp, wqe);
756 opcode = next_opcode(qp, wqe, wqe->wr.opcode);
758 wqe->status = IB_WC_LOC_QP_OP_ERR;
765 if (check_init_depth(qp, wqe))
771 wqe->dma.resid : 0;
781 wqe->first_psn = qp->req.psn;
782 wqe->last_psn = qp->req.psn;
787 wqe->state = wqe_state_done;
788 wqe->status = IB_WC_SUCCESS;
800 pkt.wqe = wqe;
802 /* save wqe state before we build and send packet */
803 save_state(wqe, qp, &rollback_wqe, &rollback_psn);
808 wqe->status = IB_WC_LOC_QP_OP_ERR;
812 skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
815 wqe->status = IB_WC_LOC_QP_OP_ERR;
821 err = finish_packet(qp, av, wqe, &pkt, skb, payload);
825 wqe->status = IB_WC_LOC_PROT_ERR;
827 wqe->status = IB_WC_LOC_QP_OP_ERR;
837 /* update wqe state as though we had sent it */
838 update_wqe_state(qp, wqe, &pkt);
839 update_wqe_psn(qp, wqe, &pkt, payload);
844 wqe->status = IB_WC_LOC_QP_OP_ERR;
848 /* the packet was dropped so reset wqe to the state
851 rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
872 /* update wqe_index for each wqe completion */
874 wqe->state = wqe_state_error;
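Lines 672-874 are the requester loop tying the pieces above together: fetch the next wqe, honor fences, short-circuit local operations, pick the opcode and payload, snapshot the rollback state, build and finish the packet, then either commit the wqe/PSN state as though the packet were sent or roll it back if the send is dropped; any hard failure leaves the wqe with an error status. A heavily condensed outline of that control flow, with every helper reduced to a hypothetical stub returning success or failure:

    #include <stdio.h>
    #include <stdbool.h>

    /* Hypothetical stubs standing in for the driver helpers named in the listing. */
    static bool next_wqe_available(void) { return true; }
    static bool wqe_is_fenced(void)      { return false; }
    static bool is_local_op(void)        { return false; }
    static int  build_packet(void)       { return 0; }   /* init_req_packet + finish_packet */
    static int  send_packet(void)        { return 0; }   /* 0 = sent, nonzero = dropped */

    static int requester_step(void)
    {
        if (!next_wqe_available())
            return -1;               /* nothing to do */
        if (wqe_is_fenced())
            return -1;               /* wait for prior read/atomic work */
        if (is_local_op())
            return 0;                /* completes without emitting a packet */

        /* save_state(): remember wqe state and req.psn before touching anything */
        if (build_packet())
            return -1;               /* wqe->status set to a local error code */

        /* update_wqe_state()/update_wqe_psn(): commit as though it were sent */
        if (send_packet()) {
            /* rollback_state(): packet was dropped, undo the commit */
            return -1;
        }
        return 0;                    /* advance to the next packet or wqe */
    }

    int main(void)
    {
        printf("step=%d\n", requester_step());
        return 0;
    }
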