Lines Matching refs:wqe
(Each fragment below keeps its line number from the source file; together they trace the soft-RoCE (rxe) requester send path: WQE retry, next-WQE selection, opcode choice, request-packet build, and the state/PSN bookkeeping shared with the completer.)
14 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
18 struct rxe_send_wqe *wqe,
24 int to_send = (wqe->dma.resid > qp->mtu) ?
25 qp->mtu : wqe->dma.resid;
27 qp->req.opcode = next_opcode(qp, wqe,
28 wqe->wr.opcode);
30 if (wqe->wr.send_flags & IB_SEND_INLINE) {
31 wqe->dma.resid -= to_send;
32 wqe->dma.sge_offset += to_send;
34 advance_dma_data(&wqe->dma, to_send);
37 wqe->iova += qp->mtu;
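The replay helper at lines 18-37 (invoked as retry_first_write_send() at line 83) appears to walk a partially acknowledged send/write WQE forward again, one MTU-sized chunk per already-acknowledged PSN (npsn, computed at lines 81-83). Below is a minimal userspace sketch of the inline-data branch of that loop; struct fake_dma and replay_one_chunk() are illustrative stand-ins, not the driver's types.

#include <stdio.h>

struct fake_dma {
        unsigned int resid;             /* bytes still to be sent */
        unsigned int sge_offset;        /* offset into the inline buffer */
};

/* Replay one already-acknowledged chunk: consume min(resid, mtu) bytes
 * of inline data and advance the remote address by a full MTU. */
static void replay_one_chunk(struct fake_dma *dma, unsigned int mtu,
                             unsigned long long *iova)
{
        unsigned int to_send = (dma->resid > mtu) ? mtu : dma->resid;

        dma->resid -= to_send;
        dma->sge_offset += to_send;
        *iova += mtu;
}

int main(void)
{
        struct fake_dma dma = { .resid = 4096, .sge_offset = 0 };
        unsigned long long iova = 0x1000;

        replay_one_chunk(&dma, 1024, &iova);    /* npsn == 1 in this example */
        printf("resid=%u sge_offset=%u iova=0x%llx\n",
               dma.resid, dma.sge_offset, iova);
        return 0;
}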
43 struct rxe_send_wqe *wqe;
56 wqe = addr_from_index(qp->sq.queue, wqe_index);
57 mask = wr_opcode_mask(wqe->wr.opcode, qp);
59 if (wqe->state == wqe_state_posted)
62 if (wqe->state == wqe_state_done)
65 wqe->iova = (mask & WR_ATOMIC_MASK) ?
66 wqe->wr.wr.atomic.remote_addr :
68 wqe->wr.wr.rdma.remote_addr :
72 wqe->dma.resid = wqe->dma.length;
73 wqe->dma.cur_sge = 0;
74 wqe->dma.sge_offset = 0;
81 npsn = (qp->comp.psn - wqe->first_psn) &
83 retry_first_write_send(qp, wqe, mask, npsn);
87 npsn = (wqe->dma.length - wqe->dma.resid) /
89 wqe->iova += npsn * qp->mtu;
93 wqe->state = wqe_state_posted;
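Lines 43-93 reset a WQE for retry: iova is re-derived from the work request (lines 65-68), the DMA cursor is rewound (lines 72-74), and the number of packets the peer already acknowledged is recovered either from the PSN difference (lines 81-83) or from length - resid (lines 87-89). The sketch below only shows the 24-bit modular PSN arithmetic with made-up values; PSN_MASK stands in for BTH_PSN_MASK.

#include <stdio.h>

#define PSN_MASK 0xffffff       /* BTH PSNs are 24 bits wide */

int main(void)
{
        unsigned int first_psn = 0xfffffa;      /* WQE started just before the wrap */
        unsigned int comp_psn  = 0x000004;      /* completer has acked up to here */
        unsigned int mtu = 1024;
        unsigned long long iova = 0x10000;

        /* packets already acknowledged, modulo the 24-bit PSN space */
        unsigned int npsn = (comp_psn - first_psn) & PSN_MASK;

        /* a partially acknowledged transfer restarts npsn MTUs further in */
        iova += (unsigned long long)npsn * mtu;

        printf("npsn=%u iova=0x%llx\n", npsn, iova);    /* npsn=10 */
        return 0;
}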
107 struct rxe_send_wqe *wqe = queue_head(qp->sq.queue);
123 if (wqe && ((qp->req.wqe_index !=
125 (wqe->state != wqe_state_posted))) {
150 wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index);
154 (wqe->state != wqe_state_processing)))
157 if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
163 wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
164 return wqe;
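Lines 107-164 pick the WQE the requester should work on next: queue_head() at line 107 feeds the drain check, line 150 fetches the entry at qp->req.wqe_index, fenced work is deferred (line 157), and the opcode mask is stamped on the WQE before it is returned (lines 163-164). A toy ring lookup is sketched below, assuming the power-of-two index masking typical of such queues; SQ_DEPTH, fake_wqe and wqe_from_index() are inventions for illustration.

#include <stdio.h>

#define SQ_DEPTH 8      /* must be a power of two for the mask trick */

struct fake_wqe {
        int opcode;
        int state;
};

static struct fake_wqe sq[SQ_DEPTH];

/* Map a monotonically increasing index onto a ring slot. */
static struct fake_wqe *wqe_from_index(unsigned int index)
{
        return &sq[index & (SQ_DEPTH - 1)];
}

int main(void)
{
        unsigned int wqe_index = 9;     /* producer index past one wrap */
        struct fake_wqe *wqe = wqe_from_index(wqe_index);

        printf("slot=%ld\n", (long)(wqe - sq));         /* slot=1 */
        return 0;
}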
290 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
293 int fits = (wqe->dma.resid <= qp->mtu);
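Line 293 computes fits = (wqe->dma.resid <= qp->mtu), which is what steers next_opcode() between the ONLY, FIRST, MIDDLE and LAST variants of an opcode. A compact sketch of that decision follows; the real driver resolves the concrete IB opcode through its opcode table, and enum frag / pick_fragment() are inventions for illustration.

#include <stdio.h>

enum frag { FRAG_ONLY, FRAG_FIRST, FRAG_MIDDLE, FRAG_LAST };

static enum frag pick_fragment(int started, unsigned int resid,
                               unsigned int mtu)
{
        int fits = (resid <= mtu);      /* does the remainder fit in one packet? */

        if (!started)
                return fits ? FRAG_ONLY : FRAG_FIRST;
        return fits ? FRAG_LAST : FRAG_MIDDLE;
}

int main(void)
{
        printf("%d %d %d\n",
               pick_fragment(0, 512, 1024),     /* ONLY  */
               pick_fragment(0, 4096, 1024),    /* FIRST */
               pick_fragment(1, 512, 1024));    /* LAST  */
        return 0;
}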
321 static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
325 if (wqe->has_rd_atomic)
333 wqe->has_rd_atomic = 1;
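check_init_depth() at lines 321-333 makes a READ or ATOMIC WQE take one slot of the initiator depth before it may be transmitted; has_rd_atomic (lines 325 and 333) records that the credit is already held so a retry does not take a second one. The sketch below uses a plain int where the driver uses an atomic counter; fake_qp, fake_wqe and take_rd_atomic() are stand-ins.

#include <stdio.h>

struct fake_qp  { int rd_atomic; };     /* read/atomic credits left */
struct fake_wqe { int has_rd_atomic; };

static int take_rd_atomic(struct fake_qp *qp, struct fake_wqe *wqe)
{
        if (wqe->has_rd_atomic)         /* already holds a credit (retry) */
                return 0;
        if (qp->rd_atomic <= 0)         /* too many reads/atomics in flight */
                return -1;
        qp->rd_atomic--;
        wqe->has_rd_atomic = 1;
        return 0;
}

int main(void)
{
        struct fake_qp qp = { .rd_atomic = 1 };
        struct fake_wqe a = { 0 }, b = { 0 };

        printf("%d %d\n", take_rd_atomic(&qp, &a),      /* 0: got the credit */
                          take_rd_atomic(&qp, &b));     /* -1: must wait */
        return 0;
}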
352 struct rxe_send_wqe *wqe,
358 struct rxe_send_wr *ibwr = &wqe->wr;
379 pkt->wqe = wqe;
410 reth_set_va(pkt, wqe->iova);
411 reth_set_len(pkt, wqe->dma.resid);
421 atmeth_set_va(pkt, wqe->iova);
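init_req_packet() (lines 352-421) turns the WQE into wire headers: the packet remembers its WQE (line 379), RDMA requests get the remote address and remaining length written into the RETH (lines 410-411), and the atomic header gets the target VA (line 421). The sketch below packs an illustrative 16-byte RETH-like header in network byte order; fake_reth and fill_reth() are not the driver's helpers.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

struct fake_reth {
        uint32_t va_hi, va_lo;  /* 64-bit remote VA, big-endian halves */
        uint32_t rkey;          /* remote memory key */
        uint32_t len;           /* DMA length at packet-build time */
};

static void fill_reth(struct fake_reth *reth, uint64_t iova,
                      uint32_t rkey, uint32_t resid)
{
        reth->va_hi = htonl((uint32_t)(iova >> 32));
        reth->va_lo = htonl((uint32_t)iova);
        reth->rkey  = htonl(rkey);
        reth->len   = htonl(resid);
}

int main(void)
{
        struct fake_reth reth;

        fill_reth(&reth, 0x7f0000001000ull, 0x1234, 8192);
        printf("header bytes=%zu\n", sizeof(reth));     /* 16 */
        return 0;
}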
443 static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
457 if (wqe->wr.send_flags & IB_SEND_INLINE) {
458 u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];
463 wqe->dma.resid -= paylen;
464 wqe->dma.sge_offset += paylen;
466 err = copy_data(qp->pd, 0, &wqe->dma,
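fill_packet() (lines 443-466) supplies the payload: for IB_SEND_INLINE the data was stashed in the WQE at post time, so building a packet is a copy from inline_data at the current offset followed by advancing resid and sge_offset (lines 457-464); otherwise copy_data() gathers it from the SGEs (line 466). A minimal sketch of the inline branch, with a simplified stand-in struct:

#include <stdio.h>
#include <string.h>

struct fake_dma {
        unsigned int resid;
        unsigned int sge_offset;
        unsigned char inline_data[64];
};

static void fill_inline(struct fake_dma *dma, unsigned char *payload_buf,
                        unsigned int paylen)
{
        memcpy(payload_buf, &dma->inline_data[dma->sge_offset], paylen);
        dma->resid -= paylen;
        dma->sge_offset += paylen;
}

int main(void)
{
        struct fake_dma dma = { .resid = 11, .sge_offset = 0,
                                .inline_data = "hello world" };
        unsigned char pkt[16] = { 0 };

        fill_inline(&dma, pkt, 5);      /* first chunk: "hello" */
        printf("payload=%.5s resid=%u off=%u\n",
               (char *)pkt, dma.resid, dma.sge_offset);
        return 0;
}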
488 struct rxe_send_wqe *wqe,
493 wqe->state = wqe_state_pending;
495 wqe->state = wqe_state_processing;
500 struct rxe_send_wqe *wqe,
505 int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;
512 wqe->first_psn = qp->req.psn;
513 wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
517 qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
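update_wqe_state() (lines 488-495) marks the WQE pending or processing once a packet for it has been built, and update_wqe_psn() (lines 500-517) reserves PSNs: line 505 computes how many packets the WQE spans, lines 512-513 record its first and last PSN, and line 517 advances the queue pair's PSN past the whole range. The arithmetic, with the 24-bit wrap made explicit and illustrative values:

#include <stdio.h>

#define PSN_MASK 0xffffff

int main(void)
{
        unsigned int req_psn = 0xfffffe;        /* close to the wrap point */
        unsigned int resid = 8192, payload = 0, mtu = 1024;

        unsigned int num_pkt   = (resid + payload + mtu - 1) / mtu;    /* 8 */
        unsigned int first_psn = req_psn;
        unsigned int last_psn  = (req_psn + num_pkt - 1) & PSN_MASK;
        unsigned int next_psn  = (first_psn + num_pkt) & PSN_MASK;

        printf("num_pkt=%u first=0x%x last=0x%x next=0x%x\n",
               num_pkt, first_psn, last_psn, next_psn);
        return 0;
}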
522 static void save_state(struct rxe_send_wqe *wqe,
527 rollback_wqe->state = wqe->state;
528 rollback_wqe->first_psn = wqe->first_psn;
529 rollback_wqe->last_psn = wqe->last_psn;
533 static void rollback_state(struct rxe_send_wqe *wqe,
538 wqe->state = rollback_wqe->state;
539 wqe->first_psn = rollback_wqe->first_psn;
540 wqe->last_psn = rollback_wqe->last_psn;
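save_state() (lines 522-529) and rollback_state() (lines 533-540) bracket the transmit: the fields the completer might race on (state, first_psn, last_psn) plus the request PSN are copied aside before the packet is handed to the network, and copied back if the send fails. A reduced sketch with stand-in types:

#include <stdio.h>

struct fake_wqe { int state; unsigned int first_psn, last_psn; };

static void save_state(const struct fake_wqe *wqe, struct fake_wqe *rb,
                       unsigned int psn, unsigned int *rb_psn)
{
        *rb = *wqe;             /* state, first_psn, last_psn */
        *rb_psn = psn;
}

static void rollback_state(struct fake_wqe *wqe, const struct fake_wqe *rb,
                           unsigned int *psn, unsigned int rb_psn)
{
        *wqe = *rb;
        *psn = rb_psn;
}

int main(void)
{
        struct fake_wqe wqe = { .state = 1, .first_psn = 5, .last_psn = 9 };
        struct fake_wqe rb;
        unsigned int psn = 10, rb_psn;

        save_state(&wqe, &rb, psn, &rb_psn);
        wqe.state = 2; psn = 14;                  /* speculative update */
        rollback_state(&wqe, &rb, &psn, rb_psn);  /* send failed: undo it */
        printf("state=%d psn=%u\n", wqe.state, psn);
        return 0;
}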
544 static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
564 struct rxe_send_wqe *wqe;
593 wqe = req_next_wqe(qp);
594 if (unlikely(!wqe))
597 if (wqe->mask & WR_REG_MASK) {
598 if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
603 wqe->wr.ex.invalidate_rkey >> 8);
606 wqe->wr.ex.invalidate_rkey);
607 wqe->state = wqe_state_error;
608 wqe->status = IB_WC_MW_BIND_ERR;
613 wqe->state = wqe_state_done;
614 wqe->status = IB_WC_SUCCESS;
615 } else if (wqe->wr.opcode == IB_WR_REG_MR) {
616 struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);
619 rmr->access = wqe->wr.wr.reg.access;
620 rmr->ibmr.lkey = wqe->wr.wr.reg.key;
621 rmr->ibmr.rkey = wqe->wr.wr.reg.key;
622 rmr->iova = wqe->wr.wr.reg.mr->iova;
623 wqe->state = wqe_state_done;
624 wqe->status = IB_WC_SUCCESS;
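Lines 597-624 handle WQEs that never reach the wire: IB_WR_LOCAL_INV looks the MR up from the rkey (line 603) and IB_WR_REG_MR copies the registration fields into it (lines 619-622); either way the WQE goes straight to wqe_state_done / IB_WC_SUCCESS, or to wqe_state_error / IB_WC_MW_BIND_ERR if the lookup fails (lines 607-608). A reduced sketch of the REG_MR branch, with fake_mr and fake_reg_wr standing in for the driver's types:

#include <stdio.h>

struct fake_mr     { unsigned int lkey, rkey, access; unsigned long long iova; };
struct fake_reg_wr { unsigned int key, access; unsigned long long iova; };

enum { WQE_DONE = 1 };

static int do_reg_mr(struct fake_mr *mr, const struct fake_reg_wr *wr,
                     int *wqe_state)
{
        mr->access = wr->access;
        mr->lkey = wr->key;
        mr->rkey = wr->key;
        mr->iova = wr->iova;
        *wqe_state = WQE_DONE;  /* completes without sending a packet */
        return 0;
}

int main(void)
{
        struct fake_mr mr = { 0 };
        struct fake_reg_wr wr = { .key = 0x1201, .access = 0x7,
                                  .iova = 0x2000 };
        int state = 0;

        do_reg_mr(&mr, &wr, &state);
        printf("rkey=0x%x state=%d\n", mr.rkey, state);
        return 0;
}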
628 if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
650 opcode = next_opcode(qp, wqe, wqe->wr.opcode);
652 wqe->status = IB_WC_LOC_QP_OP_ERR;
658 if (check_init_depth(qp, wqe))
663 payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
673 wqe->first_psn = qp->req.psn;
674 wqe->last_psn = qp->req.psn;
679 wqe->state = wqe_state_done;
680 wqe->status = IB_WC_SUCCESS;
688 skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
694 if (fill_packet(qp, wqe, &pkt, skb, payload)) {
701 * To prevent a race on wqe access between requester and completer,
702 * wqe members state and psn need to be set before calling
706 save_state(wqe, qp, &rollback_wqe, &rollback_psn);
707 update_wqe_state(qp, wqe, &pkt);
708 update_wqe_psn(qp, wqe, &pkt, payload);
713 rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
723 update_state(qp, wqe, &pkt, payload);
728 wqe->status = IB_WC_LOC_PROT_ERR;
729 wqe->state = wqe_state_error;
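The tail of the listing (lines 688-729) is the transmit step and its failure handling, and the comment quoted at lines 701-702 explains the ordering: because the completer may see the response before the requester returns from sending, the WQE's state and PSNs are published first (save_state(), then update_wqe_state() and update_wqe_psn() at lines 706-708), rolled back via rollback_state() if the transmit fails (line 713), and otherwise committed by update_state() (line 723); a payload copy failure instead ends the WQE with IB_WC_LOC_PROT_ERR (lines 728-729). The control flow, reduced to a runnable outline in which send_packet() and every value are illustrative stubs:

#include <stdio.h>

static int send_packet(int fail) { return fail ? -1 : 0; }

int main(void)
{
        int wqe_state = 0, saved_state;
        unsigned int psn = 7, saved_psn;
        int xmit_fails = 1;

        saved_state = wqe_state;        /* save_state(): snapshot first */
        saved_psn = psn;
        wqe_state = 2;                  /* update_wqe_state(): processing */
        psn += 3;                       /* update_wqe_psn(): reserve PSNs */

        if (send_packet(xmit_fails)) {  /* transmit failed */
                wqe_state = saved_state;        /* rollback_state() */
                psn = saved_psn;
        }

        printf("state=%d psn=%u\n", wqe_state, psn);
        return 0;
}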