Lines matching refs:wqe
139 struct rxe_send_wqe *wqe;
144 wqe = queue_head(qp->sq.queue);
145 *wqe_p = wqe;
148 if (!wqe || wqe->state == wqe_state_posted)
152 if (wqe->state == wqe_state_done)
156 if (wqe->state == wqe_state_error)
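
The matches at 139-156 come from get_wqe(), the helper that peeks at the head of the send queue and classifies the WQE by state; the identifiers and line layout match the soft-RoCE completer, drivers/infiniband/sw/rxe/rxe_comp.c. A minimal sketch of that decision ladder, reconstructed around the fragments above using the COMPST_* state names rxe_comp.c defines (a sketch, not the exact source):

    /* Reconstruction of get_wqe()'s decision ladder. */
    wqe = queue_head(qp->sq.queue);
    *wqe_p = wqe;

    /* no WQE, or the requester has not started on it yet */
    if (!wqe || wqe->state == wqe_state_posted)
        return pkt ? COMPST_DONE : COMPST_EXIT;

    /* WQE already finished by the requester; no ack needed */
    if (wqe->state == wqe_state_done)
        return COMPST_COMP_WQE;

    /* requester already marked this WQE as failed */
    if (wqe->state == wqe_state_error)
        return COMPST_ERROR;

    /* otherwise go match the response packet against it */
    return COMPST_CHECK_PSN;

The COMPST_DONE/COMPST_EXIT split distinguishes "nothing to complete" with and without a response packet in hand.
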
172 struct rxe_send_wqe *wqe)
179 diff = psn_compare(pkt->psn, wqe->last_psn);
181 if (wqe->state == wqe_state_pending) {
182 if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
198 if (pkt->psn == wqe->last_psn)
202 } else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
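
check_psn() (172-202) orders the packet PSN against wqe->last_psn and qp->comp.psn via psn_compare(). BTH PSNs are 24 bits wide and wrap, so a plain integer compare is wrong near the wrap point; the driver instead shifts the modular difference into the sign bit of a 32-bit value. A standalone demo of the trick (the helper's one-line body is reconstructed from the mainline driver headers):

    #include <stdio.h>
    #include <stdint.h>

    /* 24-bit serial-number comparison as rxe's psn_compare() does it:
     * subtract modulo 2^32, shift left 8 so bit 23 of the 24-bit
     * difference lands in the sign bit; > 0 means a is "after" b.
     */
    static int psn_compare(uint32_t psn_a, uint32_t psn_b)
    {
        return (int32_t)((psn_a - psn_b) << 8);
    }

    int main(void)
    {
        /* 5 is seven packets after 0xfffffe once the PSN wraps */
        printf("%d\n", psn_compare(5, 0xfffffe) > 0);  /* prints 1 */
        printf("%d\n", psn_compare(0xfffffe, 5) > 0);  /* prints 0 */
        return 0;
    }
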
211 struct rxe_send_wqe *wqe)
233 if ((pkt->psn == wqe->first_psn &&
236 (wqe->first_psn == wqe->last_psn &&
262 if (wqe->wr.opcode != IB_WR_RDMA_READ &&
263 wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
264 wqe->status = IB_WC_FATAL_ERR;
276 if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
277 wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
311 wqe->status = IB_WC_REM_INV_REQ_ERR;
315 wqe->status = IB_WC_REM_ACCESS_ERR;
319 wqe->status = IB_WC_REM_OP_ERR;
324 wqe->status = IB_WC_REM_OP_ERR;
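
The fragments at 262-324 come from check_ack(): a read response is only legal when the head WQE is an RDMA read (262-264), an atomic ack only when it is an atomic (276-277), and NAK syndromes are translated into IB work-completion statuses (311-324). A sketch of that syndrome switch, assuming the AETH_NAK_* syndrome names from the driver's header:

    /* Sketch of the NAK-to-status mapping behind lines 311-324;
     * syndrome constants reconstructed, check your tree's rxe_hdr.h.
     */
    switch (syn) {
    case AETH_NAK_INVALID_REQ:
        wqe->status = IB_WC_REM_INV_REQ_ERR;
        return COMPST_ERROR;
    case AETH_NAK_REM_ACC_ERR:
        wqe->status = IB_WC_REM_ACCESS_ERR;
        return COMPST_ERROR;
    case AETH_NAK_REM_OP_ERR:
        wqe->status = IB_WC_REM_OP_ERR;
        return COMPST_ERROR;
    default:
        /* unknown NAKs degrade to a remote operational error */
        wqe->status = IB_WC_REM_OP_ERR;
        return COMPST_ERROR;
    }
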
342 struct rxe_send_wqe *wqe)
347 &wqe->dma, payload_addr(pkt),
352 if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
360 struct rxe_send_wqe *wqe)
367 &wqe->dma, &atomic_orig,
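
do_read() (342-352) copies each read-response payload into the WQE's scatter list through the driver's copy_data() helper and finishes the WQE only when wqe->dma.resid hits zero on an end-marked packet; do_atomic() (360-367) copies the 8-byte original value from an atomic ack the same way. A sketch of the read path; copy_data()'s signature and the copy-direction constant have changed across kernel versions, so the exact arguments below are an assumption:

    /* Illustrative shape of do_read(): pull payload_size(pkt) bytes
     * into the WQE's DMA state (argument order per recent mainline,
     * may differ in older trees).
     */
    ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
                    &wqe->dma, payload_addr(pkt),
                    payload_size(pkt), RXE_TO_MR_OBJ);
    if (ret) {
        wqe->status = IB_WC_LOC_PROT_ERR;  /* local copy failed */
        return COMPST_ERROR;
    }

    /* the read completes once all bytes arrived on the last packet */
    if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
        return COMPST_COMP_ACK;

    return COMPST_UPDATE_COMP;
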
375 static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
383 wc->wr_id = wqe->wr.wr_id;
384 wc->status = wqe->status;
385 wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
386 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
387 wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
389 wc->byte_len = wqe->dma.length;
394 uwc->wr_id = wqe->wr.wr_id;
395 uwc->status = wqe->status;
396 uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
397 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
398 wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
400 uwc->byte_len = wqe->dma.length;
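
make_send_cqe() (375-400) fills the same fields twice, once for kernel consumers (struct ib_wc, 383-389) and once for user-space consumers (the uverbs wc layout, 394-400): wr_id, status, a WR-to-WC opcode translation, the WITH_IMM flag for the two immediate-carrying opcodes, and byte_len taken from the DMA length. A sketch of the kernel branch (the cqe union member name is an assumption from mainline):

    /* Sketch of the kernel-consumer branch of make_send_cqe(); the
     * user-space branch at 394-400 mirrors it field for field.
     */
    struct ib_wc *wc = &cqe->ibwc;

    memset(cqe, 0, sizeof(*cqe));

    wc->wr_id = wqe->wr.wr_id;
    wc->status = wqe->status;
    wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
    if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
        wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
        wc->wc_flags = IB_WC_WITH_IMM;
    wc->byte_len = wqe->dma.length;
    wc->qp = &qp->ibqp;
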
413 static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
419 (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
420 wqe->status != IB_WC_SUCCESS) {
421 make_send_cqe(qp, wqe, &cqe);
428 if (wqe->wr.opcode == IB_WR_SEND ||
429 wqe->wr.opcode == IB_WR_SEND_WITH_IMM ||
430 wqe->wr.opcode == IB_WR_SEND_WITH_INV)
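
do_complete() (413-430) posts a CQE only when the QP signals all WRs, the WR itself set IB_SEND_SIGNALED, or the WQE failed; unsignaled successes just retire their SQ slot. The opcode test at 428-430 feeds a per-device send counter in mainline. A sketch of the gate; advance_consumer(), rxe_cq_post(), and rxe_counter_inc() reflect mainline naming at certain versions and may differ in your tree:

    /* Sketch of do_complete()'s completion gate. */
    struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
    struct rxe_cqe cqe;

    if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
        (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
        wqe->status != IB_WC_SUCCESS) {
        make_send_cqe(qp, wqe, &cqe);
        advance_consumer(qp->sq.queue);
        rxe_cq_post(qp->scq, &cqe, 0);
    } else {
        advance_consumer(qp->sq.queue);
    }

    /* stats: count completed sends (lines 428-430) */
    if (wqe->wr.opcode == IB_WR_SEND ||
        wqe->wr.opcode == IB_WR_SEND_WITH_IMM ||
        wqe->wr.opcode == IB_WR_SEND_WITH_INV)
        rxe_counter_inc(rxe, RXE_CNT_RDMA_SEND);
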
445 struct rxe_send_wqe *wqe)
449 if (wqe->has_rd_atomic) {
450 wqe->has_rd_atomic = 0;
481 do_complete(qp, wqe);
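
complete_ack() (445-481) first returns the read/atomic credit this WQE consumed (has_rd_atomic, 449-450) and, if the requester had stalled waiting for a credit, clears its wait flag and reschedules it, before finishing through do_complete() at 481. A sketch of the credit release, with the req-side field names reconstructed from mainline:

    /* Sketch of the rd_atomic credit release in complete_ack(). */
    if (wqe->has_rd_atomic) {
        wqe->has_rd_atomic = 0;
        atomic_inc(&qp->req.rd_atomic);
        if (qp->req.need_rd_atomic) {
            qp->comp.timeout_retry = 0;
            qp->req.need_rd_atomic = 0;
            rxe_run_task(&qp->req.task, 0);  /* wake the requester */
        }
    }
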
491 struct rxe_send_wqe *wqe)
493 if (pkt && wqe->state == wqe_state_pending) {
494 if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
495 qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
505 do_complete(qp, wqe);
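
complete_wqe() (491-505) advances the completer PSN just past the WQE's last packet, masked back into the 24-bit space with BTH_PSN_MASK (0xffffff), wakes a requester waiting on that PSN, and completes. A sketch, reconstructed:

    /* Sketch of complete_wqe(); BTH_PSN_MASK keeps comp.psn within
     * the 24-bit BTH PSN space.
     */
    if (pkt && wqe->state == wqe_state_pending) {
        if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
            qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
            qp->comp.opcode = -1;
        }
        if (qp->req.wait_psn) {
            qp->req.wait_psn = 0;
            rxe_run_task(&qp->req.task, 1);
        }
    }

    do_complete(qp, wqe);
    return COMPST_GET_WQE;
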
513 struct rxe_send_wqe *wqe;
520 while ((wqe = queue_head(qp->sq.queue))) {
522 wqe->status = IB_WC_WR_FLUSH_ERR;
523 do_complete(qp, wqe);
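
The loop at 513-523 is the send-queue drain run when the QP enters the error state: every WQE still queued is completed with IB_WC_WR_FLUSH_ERR so the consumer observes a flush instead of silence. A sketch, assuming the mainline notify flag that distinguishes flushing with completions from silently retiring entries:

    /* Sketch of the SQ drain behind lines 513-523. */
    while ((wqe = queue_head(qp->sq.queue))) {
        if (notify) {
            wqe->status = IB_WC_WR_FLUSH_ERR;
            do_complete(qp, wqe);
        } else {
            advance_consumer(qp->sq.queue);
        }
    }
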
534 struct rxe_send_wqe *wqe = NULL;
574 state = get_wqe(qp, pkt, &wqe);
578 state = check_psn(qp, pkt, wqe);
582 state = check_ack(qp, pkt, wqe);
586 state = do_read(qp, pkt, wqe);
590 state = do_atomic(qp, pkt, wqe);
594 if (wqe->state == wqe_state_pending &&
595 wqe->last_psn == pkt->psn)
602 state = complete_ack(qp, pkt, wqe);
606 state = complete_wqe(qp, pkt, wqe);
635 if (qp->comp.timeout_retry && wqe) {
666 if (!wqe || (wqe->state == wqe_state_posted))
713 wqe->status = IB_WC_RETRY_EXC_ERR;
736 wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
742 WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
743 do_complete(qp, wqe);
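
Everything from 534 on is rxe_completer() itself: an endless switch over COMPST_* states that strings the helpers above together (get_wqe at 574, check_psn at 578, check_ack at 582, do_read/do_atomic at 586/590, complete_ack/complete_wqe at 602/606) and, on the retry paths, stamps IB_WC_RETRY_EXC_ERR (713) or IB_WC_RNR_RETRY_EXC_ERR (736) before the error arm completes the WQE (742-743) and moves the QP to error. A heavily compressed sketch of the loop's shape; the real function also handles response-skb dequeueing, retry timers (635), and task rescheduling, all elided here:

    /* Compressed shape of rxe_completer()'s state machine; a
     * reconstruction, not the exact source.
     */
    state = COMPST_GET_ACK;
    while (1) {
        switch (state) {
        case COMPST_GET_ACK:
            /* dequeue a response packet, if one arrived */
            state = COMPST_GET_WQE;
            break;
        case COMPST_GET_WQE:
            state = get_wqe(qp, pkt, &wqe);
            break;
        case COMPST_CHECK_PSN:
            state = check_psn(qp, pkt, wqe);
            break;
        case COMPST_CHECK_ACK:
            state = check_ack(qp, pkt, wqe);
            break;
        case COMPST_READ:
            state = do_read(qp, pkt, wqe);
            break;
        case COMPST_ATOMIC:
            state = do_atomic(qp, pkt, wqe);
            break;
        case COMPST_COMP_ACK:
            state = complete_ack(qp, pkt, wqe);
            break;
        case COMPST_COMP_WQE:
            state = complete_wqe(qp, pkt, wqe);
            break;
        case COMPST_ERROR:
            WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
            do_complete(qp, wqe);
            rxe_qp_error(qp);
            return -EAGAIN;  /* exit path simplified */
        /* COMPST_WRITE_SEND, COMPST_UPDATE_COMP, COMPST_EXIT,
         * COMPST_DONE, COMPST_ERROR_RETRY, COMPST_RNR_RETRY elided
         */
        }
    }
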