Lines matching refs: wqe
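These hits appear to be cross-reference output for the send WQE pointer in the SoftRoCE completer (drivers/infiniband/sw/rxe/rxe_comp.c); the number at the start of each line is a line number in that file. For orientation, the three wqe_state values referenced below come from the driver's send-WQE lifecycle, roughly:

enum wqe_state {		/* as defined in rxe_verbs.h; the comments are my summary */
	wqe_state_posted,	/* posted by the ULP, requester has not started it */
	wqe_state_processing,	/* requester is transmitting it */
	wqe_state_pending,	/* fully sent, waiting for the ack/response */
	wqe_state_done,		/* finished, ready to be completed */
	wqe_state_error,	/* failed, to be completed with an error status */
};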
150 struct rxe_send_wqe *wqe;
155 wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
156 *wqe_p = wqe;
159 if (!wqe || wqe->state == wqe_state_posted)
163 if (wqe->state == wqe_state_done)
167 if (wqe->state == wqe_state_error)
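The first cluster (file lines 150-167) is the WQE fetch step: the completer peeks at the head of the send queue and picks its next state from wqe->state. A hedged reconstruction of that dispatch, with the COMPST_* codes being the driver's completer-state values:

static inline enum comp_state get_wqe(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe **wqe_p)
{
	struct rxe_send_wqe *wqe;

	wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
	*wqe_p = wqe;

	/* no WQE, or the requester has not started it yet */
	if (!wqe || wqe->state == wqe_state_posted)
		return pkt ? COMPST_DONE : COMPST_EXIT;

	/* WQE needs no ack: complete it directly */
	if (wqe->state == wqe_state_done)
		return COMPST_COMP_WQE;

	/* WQE already failed */
	if (wqe->state == wqe_state_error)
		return COMPST_ERROR;

	/* otherwise match the response packet against the WQE's PSNs */
	return pkt ? COMPST_CHECK_PSN : COMPST_EXIT;
}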
183 struct rxe_send_wqe *wqe)
190 diff = psn_compare(pkt->psn, wqe->last_psn);
192 if (wqe->state == wqe_state_pending) {
193 if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
209 if (pkt->psn == wqe->last_psn)
217 } else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
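File lines 183-217 are the PSN check: the response PSN is compared against the oldest in-flight WQE's last_psn and against qp->comp.psn, the next PSN the completer expects. A condensed sketch of the decisions these hits belong to (the driver's control flow is equivalent, though not literally this shape):

static inline enum comp_state check_psn(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	s32 diff;

	/* response lies beyond the oldest WQE */
	diff = psn_compare(pkt->psn, wqe->last_psn);
	if (diff > 0) {
		if (wqe->state == wqe_state_pending) {
			if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
				return COMPST_ERROR_RETRY; /* reads/atomics cannot be skipped */
			reset_retry_counters(qp);
			return COMPST_COMP_WQE; /* implicit ack of a send/write */
		}
		return COMPST_DONE;
	}

	/* compare against the expected response PSN */
	diff = psn_compare(pkt->psn, qp->comp.psn);
	if (diff < 0)
		/* probably a retried response: complete only an exact match */
		return pkt->psn == wqe->last_psn ? COMPST_COMP_ACK : COMPST_DONE;
	else if (diff > 0 && (wqe->mask & WR_ATOMIC_OR_READ_MASK))
		return COMPST_ERROR_RETRY; /* missed part of a read/atomic reply */
	else
		return COMPST_CHECK_ACK;
}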
226 struct rxe_send_wqe *wqe)
252 if ((pkt->psn == wqe->first_psn &&
255 (wqe->first_psn == wqe->last_psn &&
277 if (wqe->wr.opcode == IB_WR_ATOMIC_WRITE)
284 if (wqe->wr.opcode != IB_WR_RDMA_READ &&
285 wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV &&
286 wqe->wr.opcode != IB_WR_FLUSH) {
287 wqe->status = IB_WC_FATAL_ERR;
299 if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
300 wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
334 wqe->status = IB_WC_REM_INV_REQ_ERR;
338 wqe->status = IB_WC_REM_ACCESS_ERR;
342 wqe->status = IB_WC_REM_OP_ERR;
347 wqe->status = IB_WC_REM_OP_ERR;
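File lines 226-347 come from the ack-validation step: the response opcode must be legal for the outstanding WQE (read responses pair with RDMA READ, READ_WITH_INV or FLUSH requests, lines 284-287, with a special case for atomic write at line 277; atomic acks pair with compare-swap or fetch-add, lines 299-300), and an explicit NAK has its AETH syndrome translated into a work-completion status stored in the WQE (lines 334-347). A hypothetical helper summarizing that translation, which the driver actually does inline inside check_ack():

static enum ib_wc_status nak_syndrome_to_status(u8 syn)
{
	switch (syn) {
	case AETH_NAK_INVALID_REQ:
		return IB_WC_REM_INV_REQ_ERR;	/* line 334 */
	case AETH_NAK_REM_ACC_ERR:
		return IB_WC_REM_ACCESS_ERR;	/* line 338 */
	case AETH_NAK_REM_OP_ERR:
		return IB_WC_REM_OP_ERR;	/* line 342 */
	default:
		return IB_WC_REM_OP_ERR;	/* line 347: unrecognized syndrome */
	}
}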
365 struct rxe_send_wqe *wqe)
370 &wqe->dma, payload_addr(pkt),
373 wqe->status = IB_WC_LOC_PROT_ERR;
377 if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
385 struct rxe_send_wqe *wqe)
392 &wqe->dma, &atomic_orig,
395 wqe->status = IB_WC_LOC_PROT_ERR;
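File lines 365-395 cover payload delivery: do_read() copies read-response data into the WQE's DMA state, do_atomic() copies the 8-byte original value returned for an atomic, and either one marks the WQE with IB_WC_LOC_PROT_ERR if the copy fails. A hedged reconstruction of the read path; the copy-direction flag name is my best guess at this kernel's spelling:

static inline enum comp_state do_read(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe *wqe)
{
	int ret;

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, payload_addr(pkt),
			payload_size(pkt), RXE_TO_MR_OBJ);
	if (ret) {
		wqe->status = IB_WC_LOC_PROT_ERR;
		return COMPST_ERROR;
	}

	/* all data received and this was the last response packet */
	if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
		return COMPST_COMP_ACK;

	return COMPST_UPDATE_COMP;
}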
402 static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
411 wc->wr_id = wqe->wr.wr_id;
412 wc->status = wqe->status;
415 uwc->wr_id = wqe->wr.wr_id;
416 uwc->status = wqe->status;
420 if (wqe->status == IB_WC_SUCCESS) {
422 wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
423 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
424 wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
426 wc->byte_len = wqe->dma.length;
428 uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
429 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
430 wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
432 uwc->byte_len = wqe->dma.length;
435 if (wqe->status != IB_WC_WR_FLUSH_ERR)
437 wqe->status);
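File lines 402-437 build the completion entry: make_send_cqe() copies wr_id and status from the WQE into a kernel struct ib_wc or, for user-space QPs, a struct ib_uverbs_wc, fills opcode and byte_len only for successful WQEs, and logs any non-flush error status (lines 435-437). A trimmed, excerpt-style sketch of the kernel-CQ branch:

	wc->wr_id = wqe->wr.wr_id;
	wc->status = wqe->status;
	if (wqe->status == IB_WC_SUCCESS) {
		wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			wc->wc_flags = IB_WC_WITH_IMM;	/* completion carries immediate data */
		wc->byte_len = wqe->dma.length;
	}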
449 static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
457 (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
458 wqe->status != IB_WC_SUCCESS);
461 make_send_cqe(qp, wqe, &cqe);
468 if (wqe->wr.opcode == IB_WR_SEND ||
469 wqe->wr.opcode == IB_WR_SEND_WITH_IMM ||
470 wqe->wr.opcode == IB_WR_SEND_WITH_INV)
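File lines 449-470 are from do_complete(), which retires the head WQE: it builds and posts a CQE only when one is required, advances the send-queue consumer, and bumps the send counters for the IB_WR_SEND* opcodes (lines 468-470). The posting condition, per lines 457-458, is roughly:

	/* post a CQE if the QP signals every WR, the WR asked for a
	 * signaled completion, or the WQE finished in error
	 */
	post = (qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
	       (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	       (wqe->status != IB_WC_SUCCESS);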
510 struct rxe_send_wqe *wqe)
512 if (wqe->has_rd_atomic) {
513 wqe->has_rd_atomic = 0;
524 do_complete(qp, wqe);
534 struct rxe_send_wqe *wqe)
536 if (pkt && wqe->state == wqe_state_pending) {
537 if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
538 qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
548 do_complete(qp, wqe);
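File lines 510-548 are the two completion paths. complete_ack() (lines 512-524) first returns the rd_atomic credit held by a read/atomic WQE, waking the requester if it was blocked on one, then calls do_complete(). complete_wqe() (lines 534-548) retires a WQE that needs no further ack; a hedged reconstruction (the requester-wakeup helper is named rxe_sched_task() in recent kernels):

static inline enum comp_state complete_wqe(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	if (pkt && wqe->state == wqe_state_pending) {
		/* step the completer PSN past this WQE */
		if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
			qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
			qp->comp.opcode = -1;
		}

		/* wake the requester if it was waiting on this PSN */
		if (qp->req.wait_psn) {
			qp->req.wait_psn = 0;
			rxe_sched_task(&qp->req.task);
		}
	}

	do_complete(qp, wqe);

	return COMPST_GET_WQE;
}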
565 /* complete send wqe with flush error */
566 static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
574 uwc->wr_id = wqe->wr.wr_id;
578 wc->wr_id = wqe->wr.wr_id;
591 * if unable to complete a wqe, i.e. cq is full, stop
596 struct rxe_send_wqe *wqe;
604 while ((wqe = queue_head(q, q->type))) {
606 err = flush_send_wqe(qp, wqe);
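File lines 565-606 handle queue flushing when the QP enters the error state: flush_send_wqe() posts a completion with IB_WC_WR_FLUSH_ERR for one WQE, and flush_send_queue() drains everything left on the send queue, giving up on notifications (but not on the drain) once the CQ is full, per the comment at line 591. The drain loop, roughly:

	while ((wqe = queue_head(q, q->type))) {
		if (notify) {
			err = flush_send_wqe(qp, wqe);	/* post IB_WC_WR_FLUSH_ERR */
			if (err)
				notify = 0;		/* CQ full: keep draining silently */
		}
		queue_advance_consumer(q, q->type);
	}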
650 struct rxe_send_wqe *wqe = NULL;
694 state = get_wqe(qp, pkt, &wqe);
698 state = check_psn(qp, pkt, wqe);
702 state = check_ack(qp, pkt, wqe);
706 state = do_read(qp, pkt, wqe);
710 state = do_atomic(qp, pkt, wqe);
714 if (wqe->state == wqe_state_pending &&
715 wqe->last_psn == pkt->psn)
722 state = complete_ack(qp, pkt, wqe);
726 state = complete_wqe(qp, pkt, wqe);
750 if (qp->comp.timeout_retry && wqe) {
768 if (!wqe || (wqe->state == wqe_state_posted))
801 wqe->status = IB_WC_RETRY_EXC_ERR;
825 wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
831 WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
832 do_complete(qp, wqe);
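The tail of the listing (file lines 650-832) is rxe_completer() itself, the state machine that drives all of the helpers above: it pulls response packets, walks GET_WQE, CHECK_PSN, CHECK_ACK, READ/ATOMIC and COMP_ACK/COMP_WQE, retries on timeout while a WQE is still outstanding (line 750), and when the transport or RNR retry budget is exhausted it stamps the WQE with IB_WC_RETRY_EXC_ERR or IB_WC_RNR_RETRY_EXC_ERR and completes it in error (lines 801, 825, 831-832). A compressed sketch of the dispatch implied by lines 694-726; the real loop also handles the update, exit, retry and error states elided here:

	while (1) {
		switch (state) {
		case COMPST_GET_WQE:
			state = get_wqe(qp, pkt, &wqe);
			break;
		case COMPST_CHECK_PSN:
			state = check_psn(qp, pkt, wqe);
			break;
		case COMPST_CHECK_ACK:
			state = check_ack(qp, pkt, wqe);
			break;
		case COMPST_READ:
			state = do_read(qp, pkt, wqe);
			break;
		case COMPST_ATOMIC:
			state = do_atomic(qp, pkt, wqe);
			break;
		case COMPST_COMP_ACK:
			state = complete_ack(qp, pkt, wqe);
			break;
		case COMPST_COMP_WQE:
			state = complete_wqe(qp, pkt, wqe);
			break;
		/* ... COMPST_UPDATE_COMP, COMPST_DONE, retry/RNR/error states ... */
		}
	}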