Lines matching refs:req in net/sunrpc/xprtrdma/frwr_ops.c (the leading number on each entry is the source line number):
89 /* The MR is returned to the req's MR free list instead
96 * @req: request to reset
101 * NB: This is safe only as long as none of @req's MRs are
105 void frwr_reset(struct rpcrdma_req *req)
109 while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
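The frwr_reset() matches above (lines 96-109) show the request's registered MRs being popped off req->rl_registered one at a time until the list is empty. Below is a minimal userspace sketch of that pop-until-empty pattern; the mr_node type and the mr_pop()/mr_push()/req_reset() helpers are illustrative stand-ins, not the kernel's rpcrdma_mr API.

#include <stdio.h>

struct mr_node {
	int rkey;                 /* stands in for the MR's registration key */
	struct mr_node *next;
};

/* Pop the first MR off a singly linked list; NULL when the list is empty. */
static struct mr_node *mr_pop(struct mr_node **list)
{
	struct mr_node *mr = *list;

	if (mr)
		*list = mr->next;
	return mr;
}

static void mr_push(struct mr_node **list, struct mr_node *mr)
{
	mr->next = *list;
	*list = mr;
}

/* Analogue of the loop at line 109: drain "registered" back onto "free". */
static void req_reset(struct mr_node **registered, struct mr_node **free_list)
{
	struct mr_node *mr;

	while ((mr = mr_pop(registered)))
		mr_push(free_list, mr);
}

int main(void)
{
	struct mr_node a = { .rkey = 1 }, b = { .rkey = 2 };
	struct mr_node *registered = NULL, *free_list = NULL;

	mr_push(&registered, &a);
	mr_push(&registered, &b);
	req_reset(&registered, &free_list);

	for (struct mr_node *mr = free_list; mr; mr = mr->next)
		printf("MR rkey %d back on the free list\n", mr->rkey);
	return 0;
}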
372 * @req: prepared RPC Call
383 int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
385 struct ib_send_wr *post_wr, *send_wr = &req->rl_wr;
393 list_for_each_entry(mr, &req->rl_registered, mr_list) {
406 if ((kref_read(&req->rl_kref) > 1) || num_wrs > ep->re_send_count) {
415 trace_xprtrdma_post_send(req);
418 trace_xprtrdma_post_send_err(r_xprt, req, ret);
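The frwr_send() matches (lines 383-418) show a post_wr pointer that starts at the request's Send WR, a walk over rl_registered, and a signaled-completion test based on kref_read() and the endpoint's send budget. The sketch below models only that chaining-and-signaling shape; struct wr and chain_regwr() are invented names, and the kref and send-budget values are made up for the demo.

#include <stdio.h>

struct wr {
	const char *label;
	int signaled;             /* ask the provider for a completion? */
	struct wr *next;
};

/* Link a registration WR ahead of the current chain head. */
static struct wr *chain_regwr(struct wr *post_wr, struct wr *regwr)
{
	regwr->next = post_wr;
	return regwr;
}

int main(void)
{
	struct wr send_wr = { .label = "Send" };
	struct wr reg1 = { .label = "FastReg MR 1" };
	struct wr reg2 = { .label = "FastReg MR 2" };
	struct wr *post_wr = &send_wr;
	int num_wrs = 1;
	int kref = 2, send_budget = 8;    /* made-up values */

	/* One registration WR per MR on rl_registered, linked ahead of Send. */
	post_wr = chain_regwr(post_wr, &reg1);
	num_wrs++;
	post_wr = chain_regwr(post_wr, &reg2);
	num_wrs++;

	/*
	 * Mirrors the shape of the test at line 406: request a Send
	 * completion when the request is still referenced elsewhere or
	 * the unsignaled budget has been used up.
	 */
	if (kref > 1 || num_wrs > send_budget)
		send_wr.signaled = 1;

	for (struct wr *wr = post_wr; wr; wr = wr->next)
		printf("post %s%s\n", wr->label,
		       wr->signaled ? " (signaled)" : "");
	return 0;
}

As far as the listing shows, the point of linking the registration WRs in front of the Send WR and posting one chain is that a send queue executes its work requests in order, so the registrations complete before the Send that depends on them.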
486 * frwr_unmap_sync - invalidate memory regions that were registered for @req
488 * @req: rpcrdma_req with a non-empty list of MRs to process
496 void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
510 mr = rpcrdma_mr_pop(&req->rl_registered);
528 } while ((mr = rpcrdma_mr_pop(&req->rl_registered)));
556 trace_xprtrdma_post_linv_err(req, rc);
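The frwr_unmap_sync() matches (lines 486-556) show a do/while loop that pops every MR still registered for the request (lines 510 and 528) ahead of the error-path trace at line 556. A hedged reading: each popped MR gets a LocalInv work request chained onto a list, and the caller waits for the final, signaled one to complete. The userspace sketch below models that shape only; localinv_wr, mr_pop(), and the rest are stand-ins, not kernel interfaces.

#include <stdio.h>
#include <stdlib.h>

struct mr_node {
	int rkey;
	struct mr_node *next;
};

struct localinv_wr {
	int rkey;                     /* key being invalidated */
	struct localinv_wr *next;
};

static struct mr_node *mr_pop(struct mr_node **list)
{
	struct mr_node *mr = *list;

	if (mr)
		*list = mr->next;
	return mr;
}

int main(void)
{
	struct mr_node a = { .rkey = 17 };
	struct mr_node b = { .rkey = 42, .next = &a };
	struct mr_node *registered = &b;   /* kernel-doc: list is non-empty */
	struct localinv_wr *first = NULL, **prev = &first, *last = NULL;
	struct mr_node *mr;

	/* Same do/while shape as lines 510 and 528. */
	mr = mr_pop(&registered);
	do {
		struct localinv_wr *wr = calloc(1, sizeof(*wr));

		if (!wr)
			return 1;
		wr->rkey = mr->rkey;
		*prev = wr;
		prev = &wr->next;
		last = wr;
	} while ((mr = mr_pop(&registered)));

	/* Only the final WR would be signaled; the caller waits on it. */
	for (struct localinv_wr *wr = first; wr; wr = wr->next)
		printf("LocalInv rkey %d%s\n", wr->rkey,
		       wr == last ? " (signaled, waited on)" : "");

	while (first) {
		struct localinv_wr *wr = first;

		first = wr->next;
		free(wr);
	}
	return 0;
}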
593 * frwr_unmap_async - invalidate memory regions that were registered for @req
595 * @req: rpcrdma_req with a non-empty list of MRs to process
602 void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
613 mr = rpcrdma_mr_pop(&req->rl_registered);
631 } while ((mr = rpcrdma_mr_pop(&req->rl_registered)));
649 trace_xprtrdma_post_linv_err(req, rc);
656 rpcrdma_unpin_rqst(req->rl_reply);
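The frwr_unmap_async() matches (lines 593-656) show the same MR-popping loop, an error trace at line 649, and rpcrdma_unpin_rqst(req->rl_reply) at line 656, which suggests that when posting the invalidation chain fails, the caller must release the pinned reply itself because no completion handler will run to do it. The sketch below models that callback-or-error-path split with invented names (fake_req, unmap_async, unpin_reply); it is not the kernel implementation.

#include <stdio.h>
#include <stdbool.h>

struct fake_req {
	const char *reply;
	bool reply_pinned;
	void (*done)(struct fake_req *req);   /* completion callback */
};

static void unpin_reply(struct fake_req *req)
{
	req->reply_pinned = false;
	printf("reply %s unpinned\n", req->reply);
}

/* Stands in for the work done by the final LocalInv completion handler. */
static void last_localinv_done(struct fake_req *req)
{
	printf("all MRs invalidated, completing RPC %s\n", req->reply);
	unpin_reply(req);
}

/* Returns 0 if the chain was "posted", nonzero if posting failed. */
static int unmap_async(struct fake_req *req, bool simulate_post_error)
{
	req->done = last_localinv_done;
	if (simulate_post_error)
		return -1;

	/* Pretend the device completed the final LocalInv. */
	req->done(req);
	return 0;
}

int main(void)
{
	struct fake_req req = { .reply = "rqst-1", .reply_pinned = true };

	/* Error path analogue of lines 649-656: the caller cleans up. */
	if (unmap_async(&req, true))
		unpin_reply(&req);
	return 0;
}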