Lines matching references to mr (each entry below is prefixed with its line number in the source file)
49 struct rpcrdma_mr *mr)
51 struct rpc_rdma_cid *cid = &mr->mr_cid;
54 cid->ci_completion_id = mr->mr_ibmr->res.id;
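The three entries above (lines 49, 51, 54) belong to the helper that stamps a completion ID onto each MR; the listing appears to come from the kernel's net/sunrpc/xprtrdma/frwr_ops.c. A minimal sketch of how they fit together, with the elided lines (the function name and the queue-id assignment, which do not reference mr) reconstructed as assumptions:

    static void frwr_cid_init(struct rpcrdma_ep *ep,
                              struct rpcrdma_mr *mr)
    {
        struct rpc_rdma_cid *cid = &mr->mr_cid;

        /* Assumed: the queue ID is taken from the send CQ; only the
         * completion-ID assignment below appears in the listing. */
        cid->ci_queue_id = ep->re_attr.send_cq->res.id;
        cid->ci_completion_id = mr->mr_ibmr->res.id;
    }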
57 static void frwr_mr_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
59 if (mr->mr_device) {
60 trace_xprtrdma_mr_unmap(mr);
61 ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents,
62 mr->mr_dir);
63 mr->mr_device = NULL;
69 * @mr: MR allocated by frwr_mr_init
72 void frwr_mr_release(struct rpcrdma_mr *mr)
76 frwr_mr_unmap(mr->mr_xprt, mr);
78 rc = ib_dereg_mr(mr->mr_ibmr);
80 trace_xprtrdma_frwr_dereg(mr, rc);
81 kfree(mr->mr_sg);
82 kfree(mr);
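Lines 69-82 belong to frwr_mr_release(). The listing omits the local rc declaration and the guard around the dereg trace point because those lines do not reference mr; a sketch of the full teardown sequence, with the omitted control flow reconstructed as an assumption:

    void frwr_mr_release(struct rpcrdma_mr *mr)
    {
        int rc;

        frwr_mr_unmap(mr->mr_xprt, mr);

        rc = ib_dereg_mr(mr->mr_ibmr);
        if (rc)    /* assumed: trace only on failure */
            trace_xprtrdma_frwr_dereg(mr, rc);
        kfree(mr->mr_sg);
        kfree(mr);
    }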
85 static void frwr_mr_put(struct rpcrdma_mr *mr)
87 frwr_mr_unmap(mr->mr_xprt, mr);
92 rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
107 struct rpcrdma_mr *mr;
109 while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
110 frwr_mr_put(mr);
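Lines 85-92 and 107-110 show the two recycling paths: frwr_mr_put() DMA-unmaps a single MR and pushes it back onto its owning request's free list, while the while-loop drains every MR still on the request's rl_registered list. A sketch; the name of the loop's enclosing function is an assumption (in current kernels it is frwr_reset()):

    static void frwr_mr_put(struct rpcrdma_mr *mr)
    {
        frwr_mr_unmap(mr->mr_xprt, mr);

        /* Returned to the request's own free list, so no
         * transport-wide lock is needed. */
        rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
    }

    /* Assumed enclosing function: discards every registration still
     * held by a request being retransmitted or torn down. */
    void frwr_reset(struct rpcrdma_req *req)
    {
        struct rpcrdma_mr *mr;

        while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
            frwr_mr_put(mr);
    }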
116 * @mr: generic MR to prepare for FRWR
121 int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
137 mr->mr_xprt = r_xprt;
138 mr->mr_ibmr = frmr;
139 mr->mr_device = NULL;
140 INIT_LIST_HEAD(&mr->mr_list);
141 init_completion(&mr->mr_linv_done);
142 frwr_cid_init(ep, mr);
145 mr->mr_sg = sg;
150 trace_xprtrdma_frwr_alloc(mr, PTR_ERR(frmr));
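Lines 116-150 are frwr_mr_init(), which allocates the ib_mr and the scatterlist behind each rpcrdma_mr. The listing shows only the mr-> assignments; the allocation calls, error handling, and GFP flags below are reconstructed assumptions and differ between kernel releases:

    int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
    {
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        unsigned int depth = ep->re_max_fr_depth;    /* assumed */
        struct scatterlist *sg;
        struct ib_mr *frmr;

        /* Assumed allocation; flags and NUMA placement vary by release. */
        sg = kcalloc(depth, sizeof(*sg), GFP_KERNEL);
        if (!sg)
            return -ENOMEM;

        frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
        if (IS_ERR(frmr))
            goto out_mr_err;

        mr->mr_xprt = r_xprt;
        mr->mr_ibmr = frmr;
        mr->mr_device = NULL;
        INIT_LIST_HEAD(&mr->mr_list);
        init_completion(&mr->mr_linv_done);
        frwr_cid_init(ep, mr);

        sg_init_table(sg, depth);
        mr->mr_sg = sg;
        return 0;

    out_mr_err:
        kfree(sg);
        trace_xprtrdma_frwr_alloc(mr, PTR_ERR(frmr));
        return PTR_ERR(frmr);
    }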
276 * @mr: MR to fill in
282 * On success, @mr is filled in.
287 struct rpcrdma_mr *mr)
298 sg_set_page(&mr->mr_sg[i], seg->mr_page,
309 mr->mr_dir = rpcrdma_data_dir(writing);
310 mr->mr_nents = i;
312 dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
313 mr->mr_dir);
316 mr->mr_device = ep->re_id->device;
318 ibmr = mr->mr_ibmr;
319 n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
328 reg_wr = &mr->mr_regwr;
329 reg_wr->mr = ibmr;
335 mr->mr_handle = ibmr->rkey;
336 mr->mr_length = ibmr->length;
337 mr->mr_offset = ibmr->iova;
338 trace_xprtrdma_mr_map(mr);
343 trace_xprtrdma_frwr_sgerr(mr, i);
347 trace_xprtrdma_frwr_maperr(mr, n);
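Lines 276-347 cover frwr_map(), which gathers the RPC segments into the MR's scatterlist, DMA-maps it, maps it into the fast-register MR, and fills in the IB_WR_REG_MR work request. A condensed sketch of that sequence; the loop bounds, the rkey refresh, the access-flag selection, and the error returns are reconstructed assumptions (only the mr-> lines appear in the listing), and the xid stamping of ibmr->iova done by the real function is omitted:

    struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
                                    struct rpcrdma_mr_seg *seg,
                                    int nsegs, bool writing, __be32 xid,
                                    struct rpcrdma_mr *mr)
    {
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        struct ib_reg_wr *reg_wr;
        int i, n, dma_nents;
        struct ib_mr *ibmr;
        u8 key;

        /* Gather the segments into mr->mr_sg (loop bounds assumed). */
        for (i = 0; i < nsegs; i++, seg++)
            sg_set_page(&mr->mr_sg[i], seg->mr_page,
                        seg->mr_len, seg->mr_offset);
        mr->mr_dir = rpcrdma_data_dir(writing);
        mr->mr_nents = i;

        dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
                                  mr->mr_dir);
        if (!dma_nents)
            goto out_dmamap_err;
        mr->mr_device = ep->re_id->device;

        ibmr = mr->mr_ibmr;
        n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
        if (n != dma_nents)
            goto out_mapmr_err;

        /* Assumed: bump the rkey before advertising the handle. */
        key = ibmr->rkey & 0xFF;
        ib_update_fast_reg_key(ibmr, ++key);

        reg_wr = &mr->mr_regwr;
        reg_wr->mr = ibmr;
        reg_wr->key = ibmr->rkey;
        reg_wr->access = writing ?
                         IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
                         IB_ACCESS_REMOTE_READ;

        mr->mr_handle = ibmr->rkey;
        mr->mr_length = ibmr->length;
        mr->mr_offset = ibmr->iova;
        trace_xprtrdma_mr_map(mr);

        return seg;

    out_dmamap_err:
        trace_xprtrdma_frwr_sgerr(mr, i);
        return ERR_PTR(-EIO);

    out_mapmr_err:
        trace_xprtrdma_frwr_maperr(mr, n);
        return ERR_PTR(-EIO);
    }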
361 struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
364 trace_xprtrdma_wc_fastreg(wc, &mr->mr_cid);
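Lines 361-364 are from frwr_wc_fastreg(), the Send-completion handler for REG_MR work requests: the MR is recovered from the completed WR's ib_cqe via container_of(). A short sketch; the flush-on-disconnect call at the end is an assumption based on how the other handlers in this file behave:

    static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
    {
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);

        /* WARNING: only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_fastreg(wc, &mr->mr_cid);

        rpcrdma_flush_disconnect(cq->cq_context, wc);    /* assumed */
    }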
387 struct rpcrdma_mr *mr;
393 list_for_each_entry(mr, &req->rl_registered, mr_list) {
394 trace_xprtrdma_mr_fastreg(mr);
396 mr->mr_cqe.done = frwr_wc_fastreg;
397 mr->mr_regwr.wr.next = post_wr;
398 mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;
399 mr->mr_regwr.wr.num_sge = 0;
400 mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
401 mr->mr_regwr.wr.send_flags = 0;
402 post_wr = &mr->mr_regwr.wr;
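Lines 387-402 show the heart of frwr_send(): each registered MR's REG_MR work request is linked ahead of the request's Send WR, so a single post submits the whole chain in order. A sketch with the surrounding declarations and the final post reconstructed as assumptions; the signaling and credit accounting done by the real function is omitted:

    int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
    {
        struct ib_send_wr *post_wr = &req->rl_wr;    /* assumed: the Send WR */
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        struct rpcrdma_mr *mr;

        list_for_each_entry(mr, &req->rl_registered, mr_list) {
            trace_xprtrdma_mr_fastreg(mr);

            mr->mr_cqe.done = frwr_wc_fastreg;
            mr->mr_regwr.wr.next = post_wr;
            mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;
            mr->mr_regwr.wr.num_sge = 0;
            mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
            mr->mr_regwr.wr.send_flags = 0;
            post_wr = &mr->mr_regwr.wr;
        }

        /* post_wr now points at the head of the REG_MR chain,
         * which ends at the Send WR. */
        return ib_post_send(ep->re_id->qp, post_wr, NULL);
    }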
423 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
430 struct rpcrdma_mr *mr;
432 list_for_each_entry(mr, mrs, mr_list)
433 if (mr->mr_handle == rep->rr_inv_rkey) {
434 list_del_init(&mr->mr_list);
435 trace_xprtrdma_mr_reminv(mr);
436 frwr_mr_put(mr);
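Lines 423-436 are frwr_reminv(): when the server used Remote Invalidation, the rkey carried in the reply is matched against the registered MRs, and the matching MR is released without issuing a LOCAL_INV. A sketch; only the trailing break is reconstructed (a single rkey can invalidate at most one MR per reply):

    void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
    {
        struct rpcrdma_mr *mr;

        list_for_each_entry(mr, mrs, mr_list)
            if (mr->mr_handle == rep->rr_inv_rkey) {
                list_del_init(&mr->mr_list);
                trace_xprtrdma_mr_reminv(mr);
                frwr_mr_put(mr);
                break;    /* assumed: only one MR can match */
            }
    }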
441 static void frwr_mr_done(struct ib_wc *wc, struct rpcrdma_mr *mr)
444 frwr_mr_put(mr);
456 struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
459 trace_xprtrdma_wc_li(wc, &mr->mr_cid);
460 frwr_mr_done(wc, mr);
475 struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
478 trace_xprtrdma_wc_li_wake(wc, &mr->mr_cid);
479 frwr_mr_done(wc, mr);
480 complete(&mr->mr_linv_done);
501 struct rpcrdma_mr *mr;
510 mr = rpcrdma_mr_pop(&req->rl_registered);
512 trace_xprtrdma_mr_localinv(mr);
515 last = &mr->mr_invwr;
517 last->wr_cqe = &mr->mr_cqe;
522 last->ex.invalidate_rkey = mr->mr_handle;
528 } while ((mr = rpcrdma_mr_pop(&req->rl_registered)));
530 mr = container_of(last, struct rpcrdma_mr, mr_invwr);
537 reinit_completion(&mr->mr_linv_done);
551 wait_for_completion(&mr->mr_linv_done);
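Lines 501-551 come from frwr_unmap_sync(): it pops each registered MR, builds a LOCAL_INV work request carrying that MR's rkey, chains the WRs together, posts the chain, and then sleeps on the last MR's mr_linv_done completion, which frwr_wc_localinv_wake (lines 475-480) signals. A condensed sketch; the chain bookkeeping, signaling flags, post call, and error handling are reconstructed assumptions:

    void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
    {
        struct ib_send_wr *first, *last, **prev = &first;
        struct rpcrdma_mr *mr;

        mr = rpcrdma_mr_pop(&req->rl_registered);
        do {
            trace_xprtrdma_mr_localinv(mr);

            last = &mr->mr_invwr;
            last->next = NULL;
            last->wr_cqe = &mr->mr_cqe;
            last->num_sge = 0;
            last->opcode = IB_WR_LOCAL_INV;
            last->send_flags = IB_SEND_SIGNALED;    /* assumed */
            last->ex.invalidate_rkey = mr->mr_handle;

            mr->mr_cqe.done = frwr_wc_localinv;

            *prev = last;
            prev = &last->next;
        } while ((mr = rpcrdma_mr_pop(&req->rl_registered)));

        /* Send-queue ordering: the last WR completes last, so only
         * it needs to wake the waiter. */
        mr = container_of(last, struct rpcrdma_mr, mr_invwr);
        mr->mr_cqe.done = frwr_wc_localinv_wake;
        reinit_completion(&mr->mr_linv_done);

        ib_post_send(r_xprt->rx_ep->re_id->qp, first, NULL);    /* assumed */

        wait_for_completion(&mr->mr_linv_done);
    }

The asynchronous variant at lines 606-631 builds the same LOCAL_INV chain but points the last WR's completion handler at frwr_wc_localinv_done (lines 572-588) and returns without waiting; that handler releases the final MR and completes the RPC reply from completion context.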
572 struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
576 trace_xprtrdma_wc_li_done(wc, &mr->mr_cid);
579 rep = mr->mr_req->rl_reply;
588 frwr_mr_put(mr);
606 struct rpcrdma_mr *mr;
613 mr = rpcrdma_mr_pop(&req->rl_registered);
615 trace_xprtrdma_mr_localinv(mr);
618 last = &mr->mr_invwr;
620 last->wr_cqe = &mr->mr_cqe;
625 last->ex.invalidate_rkey = mr->mr_handle;
631 } while ((mr = rpcrdma_mr_pop(&req->rl_registered)));
673 struct rpcrdma_mr *mr;
675 mr = rpcrdma_mr_get(r_xprt);
676 if (!mr)
678 mr->mr_req = NULL;
679 ep->re_write_pad_mr = mr;
684 if (IS_ERR(frwr_map(r_xprt, &seg, 1, true, xdr_zero, mr)))
686 trace_xprtrdma_mr_fastreg(mr);
688 mr->mr_cqe.done = frwr_wc_fastreg;
689 mr->mr_regwr.wr.next = NULL;
690 mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;
691 mr->mr_regwr.wr.num_sge = 0;
692 mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
693 mr->mr_regwr.wr.send_flags = 0;
695 return ib_post_send(ep->re_id->qp, &mr->mr_regwr.wr, NULL);
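Lines 673-695 are from the write-padding setup (frwr_wp_create() in current kernels): a spare MR is registered over a scratch page so that the XDR round-up padding the server writes for odd-length Write chunks lands there instead of in the receive buffers. The listing omits the segment setup between lines 679 and 684; the reconstruction below is an assumption:

    int frwr_wp_create(struct rpcrdma_xprt *r_xprt)
    {
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        struct rpcrdma_mr_seg seg;
        struct rpcrdma_mr *mr;

        mr = rpcrdma_mr_get(r_xprt);
        if (!mr)
            return -EAGAIN;    /* assumed return value */
        mr->mr_req = NULL;
        ep->re_write_pad_mr = mr;

        /* Assumed: register a single XDR unit of zeroes. */
        seg.mr_len = XDR_UNIT;
        seg.mr_page = virt_to_page(ep->re_write_pad);
        seg.mr_offset = offset_in_page(ep->re_write_pad);
        if (IS_ERR(frwr_map(r_xprt, &seg, 1, true, xdr_zero, mr)))
            return -EIO;
        trace_xprtrdma_mr_fastreg(mr);

        mr->mr_cqe.done = frwr_wc_fastreg;
        mr->mr_regwr.wr.next = NULL;
        mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;
        mr->mr_regwr.wr.num_sge = 0;
        mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
        mr->mr_regwr.wr.send_flags = 0;

        return ib_post_send(ep->re_id->qp, &mr->mr_regwr.wr, NULL);
    }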