Lines matching refs: mr
27 int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
29 switch (mr->ibmr.type) {
35 if (iova < mr->ibmr.iova ||
36 iova + length > mr->ibmr.iova + mr->ibmr.length) {
37 rxe_dbg_mr(mr, "iova/length out of range");
43 rxe_dbg_mr(mr, "mr type not supported\n");
48 static void rxe_mr_init(int access, struct rxe_mr *mr)
50 u32 key = mr->elem.index << 8 | rxe_get_next_key(-1);
57 mr->lkey = mr->ibmr.lkey = key;
58 mr->rkey = mr->ibmr.rkey = key;
60 mr->access = access;
61 mr->ibmr.page_size = PAGE_SIZE;
62 mr->page_mask = PAGE_MASK;
63 mr->page_shift = PAGE_SHIFT;
64 mr->state = RXE_MR_STATE_INVALID;
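
Note on the lkey/rkey construction at line 50: the key packs the pool element index into the upper bits and an 8-bit variant into the low byte, which is why later matches recover the pool index with key >> 8 (line 645) and compare only the index bits with key & ~0xff (line 707). A minimal standalone userspace sketch of that arithmetic (not driver code; the values are made up):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t index   = 0x1234;	/* hypothetical MR pool element index */
		uint32_t variant = 0xab;	/* hypothetical 8-bit key variant     */
		uint32_t key = index << 8 | variant;	/* cf. line 50 above */

		printf("key        = %#x\n", key);
		printf("pool index = %#x (key >> 8,    cf. line 645)\n", key >> 8);
		printf("index bits = %#x (key & ~0xff, cf. line 707)\n", key & ~0xffu);
		return 0;
	}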
67 void rxe_mr_init_dma(int access, struct rxe_mr *mr)
69 rxe_mr_init(access, mr);
71 mr->state = RXE_MR_STATE_VALID;
72 mr->ibmr.type = IB_MR_TYPE_DMA;
75 static unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
77 return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
80 static unsigned long rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova)
82 return iova & (mr_page_size(mr) - 1);
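
The two helpers above (lines 75-82) map an iova into the MR's page xarray: the index is the page distance from the MR's base iova, and the offset is the remainder within one page. A small userspace sketch of the same arithmetic, assuming 4 KiB pages and made-up addresses:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		unsigned int page_shift = 12;			/* assuming PAGE_SHIFT == 12 (4 KiB) */
		uint64_t page_size = 1ULL << page_shift;
		uint64_t mr_iova = 0x7f0000001000ULL;		/* hypothetical MR base iova */
		uint64_t iova    = 0x7f0000003a10ULL;		/* hypothetical target iova  */

		uint64_t index  = (iova >> page_shift) - (mr_iova >> page_shift);	/* cf. line 77 */
		uint64_t offset = iova & (page_size - 1);				/* cf. line 82 */

		printf("xarray index = %llu, page offset = %#llx\n",
		       (unsigned long long)index, (unsigned long long)offset);
		return 0;
	}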
94 static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
96 XA_STATE(xas, &mr->page_list, 0);
99 bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
111 rxe_dbg_mr(mr, "Page can't be persistent\n");
130 int access, struct rxe_mr *mr)
135 rxe_mr_init(access, mr);
137 xa_init(&mr->page_list);
141 rxe_dbg_mr(mr, "Unable to pin memory region err = %d\n",
146 err = rxe_mr_fill_pages_from_sgt(mr, &umem->sgt_append.sgt);
152 mr->umem = umem;
153 mr->ibmr.type = IB_MR_TYPE_USER;
154 mr->state = RXE_MR_STATE_VALID;
159 static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
161 XA_STATE(xas, &mr->page_list, 0);
165 xa_init(&mr->page_list);
183 mr->num_buf = num_buf;
188 int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
193 rxe_mr_init(RXE_ACCESS_REMOTE, mr);
195 err = rxe_mr_alloc(mr, max_pages);
199 mr->state = RXE_MR_STATE_FREE;
200 mr->ibmr.type = IB_MR_TYPE_MEM_REG;
210 struct rxe_mr *mr = to_rmr(ibmr);
212 bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
216 rxe_dbg_mr(mr, "Page cannot be persistent\n");
220 if (unlikely(mr->nbuf == mr->num_buf))
223 err = xa_err(xa_store(&mr->page_list, mr->nbuf, page, GFP_KERNEL));
227 mr->nbuf++;
234 struct rxe_mr *mr = to_rmr(ibmr);
235 unsigned int page_size = mr_page_size(mr);
237 mr->nbuf = 0;
238 mr->page_shift = ilog2(page_size);
239 mr->page_mask = ~((u64)page_size - 1);
240 mr->page_offset = mr->ibmr.iova & (page_size - 1);
245 static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
248 unsigned int page_offset = rxe_mr_iova_to_page_offset(mr, iova);
249 unsigned long index = rxe_mr_iova_to_index(mr, iova);
255 page = xa_load(&mr->page_list, index);
260 mr_page_size(mr) - page_offset);
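
rxe_mr_copy_xarray (lines 245-260) walks the xarray one page at a time, copying at most mr_page_size(mr) - page_offset bytes per step so no single copy crosses a page boundary. A hedged userspace sketch of that chunking loop over a plain array of buffers (made-up names, not the driver's xarray API):

	#include <stdio.h>
	#include <string.h>

	#define PAGE_SIZE 4096		/* assumed page size for the sketch */
	#define NPAGES    4

	int main(void)
	{
		static char pages[NPAGES][PAGE_SIZE];	/* stand-in for the MR page list */
		const char *src = "payload spanning a page boundary";
		size_t length = strlen(src) + 1;
		size_t offset = PAGE_SIZE - 8;		/* start 8 bytes before a boundary */
		size_t index = 1;			/* hypothetical starting page index */

		while (length) {
			size_t bytes = PAGE_SIZE - offset;	/* room left in this page */

			if (bytes > length)
				bytes = length;
			memcpy(&pages[index][offset], src, bytes);
			src += bytes;
			length -= bytes;
			offset = 0;		/* later pages are written from offset 0 */
			index++;
		}

		printf("tail of page 1: %.8s...\n", &pages[1][PAGE_SIZE - 8]);
		printf("head of page 2: %s\n", pages[2]);
		return 0;
	}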
277 static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 dma_addr, void *addr,
304 int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
312 if (WARN_ON(!mr))
315 if (mr->ibmr.type == IB_MR_TYPE_DMA) {
316 rxe_mr_copy_dma(mr, iova, addr, length, dir);
320 err = mr_check_range(mr, iova, length);
322 rxe_dbg_mr(mr, "iova out of range");
326 return rxe_mr_copy_xarray(mr, iova, addr, length, dir);
344 struct rxe_mr *mr = NULL;
357 mr = lookup_mr(pd, access, sge->lkey, RXE_LOOKUP_LOCAL);
358 if (!mr) {
368 if (mr) {
369 rxe_put(mr);
370 mr = NULL;
382 mr = lookup_mr(pd, access, sge->lkey,
384 if (!mr) {
398 err = rxe_mr_copy(mr, iova, addr, bytes, dir);
412 if (mr)
413 rxe_put(mr);
418 if (mr)
419 rxe_put(mr);
424 int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
433 /* mr must be valid even if length is zero */
434 if (WARN_ON(!mr))
440 if (mr->ibmr.type == IB_MR_TYPE_DMA)
443 err = mr_check_range(mr, iova, length);
448 index = rxe_mr_iova_to_index(mr, iova);
449 page = xa_load(&mr->page_list, index);
450 page_offset = rxe_mr_iova_to_page_offset(mr, iova);
454 mr_page_size(mr) - page_offset);
471 int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
479 if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
480 rxe_dbg_mr(mr, "mr not in valid state");
484 if (mr->ibmr.type == IB_MR_TYPE_DMA) {
491 err = mr_check_range(mr, iova, sizeof(value));
493 rxe_dbg_mr(mr, "iova out of range");
496 page_offset = rxe_mr_iova_to_page_offset(mr, iova);
497 index = rxe_mr_iova_to_index(mr, iova);
498 page = xa_load(&mr->page_list, index);
504 rxe_dbg_mr(mr, "iova not aligned");
529 int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
536 if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
537 rxe_dbg_mr(mr, "mr not in valid state");
541 if (mr->ibmr.type == IB_MR_TYPE_DMA) {
549 err = mr_check_range(mr, iova, sizeof(value));
551 rxe_dbg_mr(mr, "iova out of range");
554 page_offset = rxe_mr_iova_to_page_offset(mr, iova);
555 index = rxe_mr_iova_to_index(mr, iova);
556 page = xa_load(&mr->page_list, index);
563 rxe_dbg_mr(mr, "misaligned address");
577 int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
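
The second rxe_mr_do_atomic_write match at line 577 is presumably the fallback definition built when the full implementation (guarded by CONFIG_64BIT in the driver) is not available. Both atomic paths also reject an unaligned target ("iova not aligned" at line 504, "misaligned address" at line 563), since an 8-byte atomic must land on an 8-byte boundary within a single page. A minimal userspace sketch of that alignment test, with made-up addresses:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t aligned_iova   = 0x1000;	/* hypothetical, 8-byte aligned     */
		uint64_t unaligned_iova = 0x1004;	/* hypothetical, not 8-byte aligned */

		/* an 8-byte atomic target must sit on an 8-byte boundary */
		printf("%#llx aligned: %d\n", (unsigned long long)aligned_iova,
		       (aligned_iova & (sizeof(uint64_t) - 1)) == 0);
		printf("%#llx aligned: %d\n", (unsigned long long)unaligned_iova,
		       (unaligned_iova & (sizeof(uint64_t) - 1)) == 0);
		return 0;
	}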
619 struct rxe_mr *mr;
623 mr = rxe_pool_get_index(&rxe->mr_pool, index);
624 if (!mr)
627 if (unlikely((type == RXE_LOOKUP_LOCAL && mr->lkey != key) ||
628 (type == RXE_LOOKUP_REMOTE && mr->rkey != key) ||
629 mr_pd(mr) != pd || ((access & mr->access) != access) ||
630 mr->state != RXE_MR_STATE_VALID)) {
631 rxe_put(mr);
632 mr = NULL;
635 return mr;
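
The checks at lines 627-630 fail the lookup when the key of the requested type does not match, the PD differs, the MR is not in the valid state, or the requested access is not a subset of what the MR was registered with; the last test is the bitmask idiom (access & mr->access) != access. A small userspace illustration of that subset test with made-up flag values (not the kernel's IB_ACCESS_* constants):

	#include <stdio.h>

	#define ACC_LOCAL_WRITE   (1 << 0)	/* hypothetical access bits for the sketch */
	#define ACC_REMOTE_READ   (1 << 1)
	#define ACC_REMOTE_WRITE  (1 << 2)

	static int access_ok(int requested, int registered)
	{
		/* every requested bit must already be present in the registered mask */
		return (requested & registered) == requested;
	}

	int main(void)
	{
		int registered = ACC_LOCAL_WRITE | ACC_REMOTE_READ;

		printf("remote read  allowed: %d\n", access_ok(ACC_REMOTE_READ, registered));
		printf("remote write allowed: %d\n", access_ok(ACC_REMOTE_WRITE, registered));
		return 0;
	}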
641 struct rxe_mr *mr;
645 mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8);
646 if (!mr) {
652 remote = mr->access & RXE_ACCESS_REMOTE;
653 if (remote ? (key != mr->rkey) : (key != mr->lkey)) {
654 rxe_dbg_mr(mr, "wr key (%#x) doesn't match mr key (%#x)\n",
655 key, (remote ? mr->rkey : mr->lkey));
660 if (atomic_read(&mr->num_mw) > 0) {
661 rxe_dbg_mr(mr, "Attempt to invalidate an MR while bound to MWs\n");
666 if (unlikely(mr->ibmr.type != IB_MR_TYPE_MEM_REG)) {
667 rxe_dbg_mr(mr, "Type (%d) is wrong\n", mr->ibmr.type);
672 mr->state = RXE_MR_STATE_FREE;
676 rxe_put(mr);
682 * user is expected to hold a reference on the ib mr until the
686 * the ib mr keys in sync with rxe mr keys.
690 struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr);
695 if (unlikely(mr->state != RXE_MR_STATE_FREE)) {
696 rxe_dbg_mr(mr, "mr->lkey = 0x%x not free\n", mr->lkey);
700 /* user can only register mr with qp in same protection domain */
701 if (unlikely(qp->ibqp.pd != mr->ibmr.pd)) {
702 rxe_dbg_mr(mr, "qp->pd and mr->pd don't match\n");
707 if (unlikely((mr->lkey & ~0xff) != (key & ~0xff))) {
708 rxe_dbg_mr(mr, "key = 0x%x has wrong index mr->lkey = 0x%x\n",
709 key, mr->lkey);
713 mr->access = access;
714 mr->lkey = key;
715 mr->rkey = key;
716 mr->ibmr.iova = wqe->wr.wr.reg.mr->iova;
717 mr->state = RXE_MR_STATE_VALID;
724 struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
726 rxe_put(mr_pd(mr));
727 ib_umem_release(mr->umem);
729 if (mr->ibmr.type != IB_MR_TYPE_DMA)
730 xa_destroy(&mr->page_list);