Lines Matching refs:mr
53 #include "mr.h"
117 static void rvt_deinit_mregion(struct rvt_mregion *mr)
119 int i = mr->mapsz;
121 mr->mapsz = 0;
123 kfree(mr->map[--i]);
124 percpu_ref_exit(&mr->refcount);
129 struct rvt_mregion *mr = container_of(ref, struct rvt_mregion,
132 complete(&mr->comp);
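
The release callback wired up by percpu_ref_init() does nothing but fire a completion, which is what lets teardown block until the last reference drops. A minimal kernel-style sketch of that handshake, with hypothetical names (my_region, my_release) standing in for the rvt types:

#include <linux/percpu-refcount.h>
#include <linux/completion.h>

struct my_region {                       /* hypothetical stand-in for rvt_mregion */
	struct percpu_ref refcount;
	struct completion comp;
};

/* Runs once the count hits zero after percpu_ref_kill(). */
static void my_release(struct percpu_ref *ref)
{
	struct my_region *r = container_of(ref, struct my_region, refcount);

	complete(&r->comp);              /* wake the teardown waiter */
}

static int my_region_init(struct my_region *r)
{
	init_completion(&r->comp);
	/* cheap per-cpu get/put until percpu_ref_kill() switches to atomic */
	return percpu_ref_init(&r->refcount, my_release, 0, GFP_KERNEL);
}

percpu_ref keeps the per-I/O get/put cheap; the completion only comes into play once percpu_ref_kill() has been called during deregistration.
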
135 static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
141 mr->mapsz = 0;
144 mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL,
146 if (!mr->map[i])
148 mr->mapsz++;
150 init_completion(&mr->comp);
152 if (percpu_ref_init(&mr->refcount, &__rvt_mregion_complete,
156 atomic_set(&mr->lkey_invalid, 0);
157 mr->pd = pd;
158 mr->max_segs = count;
161 rvt_deinit_mregion(mr);
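
rvt_init_mregion() allocates map[] chunk by chunk and only bumps mr->mapsz after each allocation succeeds, so rvt_deinit_mregion() can free exactly the chunks that exist, whether it runs on the error path or at normal teardown. A runnable userspace sketch of that incremental-allocate/unwind pattern (calloc standing in for kzalloc_node, all names hypothetical):

#include <stdlib.h>

#define NCHUNKS 8                        /* hypothetical map size */

struct region {
	int mapsz;                       /* chunks successfully allocated */
	void *map[NCHUNKS];
};

/* Free only the chunks that were allocated, mirroring the
 * mr->mapsz accounting in rvt_deinit_mregion(). */
static void region_deinit(struct region *r)
{
	int i = r->mapsz;

	r->mapsz = 0;
	while (i)
		free(r->map[--i]);
}

static int region_init(struct region *r)
{
	int i;

	r->mapsz = 0;
	for (i = 0; i < NCHUNKS; i++) {
		r->map[i] = calloc(1, 4096);
		if (!r->map[i]) {
			region_deinit(r); /* unwind the partial allocation */
			return -1;
		}
		r->mapsz++;              /* count only what succeeded */
	}
	return 0;
}
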
167 * @mr: memory region that this lkey protects
172 * Increments mr reference count as required.
174 * Sets the lkey field of mr for non-dma regions.
177 static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region)
183 struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
186 rvt_get_mr(mr);
195 mr->lkey_published = 1;
197 rcu_assign_pointer(dev->dma_mr, mr);
198 rvt_get_mr(mr);
222 mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) |
225 if (mr->lkey == 0) {
226 mr->lkey |= 1 << 8;
229 mr->lkey_published = 1;
231 rcu_assign_pointer(rkt->table[r], mr);
237 rvt_put_mr(mr);
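
The allocated lkey packs the table slot into the high bits and a masked generation counter just above the low byte; the value 0 is reserved for the DMA region, so a colliding result is nudged to a nonzero generation. A runnable sketch of that packing, assuming a hypothetical table size:

#include <stdio.h>
#include <stdint.h>

#define LKEY_TABLE_SIZE 16               /* hypothetical dparms.lkey_table_size */

/* Pack table slot r and generation gen into an lkey, as in rvt_alloc_lkey():
 * slot in the high bits, generation above the low byte, 0 reserved for
 * the DMA region. */
static uint32_t make_lkey(uint32_t r, uint32_t *gen)
{
	uint32_t lkey = (r << (32 - LKEY_TABLE_SIZE)) |
		((((1u << (24 - LKEY_TABLE_SIZE)) - 1) & *gen) << 8);

	if (lkey == 0) {                 /* 0 means "DMA key"; skip it */
		lkey |= 1 << 8;
		(*gen)++;
	}
	return lkey;
}

int main(void)
{
	uint32_t gen = 0;

	printf("slot 0: 0x%08x\n", make_lkey(0, &gen)); /* would collide with 0 */
	printf("slot 5: 0x%08x\n", make_lkey(5, &gen));
	return 0;
}

The low 8 bits are left clear here; rvt_fast_reg_mr() later lets the consumer rewrite only those bits (see the guard sketched further down).
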
245 * @mr: mr to free from tables
247 static void rvt_free_lkey(struct rvt_mregion *mr)
250 u32 lkey = mr->lkey;
252 struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
258 if (mr->lkey_published) {
259 mr->lkey_published = 0;
262 rvt_put_mr(mr);
265 if (!mr->lkey_published)
268 mr->lkey_published = 0;
276 percpu_ref_kill(&mr->refcount);
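
Note the teardown order: the region is unpublished (lkey_published cleared, table slot NULLed) before percpu_ref_kill() starts the refcount's death, so a lookup that raced past the unpublish and already took a reference remains safe. A kernel-style sketch of that ordering, with hypothetical names and the locking elided:

#include <linux/percpu-refcount.h>
#include <linux/rcupdate.h>

struct my_region {                       /* hypothetical, as in the sketch above */
	int published;
	struct percpu_ref refcount;
};

/* Unpublish first, then kill: readers that already hold a reference keep
 * the region alive; new lookups simply miss the NULLed slot. */
static void region_unpublish(struct my_region *r,
			     struct my_region __rcu **slot)
{
	r->published = 0;
	RCU_INIT_POINTER(*slot, NULL);
	percpu_ref_kill(&r->refcount);   /* release callback fires at zero */
}
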
281 struct rvt_mr *mr;
287 mr = kzalloc(struct_size(mr, mr.map, m), GFP_KERNEL);
288 if (!mr)
291 rval = rvt_init_mregion(&mr->mr, pd, count, 0);
295 * The caller will initialize mr->ibmr except for
298 rval = rvt_alloc_lkey(&mr->mr, 0);
301 mr->ibmr.lkey = mr->mr.lkey;
302 mr->ibmr.rkey = mr->mr.lkey;
304 return mr;
307 rvt_deinit_mregion(&mr->mr);
309 kfree(mr);
310 mr = ERR_PTR(rval);
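
__rvt_alloc_mr() sizes its allocation with struct_size() because rvt_mregion ends in a flexible array of map-chunk pointers and is itself the last member of rvt_mr. A kernel-style sketch of that overflow-checked flexible-array allocation, with hypothetical types:

#include <linux/slab.h>
#include <linux/overflow.h>

struct chunk;                            /* hypothetical segment array */

struct my_mregion {                      /* hypothetical, mirrors rvt_mregion */
	int mapsz;
	struct chunk *map[];             /* flexible array of chunk pointers */
};

struct my_mr {                           /* hypothetical, mirrors rvt_mr */
	struct my_mregion mr;            /* must be last: owns the map[] tail */
};

static struct my_mr *alloc_my_mr(int m)
{
	struct my_mr *mr;

	/* sizeof(*mr) + m * sizeof(mr->mr.map[0]), with overflow checking,
	 * exactly the shape used by __rvt_alloc_mr() */
	mr = kzalloc(struct_size(mr, mr.map, m), GFP_KERNEL);
	return mr;
}
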
314 static void __rvt_free_mr(struct rvt_mr *mr)
316 rvt_free_lkey(&mr->mr);
317 rvt_deinit_mregion(&mr->mr);
318 kfree(mr);
330 struct rvt_mr *mr;
337 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
338 if (!mr) {
343 rval = rvt_init_mregion(&mr->mr, pd, 0, 0);
349 rval = rvt_alloc_lkey(&mr->mr, 1);
355 mr->mr.access_flags = acc;
356 ret = &mr->ibmr;
361 rvt_deinit_mregion(&mr->mr);
363 kfree(mr);
381 struct rvt_mr *mr;
396 mr = __rvt_alloc_mr(n, pd);
397 if (IS_ERR(mr)) {
398 ret = (struct ib_mr *)mr;
402 mr->mr.user_base = start;
403 mr->mr.iova = virt_addr;
404 mr->mr.length = length;
405 mr->mr.offset = ib_umem_offset(umem);
406 mr->mr.access_flags = mr_access_flags;
407 mr->umem = umem;
409 mr->mr.page_shift = PAGE_SHIFT;
420 mr->mr.map[m]->segs[n].vaddr = vaddr;
421 mr->mr.map[m]->segs[n].length = PAGE_SIZE;
422 trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE);
428 return &mr->ibmr;
431 __rvt_free_mr(mr);
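
The registration loop drops one page into each segment, with the inner index n rolling over into the chunk index m every RVT_SEGSZ entries. A runnable userspace sketch of the rollover (SEGSZ is a hypothetical stand-in for RVT_SEGSZ):

#include <stdio.h>

#define SEGSZ 8                          /* hypothetical stand-in for RVT_SEGSZ */

int main(void)
{
	int m = 0, n = 0, page;

	/* Place 20 pages into map[m]->segs[n] slots, rolling n over into m
	 * exactly as the rvt_reg_user_mr() loop does. */
	for (page = 0; page < 20; page++) {
		printf("page %2d -> map[%d]->segs[%d]\n", page, m, n);
		if (++n == SEGSZ) {
			m++;
			n = 0;
		}
	}
	return 0;
}
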
450 struct rvt_mregion *mr = (struct rvt_mregion *)v;
453 if (mr->pd != qp->ibqp.pd)
455 rvt_qp_mr_clean(qp, mr->lkey);
460 * @mr: the MR that is being deregistered
463 * to the lkey noted in mr.
465 static void rvt_dereg_clean_qps(struct rvt_mregion *mr)
467 struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);
469 rvt_qp_iter(rdi, (u64)mr, rvt_dereg_clean_qp_cb);
474 * @mr: the mregion
483 static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
486 struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);
488 if (mr->lkey) {
489 /* avoid dma mr */
490 rvt_dereg_clean_qps(mr);
491 /* @mr was indexed on rcu protected @lkey_table */
495 timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ);
498 "%s timeout mr %p pd %p lkey %x refcount %ld\n",
499 t, mr, mr->pd, mr->lkey,
500 atomic_long_read(&mr->refcount.data->count));
501 rvt_get_mr(mr);
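
rvt_check_refs() parks on the completion that the release callback fires, bounded by a 5-second timeout; on timeout it logs the stragglers and re-takes a reference so the region is not destroyed while users remain. A kernel-style sketch of the wait, reusing the hypothetical my_region shape from above:

#include <linux/completion.h>
#include <linux/jiffies.h>

struct my_region {                       /* hypothetical, as above */
	struct completion comp;
};

/* Wait for the last reference to drop.  Returns -EBUSY on timeout so
 * the caller can refuse the deregistration. */
static int wait_region_idle(struct my_region *r)
{
	/* a 0 return means the 5 s window expired with references held */
	if (!wait_for_completion_timeout(&r->comp, 5 * HZ))
		return -EBUSY;
	return 0;
}
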
509 * @mr: the mregion
512 bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey)
514 return mr && lkey == mr->lkey;
518 * rvt_ss_has_lkey - is mr referenced by the sge state
533 rval = rvt_mr_has_lkey(ss->sge.mr, lkey);
536 rval = rvt_mr_has_lkey(ss->sg_list[i].mr, lkey);
552 struct rvt_mr *mr = to_imr(ibmr);
555 rvt_free_lkey(&mr->mr);
557 rvt_put_mr(&mr->mr); /* will set completion if last */
558 ret = rvt_check_refs(&mr->mr, __func__);
561 rvt_deinit_mregion(&mr->mr);
562 ib_umem_release(mr->umem);
563 kfree(mr);
579 struct rvt_mr *mr;
584 mr = __rvt_alloc_mr(max_num_sg, pd);
585 if (IS_ERR(mr))
586 return (struct ib_mr *)mr;
588 return &mr->ibmr;
600 struct rvt_mr *mr = to_imr(ibmr);
601 u32 ps = 1 << mr->mr.page_shift;
602 u32 mapped_segs = mr->mr.length >> mr->mr.page_shift;
605 if (unlikely(mapped_segs == mr->mr.max_segs))
610 mr->mr.map[m]->segs[n].vaddr = (void *)addr;
611 mr->mr.map[m]->segs[n].length = ps;
612 mr->mr.length += ps;
613 trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps);
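
rvt_set_page() locates the next free slot purely from how much is already mapped: mapped_segs = length >> page_shift, then m = mapped_segs / RVT_SEGSZ and n = mapped_segs % RVT_SEGSZ. A runnable sketch of that arithmetic with hypothetical values:

#include <stdio.h>

#define SEGSZ 8                          /* hypothetical RVT_SEGSZ */
#define PAGE_SHIFT_ 12                   /* 4 KiB pages */

int main(void)
{
	unsigned long length = 13ul << PAGE_SHIFT_; /* 13 pages mapped so far */
	unsigned int mapped_segs = length >> PAGE_SHIFT_;
	unsigned int m = mapped_segs / SEGSZ;       /* chunk index: 1 */
	unsigned int n = mapped_segs % SEGSZ;       /* slot in chunk: 5 */

	/* the next page lands here, and length then grows by one page */
	printf("next slot: map[%u]->segs[%u]\n", m, n);
	return 0;
}
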
625 * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
632 struct rvt_mr *mr = to_imr(ibmr);
635 mr->mr.length = 0;
636 mr->mr.page_shift = PAGE_SHIFT;
638 mr->mr.user_base = ibmr->iova;
639 mr->mr.iova = ibmr->iova;
640 mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
641 mr->mr.length = (size_t)ibmr->length;
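
After ib_sg_to_pages() has populated the page array via rvt_set_page(), rvt_map_mr_sg() rebases the region on the caller's iova; the stored offset is simply the distance from the first page's address to that iova. A trivial runnable sketch with hypothetical addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t iova = 0x7f0000001234;        /* hypothetical caller iova */
	uint64_t first_page = 0x7f0000001000;  /* map[0]->segs[0].vaddr */

	/* offset into the first page, as rvt_map_mr_sg() computes it */
	printf("offset = 0x%llx\n",
	       (unsigned long long)(iova - first_page)); /* 0x234 */
	return 0;
}
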
658 struct rvt_mr *mr = to_imr(ibmr);
660 if (qp->ibqp.pd != mr->mr.pd)
664 if (!mr->mr.lkey || mr->umem)
667 if ((key & 0xFFFFFF00) != (mr->mr.lkey & 0xFFFFFF00))
672 mr->mr.lkey = key;
673 mr->mr.access_flags = access;
674 mr->mr.iova = ibmr->iova;
675 atomic_set(&mr->mr.lkey_invalid, 0);
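
A fast-register WR may change only the low 8 bits of the key; the upper 24 bits carry the table slot and generation and must match what rvt_alloc_lkey() handed out, which the masked comparison enforces before the key, access flags, and iova are updated. A runnable sketch of the guard:

#include <stdio.h>
#include <stdint.h>

/* Accept a user-proposed key only if it keeps the allocated upper 24 bits,
 * mirroring the check in rvt_fast_reg_mr(). */
static int key_ok(uint32_t allocated_lkey, uint32_t proposed)
{
	return (proposed & 0xFFFFFF00) == (allocated_lkey & 0xFFFFFF00);
}

int main(void)
{
	uint32_t lkey = 0x12345600;

	printf("0x123456ab: %s\n", key_ok(lkey, 0x123456ab) ? "ok" : "reject");
	printf("0x12345700: %s\n", key_ok(lkey, 0x12345700) ? "ok" : "reject");
	return 0;
}
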
692 struct rvt_mregion *mr;
698 mr = rcu_dereference(
700 if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
703 atomic_set(&mr->lkey_invalid, 1);
725 if (last_sge && sge->lkey == last_sge->mr->lkey &&
728 if (unlikely((sge->addr - last_sge->mr->user_base +
729 sge->length > last_sge->mr->length)))
761 struct rvt_mregion *mr;
777 mr = rcu_dereference(dev->dma_mr);
778 if (!mr)
780 rvt_get_mr(mr);
783 isge->mr = mr;
794 mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]);
795 if (!mr)
797 rvt_get_mr(mr);
798 if (!READ_ONCE(mr->lkey_published))
801 if (unlikely(atomic_read(&mr->lkey_invalid) ||
802 mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
805 off = sge->addr - mr->user_base;
806 if (unlikely(sge->addr < mr->user_base ||
807 off + sge->length > mr->length ||
808 (mr->access_flags & acc) != acc))
812 off += mr->offset;
813 if (mr->page_shift) {
821 entries_spanned_by_off = off >> mr->page_shift;
822 off -= (entries_spanned_by_off << mr->page_shift);
828 while (off >= mr->map[m]->segs[n].length) {
829 off -= mr->map[m]->segs[n].length;
837 isge->mr = mr;
838 isge->vaddr = mr->map[m]->segs[n].vaddr + off;
839 isge->length = mr->map[m]->segs[n].length - off;
847 rvt_put_mr(mr);
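
Once the target offset within the region is known, the segment holding it must be found. When page_shift is set, every segment is one page, so the walk collapses to shift arithmetic (entries_spanned_by_off is how many iterations the generic walk would have taken); otherwise the code walks the segments subtracting lengths, as in the else branch above. A runnable sketch of the fast path with hypothetical values:

#include <stdio.h>

#define SEGSZ 8                          /* hypothetical RVT_SEGSZ */
#define PAGE_SHIFT_ 12

int main(void)
{
	unsigned long off = 0x9234;          /* hypothetical offset into region */
	unsigned long entries = off >> PAGE_SHIFT_; /* whole pages spanned: 9 */
	unsigned int m = entries / SEGSZ;    /* chunk: 1 */
	unsigned int n = entries % SEGSZ;    /* slot: 1 */

	off -= entries << PAGE_SHIFT_;       /* residual within that page */
	printf("map[%u]->segs[%u] + 0x%lx\n", m, n, off);
	return 0;
}
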
872 struct rvt_mregion *mr;
887 mr = rcu_dereference(rdi->dma_mr);
888 if (!mr)
890 rvt_get_mr(mr);
893 sge->mr = mr;
902 mr = rcu_dereference(rkt->table[rkey >> rkt->shift]);
903 if (!mr)
905 rvt_get_mr(mr);
906 /* ensure the mr read happens before the tests below */
907 if (!READ_ONCE(mr->lkey_published))
909 if (unlikely(atomic_read(&mr->lkey_invalid) ||
910 mr->lkey != rkey || qp->ibqp.pd != mr->pd))
913 off = vaddr - mr->iova;
914 if (unlikely(vaddr < mr->iova || off + len > mr->length ||
915 (mr->access_flags & acc) == 0))
919 off += mr->offset;
920 if (mr->page_shift) {
928 entries_spanned_by_off = off >> mr->page_shift;
929 off -= (entries_spanned_by_off << mr->page_shift);
935 while (off >= mr->map[m]->segs[n].length) {
936 off -= mr->map[m]->segs[n].length;
944 sge->mr = mr;
945 sge->vaddr = mr->map[m]->segs[n].vaddr + off;
946 sge->length = mr->map[m]->segs[n].length - off;
953 rvt_put_mr(mr);
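
Both rvt_lkey_ok() and rvt_rkey_ok() follow the same lookup-then-recheck shape: rcu_dereference() the table slot, take a reference, then re-verify lkey_published, lkey_invalid, the key itself, and the PD, dropping the reference on any mismatch. A kernel-style sketch of that pattern, with hypothetical names and the PD check and surrounding locking details elided:

#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>
#include <linux/percpu-refcount.h>

struct my_region {                       /* hypothetical */
	u32 lkey;
	int lkey_published;
	atomic_t lkey_invalid;
	struct percpu_ref refcount;
};

/* Look up and pin a region, re-validating after taking the reference. */
static struct my_region *lookup_region(struct my_region __rcu **table,
				       u32 rkey, unsigned int shift)
{
	struct my_region *r;

	rcu_read_lock();
	r = rcu_dereference(table[rkey >> shift]);
	if (!r)
		goto miss;
	percpu_ref_get(&r->refcount);    /* pin before the re-checks */
	if (!READ_ONCE(r->lkey_published) ||
	    atomic_read(&r->lkey_invalid) || r->lkey != rkey) {
		percpu_ref_put(&r->refcount);
		goto miss;
	}
	rcu_read_unlock();
	return r;                        /* caller drops the ref when done */
miss:
	rcu_read_unlock();
	return NULL;
}

Re-checking after the reference is taken is what makes the racy unpublish in rvt_free_lkey() safe: a reader either misses the NULLed slot or pins a region whose published/invalid state it then verifies.
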