Lines Matching defs:umem_odp
288 struct ib_umem_odp *umem_odp =
289 container_of(mni, struct ib_umem_odp, notifier);
303 mutex_lock(&umem_odp->umem_mutex);
306 * If npages is zero then umem_odp->private may not be setup yet. This
307 * does not complete until after the first page is mapped for DMA.
309 if (!umem_odp->npages)
311 mr = umem_odp->private;
313 start = max_t(u64, ib_umem_start(umem_odp), range->start);
314 end = min_t(u64, ib_umem_end(umem_odp), range->end);
322 for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
323 idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
330 if (umem_odp->dma_list[idx] &
331 (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
365 ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
367 if (unlikely(!umem_odp->npages && mr->parent))
370 mutex_unlock(&umem_odp->umem_mutex);
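
The matches at 288-370 come from the invalidation side: by all appearances this is mlx5_ib_invalidate_range() in drivers/infiniband/hw/mlx5/odp.c, the mmu-interval-notifier callback. Below is a minimal sketch of the pattern those lines trace, with the driver's per-page MTT zapping elided; example_invalidate_range() is an illustrative name, not a kernel function:

    static bool example_invalidate_range(struct mmu_interval_notifier *mni,
                                         const struct mmu_notifier_range *range,
                                         unsigned long cur_seq)
    {
            struct ib_umem_odp *umem_odp =
                    container_of(mni, struct ib_umem_odp, notifier);
            unsigned long idx;
            u64 start, end, addr;

            mutex_lock(&umem_odp->umem_mutex);
            mmu_interval_set_seq(mni, cur_seq);

            /* Nothing mapped yet; umem_odp->private may still be unset. */
            if (!umem_odp->npages)
                    goto out;

            /* Clamp the notifier range to the span this umem covers. */
            start = max_t(u64, ib_umem_start(umem_odp), range->start);
            end = min_t(u64, ib_umem_end(umem_odp), range->end);

            /* One step per ODP page; idx indexes the per-page dma_list. */
            for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
                    idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
                    /* ... zap the device translation for entry idx ... */
            }

            /* Drop the DMA mappings for the whole clamped range. */
            ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
    out:
            mutex_unlock(&umem_odp->umem_mutex);
            return true;
    }

The two clamps on lines 313-314 are what keep the loop in bounds: a notifier range can span far more address space than this umem, so start/end are narrowed to [ib_umem_start(), ib_umem_end()) before the per-page walk.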
535 struct ib_umem_odp *umem_odp;
539 umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
540 if (IS_ERR(umem_odp))
541 return ERR_CAST(umem_odp);
551 imr->umem = &umem_odp->umem;
554 imr->umem = &umem_odp->umem;
580 ib_umem_odp_release(umem_odp);
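
The matches at 535-580 trace the implicit-MR constructor (mlx5_ib_alloc_implicit_mr() in the same file): the implicit ODP umem is allocated first, the MR takes a pointer to it, and the unwind path releases the umem. A hedged sketch of that ownership and error ordering; example_mr_alloc() is a hypothetical stand-in for however the driver really obtains its struct mlx5_ib_mr:

    static struct mlx5_ib_mr *example_alloc_implicit_mr(struct mlx5_ib_dev *dev,
                                                        int access_flags)
    {
            struct ib_umem_odp *umem_odp;
            struct mlx5_ib_mr *imr;
            int err;

            /* Root umem of the implicit region; child umems hang off it. */
            umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
            if (IS_ERR(umem_odp))
                    return ERR_CAST(umem_odp);      /* re-type the ERR_PTR */

            imr = example_mr_alloc(dev);            /* hypothetical allocator */
            if (IS_ERR(imr)) {
                    err = PTR_ERR(imr);
                    goto out_umem;
            }

            imr->umem = &umem_odp->umem;            /* MR now owns the umem */
            return imr;

    out_umem:
            ib_umem_odp_release(umem_odp);          /* unwind the allocation */
            return ERR_PTR(err);
    }

The ERR_CAST() on line 541 matters here: ib_umem_odp_alloc_implicit() returns an ERR_PTR-encoded struct ib_umem_odp *, and ERR_CAST() forwards that error pointer re-typed to the caller's expected return type.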
740 struct ib_umem_odp *umem_odp;
755 umem_odp = to_ib_umem_odp(mtt->umem);
756 len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) -
757 user_va;
759 ret = pagefault_real_mr(mtt, umem_odp, user_va, len,
760 bytes_mapped, flags);
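
Lines 740-760 match the implicit-MR fault path (pagefault_implicit_mr(), by the look of it): before each child MTT is faulted, the requested span is clamped so it never runs past the end of that child's umem. A short sketch of just that clamping step; mtt, user_va, bcnt, bytes_mapped and flags come from the enclosing child-lookup loop shown in the listing, and the values in the comment are purely illustrative:

    struct ib_umem_odp *umem_odp = to_ib_umem_odp(mtt->umem);
    u64 len;
    int ret;

    /*
     * Fault at most up to the end of this child umem. E.g. with
     * user_va = 0x1ff000, bcnt = 0x4000 and the umem ending at
     * 0x201000: len = min(0x203000, 0x201000) - 0x1ff000 = 0x2000.
     */
    len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) - user_va;
    ret = pagefault_real_mr(mtt, umem_odp, user_va, len,
                            bytes_mapped, flags);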