Lines Matching refs: uiomr
85 int dmasync, struct usnic_uiom_reg *uiomr)
87 struct list_head *chunk_list = &uiomr->chunk_list;
123 uiomr->owning_mm = mm = current->mm;
183 mmgrab(uiomr->owning_mm);
210 struct usnic_uiom_reg *uiomr,
219 npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
220 vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
235 usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
240 struct usnic_uiom_reg *uiomr)
250 struct usnic_uiom_pd *pd = uiomr->pd;
251 long int va = uiomr->va & PAGE_MASK;
254 flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
255 chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
333 struct usnic_uiom_reg *uiomr;
354 uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
355 if (!uiomr)
358 uiomr->va = va_base;
359 uiomr->offset = offset;
360 uiomr->length = size;
361 uiomr->writable = writable;
362 uiomr->pd = pd;
365 uiomr);
384 err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
403 return uiomr;
410 usnic_uiom_put_pages(&uiomr->chunk_list, 0);
412 mmdrop(uiomr->owning_mm);
414 kfree(uiomr);
418 static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
420 mmdrop(uiomr->owning_mm);
421 kfree(uiomr);
424 static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
426 return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
429 void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr)
431 __usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
433 atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
434 __usnic_uiom_release_tail(uiomr);
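
Read together, the matches trace one registration lifecycle. The pinning helper whose parameter list ends at line 85 stores the caller's mm in uiomr->owning_mm (line 123) and takes a reference on it (mmgrab, line 183). The registration routine at lines 333-414 (usnic_uiom_reg_get() in the driver) allocates the uiomr, fills in va, offset, length, writable and pd, maps the sorted intervals (line 384) and returns the uiomr; its error path puts the pages, drops the mm and frees (lines 410-414). usnic_uiom_reg_release() (lines 429-434) undoes the mapping via __usnic_uiom_reg_release(), subtracts usnic_uiom_num_pages() from owning_mm->pinned_vm (line 433) and finishes with mmdrop() plus kfree() (lines 420-421). The page arithmetic behind that accounting appears twice in the matches (lines 219-220 and 426); the snippet below is a small standalone sketch, not driver code, that reproduces those two formulas. The 4 KiB page size, sample address and buffer size are assumptions chosen for illustration.

/*
 * Standalone illustration of the page accounting seen in the matches:
 *   npages    = PAGE_ALIGN(length + offset) >> PAGE_SHIFT   (lines 219, 426)
 *   vpn_start = (va & PAGE_MASK) >> PAGE_SHIFT              (line 220)
 * A 4 KiB page size and the sample buffer below are assumptions for the demo.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* A made-up, unaligned user buffer: 3 pages of data starting 0x300 into a page. */
	uint64_t va     = 0x7f12345a6300ULL;
	uint64_t size   = 3 * PAGE_SIZE;
	uint64_t offset = va & ~PAGE_MASK;   /* byte offset within the first page */

	/* Same formulas as usnic_uiom_num_pages() and __usnic_uiom_reg_release(). */
	uint64_t npages    = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	uint64_t vpn_start = (va & PAGE_MASK) >> PAGE_SHIFT;

	printf("offset=0x%llx npages=%llu vpn_start=0x%llx\n",
	       (unsigned long long)offset,
	       (unsigned long long)npages,
	       (unsigned long long)vpn_start);
	return 0;
}

For this input the sketch reports npages = 4: a 3-page buffer that starts 0x300 bytes into a page spans four page frames, and that rounded-up count is what line 433 subtracts from owning_mm->pinned_vm when the registration is released.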