Lines matching refs:range in mm/hmm.c (source line numbers shown)
30 struct hmm_range *range;
41 struct hmm_range *range, unsigned long cpu_flags)
43 unsigned long i = (addr - range->start) >> PAGE_SHIFT;
46 range->hmm_pfns[i] = cpu_flags;
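
The three fragments above come from hmm_pfns_fill(): an address inside the range is turned into an index into the range->hmm_pfns output array (one entry per page, relative to range->start) and every covered entry is stamped with the given flag value. A minimal sketch of that fill loop, reconstructed from the visible signature and body lines rather than quoted verbatim:

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	/* Index of the first entry covering addr, relative to range->start. */
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}
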
51 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
52 * @addr: range virtual start address (inclusive)
53 * @end: range virtual end address (exclusive)
59 * or whenever there is no page directory covering the virtual address range.
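
This kernel-doc fragment describes hmm_vma_fault(), the slow path taken when the walk finds no usable pmd/pte (or no page directory at all) and a fault was requested. A hedged sketch of the usual shape of such a helper; the HMM_NEED_WRITE_FAULT bit, the FAULT_FLAG_* choices and the -EBUSY "restart the walk" convention are inferred from the surrounding fragments, not quoted from this listing:

static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	/* Remember where to restart from once the fault has been serviced. */
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	/* Fault each page; any hard error aborts the whole range walk. */
	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags, NULL) &
		    VM_FAULT_ERROR)
			return -EFAULT;

	/* -EBUSY makes the caller restart the walk at ->last. */
	return -EBUSY;
}
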
88 struct hmm_range *range = hmm_vma_walk->range;
92 * consider the default flags requested for the range. The API can
96 * fault a range with specific flags. For the latter one it is a
100 pfn_req_flags &= range->pfn_flags_mask;
101 pfn_req_flags |= range->default_flags;
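
The truncated comment and the two assignments above describe the input-flag model: the per-pfn request flags are first masked by range->pfn_flags_mask and then OR'ed with range->default_flags, so a caller can either request the same thing for the whole range or pass specific flags per page. A small caller-side sketch of the simple "whole range" case (the values are illustrative; notifier setup is shown in the fuller sketch at the end of this listing):

	struct hmm_range range = {
		.start		= start,
		.end		= end,
		.hmm_pfns	= pfns,		/* output, one entry per page */
		/* Same request for every page: fault for read and write ... */
		.default_flags	= HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
		/* ... and ignore whatever per-pfn input flags are in pfns[]. */
		.pfn_flags_mask	= 0,
	};
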
123 struct hmm_range *range = hmm_vma_walk->range;
132 if (!((range->default_flags | range->pfn_flags_mask) &
149 struct hmm_range *range = hmm_vma_walk->range;
154 i = (addr - range->start) >> PAGE_SHIFT;
156 hmm_pfns = &range->hmm_pfns[i];
162 return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
166 return hmm_pfns_fill(addr, end, range, 0);
174 static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
190 struct hmm_range *range = hmm_vma_walk->range;
196 cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
213 static inline bool hmm_is_device_private_entry(struct hmm_range *range,
218 range->dev_private_owner;
221 static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
234 struct hmm_range *range = hmm_vma_walk->range;
256 if (hmm_is_device_private_entry(range, entry)) {
287 cpu_flags = pte_to_hmm_pfn_flags(range, pte);
325 struct hmm_range *range = hmm_vma_walk->range;
327 &range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
344 return hmm_pfns_fill(start, end, range, 0);
350 return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
380 return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
399 static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
413 struct hmm_range *range = hmm_vma_walk->range;
442 i = (addr - range->start) >> PAGE_SHIFT;
444 hmm_pfns = &range->hmm_pfns[i];
446 cpu_flags = pud_to_hmm_pfn_flags(range, pud);
478 struct hmm_range *range = hmm_vma_walk->range;
489 i = (start - range->start) >> PAGE_SHIFT;
490 pfn_req_flags = range->hmm_pfns[i];
491 cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
502 range->hmm_pfns[i] = pfn | cpu_flags;
515 struct hmm_range *range = hmm_vma_walk->range;
530 * If a fault is requested for an unsupported range then it is a hard
534 range->hmm_pfns +
535 ((start - range->start) >> PAGE_SHIFT),
539 hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
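
These lines carry the "unsupported range" policy: when a vma cannot be handled at all, a requested fault becomes a hard failure, otherwise the covered entries are reported as HMM_PFN_ERROR and the vma is skipped. A sketch of that decision, assuming the argument lines above belong to the range's need-fault helper:

	/* Hard failure only if the caller actually asked to fault here. */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	/* Otherwise report the pages as errors and skip this vma. */
	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	return 1;	/* non-zero from test_walk: skip the rest of this vma */
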
554 * hmm_range_fault - try to fault some address in a virtual address range
555 * @range: argument structure
562 * -EPERM: Invalid permission (e.g., asking for write and range is read
564 * -EBUSY: The range has been invalidated and the caller needs to wait for
572 int hmm_range_fault(struct hmm_range *range)
575 .range = range,
576 .last = range->start,
578 struct mm_struct *mm = range->notifier->mm;
584 /* If range is no longer valid force retry. */
585 if (mmu_interval_check_retry(range->notifier,
586 range->notifier_seq))
588 ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
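
Finally, hmm_range_fault() itself: the documented calling convention is a retry loop driven by the mmu interval notifier. The caller snapshots the notifier sequence, runs the fault under mmap_read_lock(), retries on -EBUSY, and only trusts the pfn array if the snapshot is still valid when the device page table is updated. A hedged sketch of that loop; the function name, driver_lock and flag values are placeholders, while the struct hmm_range fields and the mmu_interval_* / mmap_* helpers are the real kernel APIs:

/* Hypothetical driver helper; caller is assumed to hold a reference on mm. */
static int driver_populate_range(struct mmu_interval_notifier *interval_sub,
				 struct mutex *driver_lock,
				 unsigned long start, unsigned long end,
				 unsigned long *pfns)
{
	struct hmm_range range = {
		.notifier	= interval_sub,
		.start		= start,
		.end		= end,
		.hmm_pfns	= pfns,
		.default_flags	= HMM_PFN_REQ_FAULT,
		.pfn_flags_mask	= 0,
	};
	struct mm_struct *mm = interval_sub->mm;
	int ret;

again:
	range.notifier_seq = mmu_interval_read_begin(interval_sub);
	mmap_read_lock(mm);
	ret = hmm_range_fault(&range);
	mmap_read_unlock(mm);
	if (ret) {
		if (ret == -EBUSY)	/* invalidated mid-walk: just redo it */
			goto again;
		return ret;
	}

	mutex_lock(driver_lock);
	if (mmu_interval_read_retry(interval_sub, range.notifier_seq)) {
		mutex_unlock(driver_lock);
		goto again;		/* invalidated after the walk: redo it */
	}
	/* pfns[] is stable here; mirror it into the device page table. */
	mutex_unlock(driver_lock);
	return 0;
}
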