Lines Matching refs:range (mm/hmm.c)

32 	struct hmm_range	*range;
43 struct hmm_range *range, unsigned long cpu_flags)
45 unsigned long i = (addr - range->start) >> PAGE_SHIFT;
48 range->hmm_pfns[i] = cpu_flags;
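
Lines 45 and 48 show the indexing convention used throughout the walk: range->hmm_pfns[] holds one entry per page, indexed by the page offset from range->start. A minimal caller-side sketch of sizing and indexing that array ("range" and "pfns" are placeholder names, not taken from the listing):

	/*
	 * Sketch: one hmm_pfns[] slot per page covered by the range,
	 * indexed by (addr - range.start) >> PAGE_SHIFT as above.
	 */
	unsigned long npages = (range.end - range.start) >> PAGE_SHIFT;
	unsigned long *pfns = kvcalloc(npages, sizeof(*pfns), GFP_KERNEL);

	range.hmm_pfns = pfns;
	/* after the walk, the entry for address addr is
	 * pfns[(addr - range.start) >> PAGE_SHIFT]
	 */
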
53 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
54 * @addr: range virtual start address (inclusive)
55 * @end: range virtual end address (exclusive)
61 * or whenever there is no page directory covering the virtual address range.
90 struct hmm_range *range = hmm_vma_walk->range;
94 * consider the default flags requested for the range. The API can
98 * fault a range with specific flags. For the latter one it is a
102 pfn_req_flags &= range->pfn_flags_mask;
103 pfn_req_flags |= range->default_flags;
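
The logic at lines 102-103 combines each per-page request with the range-wide settings: the caller's per-entry flags are masked by pfn_flags_mask, then default_flags is OR'ed in. A hedged caller-side sketch of the two request styles this supports (HMM_PFN_REQ_FAULT / HMM_PFN_REQ_WRITE are the request flags from include/linux/hmm.h; "range" and the index "i" are placeholders):

	/* (a) fault every page in the range; read permission is enough */
	range.default_flags = HMM_PFN_REQ_FAULT;
	range.pfn_flags_mask = 0;

	/*
	 * (b) fault every page, and additionally require write access for
	 * selected pages by marking their hmm_pfns[] entries
	 */
	range.default_flags = HMM_PFN_REQ_FAULT;
	range.pfn_flags_mask = HMM_PFN_REQ_WRITE;
	range.hmm_pfns[i] = HMM_PFN_REQ_WRITE;	/* i: a page needing write */
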
125 struct hmm_range *range = hmm_vma_walk->range;
134 if (!((range->default_flags | range->pfn_flags_mask) &
151 struct hmm_range *range = hmm_vma_walk->range;
156 i = (addr - range->start) >> PAGE_SHIFT;
158 hmm_pfns = &range->hmm_pfns[i];
164 return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
168 return hmm_pfns_fill(addr, end, range, 0);
176 static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
192 struct hmm_range *range = hmm_vma_walk->range;
198 cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
215 static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
228 struct hmm_range *range = hmm_vma_walk->range;
252 range->dev_private_owner) {
288 cpu_flags = pte_to_hmm_pfn_flags(range, pte);
326 struct hmm_range *range = hmm_vma_walk->range;
328 &range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
345 return hmm_pfns_fill(start, end, range, 0);
351 return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
380 return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
401 static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
415 struct hmm_range *range = hmm_vma_walk->range;
443 i = (addr - range->start) >> PAGE_SHIFT;
445 hmm_pfns = &range->hmm_pfns[i];
447 cpu_flags = pud_to_hmm_pfn_flags(range, pud);
479 struct hmm_range *range = hmm_vma_walk->range;
490 i = (start - range->start) >> PAGE_SHIFT;
491 pfn_req_flags = range->hmm_pfns[i];
492 cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
516 range->hmm_pfns[i] = pfn | cpu_flags;
529 struct hmm_range *range = hmm_vma_walk->range;
544 * If a fault is requested for an unsupported range then it is a hard
548 range->hmm_pfns +
549 ((start - range->start) >> PAGE_SHIFT),
553 hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
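
Per the comment at line 544, a requested fault on an unsupported VMA is a hard failure; otherwise (line 553) the corresponding output entries are filled with HMM_PFN_ERROR and the walk continues. A hedged sketch of the caller-side check after a successful hmm_range_fault(), using the placeholder names from the earlier sketches:

	for (i = 0; i < npages; i++) {
		if (range.hmm_pfns[i] & HMM_PFN_ERROR)
			continue;	/* unsupported or failed page, skip it */
		if (!(range.hmm_pfns[i] & HMM_PFN_VALID))
			continue;	/* nothing was requested/faulted here */
		/* e.g. map hmm_pfn_to_page(range.hmm_pfns[i]) on the device */
	}
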
569 * hmm_range_fault - try to fault some address in a virtual address range
570 * @range: argument structure
577 * -EPERM: Invalid permission (e.g., asking for write and range is read
579 * -EBUSY: The range has been invalidated and the caller needs to wait for
587 int hmm_range_fault(struct hmm_range *range)
590 .range = range,
591 .last = range->start,
593 struct mm_struct *mm = range->notifier->mm;
599 /* If range is no longer valid force retry. */
600 if (mmu_interval_check_retry(range->notifier,
601 range->notifier_seq))
603 ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
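
Lines 587-603 show the retry machinery: hmm_range_fault() checks the interval notifier sequence and returns -EBUSY (line 579) when the range has been invalidated. A condensed caller sketch of the usual retry loop (as described in the HMM documentation), with "sub" standing in for an already-registered struct mmu_interval_notifier and driver_lock as a placeholder for the driver's own page-table lock:

	int ret;
	/* mm = range.notifier->mm, cf. line 593 */

again:
	range.notifier_seq = mmu_interval_read_begin(&sub);
	mmap_read_lock(mm);
	ret = hmm_range_fault(&range);
	mmap_read_unlock(mm);
	if (ret) {
		if (ret == -EBUSY)
			goto again;	/* invalidated, cf. lines 599-601 */
		return ret;
	}

	mutex_lock(&driver_lock);
	if (mmu_interval_read_retry(&sub, range.notifier_seq)) {
		mutex_unlock(&driver_lock);
		goto again;
	}
	/* safe to program the device page table from range.hmm_pfns[] here */
	mutex_unlock(&driver_lock);
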