Lines Matching refs:range
570 * the regions for this range.
585 /* In this loop, we essentially handle an entry for the range
590 /* Skip irrelevant regions that start before our range. */
600 /* When we find a region that starts beyond our range, we've
620 /* Handle the case where our range extends beyond
689 * Add the huge page range represented by [f, t) to the reserve
690 * map. Regions will be taken from the cache to fill in this range.
692 * call to region_chg with the same range, but in some cases the cache will not
731 /* region_add operation of range 1 should never need to
754 * huge pages in the specified range [f, t) are NOT currently
757 * map to add the specified range [f, t). region_chg does
768 * reservation map for the range [f, t). This number is greater or equal to
779 /* Count how many hugepages in this range are NOT represented. */
804 * NOTE: The range arguments [f, t) are not needed or used in this
818 * Delete the specified range [f, t) from the reserve map. If the
842 * Skip regions before the range to be deleted. file_region
846 * at the beginning of the range to be deleted.
956 * that intersect with the range [f, t).
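The comment fragments at 689-804 describe a two-phase protocol on the reserve map: region_chg() counts how many pages in [f, t) are not yet represented and pre-caches enough file_region entries, and a later region_add() over the same [f, t) commits them, which is why an add of range 1 "should never fail" (the hits at 731 and 2739-2762). Below is a minimal caller sketch of that flow, with the signatures deliberately simplified; the real helpers in recent kernels take additional cache-count and cgroup arguments.

	long chg;

	/* Phase 1: count pages in [f, t) missing from the map, pre-cache entries. */
	chg = region_chg(resv, f, t);		/* simplified signature */
	if (chg < 0)
		return chg;

	if (hugetlb_acct_memory(h, chg) < 0) {
		/* Backing allocation failed between the phases: drop cached entries. */
		region_abort(resv, f, t);	/* simplified signature */
		return -ENOSPC;
	}

	/* Phase 2: commit the same [f, t); the cached entries guarantee success. */
	region_add(resv, f, t);			/* simplified signature */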
2372 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2375 * part of it lies within the given range.
2739 /* region_add calls of range 1 should never fail. */
2749 /* region_add calls of range 1 should never fail. */
2762 /* region_add calls of range 1 should never fail. */
2778 * As subsequent faults on such a range will not use reserves.
3507 * page range allocation.
5046 struct mmu_notifier_range range;
5051 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src,
5054 mmu_notifier_invalidate_range_start(&range);
5217 mmu_notifier_invalidate_range_end(&range);
5264 struct mmu_notifier_range range;
5267 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
5269 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5272 * range.
5274 flush_cache_range(vma, range.start, range.end);
5276 mmu_notifier_invalidate_range_start(&range);
5306 flush_hugetlb_tlb_range(vma, range.start, range.end);
5309 mmu_notifier_invalidate_range_end(&range);
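The hits at 5264-5309 (and the similar clusters at 5046-5217, 5490-5501, 5579-5737, 6577-6704 and 7311-7347) all follow the same invalidation bracket around a hugetlb page-table walk. A condensed sketch of that bracket, with the argument order taken from the lines above; the exact mmu_notifier_range_init() prototype has changed across kernel versions:

	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, start, end);
	/* Widen to PUD bounds in case shared pmds are unshared as a side effect. */
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);

	flush_cache_range(vma, range.start, range.end);
	mmu_notifier_invalidate_range_start(&range);

	/* ... walk and modify the huge page tables covering [range.start, range.end) ... */

	flush_hugetlb_tlb_range(vma, range.start, range.end);
	mmu_notifier_invalidate_range_end(&range);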
5391 * page is being unmapped, not a range. Ensure the page we
5473 * unmapped range will be asynchronously deleted. If the page
5490 struct mmu_notifier_range range;
5493 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
5495 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5496 mmu_notifier_invalidate_range_start(&range);
5501 mmu_notifier_invalidate_range_end(&range);
5579 struct mmu_notifier_range range;
5633 * of the full address range.
5712 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
5714 mmu_notifier_invalidate_range_start(&range);
5737 mmu_notifier_invalidate_range_end(&range);
5891 /* Check for page in userfault range */
5979 /* Check for page in userfault range. */
6577 struct mmu_notifier_range range;
6584 * start/end. Set range.start/range.end to cover the maximum possible
6585 * range if PMD sharing is possible.
6587 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
6589 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
6592 flush_cache_range(vma, range.start, range.end);
6594 mmu_notifier_invalidate_range_start(&range);
6689 * did unshare a page of pmds, flush the range corresponding to the pud.
6692 flush_hugetlb_tlb_range(vma, range.start, range.end);
6704 mmu_notifier_invalidate_range_end(&range);
6724 VM_WARN(1, "%s called with a negative range\n", __func__);
6818 * pages in this range were added to the reserve
6962 * Determine if start,end range within vma could be mapped by shared pmd.
6963 * If yes, adjust start and end to cover range associated with possible
6973 * vma needs to span at least one aligned PUD size, and the range
6980 /* Extend the range to be PUD aligned for a worst case scenario */
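The comments at 6962-6980 describe how a start/end pair is widened when PMD sharing is possible. The sketch below reconstructs that logic from the comments alone (pud_align_range is a hypothetical stand-in name, and the guard conditions are an assumption about what "could be mapped by shared pmd" checks): the vma must be shareable and must span at least one PUD-aligned region that overlaps the range; if so, both ends are rounded out to PUD_SIZE boundaries.

	/* Sketch reconstructed from the comments above; not copied from a release. */
	static void pud_align_range(struct vm_area_struct *vma,
				    unsigned long *start, unsigned long *end)
	{
		unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE);
		unsigned long v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);

		/* vma must span at least one aligned PUD size and overlap the range. */
		if (!(vma->vm_flags & VM_MAYSHARE) || v_end <= v_start ||
		    *end <= v_start || *start >= v_end)
			return;

		/* Extend the range to be PUD aligned for a worst case scenario. */
		if (*start > v_start)
			*start = ALIGN_DOWN(*start, PUD_SIZE);
		if (*end < v_end)
			*end = ALIGN(*end, PUD_SIZE);
	}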
7311 struct mmu_notifier_range range;
7327 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
7329 mmu_notifier_invalidate_range_start(&range);
7347 mmu_notifier_invalidate_range_end(&range);
7352 * within the specific vma for a hugetlbfs memory range.