Lines matching refs:range (the leading number on each matched line is its line number in the source file)

695  * already present in the new task to be cleared in the whole range
1149 struct mmu_notifier_range range;
1185 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1187 mmu_notifier_invalidate_range_start(&range);
1215 mmu_notifier_invalidate_range_end(&range);
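
The 1149-1215 matches show the canonical notifier bracket: fill in a struct mmu_notifier_range, fire _start before any page-table entries change, fire _end once they have. The MMU_NOTIFY_PROTECTION_PAGE event advertises a read/write permission change on pages in the range (fork's COW write-protection is the textbook user). A minimal sketch of the pattern, with a hypothetical helper name and body; only the notifier calls are taken from the matches:

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/* Hypothetical helper: write-protect the PTEs of [start, end) in @vma. */
static void wp_pages_sketch(struct vm_area_struct *vma,
                            unsigned long start, unsigned long end)
{
        struct mmu_notifier_range range;

        /* Tell secondary MMUs (KVM, RDMA, ...) the range is changing. */
        mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0,
                                vma, vma->vm_mm, start, end);
        mmu_notifier_invalidate_range_start(&range);

        /* ... a page-table walk clearing the write bit would go here ... */

        mmu_notifier_invalidate_range_end(&range);
}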
1530 * unmap_vmas - unmap a range of memory covered by a list of vma's
1543 * range after unmap_vmas() returns. So the only responsibility here is to
1551 struct mmu_notifier_range range;
1553 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
1555 mmu_notifier_invalidate_range_start(&range);
1558 mmu_notifier_invalidate_range_end(&range);
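
Lines 1530-1558 are unmap_vmas(): a single notifier bracket spans the whole region while every vma in the list is torn down, and per the comment at 1543 the caller may not rely on the range afterwards. A hedged reconstruction of how the matched lines fit together (the parameter names, the loop, and unmap_one_vma() are assumptions, not the file's code):

#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <asm/tlb.h>

/* Hypothetical per-vma worker. */
void unmap_one_vma(struct mmu_gather *tlb, struct vm_area_struct *vma,
                   unsigned long start, unsigned long end);

void unmap_vmas_sketch(struct mmu_gather *tlb, struct vm_area_struct *vma,
                       unsigned long start_addr, unsigned long end_addr)
{
        struct mmu_notifier_range range;

        mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
                                start_addr, end_addr);
        mmu_notifier_invalidate_range_start(&range);
        /* Assumed: walk the vma list, unmapping each piece of the range. */
        for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
                unmap_one_vma(tlb, vma, start_addr, end_addr);
        mmu_notifier_invalidate_range_end(&range);
}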
1562 * zap_page_range - remove user pages in a given range
1572 struct mmu_notifier_range range;
1576 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1578 tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
1580 mmu_notifier_invalidate_range_start(&range);
1581 for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
1582 unmap_single_vma(&tlb, vma, start, range.end, NULL);
1583 mmu_notifier_invalidate_range_end(&range);
1584 tlb_finish_mmu(&tlb, start, range.end);
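
The zap_page_range() matches (1562-1584) give nearly the whole body and show how the two brackets nest: the mmu_gather is set up before the notifier _start and finished after the notifier _end, so the CPU TLB flush and the secondary-MMU invalidation both cover the zap. Reassembled below; the end argument of the init call is truncated in the listing, so start + size is an assumption, and the real function's housekeeping calls are omitted:

#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <asm/tlb.h>

void zap_page_range_sketch(struct vm_area_struct *vma, unsigned long start,
                           unsigned long size)
{
        struct mmu_notifier_range range;
        struct mmu_gather tlb;

        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
                                start, start + size);   /* assumed end */
        tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
        mmu_notifier_invalidate_range_start(&range);
        for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
                unmap_single_vma(&tlb, vma, start, range.end, NULL);
        mmu_notifier_invalidate_range_end(&range);
        tlb_finish_mmu(&tlb, start, range.end);
}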
1588 * zap_page_range_single - remove user pages in a given range
1594 * The range must fit into one VMA.
1599 struct mmu_notifier_range range;
1603 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1605 tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
1607 mmu_notifier_invalidate_range_start(&range);
1608 unmap_single_vma(&tlb, vma, address, range.end, details);
1609 mmu_notifier_invalidate_range_end(&range);
1610 tlb_finish_mmu(&tlb, address, range.end);
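
zap_page_range_single() (1588-1610) is the one-VMA variant of the same shape: no loop, and a struct zap_details pointer threaded through to unmap_single_vma(). Every matched line slots in directly; only the address + size end is assumed:

static void zap_page_range_single_sketch(struct vm_area_struct *vma,
                unsigned long address, unsigned long size,
                struct zap_details *details)
{
        struct mmu_notifier_range range;
        struct mmu_gather tlb;

        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
                                address, address + size); /* assumed end */
        tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
        mmu_notifier_invalidate_range_start(&range);
        unmap_single_vma(&tlb, vma, address, range.end, details);
        mmu_notifier_invalidate_range_end(&range);
        tlb_finish_mmu(&tlb, address, range.end);
}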
1621 * The entire address range must be fully contained within the vma.
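
The constraint at 1621 reads like the zap_vma_ptes() kerneldoc, the exported wrapper over the single-VMA zap above. A hypothetical driver caller (my_dev_teardown() is made up):

#include <linux/mm.h>

/* Drop every pte previously installed in @vma; the range handed to
 * zap_vma_ptes() must lie entirely inside the vma, per line 1621. */
static void my_dev_teardown(struct vm_area_struct *vma)
{
        zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}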
1879 * __vm_map_pages - maps range of kernel pages into user vma
1885 * This allows drivers to map range of kernel pages into a user vma.
1915 * vm_map_pages - maps range of kernel pages starts with non zero offset
1940 * vm_map_pages_zero - map range of kernel pages starts with zero offset
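
Lines 1879-1940 cover the vm_map_pages() family: __vm_map_pages() does the work, vm_map_pages() treats a non-zero vma->vm_pgoff as an index into the caller's page array, and vm_map_pages_zero() always starts at index 0. The intended use is a driver mmap hook; the my_dev_* names below are assumptions:

#include <linux/mm.h>

#define MY_DEV_NPAGES 16                                /* made up */
static struct page *my_dev_pages[MY_DEV_NPAGES];        /* filled at probe */

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* Maps my_dev_pages[vma->vm_pgoff..] across the vma; swap in
         * vm_map_pages_zero() to always start at index 0 instead. */
        return vm_map_pages(vma, my_dev_pages, MY_DEV_NPAGES);
}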
2201 * maps a range of physical memory into the requested pages. the old
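
2201 opens the remap_pfn_range() comment block. The classic pattern maps a physical window straight into a user vma from the mmap hook; the base address here is made up:

#include <linux/mm.h>

#define MY_DEV_PHYS_BASE 0xfd000000UL   /* made-up MMIO window */

static int my_dev_mmap_mmio(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return remap_pfn_range(vma, vma->vm_start,
                               MY_DEV_PHYS_BASE >> PAGE_SHIFT,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}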
2379 * driver just needs to give us the physical memory range to be mapped,
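
2379 matches the vm_iomap_memory() kerneldoc, the simplified wrapper built on remap_pfn_range(): the driver supplies only the physical range, and the helper derives the pfn, size, and offset checks from the vma. The hook above then shrinks to this (same made-up base, made-up window length):

#include <linux/sizes.h>

static int my_dev_mmap_mmio_simple(struct file *file,
                                   struct vm_area_struct *vma)
{
        return vm_iomap_memory(vma, MY_DEV_PHYS_BASE, SZ_1M);
}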
2877 struct mmu_notifier_range range;
2913 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
2916 mmu_notifier_invalidate_range_start(&range);
2999 mmu_notifier_invalidate_range_only_end(&range);
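
The 2877-2999 block closes with mmu_notifier_invalidate_range_only_end() instead of the plain _end(). The _only_ variant skips the final ->invalidate_range() callback, which is the right choice when the page-table update already issued it, as ptep_clear_flush_notify() does on the copy-on-write fault path. A sketch of that shape (the function name and body are assumptions; only the notifier calls mirror the matches):

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

static void replace_pte_sketch(struct vm_area_struct *vma,
                               unsigned long addr, pte_t *ptep, pte_t newpte)
{
        struct mmu_notifier_range range;

        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
                                addr & PAGE_MASK,
                                (addr & PAGE_MASK) + PAGE_SIZE);
        mmu_notifier_invalidate_range_start(&range);

        /* This already fires ->invalidate_range() for the page ... */
        ptep_clear_flush_notify(vma, addr, ptep);
        set_pte_at_notify(vma->vm_mm, addr, ptep, newpte);

        /* ... so close the bracket without repeating the callback. */
        mmu_notifier_invalidate_range_only_end(&range);
}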
3261 * Typically, for efficiency, the range of nearby pages has already been
3317 * address_space corresponding to the specified byte range in the underlying
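
3317 is from the unmap_mapping_range() kerneldoc: it zaps every user mapping of a byte range of a file's address_space, typically before truncation or hole punching. A minimal caller sketch (the helper name is made up):

#include <linux/fs.h>
#include <linux/mm.h>

static void my_punch_hole_prep(struct inode *inode, loff_t pos, loff_t len)
{
        /* even_cows == 1 also drops private COW copies of the pages. */
        unmap_mapping_range(inode->i_mapping, pos, len, 1);
}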
4874 struct mmu_notifier_range *range, pte_t **ptepp,
4902 if (range) {
4903 mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0,
4906 mmu_notifier_invalidate_range_start(range);
4914 if (range)
4915 mmu_notifier_invalidate_range_end(range);
4921 if (range) {
4922 mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
4925 mmu_notifier_invalidate_range_start(range);
4934 if (range)
4935 mmu_notifier_invalidate_range_end(range);
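
The 4874-4935 matches show a third shape: the notifier range arrives as a caller-supplied pointer and every notifier call is guarded by if (range), so callers that merely peek at a pte can pass NULL and skip notification entirely. Note that _end() shows up on the bail-out path; on success the walk returns with the bracket still open and the caller closes it after updating the pte. A hedged sketch (the function name and the elided walk are assumptions):

static int follow_pte_sketch(struct mm_struct *mm, unsigned long address,
                             struct mmu_notifier_range *range,
                             pte_t **ptepp, spinlock_t **ptlp)
{
        if (range) {
                mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
                                        address & PAGE_MASK,
                                        (address & PAGE_MASK) + PAGE_SIZE);
                mmu_notifier_invalidate_range_start(range);
        }

        /* ... on a successful walk, fill *ptepp/*ptlp and return 0 with
         * the bracket left open for the caller to _end() ... */

        /* Bail-out path: close the bracket before reporting failure. */
        if (range)
                mmu_notifier_invalidate_range_end(range);
        return -EINVAL;
}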