Lines Matching defs:end

88 unsigned long end, unsigned long tree_end, bool mm_wr_locked);
315 pr_emerg("tree range: %px start %lx end %lx\n", vma,
374 unsigned long addr, unsigned long end)
380 for_each_vma_range(vmi, vma, end) {
382 unsigned long vm_end = min(end, vma->vm_end);
484 * space until vma start or end is updated.
625 * @end: The exclusive end of the vma
629 * Expand @vma to @start and @end. Can expand off the start and end. Will
630 * expand over @next if it's different from @vma and @end == @next->vm_end.
637 unsigned long start, unsigned long end, pgoff_t pgoff,
645 if (next && (vma != next) && (end == next->vm_end)) {
658 next != vma && end > next->vm_start);
660 VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);
663 vma_iter_config(vmi, start, end);
668 vma_adjust_trans_huge(vma, start, end, 0);
670 vma->vm_end = end;
688 * @end: The new end
693 unsigned long start, unsigned long end, pgoff_t pgoff)
697 WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
702 vma_iter_config(vmi, end, vma->vm_end);
711 vma_adjust_trans_huge(vma, start, end, 0);
715 vma->vm_end = end;
772 * We don't check here for the merged mmap wrapping around the end of pagecache
818 * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
822 * In most cases - when called for mmap, brk or mremap - [addr,end) is
858 * or other rmap walkers (if working on addresses beyond the "end"
871 unsigned long end, unsigned long vm_flags,
887 unsigned long vma_end = end;
888 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
899 curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);
902 end == curr->vm_end) /* cases 6 - 8, adjacent VMA */
903 next = vma_lookup(mm, end);
929 VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
930 VM_WARN_ON(addr >= end);
959 if (end == curr->vm_end) { /* case 7 */
963 adj_start = (end - curr->vm_start);
1868 * @end_addr: The exclusive end user address.
2345 unsigned long end, unsigned long tree_end, bool mm_wr_locked)
2353 unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked);
2364 * VMA Iterator will point to the end VMA.
2458 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
2463 * @end: The aligned end address to munmap.
2474 unsigned long end, struct list_head *uf, bool unlock)
2501 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2515 /* Does it split the end? */
2516 if (next->vm_end > end) {
2517 error = __split_vma(vmi, next, end, 0);
2541 error = userfaultfd_unmap_prep(next, start, end, uf);
2548 BUG_ON(next->vm_start > end);
2550 } for_each_vma_range(*vmi, next, end);
2562 for_each_vma_range(*vmi, vma_mas, end) {
2575 error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
2595 unmap_region(mm, &mas_detach, vma, prev, next, start, end, count,
2612 mas_for_each(&mas_detach, next, end)
2642 unsigned long end;
2648 end = start + PAGE_ALIGN(len);
2649 if (end == start)
2658 arch_unmap(mm, start, end);
2661 vma = vma_find(vmi, end);
2668 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
2696 unsigned long end = addr + len;
2697 unsigned long merge_start = addr, merge_end = end;
2710 nr_pages = count_vma_pages_range(mm, addr, end);
2741 if (next && next->vm_start == end && !vma_policy(next) &&
2784 vma_iter_config(&vmi, addr, end);
2786 vma->vm_end = end;
2811 vma_iter_config(&vmi, addr, end);
3062 * @end: The end of the address to unmap
3073 unsigned long start, unsigned long end, struct list_head *uf,
3078 arch_unmap(mm, start, end);
3079 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
3312 * end up with (unless mremap moves it elsewhere before that
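
Several of the matches above hinge on one convention: a mapping request covers the half-open range [addr,end), every "@end" in the kernel-doc lines is exclusive, and do_vmi_munmap() derives end as start + PAGE_ALIGN(len). The user-space sketch below is an illustration only, not code from the searched file; the variable names are mine and the APIs used (mmap, munmap, sysconf) are ordinary libc calls rather than the kernel internals listed above. It shows what the exclusive end means to a caller of munmap().

/*
 * Sketch (assumption: Linux, glibc) of the half-open [start, end)
 * convention: the unmap request below covers exactly one page, and the
 * byte at the exclusive end stays mapped.
 */
#define _DEFAULT_SOURCE		/* for MAP_ANONYMOUS under strict -std modes */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 3 * page;		/* already page aligned */
	char *start = mmap(NULL, len, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (start == MAP_FAILED)
		return 1;

	/*
	 * Unmap one page: the range handed to the kernel is
	 * [start, start + page), so the byte at start + page (the
	 * exclusive end) still belongs to a live mapping afterwards.
	 */
	if (munmap(start, page))
		return 1;
	start[page] = 'x';		/* outside the unmapped range, still valid */
	printf("byte at old exclusive end: %c\n", start[page]);

	munmap(start + page, len - page);
	return 0;
}

The same half-open convention is why the split check in the listing (next->vm_end > end) only fires when an existing VMA extends strictly past the exclusive end of the request.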
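The vma_merge() matches ("cases 6 - 8, adjacent VMA", "case 7") are about when adjacent VMAs with compatible attributes can be folded together. As a rough user-space illustration, again not code from the searched file and with a hypothetical helper name (count_vmas), the sketch below splits one anonymous VMA by changing the protection of its middle page, then restores the protection so the kernel can merge the three pieces back into one.

/*
 * Sketch (assumption: Linux) of a split followed by a merge, observed
 * from user space by counting lines of /proc/self/maps.
 */
#define _DEFAULT_SOURCE		/* for MAP_ANONYMOUS under strict -std modes */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Crude VMA count: one line of /proc/self/maps per VMA. */
static int count_vmas(void)
{
	FILE *f = fopen("/proc/self/maps", "r");
	int c, n = 0;

	if (!f)
		return -1;
	while ((c = fgetc(f)) != EOF)
		if (c == '\n')
			n++;
	fclose(f);
	return n;
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *base = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED)
		return 1;
	printf("before split: %d VMAs\n", count_vmas());

	/* Middle page gets different protection: one VMA becomes three. */
	mprotect(base + page, page, PROT_NONE);
	printf("after split:  %d VMAs\n", count_vmas());

	/* Identical flags on all three adjacent ranges again: merge back. */
	mprotect(base + page, page, PROT_READ | PROT_WRITE);
	printf("after merge:  %d VMAs\n", count_vmas());

	munmap(base, 3 * page);
	return 0;
}

The absolute numbers printed vary by libc and kernel, but the count rising and then falling around the two mprotect() calls is the split and merge visible from user space.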