Lines matching refs: end

71 	/* Add 1 for NUL terminator at the end of the anon_name->name */
137 unsigned long end, unsigned long new_flags,
150 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
168 if (end != vma->vm_end) {
171 error = __split_vma(mm, vma, end, 0);
192 unsigned long end, struct mm_walk *walk)
201 for (index = start; index != end; index += PAGE_SIZE) {
231 unsigned long start, unsigned long end,
235 pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1);
266 unsigned long start, unsigned long end)
275 walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
281 force_shm_swapin_readahead(vma, start, end,
306 vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
313 unsigned long addr, unsigned long end,
332 unsigned long next = pmd_addr_end(addr, end);
403 for (; addr < end; pte++, addr += PAGE_SIZE) {
494 unsigned long addr, unsigned long end)
502 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
527 unsigned long addr, unsigned long end)
535 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
578 unsigned long end, struct mm_walk *walk)
590 next = pmd_addr_end(addr, end);
602 for (; addr != end; pte++, addr += PAGE_SIZE) {
728 range.end = min(vma->vm_end, end_addr);
729 if (range.end <= vma->vm_start)
732 range.start, range.end);
735 tlb_gather_mmu(&tlb, mm, range.start, range.end);
740 walk_page_range(vma->vm_mm, range.start, range.end,
744 tlb_finish_mmu(&tlb, range.start, range.end);
769 unsigned long start, unsigned long end)
771 zap_page_range(vma, start, end - start);
777 unsigned long start, unsigned long end,
786 if (!userfaultfd_remove(vma, start, end)) {
807 if (end > vma->vm_end) {
809 * Don't fail if end > vma->vm_end. If the old
817 * end-vma->vm_end range, but the manager can
820 end = vma->vm_end;
822 VM_WARN_ON(start >= end);
826 return madvise_dontneed_single_vma(vma, start, end);
828 return madvise_free_single_vma(vma, start, end);
839 unsigned long start, unsigned long end)
870 if (userfaultfd_remove(vma, start, end)) {
876 offset, end - start);
889 unsigned long start, unsigned long end,
898 return madvise_remove(vma, prev, start, end);
900 return madvise_willneed(vma, prev, start, end);
902 return madvise_cold(vma, prev, start, end);
904 return madvise_pageout(vma, prev, start, end);
907 return madvise_dontneed_free(vma, prev, start, end, behavior);
944 error = ksm_madvise(vma, start, end, behavior, &new_flags);
958 error = madvise_update_vma(vma, prev, start, end, new_flags,
977 unsigned long start, unsigned long end)
986 for (; start < end; start += size) {
1076 * Walk the vmas in range [start,end), and call the visit function on each one.
1077 * The visit function will get start and end parameters that cover the overlap
1085 unsigned long end, unsigned long arg,
1088 unsigned long end, unsigned long arg))
1096 * If the interval [start,end) covers some unmapped address
1097 * ranges, just ignore them, but return -ENOMEM at the end.
1107 /* Still start < end. */
1111 /* Here start < (end|vma->vm_end). */
1115 if (start >= end)
1119 /* Here vma->vm_start <= start < (end|vma->vm_end) */
1121 if (end < tmp)
1122 tmp = end;
1124 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
1131 if (start >= end)
1145 unsigned long start, unsigned long end,
1154 error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
1169 unsigned long end;
1180 end = start + len;
1181 if (end < start)
1184 if (end == start)
1187 return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name,
1259 unsigned long end;
1278 end = start + len;
1279 if (end < start)
1282 if (end == start)
1299 error = madvise_walk_vmas(mm, start, end, behavior,
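The comment fragments around lines 1076-1131 describe how madvise_walk_vmas() clamps each visit to the overlap of a vma with [start,end) and skips unmapped gaps while still reporting -ENOMEM at the end. The sketch below is a hypothetical, self-contained user-space illustration of that clamping pattern only; it is not the kernel code, and the names fake_vma, walk_ranges, and print_visit are invented for the example (-12 stands in for -ENOMEM).

/*
 * Hypothetical illustration (assumptions noted above): walk a sorted array
 * of "vma"-like ranges and call a visitor on the overlap of each one with
 * [start, end), ignoring unmapped gaps but remembering that they were seen.
 */
#include <stdio.h>

struct fake_vma {
	unsigned long vm_start;
	unsigned long vm_end;	/* exclusive, like the kernel's vm_end */
};

/* Visit one clamped sub-range; return 0 on success. */
typedef int (*visit_fn)(const struct fake_vma *vma,
			unsigned long start, unsigned long end,
			unsigned long arg);

static int walk_ranges(const struct fake_vma *vmas, int nr,
		       unsigned long start, unsigned long end,
		       unsigned long arg, visit_fn visit)
{
	int unmapped_error = 0;

	for (int i = 0; i < nr && start < end; i++) {
		const struct fake_vma *vma = &vmas[i];
		unsigned long tmp;
		int error;

		if (vma->vm_end <= start)
			continue;		/* entirely before the request */
		if (vma->vm_start >= end)
			break;			/* entirely after the request */

		if (start < vma->vm_start) {
			/* Unmapped gap: note it, then clamp start upward. */
			unmapped_error = -12;	/* -ENOMEM in the listing */
			start = vma->vm_start;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end). */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = visit(vma, start, tmp, arg);
		if (error)
			return error;
		start = tmp;
	}

	return unmapped_error;
}

static int print_visit(const struct fake_vma *vma,
		       unsigned long start, unsigned long end,
		       unsigned long arg)
{
	(void)vma;
	(void)arg;
	printf("visit [%#lx, %#lx)\n", start, end);
	return 0;
}

int main(void)
{
	struct fake_vma vmas[] = {
		{ 0x1000, 0x3000 },
		{ 0x5000, 0x8000 },	/* gap between 0x3000 and 0x5000 */
	};

	/* Request covers parts of both ranges plus the unmapped gap. */
	int ret = walk_ranges(vmas, 2, 0x2000, 0x6000, 0, print_visit);

	printf("return %d\n", ret);
	return 0;
}

Running the example visits [0x2000, 0x3000) and [0x5000, 0x6000) and returns -12, mirroring the "ignore unmapped ranges, but return -ENOMEM at the end" behavior described in the comment fragments above.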