Lines matching refs:start (mm/mlock.c)
369 * The function expects that the struct page corresponding to @start address is
377 * @start + PAGE_SIZE when no page could be added by the pte walk.
381 unsigned long start, unsigned long end)
391 pte = get_locked_pte(vma->vm_mm, start, &ptl);
393 end = pgd_addr_end(start, end);
394 end = p4d_addr_end(start, end);
395 end = pud_addr_end(start, end);
396 end = pmd_addr_end(start, end);
399 start += PAGE_SIZE;
400 while (start < end) {
404 page = vm_normal_page(vma, start, *pte);
424 start += PAGE_SIZE;
429 return start;
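
The hits at lines 369-429 are from __munlock_pagevec_fill() in the kernel's mm/mlock.c, which fills a pagevec for munlock using a pte walk: it takes the pte lock for @start (line 391), clamps @end so the loop never leaves the page table page it has locked (lines 393-396), and expects the page for @start itself to already be pinned in the pagevec, which is why scanning begins at start + PAGE_SIZE (line 399). Below is a minimal userspace sketch of the clamping idiom at lines 393-396, assuming a 2 MiB pte-page span; BOUNDARY and addr_end() are illustrative names, not kernel identifiers.

#include <stdio.h>

#define BOUNDARY (1UL << 21)   /* assume a 2 MiB pte-page span */

/* Same shape as the kernel's pmd_addr_end(): the next BOUNDARY-aligned
 * address above start, unless end comes first. The "- 1" keeps the
 * comparison correct when end wraps to 0. */
static unsigned long addr_end(unsigned long start, unsigned long end)
{
	unsigned long next = (start + BOUNDARY) & ~(BOUNDARY - 1);

	return (next - 1 < end - 1) ? next : end;
}

int main(void)
{
	unsigned long start = 0x1ff000, end = 0x400000;

	/* The walk must stop at 0x200000, the end of the current pte page. */
	printf("clamped end: %#lx\n", addr_end(start, end));
	return 0;
}
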
435 * @start - start address in @vma of the range
451 unsigned long start, unsigned long end)
455 while (start < end) {
470 page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
498 * pte walk. This will also update start to
502 start = __munlock_pagevec_fill(&pvec, vma,
503 zone, start, end);
509 start += page_increm * PAGE_SIZE;
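
Lines 435-509 are from munlock_vma_pages_range(): it walks the range page by page, looking each page up with follow_page(FOLL_GET | FOLL_DUMP) (line 470), batching neighbours through __munlock_pagevec_fill() (lines 502-503), and stepping by page_increm pages so a compound page is skipped in one jump (line 509). The sketch below shows the userspace operation this path serves; it is a standalone demo, not kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	void *buf = NULL;

	if (posix_memalign(&buf, page, 8 * page))
		return 1;
	if (mlock(buf, 8 * page))	/* sets VM_LOCKED, populates pages */
		perror("mlock");
	if (munlock(buf, 8 * page))	/* kernel walks the range as above */
		perror("munlock");
	free(buf);
	return 0;
}
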
525 unsigned long start, unsigned long end, vm_flags_t newflags)
540 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
541 *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
549 if (start != vma->vm_start) {
550 ret = split_vma(mm, vma, start, 1);
565 nr_pages = (end - start) >> PAGE_SHIFT;
581 munlock_vma_pages_range(vma, start, end);
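
Lines 525-581 are from mlock_fixup(), which adjusts VM_LOCKED on a vma: it computes the file page offset of @start (line 540) so vma_merge() can try to coalesce with a neighbour (line 541), splits the vma at @start when merging fails (lines 549-550), and accounts (end - start) >> PAGE_SHIFT pages against the mm's locked count (line 565). A worked example of the pgoff arithmetic at line 540, with invented values and PAGE_SHIFT assumed to be 12:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long vm_start = 0x7f0000000000UL; /* where the vma starts */
	unsigned long vm_pgoff = 16;               /* vma maps from file page 16 */
	unsigned long start    = vm_start + (3UL << PAGE_SHIFT);

	/* File offset, in pages, of the merge/split point inside the vma. */
	unsigned long pgoff = vm_pgoff + ((start - vm_start) >> PAGE_SHIFT);

	printf("merge/split uses file page %lu\n", pgoff); /* 16 + 3 = 19 */
	return 0;
}
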
588 static int apply_vma_lock_flags(unsigned long start, size_t len,
595 VM_BUG_ON(offset_in_page(start));
597 end = start + len;
598 if (end < start)
600 if (end == start)
602 vma = find_vma(current->mm, start);
603 if (!vma || vma->vm_start > start)
607 if (start > vma->vm_start)
610 for (nstart = start ; ; ) {
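
Lines 588-610 are from apply_vma_lock_flags(), which validates the range before walking vmas: @start must be page aligned (line 595), start + len must not wrap (line 598), and an empty range succeeds trivially (line 600). A minimal sketch of those checks as a standalone helper; check_range() is an illustrative name, not a kernel function.

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static int check_range(unsigned long start, size_t len)
{
	unsigned long end = start + len;

	assert((start & (PAGE_SIZE - 1)) == 0); /* caller aligned, VM_BUG_ON-style */
	if (end < start)			/* arithmetic wrapped: reject */
		return -EINVAL;
	if (end == start)			/* empty range: trivially done */
		return 0;
	return 1;				/* non-empty: walk the vmas */
}

int main(void)
{
	printf("%d %d %d\n",
	       check_range(0x1000, 0),			/* 0: empty */
	       check_range(0x1000, 0x2000),		/* 1: proceed */
	       check_range(0x1000, (size_t)-0x800));	/* -EINVAL: wraps */
	return 0;
}
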
645 unsigned long start, size_t len)
653 vma = find_vma(mm, start);
658 if (start >= vma->vm_end)
660 if (start + len <= vma->vm_start)
663 if (start > vma->vm_start)
664 count -= (start - vma->vm_start);
665 if (start + len < vma->vm_end) {
666 count += start + len - vma->vm_start;
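
Lines 645-666 are from count_mm_mlocked_page_nr(), which counts the already-locked pages intersecting [start, start + len) so do_mlock() does not charge them twice against RLIMIT_MEMLOCK; the byte count is shifted right by PAGE_SHIFT before returning. A worked example of the overlap arithmetic at lines 663-666; the transient unsigned underflow is intentional and cancels out, the same way it does in the kernel.

#include <stdio.h>

int main(void)
{
	unsigned long vm_start = 0x1000, vm_end = 0x6000; /* locked vma */
	unsigned long start = 0x2000, len = 0x3000;       /* queried range */
	unsigned long count = 0;

	if (start > vm_start)
		count -= start - vm_start;	  /* drop the head of the vma */
	if (start + len < vm_end)
		count += start + len - vm_start;  /* range ends inside the vma */
	else
		count += vm_end - vm_start;	  /* vma ends inside the range */

	printf("locked bytes in range: %#lx\n", count); /* 0x3000 */
	return 0;
}
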
676 static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
682 start = untagged_addr(start);
687 len = PAGE_ALIGN(len + (offset_in_page(start)));
688 start &= PAGE_MASK;
706 start, len);
711 error = apply_vma_lock_flags(start, len, flags);
717 error = __mm_populate(start, len, 0);
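
Lines 676-717 are from do_mlock(), the common path behind mlock(2) and mlock2(2): it strips pointer tag bits (line 682), rounds the range out to whole pages (lines 687-688), checks RLIMIT_MEMLOCK using the count above (line 706), applies the vma flags (line 711), and finally faults the pages in with __mm_populate() (line 717). A demo of the rounding, with macros mirroring the kernel's definitions for a 4 KiB page:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE         4096UL
#define PAGE_MASK         (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)     (((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define offset_in_page(p) ((p) & (PAGE_SIZE - 1))

int main(void)
{
	unsigned long start = 0x10f80;	/* unaligned address from userspace */
	size_t len = 0x100;

	len = PAGE_ALIGN(len + offset_in_page(start));	/* 0x100 + 0xf80 -> 0x2000 */
	start &= PAGE_MASK;				/* 0x10f80 -> 0x10000 */
	printf("lock [%#lx, %#lx)\n", start, start + (unsigned long)len);
	return 0;
}
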
723 SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
725 return do_mlock(start, len, VM_LOCKED);
728 SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
738 return do_mlock(start, len, vm_flags);
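
Lines 728-738 show mlock2(2), which accepts MLOCK_ONFAULT, converts it to the corresponding vm flags, and rejects unknown flags with EINVAL before calling do_mlock(). A usage sketch, assuming the glibc wrapper (present since glibc 2.27); MLOCK_ONFAULT locks pages as they are faulted in rather than populating the whole range up front.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	void *buf = NULL;

	if (posix_memalign(&buf, page, 4 * page))
		return 1;
	if (mlock2(buf, 4 * page, MLOCK_ONFAULT))
		perror("mlock2");	/* unknown flags get EINVAL from the kernel */
	free(buf);
	return 0;
}
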
741 SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
745 start = untagged_addr(start);
747 len = PAGE_ALIGN(len + (offset_in_page(start)));
748 start &= PAGE_MASK;
752 ret = apply_vma_lock_flags(start, len, 0);
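
Lines 741-752 show munlock(2), which applies the same untagging and rounding as mlock (lines 745-748) and then clears the lock flags by passing 0 to apply_vma_lock_flags() (line 752). The effect of both syscalls is visible as VmLck in /proc/self/status, as this Linux-only demo shows:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Print the VmLck line of our own status file, prefixed by a label. */
static void print_vmlck(const char *when)
{
	char line[128];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "VmLck:", 6))
			printf("%-14s %s", when, line);
	fclose(f);
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	void *buf = NULL;

	if (posix_memalign(&buf, page, 16 * page))
		return 1;
	mlock(buf, 16 * page);
	print_vmlck("after mlock:");
	munlock(buf, 16 * page);
	print_vmlck("after munlock:");
	free(buf);
	return 0;
}
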