Lines Matching refs:start
232 range.start = 0;
253 if (!range.start) {
286 if (range.start)
341 unsigned long start = addr;
358 flush_tlb_range(vma, start, end);
364 unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
373 pages = hugetlb_change_protection(vma, start, end, newprot);
375 pages = change_protection_range(vma, start, end, newprot,
410 unsigned long start, unsigned long end, unsigned long newflags)
414 long nrpages = (end - start) >> PAGE_SHIFT;
435 error = walk_page_range(current->mm, start, end,
464 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
465 *pprev = vma_merge(mm, *pprev, start, end, newflags,
476 if (start != vma->vm_start) {
477 error = split_vma(mm, vma, start, 1);
497 change_protection(vma, start, end, vma->vm_page_prot,
506 populate_vma_page_range(vma, start, end, NULL);
522 static int do_mprotect_pkey(unsigned long start, size_t len,
537 start = untagged_addr(start);
540 CALL_HCK_LITE_HOOK(find_jit_memory_lhck, current, start, len, &error);
551 if (start & ~PAGE_MASK)
556 end = start + len;
557 if (end <= start)
559 if (!arch_validate_prot(prot, start))
575 vma = find_vma(current->mm, start);
583 start = vma->vm_start;
588 if (vma->vm_start > start)
597 if (start > vma->vm_start)
600 for (nstart = start ; ; ) {
664 SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
667 return do_mprotect_pkey(start, len, prot, -1);
672 SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
675 return do_mprotect_pkey(start, len, prot, pkey);
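
The matched lines above trace the mprotect() path: change_protection() and its helpers, mprotect_fixup-style VMA handling (vma_merge, split_vma), do_mprotect_pkey(), and the mprotect/pkey_mprotect syscall entry points. As a minimal sketch of what that kernel path services, the userspace C program below (not taken from the listed file; an illustrative assumption only) maps one anonymous page and then calls mprotect() to drop write permission. The page-aligned start address it passes is exactly what the `start & ~PAGE_MASK` check in the listing enforces.

/*
 * Minimal userspace sketch, assuming a Linux/POSIX system: map a
 * writable anonymous page, then flip it to read-only via mprotect().
 * The mprotect() call enters SYSCALL_DEFINE3(mprotect, ...) and is
 * handled by do_mprotect_pkey(), as shown in the matches above.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* One writable anonymous page; its address is page-aligned. */
	char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}
	strcpy(buf, "hello");

	/*
	 * Drop write permission on the page. The kernel validates the
	 * range (start & ~PAGE_MASK, end <= start), finds the VMA, and
	 * rewrites its protections for [start, end).
	 */
	if (mprotect(buf, page, PROT_READ) != 0) {
		perror("mprotect");
		return EXIT_FAILURE;
	}
	printf("%s is now read-only\n", buf);

	munmap(buf, page);
	return EXIT_SUCCESS;
}

On success, the kernel walks from SYSCALL_DEFINE3(mprotect) into do_mprotect_pkey(), splitting or merging VMAs as needed (split_vma, vma_merge in the matches) and calling change_protection() to update the page-table entries for the range, followed by flush_tlb_range().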