/kernel/linux/linux-6.6/mm/kmsan/
init.c  (matches in kmsan_record_future_shadow_range)
    32  u64 nstart = (u64)start, nend = (u64)end, cstart, cend;    [local]
    36  KMSAN_WARN_ON((nstart >= nend) || !nstart || !nend);
    37  nstart = ALIGN_DOWN(nstart, PAGE_SIZE);
    50  if ((cstart < nstart && cend < nstart) ||
    54  start_end_pairs[i].start = min(nstart, cstart);
    61  start_end_pairs[future_index].start = nstart;
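The init.c hits above come from KMSAN's early bookkeeping: a future shadow range is page-aligned and then either merged into an overlapping recorded range or appended to the array. The user-space sketch below reproduces only that merge pattern; PAGE_SIZE, the ALIGN macros, MAX_PAIRS and record_range() are assumptions made for the example, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE        4096ULL
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define MAX_PAIRS        16

struct range { uint64_t start, end; };

static struct range start_end_pairs[MAX_PAIRS];
static int nr_pairs;

/* Record a range, page-aligned, merging it into an overlapping entry
 * when possible (mirrors the shape of the init.c matches above). */
static void record_range(uint64_t start, uint64_t end)
{
    uint64_t nstart = ALIGN_DOWN(start, PAGE_SIZE);
    uint64_t nend   = ALIGN_UP(end, PAGE_SIZE);

    for (int i = 0; i < nr_pairs; i++) {
        uint64_t cstart = start_end_pairs[i].start;
        uint64_t cend   = start_end_pairs[i].end;

        /* Entirely before or entirely after this entry: keep looking. */
        if ((cstart < nstart && cend < nstart) ||
            (cstart > nend && cend > nend))
            continue;

        /* Overlapping or adjacent: widen the existing entry instead. */
        start_end_pairs[i].start = nstart < cstart ? nstart : cstart;
        start_end_pairs[i].end   = nend > cend ? nend : cend;
        return;
    }
    if (nr_pairs < MAX_PAIRS) {
        start_end_pairs[nr_pairs].start = nstart;
        start_end_pairs[nr_pairs].end   = nend;
        nr_pairs++;
    }
}

int main(void)
{
    record_range(0x1000, 0x2fff);
    record_range(0x2800, 0x5000);   /* overlaps the first range: merged */
    for (int i = 0; i < nr_pairs; i++)
        printf("[%#llx, %#llx)\n",
               (unsigned long long)start_end_pairs[i].start,
               (unsigned long long)start_end_pairs[i].end);
    return 0;
}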
/kernel/linux/linux-5.10/mm/
mlock.c  (matches in apply_vma_lock_flags)
   591  unsigned long nstart, end, tmp;    [local]
   610  for (nstart = start ; ; ) {
   615  /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
   619  error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
   622  nstart = tmp;
   623  if (nstart < prev->vm_end)
   624  nstart = prev->vm_end;
   625  if (nstart >= end)
   629  if (!vma || vma->vm_start != nstart) {
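In linux-5.10, apply_vma_lock_flags() walks every VMA covering [start, end): each pass clamps the step to the current VMA, hands that piece to mlock_fixup(), and then requires the next VMA to begin exactly where the previous one ended, otherwise the walk fails with -ENOMEM. The sketch below imitates that loop shape in user space; a sorted array stands in for the VMA tree, fixup() is a stub, and the prev resynchronisation visible at lines 623-624 is left out for brevity.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; };

static struct vma vmas[] = {
    { 0x1000, 0x4000 },
    { 0x4000, 0x8000 },   /* contiguous with the first */
    { 0x9000, 0xa000 },   /* gap before this one */
};

/* First region whose end lies above addr (roughly what find_vma() does). */
static struct vma *find_vma(unsigned long addr)
{
    for (size_t i = 0; i < sizeof(vmas) / sizeof(vmas[0]); i++)
        if (addr < vmas[i].vm_end)
            return &vmas[i];
    return NULL;
}

/* Stand-in for mlock_fixup(): just report the sub-range being handled. */
static int fixup(struct vma *vma, unsigned long start, unsigned long end)
{
    printf("fixup [%#lx, %#lx) inside vma [%#lx, %#lx)\n",
           start, end, vma->vm_start, vma->vm_end);
    return 0;
}

static int apply_range(unsigned long start, unsigned long end)
{
    struct vma *vma = find_vma(start);
    unsigned long nstart, tmp;
    int error = 0;

    if (!vma || vma->vm_start > start)
        return -ENOMEM;

    for (nstart = start; ; ) {
        /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
        tmp = vma->vm_end;
        if (tmp > end)
            tmp = end;
        error = fixup(vma, nstart, tmp);
        if (error)
            break;
        nstart = tmp;
        if (nstart >= end)
            break;
        vma = find_vma(nstart);
        if (!vma || vma->vm_start != nstart) {   /* hole in the range */
            error = -ENOMEM;
            break;
        }
    }
    return error;
}

int main(void)
{
    printf("contiguous: %d\n", apply_range(0x2000, 0x6000));
    printf("with hole:  %d\n", apply_range(0x2000, 0x9800));
    return 0;
}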
mprotect.c  (matches in do_mprotect_pkey)
   525  unsigned long nstart, end, tmp, reqprot;    [local]
   600  for (nstart = start ; ; ) {
   605  /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
   642  error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
   645  nstart = tmp;
   647  if (nstart < prev->vm_end)
   648  nstart = prev->vm_end;
   649  if (nstart >= end)
   653  if (!vma || vma->vm_start != nstart) {
gup.c  (matches in __mm_populate)
  1450  unsigned long end, nstart, nend;    [local]
  1457  for (nstart = start; nstart < end; nstart = nend) {
  1459  * We want to fault in pages for [nstart; end) address range.
  1465  vma = find_vma(mm, nstart);
  1466  } else if (nstart >= vma->vm_end)
  1471  * Set [nstart; nend) to intersection of desired address
  1477  if (nstart < vma->vm_start)
  1478  nstart ...                                  [all ...]
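The gup.c hits are from __mm_populate(), which faults in [start, end) one VMA at a time: [nstart, nend) is clipped to the intersection of the remaining range with the current VMA before the pages are touched. Below is a minimal user-space sketch of just that clipping loop; the vma struct, find_vma() and fault_in() are simplified stand-ins rather than the kernel interfaces.

#include <stddef.h>
#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; };

static struct vma vmas[] = { { 0x1000, 0x3000 }, { 0x5000, 0x9000 } };

/* First region whose end lies above addr (roughly what find_vma() returns). */
static struct vma *find_vma(unsigned long addr)
{
    for (size_t i = 0; i < sizeof(vmas) / sizeof(vmas[0]); i++)
        if (addr < vmas[i].vm_end)
            return &vmas[i];
    return NULL;
}

static void fault_in(unsigned long start, unsigned long end)
{
    printf("fault in [%#lx, %#lx)\n", start, end);
}

static void populate_range(unsigned long start, unsigned long end)
{
    struct vma *vma = NULL;
    unsigned long nstart, nend;

    for (nstart = start; nstart < end; nstart = nend) {
        /* We want to fault in pages for the [nstart, end) address range. */
        if (!vma || nstart >= vma->vm_end)
            vma = find_vma(nstart);
        if (!vma || vma->vm_start >= end)
            break;
        /* Set [nstart, nend) to the intersection of the desired range
         * with the current VMA; skip the hole in front of it, if any. */
        nend = vma->vm_end < end ? vma->vm_end : end;
        if (nstart < vma->vm_start)
            nstart = vma->vm_start;
        fault_in(nstart, nend);
    }
}

int main(void)
{
    populate_range(0x2000, 0x8000);
    return 0;
}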
huge_memory.c  (matches in vma_adjust_trans_huge)
  2345  unsigned long nstart = next->vm_start;    [local]
  2346  nstart += adjust_next;
  2347  if (nstart & ~HPAGE_PMD_MASK &&
  2348      (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
  2349      (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
  2350          split_huge_pmd_address(next, nstart, false, NULL);
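The huge_memory.c condition asks whether the adjusted boundary nstart cuts through a huge (PMD-sized) mapping: if nstart is not PMD-aligned yet the surrounding aligned PMD range still fits inside the VMA, the huge PMD must be split at that address. The snippet below exercises only that arithmetic; HPAGE_PMD_SIZE is assumed to be 2 MiB here and the split itself is merely printed.

#include <stdio.h>

#define HPAGE_PMD_SIZE  (2UL * 1024 * 1024)
#define HPAGE_PMD_MASK  (~(HPAGE_PMD_SIZE - 1))

/* Stand-alone illustration of the test guarding split_huge_pmd_address(). */
static void maybe_split(unsigned long nstart,
                        unsigned long vm_start, unsigned long vm_end)
{
    if (nstart & ~HPAGE_PMD_MASK &&               /* not PMD-aligned ...  */
        (nstart & HPAGE_PMD_MASK) >= vm_start &&  /* ... but the aligned  */
        (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vm_end) /* PMD fits */
        printf("split huge PMD at %#lx\n", nstart);
    else
        printf("no split needed for %#lx\n", nstart);
}

int main(void)
{
    /* VMA covering [4 MiB, 16 MiB); a boundary at 6 MiB + 4 KiB cuts a PMD. */
    maybe_split(0x601000, 0x400000, 0x1000000);
    /* A PMD-aligned boundary never needs a split. */
    maybe_split(0x800000, 0x400000, 0x1000000);
    return 0;
}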
/kernel/linux/linux-6.6/mm/
mprotect.c  (matches in do_mprotect_pkey)
   694  unsigned long nstart, end, tmp, reqprot;    [local]
   770  nstart = start;
   823  error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
   828  error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags);
   833  nstart = tmp;
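In linux-6.6, do_mprotect_pkey() is driven by a VMA iterator (the vmi argument threaded through mprotect_fixup()), and a VMA that provides its own vm_ops->mprotect hook gets to run it before the generic fixup is applied, as lines 823 and 828 suggest. The sketch below shows only that call-order dispatch with invented types and stub functions; it is not the kernel's struct layout or API.

#include <stdio.h>

struct vma;

struct vm_ops {
    int (*mprotect)(struct vma *vma, unsigned long start,
                    unsigned long end, unsigned long newflags);
};

struct vma {
    unsigned long vm_start, vm_end;
    const struct vm_ops *vm_ops;
};

static int special_mprotect(struct vma *vma, unsigned long start,
                            unsigned long end, unsigned long newflags)
{
    printf("driver hook:   [%#lx, %#lx) -> flags %#lx\n", start, end, newflags);
    return 0;
}

static int generic_fixup(struct vma *vma, unsigned long start,
                         unsigned long end, unsigned long newflags)
{
    printf("generic fixup: [%#lx, %#lx) -> flags %#lx\n", start, end, newflags);
    return 0;
}

static int change_protection(struct vma *vma, unsigned long nstart,
                             unsigned long tmp, unsigned long newflags)
{
    int error = 0;

    /* Give the VMA's own ->mprotect a chance to veto or prepare ... */
    if (vma->vm_ops && vma->vm_ops->mprotect)
        error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
    if (error)
        return error;
    /* ... then apply the common protection change. */
    return generic_fixup(vma, nstart, tmp, newflags);
}

int main(void)
{
    const struct vm_ops ops = { .mprotect = special_mprotect };
    struct vma plain  = { 0x1000, 0x4000, NULL };
    struct vma hooked = { 0x4000, 0x8000, &ops };

    change_protection(&plain, 0x1000, 0x4000, 0x3);
    change_protection(&hooked, 0x4000, 0x8000, 0x3);
    return 0;
}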
mlock.c  (matches in apply_vma_lock_flags)
   480  unsigned long nstart, end, tmp;    [local]
   499  nstart = start;
   510  /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
   514  error = mlock_fixup(&vmi, vma, &prev, nstart, tmp, newflags);
   518  nstart = tmp;
gup.c  (matches in __mm_populate)
  1740  unsigned long end, nstart, nend;    [local]
  1747  for (nstart = start; nstart < end; nstart = nend) {
  1749  * We want to fault in pages for [nstart; end) address range.
  1755  vma = find_vma_intersection(mm, nstart, end);
  1756  } else if (nstart >= vma->vm_end)
  1762  * Set [nstart; nend) to intersection of desired address
  1768  if (nstart < vma->vm_start)
  1769  nstart ...                                  [all ...]
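The 6.6 __mm_populate() differs from the 5.10 version above mainly in starting each step with find_vma_intersection(mm, nstart, end), which returns a VMA only if it actually overlaps the remaining range. A tiny sketch of that semantic over a sorted array, with invented types in place of the kernel helper:

#include <stddef.h>
#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; };

static struct vma vmas[] = { { 0x1000, 0x3000 }, { 0x8000, 0x9000 } };

static struct vma *find_vma(unsigned long addr)
{
    for (size_t i = 0; i < sizeof(vmas) / sizeof(vmas[0]); i++)
        if (addr < vmas[i].vm_end)
            return &vmas[i];
    return NULL;
}

/* find_vma_intersection()-style lookup: a region is returned only if it
 * overlaps [start, end); a bare find_vma() could also return a region
 * lying entirely beyond `end`. */
static struct vma *find_vma_intersection(unsigned long start, unsigned long end)
{
    struct vma *vma = find_vma(start);

    if (vma && vma->vm_start >= end)   /* begins at or after end: no overlap */
        vma = NULL;
    return vma;
}

int main(void)
{
    /* [0x4000, 0x6000) touches no region, so the lookup returns NULL;
     * [0x2000, 0x6000) overlaps the first region and returns it. */
    printf("%p\n", (void *)find_vma_intersection(0x4000, 0x6000));
    printf("%p\n", (void *)find_vma_intersection(0x2000, 0x6000));
    return 0;
}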
huge_memory.c  (matches in vma_adjust_trans_huge)
  2341  unsigned long nstart = next->vm_start;    [local]
  2342  nstart += adjust_next;
  2343  split_huge_pmd_if_needed(next, nstart);