
Searched refs: new_vma (Results 1 - 25 of 26), sorted by relevance


/kernel/linux/linux-5.10/arch/x86/entry/vdso/
vma.c
73 struct vm_area_struct *new_vma) in vdso_fix_landing()
84 regs->ip = new_vma->vm_start + vdso_land; in vdso_fix_landing()
90 struct vm_area_struct *new_vma) in vdso_mremap()
92 unsigned long new_size = new_vma->vm_end - new_vma->vm_start; in vdso_mremap()
98 vdso_fix_landing(image, new_vma); in vdso_mremap()
99 current->mm->context.vdso = (void __user *)new_vma->vm_start; in vdso_mremap()
105 struct vm_area_struct *new_vma) in vvar_mremap()
107 const struct vdso_image *image = new_vma->vm_mm->context.vdso_image; in vvar_mremap()
108 unsigned long new_size = new_vma in vvar_mremap()
72 vdso_fix_landing(const struct vdso_image *image, struct vm_area_struct *new_vma) vdso_fix_landing() argument
89 vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) vdso_mremap() argument
104 vvar_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) vvar_mremap() argument
[all...]
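
All of the vDSO/vvar hits in this group follow one pattern: the architecture registers a vm_special_mapping whose mremap hook fires when userspace moves the mapping, rejects any size change, and re-points mm->context.vdso at new_vma->vm_start (plus the regs->ip landing fixup when the task was executing inside the vDSO). A minimal sketch of that callback shape, assuming the two-argument 5.10-era signature shown above; the example_* names are hypothetical and the body is not a verbatim copy of arch/x86/entry/vdso/vma.c:

static int example_vdso_mremap(const struct vm_special_mapping *sm,
			       struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	/* The vDSO image can only be relocated, never resized. */
	if (image->size != new_size)
		return -EINVAL;

	/* Remember the new base so signal delivery and unwinding keep
	 * finding the vDSO after the move. */
	current->mm->context.vdso = (void __user *)new_vma->vm_start;
	return 0;
}

static const struct vm_special_mapping example_vdso_mapping = {
	.name	= "[vdso]",
	.mremap	= example_vdso_mremap,
};
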
/kernel/linux/linux-5.10/mm/
mremap.c
117 struct vm_area_struct *new_vma, pmd_t *new_pmd, in move_ptes()
138 * - During mremap(), new_vma is often known to be placed after vma in move_ptes()
178 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); in move_ptes()
255 unsigned long old_addr, struct vm_area_struct *new_vma, in move_page_tables()
322 if (pte_alloc(new_vma->vm_mm, new_pmd)) in move_page_tables()
324 move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma, in move_page_tables()
340 struct vm_area_struct *new_vma; in move_vma() local
362 * so KSM can come around to merge on vma and new_vma afterwards. in move_vma()
370 new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, in move_vma()
372 if (!new_vma) in move_vma()
115 move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, unsigned long old_addr, unsigned long old_end, struct vm_area_struct *new_vma, pmd_t *new_pmd, unsigned long new_addr, bool need_rmap_locks) move_ptes() argument
254 move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len, bool need_rmap_locks) move_page_tables() argument
[all...]
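
These matches are the core of the mremap(2) move path: move_vma() obtains a destination new_vma from copy_vma(), then move_page_tables()/move_ptes() walk the old range and transfer the page table entries into it. From userspace the path is exercised by any mremap() call that actually relocates a mapping; a small, hedged demo follows (the destination window is reserved first so MREMAP_FIXED has somewhere to land):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;

	/* Source mapping: the "vma" that move_vma() will relocate. */
	char *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (old == MAP_FAILED)
		return 1;
	memset(old, 0x5a, len);

	/* Reserve a destination, then force the move onto it.
	 * MREMAP_MAYMOVE | MREMAP_FIXED drives the kernel through
	 * copy_vma() and move_page_tables(). */
	char *target = mmap(NULL, len, PROT_NONE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (target == MAP_FAILED)
		return 1;

	char *new = mremap(old, len, len,
			   MREMAP_MAYMOVE | MREMAP_FIXED, target);
	if (new == MAP_FAILED)
		return 1;

	/* The data travels with the page tables; no pages are copied. */
	printf("moved to %p, first byte 0x%02x\n", (void *)new, new[0]);
	return 0;
}
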
mmap.c
3355 struct vm_area_struct *new_vma, *prev; in copy_vma() local
3370 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, in copy_vma()
3373 if (new_vma) { in copy_vma()
3375 * Source vma may have been merged into new_vma in copy_vma()
3377 if (unlikely(vma_start >= new_vma->vm_start && in copy_vma()
3378 vma_start < new_vma->vm_end)) { in copy_vma()
3391 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); in copy_vma()
3392 *vmap = vma = new_vma; in copy_vma()
3394 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); in copy_vma()
3396 new_vma in copy_vma()
3478 special_mapping_mremap(struct vm_area_struct *new_vma) special_mapping_mremap() argument
[all...]
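
Besides allocating new_vma, the copy_vma() snippet also shows how the caller learns whether the rmap locks must be held during the move: rmap walks visit the VMAs of an anon_vma/file in vm_pgoff order, so only a destination that does not sort strictly after the source can race with a concurrent walk while PTEs are in transit. A tiny sketch of that rule, using a hypothetical stand-in struct rather than the real vm_area_struct:

#include <stdbool.h>

/* Hypothetical stand-in exposing only the field the check uses. */
struct vma_stub {
	unsigned long vm_pgoff;	/* position in rmap traversal order */
};

/*
 * Mirrors the assignment visible at mmap.c:3394 above: take the rmap
 * locks while moving PTEs unless new_vma is known to come after the
 * source VMA in traversal order.
 */
static bool need_rmap_locks(const struct vma_stub *new_vma,
			    const struct vma_stub *old_vma)
{
	return new_vma->vm_pgoff <= old_vma->vm_pgoff;
}
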
/kernel/linux/linux-6.6/arch/powerpc/kernel/
vdso.c
61 static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma, in vdso_mremap() argument
64 unsigned long new_size = new_vma->vm_end - new_vma->vm_start; in vdso_mremap()
69 current->mm->context.vdso = (void __user *)new_vma->vm_start; in vdso_mremap()
74 static int vdso32_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) in vdso32_mremap() argument
76 return vdso_mremap(sm, new_vma, &vdso32_end - &vdso32_start); in vdso32_mremap()
79 static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) in vdso64_mremap() argument
81 return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start); in vdso64_mremap()
/kernel/linux/linux-5.10/arch/arm64/kernel/
vdso.c
83 struct vm_area_struct *new_vma) in __vdso_remap()
85 unsigned long new_size = new_vma->vm_end - new_vma->vm_start; in __vdso_remap()
92 current->mm->context.vdso = (void *)new_vma->vm_start; in __vdso_remap()
223 struct vm_area_struct *new_vma) in vvar_mremap()
225 unsigned long new_size = new_vma->vm_end - new_vma->vm_start; in vvar_mremap()
284 struct vm_area_struct *new_vma) in aarch32_vdso_mremap()
286 return __vdso_remap(VDSO_ABI_AA32, sm, new_vma); in aarch32_vdso_mremap()
457 struct vm_area_struct *new_vma) in vdso_mremap()
81 __vdso_remap(enum vdso_abi abi, const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) __vdso_remap() argument
222 vvar_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) vvar_mremap() argument
283 aarch32_vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) aarch32_vdso_mremap() argument
456 vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) vdso_mremap() argument
[all...]
/kernel/linux/linux-6.6/mm/
mremap.c
138 struct vm_area_struct *new_vma, pmd_t *new_pmd, in move_ptes()
160 * - During mremap(), new_vma is often known to be placed after vma in move_ptes()
208 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); in move_ptes()
493 unsigned long old_addr, struct vm_area_struct *new_vma, in move_page_tables()
508 return move_hugetlb_page_tables(vma, new_vma, old_addr, in move_page_tables()
571 if (pte_alloc(new_vma->vm_mm, new_pmd)) in move_page_tables()
574 new_vma, new_pmd, new_addr, need_rmap_locks) < 0) in move_page_tables()
591 struct vm_area_struct *new_vma; in move_vma() local
626 * so KSM can come around to merge on vma and new_vma afterwards. in move_vma()
640 new_vma in move_vma()
136 move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, unsigned long old_addr, unsigned long old_end, struct vm_area_struct *new_vma, pmd_t *new_pmd, unsigned long new_addr, bool need_rmap_locks) move_ptes() argument
492 move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len, bool need_rmap_locks) move_page_tables() argument
[all...]
mmap.c
3344 struct vm_area_struct *new_vma, *prev; in copy_vma() local
3357 new_vma = find_vma_prev(mm, addr, &prev); in copy_vma()
3358 if (new_vma && new_vma->vm_start < addr + len) in copy_vma()
3361 new_vma = vma_merge(&vmi, mm, prev, addr, addr + len, vma->vm_flags, in copy_vma()
3364 if (new_vma) { in copy_vma()
3366 * Source vma may have been merged into new_vma in copy_vma()
3368 if (unlikely(vma_start >= new_vma->vm_start && in copy_vma()
3369 vma_start < new_vma->vm_end)) { in copy_vma()
3382 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); in copy_vma()
3478 special_mapping_mremap(struct vm_area_struct *new_vma) special_mapping_mremap() argument
[all...]
hugetlb.c
1234 * It has already been transferred to new_vma. in clear_vma_resv_huge_pages()
1237 * which copies vma into new_vma and unmaps vma. After the copy in clear_vma_resv_huge_pages()
1238 * operation both new_vma and vma share a reference to the resv_map in clear_vma_resv_huge_pages()
1241 * the reservation still lives on in new_vma, so simply decrement the in clear_vma_resv_huge_pages()
5253 struct vm_area_struct *new_vma, in move_hugetlb_page_tables()
5298 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz); in move_hugetlb_page_tables()
5252 move_hugetlb_page_tables(struct vm_area_struct *vma, struct vm_area_struct *new_vma, unsigned long old_addr, unsigned long new_addr, unsigned long len) move_hugetlb_page_tables() argument
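
The 6.6 tree adds a dedicated hugetlb path: move_page_tables() hands hugetlb VMAs to move_hugetlb_page_tables(), which relocates the huge PTEs while the reservation map stays shared between vma and new_vma (the clear_vma_resv_huge_pages() comment above). A hedged userspace demo of that path, assuming a kernel new enough to allow mremap() on hugetlb mappings (the 6.6 tree here; older kernels refuse the move) and at least one reserved 2 MiB huge page:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL * 1024 * 1024;		/* one 2 MiB huge page */

	char *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (old == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");	/* e.g. no hugepages reserved */
		return 1;
	}
	old[0] = 1;				/* fault the huge page in */

	/* Build a 2 MiB-aligned destination by over-mapping and rounding
	 * up; hugetlb mremap() requires hugepage-aligned addresses. */
	char *scratch = mmap(NULL, 2 * len, PROT_NONE,
			     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (scratch == MAP_FAILED)
		return 1;
	char *target = (char *)(((unsigned long)scratch + len - 1) & ~(len - 1));

	/* This move is what ends up in move_hugetlb_page_tables(). */
	char *new = mremap(old, len, len,
			   MREMAP_MAYMOVE | MREMAP_FIXED, target);
	if (new == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("hugetlb mapping now at %p\n", (void *)new);
	return 0;
}
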
/kernel/linux/linux-5.10/arch/arm/kernel/
vdso.c
51 struct vm_area_struct *new_vma) in vdso_mremap()
53 unsigned long new_size = new_vma->vm_end - new_vma->vm_start; in vdso_mremap()
62 current->mm->context.vdso = new_vma->vm_start; in vdso_mremap()
50 vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) vdso_mremap() argument
process.c
389 struct vm_area_struct *new_vma) in sigpage_mremap()
391 current->mm->context.sigpage = new_vma->vm_start; in sigpage_mremap()
388 sigpage_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) sigpage_mremap() argument
/kernel/linux/linux-6.6/arch/x86/entry/vdso/
vma.c
76 struct vm_area_struct *new_vma) in vdso_fix_landing()
87 regs->ip = new_vma->vm_start + vdso_land; in vdso_fix_landing()
93 struct vm_area_struct *new_vma) in vdso_mremap()
97 vdso_fix_landing(image, new_vma); in vdso_mremap()
98 current->mm->context.vdso = (void __user *)new_vma->vm_start; in vdso_mremap()
75 vdso_fix_landing(const struct vdso_image *image, struct vm_area_struct *new_vma) vdso_fix_landing() argument
92 vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) vdso_mremap() argument
/kernel/linux/linux-5.10/include/linux/
mm_inline.h
171 struct vm_area_struct *new_vma) in dup_anon_vma_name()
176 new_vma->anon_name = anon_vma_name_reuse(anon_name); in dup_anon_vma_name()
213 struct vm_area_struct *new_vma) {} in dup_anon_vma_name()
170 dup_anon_vma_name(struct vm_area_struct *orig_vma, struct vm_area_struct *new_vma) dup_anon_vma_name() argument
212 dup_anon_vma_name(struct vm_area_struct *orig_vma, struct vm_area_struct *new_vma) dup_anon_vma_name() argument
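
dup_anon_vma_name() is how a name attached to an anonymous VMA survives VMA duplication (fork, split, and the copy_vma() step of mremap): when CONFIG_ANON_VMA_NAME is enabled it takes a reference on the existing anon_vma_name and attaches it to new_vma, and it is a no-op stub otherwise. The names themselves come from userspace; a small, hedged example of setting one (the constants match include/uapi/linux/prctl.h but may be missing from older libc headers):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_VMA
#define PR_SET_VMA		0x53564d41
#define PR_SET_VMA_ANON_NAME	0
#endif

int main(void)
{
	size_t len = 16 * 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Name the anonymous VMA; dup_anon_vma_name() carries this name
	 * over to any new_vma cloned from it later on. */
	if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
		  (unsigned long)p, len, (unsigned long)"demo-region"))
		perror("prctl(PR_SET_VMA_ANON_NAME)");

	/* Shows up as "[anon:demo-region]" in /proc/self/maps when the
	 * kernel is built with CONFIG_ANON_VMA_NAME. */
	return 0;
}
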
mm_types.h
825 struct vm_area_struct *new_vma);
mm.h
1878 unsigned long old_addr, struct vm_area_struct *new_vma,
/kernel/linux/linux-6.6/arch/arm64/kernel/
vdso.c
79 struct vm_area_struct *new_vma) in vdso_mremap()
81 current->mm->context.vdso = (void *)new_vma->vm_start; in vdso_mremap()
249 struct vm_area_struct *new_vma) in aarch32_sigpage_mremap()
251 current->mm->context.sigpage = (void *)new_vma->vm_start; in aarch32_sigpage_mremap()
78 vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) vdso_mremap() argument
248 aarch32_sigpage_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) aarch32_sigpage_mremap() argument
/kernel/linux/linux-6.6/arch/loongarch/kernel/
vdso.c
43 static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) in vdso_mremap() argument
45 current->mm->context.vdso = (void *)(new_vma->vm_start); in vdso_mremap()
/kernel/linux/linux-5.10/arch/loongarch/kernel/
vdso.c
45 static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) in vdso_mremap() argument
47 current->mm->context.vdso = (void *)(new_vma->vm_start); in vdso_mremap()
/kernel/linux/linux-6.6/arch/arm/kernel/
vdso.c
51 struct vm_area_struct *new_vma) in vdso_mremap()
53 current->mm->context.vdso = new_vma->vm_start; in vdso_mremap()
50 vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) vdso_mremap() argument
process.c
383 struct vm_area_struct *new_vma) in sigpage_mremap()
385 current->mm->context.sigpage = new_vma->vm_start; in sigpage_mremap()
382 sigpage_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) sigpage_mremap() argument
/kernel/linux/linux-6.6/arch/riscv/kernel/
vdso.c
59 struct vm_area_struct *new_vma) in vdso_mremap()
61 current->mm->context.vdso = (void *)new_vma->vm_start; in vdso_mremap()
58 vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) vdso_mremap() argument
/kernel/linux/linux-6.6/include/linux/
mm_inline.h
399 struct vm_area_struct *new_vma) in dup_anon_vma_name()
404 new_vma->anon_name = anon_vma_name_reuse(anon_name); in dup_anon_vma_name()
440 struct vm_area_struct *new_vma) {} in dup_anon_vma_name()
398 dup_anon_vma_name(struct vm_area_struct *orig_vma, struct vm_area_struct *new_vma) dup_anon_vma_name() argument
439 dup_anon_vma_name(struct vm_area_struct *orig_vma, struct vm_area_struct *new_vma) dup_anon_vma_name() argument
hugetlb.h
131 struct vm_area_struct *new_vma,
348 struct vm_area_struct *new_vma, in move_hugetlb_page_tables()
347 move_hugetlb_page_tables(struct vm_area_struct *vma, struct vm_area_struct *new_vma, unsigned long old_addr, unsigned long new_addr, unsigned long len) move_hugetlb_page_tables() argument
mm_types.h
1224 struct vm_area_struct *new_vma);
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/
i915_gem.c
989 new_vma: in i915_gem_object_ggtt_pin_ww()
1006 goto new_vma; in i915_gem_object_ggtt_pin_ww()
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/
i915_gem.c
952 new_vma: in i915_gem_object_ggtt_pin_ww()
980 goto new_vma; in i915_gem_object_ggtt_pin_ww()
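
Note that, unlike every other hit in this listing, the two i915_gem.c matches are not a vm_area_struct at all: new_vma: here is a local goto label in the GGTT pinning retry loop of i915_gem_object_ggtt_pin_ww(), as the paired goto new_vma line shows.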
