Lines matching references to new_addr (Linux kernel, mm/mremap.c); each hit is prefixed with its line number in that file.

139 unsigned long new_addr, bool need_rmap_locks)
178 new_pte = pte_offset_map_nolock(mm, new_pmd, new_addr, &new_ptl);
190 new_pte++, new_addr += PAGE_SIZE) {
208 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
210 set_pte_at(mm, new_addr, new_pte, pte);
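
The hits at lines 178-210 are the innermost loop of move_ptes(): map the destination PMD's PTE page, then step both addresses one page at a time, clearing each present entry on the old side and installing it at new_addr. Below is a minimal userspace model of just that loop; entry_t and move_entries() are hypothetical stand-ins, and the real code additionally takes the page-table spinlocks, runs move_pte() for architecture fixups, and flushes the TLB.

#include <stdint.h>

#define PAGE_SIZE 4096UL

typedef uint64_t entry_t;	/* stand-in for pte_t */

/* Hypothetical model: move the entries backing [old_addr, old_addr + len) */
static void move_entries(entry_t *old_pte, entry_t *new_pte,
			 unsigned long old_addr, unsigned long new_addr,
			 unsigned long len)
{
	unsigned long old_end = old_addr + len;

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (*old_pte == 0)	/* pte_none(): skip holes */
			continue;
		*new_pte = *old_pte;	/* models get_and_clear + set_pte_at */
		*old_pte = 0;
	}
}

int main(void)
{
	entry_t old_tbl[4] = { 0x1, 0, 0x3, 0x4 }, new_tbl[4] = { 0 };

	/* After this, new_tbl holds {0x1, 0, 0x3, 0x4} and old_tbl is clear */
	move_entries(old_tbl, new_tbl, 0, 0x10000, 4 * PAGE_SIZE);
	return 0;
}
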
237 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
296 unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
305 unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
345 unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
354 unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
384 set_pud_at(mm, new_addr, new_pud, pud);
394 unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
416 unsigned long new_addr)
441 next = (new_addr + size) & mask;
442 if (extent > next - new_addr)
443 extent = next - new_addr;
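
Lines 441-443 are the destination-side clamp inside get_extent(): the stride must stop at the next PMD or PUD boundary of new_addr, otherwise the copy would cross into a different page-table page mid-step. A standalone sketch of that clamp with a worked example follows; clamp_extent() is a hypothetical helper, and the kernel's get_extent() applies the same clamp against old_end and the old_addr boundary first.

#include <stdio.h>

/* size/mask describe one page-table level, e.g. PMD_SIZE/PMD_MASK */
static unsigned long clamp_extent(unsigned long extent, unsigned long new_addr,
				  unsigned long size, unsigned long mask)
{
	unsigned long next = (new_addr + size) & mask;	/* next boundary above new_addr */

	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}

int main(void)
{
	unsigned long pmd_size = 1UL << 21;	/* 2 MiB granule */
	unsigned long pmd_mask = ~(pmd_size - 1);

	/* new_addr sits 0x1000 below a PMD boundary, so only 0x1000 bytes
	 * can be moved before the walk needs the next PMD: prints 0x1000 */
	printf("%#lx\n", clamp_extent(pmd_size, pmd_size - 0x1000,
				      pmd_size, pmd_mask));
	return 0;
}
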
452 unsigned long old_addr, unsigned long new_addr,
463 moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
467 moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
472 move_huge_pmd(vma, old_addr, new_addr, old_entry,
477 move_huge_pud(vma, old_addr, new_addr, old_entry,
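
Lines 452-477 show move_pgt_entry() dispatching on the entry type: the NORMAL_PMD/NORMAL_PUD cases relocate a whole lower-level table page in one shot, while HPAGE_PMD/HPAGE_PUD relocate a single huge mapping (2 MiB and 1 GiB respectively on x86-64 with 4 KiB pages). A hedged model of that dispatch, with the four movers stubbed out:

#include <stdbool.h>

enum pgt_entry { NORMAL_PMD, HPAGE_PMD, NORMAL_PUD, HPAGE_PUD };

/* Hypothetical stubs for the four movers named in the listing */
static bool move_normal_pmd_stub(void) { return true; }
static bool move_huge_pmd_stub(void)   { return true; }
static bool move_normal_pud_stub(void) { return true; }
static bool move_huge_pud_stub(void)   { return true; }

static bool move_pgt_entry_model(enum pgt_entry entry)
{
	switch (entry) {
	case NORMAL_PMD: return move_normal_pmd_stub();
	case HPAGE_PMD:  return move_huge_pmd_stub();
	case NORMAL_PUD: return move_normal_pud_stub();
	case HPAGE_PUD:  return move_huge_pud_stub();
	}
	return false;
}

int main(void)
{
	return move_pgt_entry_model(NORMAL_PMD) ? 0 : 1;
}
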
494 unsigned long new_addr, unsigned long len,
509 new_addr, len);
516 for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
522 extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);
527 new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
532 move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
539 if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
544 extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
548 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
555 move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
565 if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
574 new_vma, new_pmd, new_addr, need_rmap_locks) < 0)
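
Lines 494-574 are the body of move_page_tables(): old_addr and new_addr advance in lockstep, the per-iteration stride ("extent") comes from get_extent(), and each step first tries a PUD- or PMD-granular move before falling back to the PTE loop of move_ptes(). The shape of that walk, reduced to a single PMD level and userspace stand-ins (pmd_extent(), try_move_whole_pmd(), and move_ptes_model() are all hypothetical):

#include <stdbool.h>

#define PMD_SIZE (1UL << 21)		/* 2 MiB on x86-64 with 4 KiB pages */
#define PMD_MASK (~(PMD_SIZE - 1))

/* Clamp the stride so neither side crosses its next PMD boundary */
static unsigned long pmd_extent(unsigned long old_addr, unsigned long old_end,
				unsigned long new_addr)
{
	unsigned long extent = old_end - old_addr;
	unsigned long next;

	next = (old_addr + PMD_SIZE) & PMD_MASK;
	if (extent > next - old_addr)
		extent = next - old_addr;
	next = (new_addr + PMD_SIZE) & PMD_MASK;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}

/* Hypothetical stand-ins for move_pgt_entry() and move_ptes() */
static bool try_move_whole_pmd(unsigned long o, unsigned long n)
{
	(void)o; (void)n;
	return false;			/* pretend the fast path never applies */
}

static void move_ptes_model(unsigned long o, unsigned long n, unsigned long len)
{
	(void)o; (void)n; (void)len;	/* the per-page copy would go here */
}

static void walk(unsigned long old_addr, unsigned long old_end,
		 unsigned long new_addr)
{
	unsigned long extent;

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		extent = pmd_extent(old_addr, old_end, new_addr);
		/* A whole-PMD move needs a full, aligned PMD on both sides */
		if (extent != PMD_SIZE || !try_move_whole_pmd(old_addr, new_addr))
			move_ptes_model(old_addr, new_addr, extent);
	}
}

int main(void)
{
	/* Move 5 MiB starting 1 MiB into a PMD: strides are 1M, 2M, 2M */
	walk(0x100000, 0x100000 + (5UL << 20), 0x40100000);
	return 0;
}
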
585 unsigned long new_len, unsigned long new_addr,
640 new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
648 moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
662 move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
666 old_addr = new_addr;
667 new_addr = err;
714 return new_addr;
743 return new_addr;
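
Lines 648-667 are move_vma()'s failure handling: if move_page_tables() relocated only part of the range, the relocated entries are immediately moved back and old_addr/new_addr are swapped so the cleanup unmaps the correct side; on success the function returns new_addr (lines 714 and 743). A compact model of that invariant; move_tables() is a hypothetical stand-in reporting how many bytes it moved.

/* Hypothetical mover: relocates up to len bytes, returns bytes moved */
static unsigned long move_tables(unsigned long from, unsigned long to,
				 unsigned long len)
{
	(void)from; (void)to;
	return len;		/* stub: always succeeds in this model */
}

static long move_vma_model(unsigned long old_addr, unsigned long new_addr,
			   unsigned long old_len)
{
	unsigned long moved_len = move_tables(old_addr, new_addr, old_len);

	if (moved_len < old_len) {
		/* Partial move: put the moved entries back so the old
		 * mapping is whole again (the old/new swap at 662-667) */
		move_tables(new_addr, old_addr, moved_len);
		return -1;	/* the kernel returns -ENOMEM here */
	}
	return (long)new_addr;
}

int main(void)
{
	return move_vma_model(0x1000, 0x40000000, 0x2000) < 0;
}
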
801 unsigned long new_addr, unsigned long new_len, bool *locked,
811 if (offset_in_page(new_addr))
814 if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
818 if (addr + old_len > new_addr && new_addr + new_len > addr)
825 * (new_addr, and old_addr), because userspace will not know the
829 * Worst-scenario case is when both vma's (new_addr and old_addr) get
839 ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
870 ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
878 new_addr = ret;
880 ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
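
Lines 801-880 are mremap_to(), the MREMAP_FIXED path: the destination must be page-aligned, must fit below TASK_SIZE, and must not overlap the source; the old destination contents are then unmapped (line 839) before move_vma() performs the move. The three checks as a standalone function with a worked overlap example (check_fixed_dest() is hypothetical; TASK_SIZE is shown with an illustrative x86-64 value):

#include <errno.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define TASK_SIZE (1UL << 47)	/* illustrative x86-64 user VA limit */

static int check_fixed_dest(unsigned long addr, unsigned long old_len,
			    unsigned long new_addr, unsigned long new_len)
{
	if (new_addr & (PAGE_SIZE - 1))		/* offset_in_page() */
		return -EINVAL;
	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		return -EINVAL;
	/* Two ranges overlap iff each one starts before the other ends */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		return -EINVAL;
	return 0;
}

int main(void)
{
	/* [0x1000, 0x3000) vs [0x2000, 0x4000) overlap: prints -22 */
	printf("%d\n", check_fixed_dest(0x1000, 0x2000, 0x2000, 0x2000));
	return 0;
}
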
910 unsigned long, new_addr)
979 if (new_addr & ~huge_page_mask(h))
991 ret = mremap_to(addr, old_len, new_addr, new_len,
1070 new_addr = addr;
1087 new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
1091 if (IS_ERR_VALUE(new_addr)) {
1092 ret = new_addr;
1096 ret = move_vma(vma, addr, old_len, new_len, new_addr,
1104 mm_populate(new_addr + old_len, new_len - old_len);
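
Lines 1070-1104 are the tail of the mremap() syscall when no fixed destination is given: first try to grow in place (new_addr = addr); failing that, ask get_unmapped_area() for a fresh range and move. If the mapping is mlock()ed, the moved pages arrive already populated, so the mm_populate() call at line 1104 only has to fault in the newly added tail [new_addr + old_len, new_addr + new_len). That tail arithmetic as a standalone check, with illustrative values:

#include <assert.h>

int main(void)
{
	unsigned long new_addr = 0x700000000000UL;	/* illustrative */
	unsigned long old_len  = 2UL << 20;	/* 2 MiB, already populated */
	unsigned long new_len  = 8UL << 20;	/* grown to 8 MiB */

	/* mm_populate(new_addr + old_len, new_len - old_len) */
	assert(new_addr + old_len == 0x700000200000UL);
	assert(new_len - old_len == (6UL << 20));	/* only 6 MiB faulted */
	return 0;
}
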