Lines Matching defs:address
211 .address = addr,
224 linear_page_index(vma, pvmw.address);
262 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
264 hugepage_add_anon_rmap(new, vma, pvmw.address);
270 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
273 page_add_anon_rmap(new, vma, pvmw.address, false);
284 update_mmu_cache(vma, pvmw.address, pvmw.pte);
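The matches at 262-284 come from the path that re-establishes a page-table entry once migration has finished. A minimal sketch of that pattern, assuming page_vma_mapped_walk() has positioned pvmw at a migration entry for the old page and that new is the destination page (names taken from the matches above; locking, the hugetlb branch, and error handling omitted):

while (page_vma_mapped_walk(&pvmw)) {
	/* Build a PTE for the destination page with the VMA's protections. */
	pte_t pte = mk_pte(new, vma->vm_page_prot);

	/* Install it at the address the walk resolved for this mapping. */
	set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);

	/* Hook the page back into the anonymous reverse map ... */
	page_add_anon_rmap(new, vma, pvmw.address, false);

	/* ... and let the architecture refresh its MMU caches. */
	update_mmu_cache(vma, pvmw.address, pvmw.pte);
}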
346 unsigned long address)
349 pte_t *ptep = pte_offset_map(pmd, address);
892 /* No write method for the address space */
1630 * Resolves the given address to a struct page, isolates it from the LRU and
1731 * Migrate an array of page addresses onto an array of nodes and fill
1937 * Move a list of pages in the address space of the currently executing
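The comments at 1630-1937 describe the kernel side of the move_pages(2) system call, which takes parallel arrays of page addresses, target nodes, and per-page status. A small user-space sketch of driving that interface through libnuma (the single-page buffer and target node 0 are illustrative; compile with -lnuma):

#include <numaif.h>		/* move_pages(), MPOL_MF_MOVE */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	/* One page-aligned, touched page whose backing node we want to change. */
	char *buf = aligned_alloc(page_size, page_size);
	if (!buf)
		return 1;
	buf[0] = 1;

	void *pages[1] = { buf };
	int nodes[1] = { 0 };		/* target NUMA node (illustrative) */
	int status[1];

	/* pid 0 means the currently executing process, as in the comment above. */
	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == -1) {
		perror("move_pages");
		return 1;
	}
	printf("page now on node %d\n", status[0]);
	free(buf);
	return 0;
}

Passing NULL for the nodes array instead asks the kernel only to report, in status[], which node currently backs each address.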
2149 unsigned long address,
2157 unsigned long start = address & HPAGE_PMD_MASK;
2180 /* flush the cache before copying using the kernel virtual address */
2232 update_mmu_cache_pmd(vma, address, &entry);
2264 update_mmu_cache_pmd(vma, address, &entry);
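The matches between 2149 and 2264 belong to transparent-huge-page migration, where the faulting address is first rounded down to its PMD-sized huge page and the architecture hook is invoked once the new entry is in place. A hedged sketch of that pattern (new_page, page, pmd and the prepared entry are assumed to come from the surrounding fault path; the real function's ordering and locking differ):

unsigned long start = address & HPAGE_PMD_MASK;	/* base of the huge page */

/* flush the cache before copying using the kernel virtual address */
flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
migrate_page_copy(new_page, page);

/* Publish the new huge mapping, then let the architecture refresh its
 * MMU caches for the originally faulting address.
 */
set_pmd_at(vma->vm_mm, start, pmd, entry);
update_mmu_cache_pmd(vma, address, &entry);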
2511 * This will walk the CPU page table. For each virtual address backed by a
2762 * Prepare to migrate a range of virtual addresses by collecting all
2763 * the pages backing each virtual address in the range, saving them inside the
2787 * address. For this the caller simply has to allocate device memory and
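The comments from 2511 onward document the device-memory migration helpers: the CPU page table is walked, every page backing a virtual address in the range is collected into a source array, and the caller then allocates device memory and copies into it before the mappings are switched over. A hedged sketch of how a driver might drive this, assuming the three-step migrate_vma_setup() / migrate_vma_pages() / migrate_vma_finalize() API; the demo_migrate_range() wrapper is hypothetical, field names and required setup flags vary across kernel versions, and the device-side allocation and copy are left as a comment:

#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/slab.h>

static int demo_migrate_range(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long *src, *dst;
	struct migrate_vma args = {};
	int ret = -ENOMEM;

	/* One src/dst slot per virtual page in the range. */
	src = kcalloc(npages, sizeof(*src), GFP_KERNEL);
	dst = kcalloc(npages, sizeof(*dst), GFP_KERNEL);
	if (!src || !dst)
		goto out;

	args.vma = vma;
	args.start = start;
	args.end = end;
	args.src = src;
	args.dst = dst;

	/* Walk the CPU page table, collect, lock and unmap the backing pages. */
	ret = migrate_vma_setup(&args);
	if (ret)
		goto out;

	/*
	 * Here the caller allocates device memory, copies each collected
	 * source page into it, and fills dst[] with migrate_pfn() entries
	 * for the pages it wants installed in their place.
	 */

	migrate_vma_pages(&args);	/* replace the CPU mappings */
	migrate_vma_finalize(&args);	/* unlock and drop the old pages */
out:
	kfree(src);
	kfree(dst);
	return ret;
}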