Lines matching refs:addr (all matches appear to come from fs/proc/task_mmu.c)
510 static int smaps_pte_hole(unsigned long addr, unsigned long end,
517 linear_page_index(vma, addr),
526 static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
531 smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
536 static void smaps_pte_entry(pte_t *pte, unsigned long addr,
547 page = vm_normal_page(vma, addr, ptent);
572 smaps_pte_hole_lookup(addr, walk);
583 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
593 page = vm_normal_page_pmd(vma, addr, *pmd);
617 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
623 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
632 smaps_pmd_entry(pmd, addr, walk);
637 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
642 for (; addr != end; pte++, addr += PAGE_SIZE)
643 smaps_pte_entry(pte, addr, walk);
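
The matches at 623–643 show the canonical smaps page-table walk: try the range as a single huge pmd under the pmd lock, otherwise map and lock the PTE table and visit one page at a time. A minimal sketch of that shape (hypothetical name walk_pte_range_sketch; error paths and config variants omitted):

    static int walk_pte_range_sketch(pmd_t *pmd, unsigned long addr,
                                     unsigned long end, struct mm_walk *walk)
    {
            struct vm_area_struct *vma = walk->vma;
            spinlock_t *ptl;
            pte_t *pte;

            ptl = pmd_trans_huge_lock(pmd, vma);    /* non-NULL: huge pmd, now locked */
            if (ptl) {
                    smaps_pmd_entry(pmd, addr, walk);       /* covers PMD_SIZE at once */
                    spin_unlock(ptl);
                    goto out;
            }

            pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
            if (!pte)                       /* newer kernels: the table can vanish */
                    goto out;
            for (; addr != end; pte++, addr += PAGE_SIZE)
                    smaps_pte_entry(pte, addr, walk);
            pte_unmap_unlock(pte - 1, ptl);
    out:
            cond_resched();
            return 0;
    }
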
735 unsigned long addr, unsigned long end,
744 page = vm_normal_page(vma, addr, ptent);
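
smaps_pte_entry()/smaps_pmd_entry() and the hugetlb variant at 735–744 accumulate the counters printed by /proc/<pid>/smaps. A runnable userspace spot-check (plain C, nothing assumed beyond the procfs file):

    #include <stdio.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/proc/self/smaps", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* each mapping is followed by the fields the walkers above
               compute: Rss:, Pss:, Swap:, ... */
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }
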
1093 static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1103 page = vm_normal_page(vma, addr, pte);
1110 unsigned long addr, pte_t *pte)
1123 if (pte_is_pinned(vma, addr, ptent))
1125 old_pte = ptep_modify_prot_start(vma, addr, pte);
1128 ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
1131 set_pte_at(vma->vm_mm, addr, pte, ptent);
1136 unsigned long addr, pte_t *pte)
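
Lines 1110–1131 are the per-pte half of soft-dirty clearing: a present pte is rewritten through the modify_prot API (so the update cannot lose concurrent hardware A/D bit changes), while a swap pte just clears the swap copy of the bit. Reconstructed shape, assuming this is clear_soft_dirty() from fs/proc/task_mmu.c:

    static void clear_soft_dirty_sketch(struct vm_area_struct *vma,
                                        unsigned long addr, pte_t *pte)
    {
            pte_t ptent = ptep_get(pte);

            if (pte_present(ptent)) {
                    pte_t old_pte;

                    if (pte_is_pinned(vma, addr, ptent))
                            return;         /* pinned pages keep their state */
                    old_pte = ptep_modify_prot_start(vma, addr, pte);
                    ptent = pte_wrprotect(old_pte);         /* next write faults */
                    ptent = pte_clear_soft_dirty(ptent);
                    ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
            } else if (is_swap_pte(ptent)) {
                    ptent = pte_swp_clear_soft_dirty(ptent);
                    set_pte_at(vma->vm_mm, addr, pte, ptent);
            }
    }
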
1143 unsigned long addr, pmd_t *pmdp)
1149 old = pmdp_invalidate(vma, addr, pmdp);
1158 set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1161 set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1166 unsigned long addr, pmd_t *pmdp)
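
The pmd-level variant at 1143–1166 cannot edit a live huge pmd in place, so it invalidates first and rebuilds the entry; the two set_pmd_at() matches (1158, 1161) are the present and migration-entry branches. A hedged sketch:

    static void clear_soft_dirty_pmd_sketch(struct vm_area_struct *vma,
                                            unsigned long addr, pmd_t *pmdp)
    {
            pmd_t old, pmd = *pmdp;

            if (pmd_present(pmd)) {
                    old = pmdp_invalidate(vma, addr, pmdp); /* flush + mark invalid */
                    if (pmd_dirty(old))
                            pmd = pmd_mkdirty(pmd);
                    if (pmd_young(old))
                            pmd = pmd_mkyoung(pmd);
                    pmd = pmd_wrprotect(pmd);
                    pmd = pmd_clear_soft_dirty(pmd);
                    set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
            } else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
                    pmd = pmd_swp_clear_soft_dirty(pmd);
                    set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
            }
    }
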
1171 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
1183 clear_soft_dirty_pmd(vma, addr, pmd);
1193 pmdp_test_and_clear_young(vma, addr, pmd);
1201 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1206 for (; addr != end; pte++, addr += PAGE_SIZE) {
1210 clear_soft_dirty(vma, addr, pte);
1217 page = vm_normal_page(vma, addr, ptent);
1222 ptep_test_and_clear_young(vma, addr, pte);
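
clear_refs_pte_range() (1171–1222) runs when userspace writes to /proc/<pid>/clear_refs; per Documentation/admin-guide/mm/soft-dirty.rst, writing "4" selects the soft-dirty path above. Runnable example:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* "4" = CLEAR_REFS_SOFT_DIRTY: every VMA is walked through
               clear_refs_pte_range(); "1".."3" clear the referenced/young
               bits for all, anonymous, or file-backed pages instead. */
            int fd = open("/proc/self/clear_refs", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, "4", 1) != 1)
                    perror("write");
            close(fd);
            return 0;
    }
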
1370 static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
1383 unsigned long addr = start;
1386 while (addr < end) {
1387 struct vm_area_struct *vma = find_vma(walk->mm, addr);
1397 for (; addr < hole_end; addr += PAGE_SIZE) {
1398 err = add_to_pagemap(addr, &pme, pm);
1409 for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1410 err = add_to_pagemap(addr, &pme, pm);
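
pagemap_pte_hole() (1370–1410) still emits one entry per page where nothing is mapped: empty entries for gaps between VMAs, then per-VMA flags (e.g. soft-dirty) for in-VMA pages that merely lack page tables. Reconstructed shape; make_pme() and PM_SOFT_DIRTY are local helpers of this file:

    static int pagemap_pte_hole_sketch(unsigned long start, unsigned long end,
                                       int depth, struct mm_walk *walk)
    {
            struct pagemapread *pm = walk->private;
            unsigned long addr = start;
            int err = 0;

            while (addr < end) {
                    struct vm_area_struct *vma = find_vma(walk->mm, addr);
                    pagemap_entry_t pme = make_pme(0, 0);
                    unsigned long hole_end = vma ? min(end, vma->vm_start) : end;

                    /* pages covered by no VMA at all */
                    for (; addr < hole_end; addr += PAGE_SIZE) {
                            err = add_to_pagemap(addr, &pme, pm);
                            if (err)
                                    return err;
                    }
                    if (!vma)
                            break;
                    /* inside the VMA but unmapped: report VMA-wide state */
                    if (vma->vm_flags & VM_SOFTDIRTY)
                            pme = make_pme(0, PM_SOFT_DIRTY);
                    for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
                            err = add_to_pagemap(addr, &pme, pm);
                            if (err)
                                    return err;
                    }
            }
            return err;
    }
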
1420 struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1430 page = vm_normal_page(vma, addr, pte);
1473 static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1503 ((addr & ~PMD_MASK) >> PAGE_SHIFT);
1516 ((addr & ~PMD_MASK) >> PAGE_SHIFT);
1534 for (; addr != end; addr += PAGE_SIZE) {
1537 err = add_to_pagemap(addr, &pme, pm);
1556 orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1561 for (; addr < end; pte++, addr += PAGE_SIZE) {
1564 pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte));
1565 err = add_to_pagemap(addr, &pme, pm);
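
Both THP branches (1503, 1516) share the same arithmetic: one pmd maps PMD_SIZE bytes, so the low bits of addr select which subpage of the huge page the current 64-bit slot describes. Fragment, in the context of pagemap_pmd_range():

    /* e.g. with 4 KiB pages and addr ending in ...203000 inside a 2 MiB THP:
       (addr & ~PMD_MASK) = 0x3000, >> PAGE_SHIFT = 3, so this slot reports
       the huge page's pfn + 3. */
    unsigned long frame = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
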
1579 unsigned long addr, unsigned long end,
1607 ((addr & ~hmask) >> PAGE_SHIFT);
1612 for (; addr != end; addr += PAGE_SIZE) {
1615 err = add_to_pagemap(addr, &pme, pm);
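
Everything funneled through add_to_pagemap() ends up as one 64-bit record per virtual page in /proc/<pid>/pagemap (format per Documentation/admin-guide/mm/pagemap.rst: bit 63 present, bit 62 swapped, bit 55 soft-dirty, bits 0-54 pfn, with the pfn zeroed for unprivileged readers). A runnable reader:

    #include <fcntl.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            long psize = sysconf(_SC_PAGESIZE);
            uintptr_t vaddr = (uintptr_t)&psize;    /* probe a stack page */
            uint64_t entry;
            int fd = open("/proc/self/pagemap", O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* one 8-byte entry per page, indexed by virtual page number */
            if (pread(fd, &entry, sizeof(entry),
                      (vaddr / psize) * sizeof(entry)) != sizeof(entry)) {
                    perror("pread");
                    return 1;
            }
            printf("present=%d soft-dirty=%d pfn=0x%" PRIx64 "\n",
                   (int)((entry >> 63) & 1), (int)((entry >> 55) & 1),
                   entry & ((1ULL << 55) - 1));
            close(fd);
            return 0;
    }
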
1832 unsigned long addr)
1840 page = vm_normal_page(vma, addr, pte);
1857 unsigned long addr)
1865 page = vm_normal_page_pmd(vma, addr, pmd);
1880 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1894 page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1902 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1909 struct page *page = can_gather_numa_stats(ptent, vma, addr);
1914 } while (pte++, addr += PAGE_SIZE, addr != end);
1921 unsigned long addr, unsigned long end, struct mm_walk *walk)
1939 unsigned long addr, unsigned long end, struct mm_walk *walk)
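
gather_pte_stats() (1880–1914) drives /proc/<pid>/numa_maps: each pte is resolved to a page and counted against its NUMA node. Its inner loop is the do/while ending at 1914, whose comma expression advances both cursors in the condition. Sketch of that loop, assuming the file-local helpers gather_stats() and can_gather_numa_stats() shown above (md being the numa_maps accumulator from walk->private):

    orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
    do {
            pte_t ptent = ptep_get(pte);
            struct page *page = can_gather_numa_stats(ptent, vma, addr);

            if (page)
                    gather_stats(page, md, pte_dirty(ptent), 1);
    } while (pte++, addr += PAGE_SIZE, addr != end);    /* advance both cursors */
    pte_unmap_unlock(orig_pte, ptl);
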