Lines Matching defs:page
388 * page is divided by the number of processes sharing it. So if a
396 * A shift of 12 before division means (assuming 4K page size):
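
The matched lines above appear to come from the Linux kernel's fs/proc/task_mmu.c (the smaps, clear_refs, pagemap and numa_maps walkers). The two comment lines just quoted describe how PSS is computed: each page is charged divided by the number of processes mapping it, and the division is done in fixed point, scaled up by PSS_SHIFT (12 in this file) so the rounding error stays negligible. A minimal userspace sketch of that arithmetic, not kernel code; the 1000-private / 1000-shared figures mirror the example in the source comment:

    #include <stdio.h>

    #define PSS_SHIFT 12
    #define PAGE_SIZE 4096ULL

    int main(void)
    {
        unsigned long long pss = 0;

        /* 1000 pages mapped only by this process, full weight each. */
        pss += 1000 * (PAGE_SIZE << PSS_SHIFT) / 1;
        /* 1000 pages shared with one other process, half weight each. */
        pss += 1000 * (PAGE_SIZE << PSS_SHIFT) / 2;

        /* Shift the fixed-point total back down before reporting:
         * 1500 pages' worth, i.e. this prints "Pss: 6000 kB". */
        printf("Pss: %llu kB\n", (pss >> PSS_SHIFT) / 1024);
        return 0;
    }
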
429 struct page *page, unsigned long size, unsigned long pss,
434 if (PageAnon(page))
436 else if (PageSwapBacked(page))
444 if (dirty || PageDirty(page)) {
457 static void smaps_account(struct mem_size_stats *mss, struct page *page,
461 int i, nr = compound ? compound_nr(page) : 1;
466 * of the compound page.
468 if (PageAnon(page)) {
470 if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
476 if (young || page_is_young(page) || PageReferenced(page))
481 * differ page-by-page.
483 * page_count(page) == 1 guarantees the page is mapped exactly once.
484 * If any subpage of the compound page mapped with PTE it would elevate
488 * Without holding the page lock this snapshot can be slightly wrong as
490 * call page_mapcount() even with PTL held if the page is not mapped,
494 if ((page_count(page) == 1) || migration) {
495 smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
499 for (i = 0; i < nr; i++, page++) {
500 int mapcount = page_mapcount(page);
504 smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
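
Taken together, the smaps_account() fragments above implement a two-path scheme: when page_count(page) == 1 (or the entry is a migration entry, which is treated as mapcount 1), the whole mapping is charged at full PSS weight in a single call; otherwise every subpage of the compound page is charged PAGE_SIZE weighted by its own page_mapcount(). A condensed reconstruction of that tail of the function, pieced together from the matched lines and the upstream source they come from (kernel context, not standalone code; the size-only accounting of anonymous/resident/referenced shown earlier is omitted):

        if ((page_count(page) == 1) || migration) {
                /* Mapped exactly once: full size, full PSS weight. */
                smaps_page_accumulate(mss, page, size, size << PSS_SHIFT,
                                      dirty, locked, true);
                return;
        }
        for (i = 0; i < nr; i++, page++) {
                /* Shared (or possibly shared): weigh each subpage by its
                 * own map count, as described in the PSS comment above. */
                int mapcount = page_mapcount(page);
                unsigned long pss = PAGE_SIZE << PSS_SHIFT;

                if (mapcount >= 2)
                        pss /= mapcount;
                smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty,
                                      locked, mapcount < 2);
        }
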
530 struct page *page = NULL;
534 page = vm_normal_page(vma, addr, *pte);
555 page = migration_entry_to_page(swpent);
557 page = device_private_entry_to_page(swpent);
560 page = xa_load(&vma->vm_file->f_mapping->i_pages,
562 if (xa_is_value(page))
567 if (!page)
570 smaps_account(mss, page, false, young, dirty, locked, migration);
580 struct page *page = NULL;
584 /* FOLL_DUMP will return -EFAULT on huge zero page */
585 page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
591 page = migration_entry_to_page(entry);
594 if (IS_ERR_OR_NULL(page))
596 if (PageAnon(page))
598 else if (PageSwapBacked(page))
600 else if (is_zone_device_page(page))
605 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
729 struct page *page = NULL;
732 page = vm_normal_page(vma, addr, *pte);
737 page = migration_entry_to_page(swpent);
739 page = device_private_entry_to_page(swpent);
741 if (page) {
742 if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
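
For hugetlb mappings the walker above resolves the backing page (present PTE, migration entry, or device-private entry) and then classifies the entire huge page as shared or private. To the best of my reading of the upstream function these fragments come from, the condition is used roughly like this (kernel context, not standalone):

        if (page) {
                if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
                        mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
                else
                        mss->private_hugetlb += huge_page_size(hstate_vma(vma));
        }

Note the unit is the huge page size rather than PAGE_SIZE, so a single entry adds a full 2 MB or 1 GB to the Shared_Hugetlb/Private_Hugetlb fields reported in smaps.
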
789 * object, so we have to distinguish them during the page walk.
1083 struct page *page;
1091 page = vm_normal_page(vma, addr, pte);
1092 if (!page)
1094 return page_maybe_dma_pinned(page);
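
pte_is_pinned(), whose tail is matched above, reports whether the page behind a writable COW PTE may be pinned for DMA (page_maybe_dma_pinned()). Its caller in the soft-dirty clearing path uses it to skip write-protecting such pages, so a later copy-on-write fault cannot separate the pinned page from the mapping. A rough sketch of that use, under the assumption that the caller is clear_soft_dirty() (details such as ptep_modify_prot_start/commit are omitted here):

        if (pte_is_pinned(vma, addr, ptent))
                return;                 /* leave possibly-pinned COW pages writable */
        ptent = pte_wrprotect(ptent);
        ptent = pte_clear_soft_dirty(ptent);
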
1166 struct page *page;
1178 page = pmd_page(*pmd);
1182 test_and_clear_page_young(page);
1183 ClearPageReferenced(page);
1204 page = vm_normal_page(vma, addr, ptent);
1205 if (!page)
1210 test_and_clear_page_young(page);
1211 ClearPageReferenced(page);
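
The clear_refs_pte_range() fragments above clear the young/referenced state of both THP (via pmd_page()) and ordinary pages. From userspace this is driven by writing to /proc/<pid>/clear_refs; a typical pattern is to clear the bits, let the workload run, then read the Referenced: lines from /proc/<pid>/smaps to estimate the working set over that interval. A small usage sketch (writing "1" clears the bits for all pages of the target task):

    #include <stdio.h>
    #include <sys/types.h>

    /* Reset the referenced/young bits for every page mapped by `pid`. */
    static int clear_refs_all(pid_t pid)
    {
        char path[64];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%d/clear_refs", (int)pid);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fputs("1", f);
        return fclose(f) ? -1 : 0;
    }
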
1408 struct page *page = NULL;
1415 page = vm_normal_page(vma, addr, pte);
1429 page = migration_entry_to_page(entry);
1433 page = device_private_entry_to_page(entry);
1436 if (page && !PageAnon(page))
1438 if (page && !migration && page_mapcount(page) == 1)
1461 struct page *page = NULL;
1467 page = pmd_page(pmd);
1492 page = migration_entry_to_page(entry);
1496 if (page && !migration && page_mapcount(page) == 1)
1557 struct page *page = pte_page(pte);
1559 if (!PageAnon(page))
1562 if (page_mapcount(page) == 1)
1598 * For each page in the address space, this file contains one 64-bit entry
1601 * Bits 0-54 page frame number (PFN) if present
1605 * Bit 56 page exclusively mapped
1607 * Bit 61 page is file-page or shared-anon
1608 * Bit 62 page swapped
1609 * Bit 63 page present
1611 * If the page is not present but in swap, then the PFN contains an
1612 * encoding of the swap file number and the page's offset into the
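
The bit layout quoted above (together with the PM_FILE / PM_MMAP_EXCLUSIVE logic in the walker fragments earlier, which feed bits 61 and 56) can be consumed from userspace by seeking /proc/<pid>/pagemap to (vaddr / page_size) * 8 and reading one 64-bit entry per virtual page. A self-contained decoder sketch for one page of the calling process (the PFN field reads back as zero without CAP_SYS_ADMIN on modern kernels):

    #include <fcntl.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
        long psize = sysconf(_SC_PAGESIZE);
        char *buf = malloc(psize);
        uint64_t entry;
        int fd;

        if (!buf)
            return 1;
        buf[0] = 1;                             /* fault the page in */

        fd = open("/proc/self/pagemap", O_RDONLY);
        if (fd < 0)
            return 1;
        if (pread(fd, &entry, sizeof(entry),
                  (off_t)((uintptr_t)buf / psize) * sizeof(entry)) != sizeof(entry))
            return 1;

        printf("present      : %d\n", (int)((entry >> 63) & 1));   /* bit 63 */
        printf("swapped      : %d\n", (int)((entry >> 62) & 1));   /* bit 62 */
        printf("file/shared  : %d\n", (int)((entry >> 61) & 1));   /* bit 61 */
        printf("excl. mapped : %d\n", (int)((entry >> 56) & 1));   /* bit 56 */
        printf("pfn          : 0x%" PRIx64 "\n",
               entry & ((UINT64_C(1) << 55) - 1));                 /* bits 0-54 */
        close(fd);
        return 0;
    }
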
1756 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1759 int count = page_mapcount(page);
1762 if (pte_dirty || PageDirty(page))
1765 if (PageSwapCache(page))
1768 if (PageActive(page) || PageUnevictable(page))
1771 if (PageWriteback(page))
1774 if (PageAnon(page))
1780 md->node[page_to_nid(page)] += nr_pages;
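
gather_stats() feeds /proc/<pid>/numa_maps: for each examined page it bumps category counters (anon, dirty, swapcache, active, writeback, mapped) and a per-node counter indexed by page_to_nid(). Those counters surface as the anon=, dirty=, mapped=, mapmax=, swapcache=, active=, writeback= and N<node>=<pages> fields of each numa_maps line; a purely illustrative, made-up example line:

    7f2a5c000000 default file=/usr/lib/libc.so.6 mapped=120 mapmax=42 N0=80 N1=40 kernelpagesize_kB=4
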
1783 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1786 struct page *page;
1792 page = vm_normal_page(vma, addr, pte);
1793 if (!page)
1796 if (PageReserved(page))
1799 nid = page_to_nid(page);
1803 return page;
1807 static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
1811 struct page *page;
1817 page = vm_normal_page_pmd(vma, addr, pmd);
1818 if (!page)
1821 if (PageReserved(page))
1824 nid = page_to_nid(page);
1828 return page;
1844 struct page *page;
1846 page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1847 if (page)
1848 gather_stats(page, md, pmd_dirty(*pmd),
1859 struct page *page = can_gather_numa_stats(*pte, vma, addr);
1860 if (!page)
1862 gather_stats(page, md, pte_dirty(*pte), 1);
1875 struct page *page;
1880 page = pte_page(huge_pte);
1881 if (!page)
1885 gather_stats(page, md, pte_dirty(huge_pte), 1);