Lines Matching defs:page

383 * page is divided by the number of processes sharing it.  So if a
391 * A shift of 12 before division means (assuming 4K page size):
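
These two comment fragments describe the fixed-point scheme behind PSS: the page size is shifted left by PSS_SHIFT before being divided among the processes sharing the page, and the accumulated total is shifted back down when it is reported. As a worked example of that arithmetic (my numbers, using the 4K page size and shift of 12 named in the comment): a page shared by three processes contributes (4096 << 12) / 3 = 5,592,405 fixed-point units, which reads back as 5,592,405 >> 12 = 1365 bytes against an exact share of 1365.33, so the truncation error stays below 1/4096 of a byte per page.
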
425 struct page *page, unsigned long size, unsigned long pss,
430 if (PageAnon(page))
432 else if (PageSwapBacked(page))
440 if (dirty || PageDirty(page)) {
454 static void smaps_account(struct mem_size_stats *mss, struct page *page,
458 int i, nr = compound ? compound_nr(page) : 1;
463 * of the compound page.
465 if (PageAnon(page)) {
467 if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
471 if (PageKsm(page))
476 if (young || page_is_young(page) || PageReferenced(page))
481 * differ page-by-page.
483 * page_count(page) == 1 guarantees the page is mapped exactly once.
484 * If any subpage of the compound page is mapped with a PTE, it would elevate
488 * Without holding the page lock this snapshot can be slightly wrong as
490 * call page_mapcount() even with PTL held if the page is not mapped,
494 if ((page_count(page) == 1) || migration) {
495 smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
499 for (i = 0; i < nr; i++, page++) {
500 int mapcount = page_mapcount(page);
504 smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
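
The smaps_account() fragments above show the two accounting paths: when page_count() == 1 (or the entry is a migration entry) the whole mapping size is charged to this process at full PSS weight, otherwise each subpage of the compound page is charged PAGE_SIZE divided by its own mapcount. Below is a minimal, self-contained userspace model of that split; the helper name and the example mapcounts are mine, and only the accounting rule mirrors the source.

#include <stdio.h>

#define PSS_SHIFT 12
#define PAGE_SZ   4096ULL	/* stand-in for PAGE_SIZE */

/* Model: a compound page is just an array of per-subpage mapcounts. */
static unsigned long long model_pss(const unsigned int *mapcount, int nr,
				    int whole_page_count)
{
	unsigned long long pss = 0;
	int i;

	/* page_count() == 1 guarantees a single mapping: charge everything */
	if (whole_page_count == 1)
		return nr * (PAGE_SZ << PSS_SHIFT);

	/* otherwise each subpage's share is divided by its own mapcount */
	for (i = 0; i < nr; i++) {
		unsigned long long share = PAGE_SZ << PSS_SHIFT;

		if (mapcount[i] >= 2)
			share /= mapcount[i];
		pss += share;
	}
	return pss;
}

int main(void)
{
	unsigned int mc[4] = { 1, 2, 2, 4 };	/* per-subpage mapcounts */

	/* 4096 + 2048 + 2048 + 1024 = 9216 bytes of proportional share */
	printf("pss = %llu bytes\n", model_pss(mc, 4, 2) >> PSS_SHIFT);
	return 0;
}
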
542 struct page *page = NULL;
547 page = vm_normal_page(vma, addr, ptent);
569 page = pfn_swap_entry_to_page(swpent);
576 if (!page)
579 smaps_account(mss, page, false, young, dirty, locked, migration);
589 struct page *page = NULL;
593 page = vm_normal_page_pmd(vma, addr, *pmd);
599 page = pfn_swap_entry_to_page(entry);
602 if (IS_ERR_OR_NULL(page))
604 if (PageAnon(page))
606 else if (PageSwapBacked(page))
608 else if (is_zone_device_page(page))
613 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
740 struct page *page = NULL;
744 page = vm_normal_page(vma, addr, ptent);
749 page = pfn_swap_entry_to_page(swpent);
751 if (page) {
752 if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
798 * object, so we have to distinguish them during the page walk.
1095 struct page *page;
1103 page = vm_normal_page(vma, addr, pte);
1104 if (!page)
1106 return page_maybe_dma_pinned(page);
1178 struct page *page;
1190 page = pmd_page(*pmd);
1194 test_and_clear_page_young(page);
1195 ClearPageReferenced(page);
1217 page = vm_normal_page(vma, addr, ptent);
1218 if (!page)
1223 test_and_clear_page_young(page);
1224 ClearPageReferenced(page);
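
The test_and_clear_page_young()/ClearPageReferenced() calls above are what service a write to /proc/<pid>/clear_refs: clear the referenced/young bits, let the workload run, then re-read smaps to see which pages were touched since. A minimal sketch of that usage from userspace (error handling kept deliberately short):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/self/clear_refs", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* writing "1" clears the referenced/young bits on all mapped pages */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
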
1423 struct page *page = NULL;
1430 page = vm_normal_page(vma, addr, pte);
1458 page = pfn_swap_entry_to_page(entry);
1463 if (page && !PageAnon(page))
1465 if (page && !migration && page_mapcount(page) == 1)
1488 struct page *page = NULL;
1494 page = pmd_page(pmd);
1527 page = pfn_swap_entry_to_page(entry);
1531 if (page && !migration && page_mapcount(page) == 1)
1593 struct page *page = pte_page(pte);
1595 if (!PageAnon(page))
1598 if (page_mapcount(page) == 1)
1640 * For each page in the address space, this file contains one 64-bit entry
1643 * Bits 0-54 page frame number (PFN) if present
1647 * Bit 56 page exclusively mapped
1650 * Bit 61 page is file-page or shared-anon
1651 * Bit 62 page swapped
1652 * Bit 63 page present
1654 * If the page is not present but in swap, then the PFN contains an
1655 * encoding of the swap file number and the page's offset into the
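
The comment fragments above give the /proc/<pid>/pagemap entry layout. A minimal sketch of a userspace reader that decodes one virtual address using exactly the bits listed; the variable names and the choice to inspect the current process are mine, and on recent kernels the PFN field reads back as zero without CAP_SYS_ADMIN.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	/* inspect the page holding argv by default, or a user-supplied address */
	unsigned long vaddr = argc > 1 ? strtoul(argv[1], NULL, 0)
				       : (unsigned long)argv;
	long psize = sysconf(_SC_PAGESIZE);
	uint64_t ent;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* one 64-bit entry per virtual page, indexed by virtual page number */
	if (pread(fd, &ent, sizeof(ent),
		  (off_t)(vaddr / psize * sizeof(ent))) != sizeof(ent)) {
		perror("pread");
		return 1;
	}
	close(fd);

	printf("present:             %d\n", (int)(ent >> 63) & 1);
	printf("swapped:             %d\n", (int)(ent >> 62) & 1);
	printf("file or shared-anon: %d\n", (int)(ent >> 61) & 1);
	printf("exclusively mapped:  %d\n", (int)(ent >> 56) & 1);
	printf("pfn (bits 0-54):     0x%llx\n",
	       (unsigned long long)(ent & ((1ULL << 55) - 1)));
	return 0;
}
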
1804 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1807 int count = page_mapcount(page);
1810 if (pte_dirty || PageDirty(page))
1813 if (PageSwapCache(page))
1816 if (PageActive(page) || PageUnevictable(page))
1819 if (PageWriteback(page))
1822 if (PageAnon(page))
1828 md->node[page_to_nid(page)] += nr_pages;
1831 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1834 struct page *page;
1840 page = vm_normal_page(vma, addr, pte);
1841 if (!page || is_zone_device_page(page))
1844 if (PageReserved(page))
1847 nid = page_to_nid(page);
1851 return page;
1855 static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
1859 struct page *page;
1865 page = vm_normal_page_pmd(vma, addr, pmd);
1866 if (!page)
1869 if (PageReserved(page))
1872 nid = page_to_nid(page);
1876 return page;
1892 struct page *page;
1894 page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1895 if (page)
1896 gather_stats(page, md, pmd_dirty(*pmd),
1909 struct page *page = can_gather_numa_stats(ptent, vma, addr);
1910 if (!page)
1912 gather_stats(page, md, pte_dirty(ptent), 1);
1925 struct page *page;
1930 page = pte_page(huge_pte);
1933 gather_stats(page, md, pte_dirty(huge_pte), 1);
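
The numa_maps fragments above (gather_stats() and the pte/pmd/hugetlb walkers that call it) classify each page found during the walk and add nr_pages to a per-node counter, which /proc/<pid>/numa_maps reports as N<node>= values alongside anon/dirty/swapcache/active/writeback counts. A compact userspace model of that classification step; the structs, field names, and the fixed node count are illustrative, and only the tallying mirrors the listed lines.

#include <stdio.h>
#include <string.h>

#define MAX_NODES 4	/* illustrative; the kernel sizes its array by MAX_NUMNODES */

/* Counters in the spirit of what gather_stats() fills in */
struct md_model {
	unsigned long anon, dirty, swapcache, active, writeback;
	unsigned long mapcount_max;
	unsigned long node[MAX_NODES];
};

/* One page as the walk sees it: its node plus the flags tested above */
struct page_model {
	int nid, mapcount;
	int pte_dirty, page_dirty, swapcache, active, unevictable, writeback, anon;
	unsigned long nr_pages;	/* > 1 for a huge page */
};

static void gather_model(struct md_model *md, const struct page_model *p)
{
	if (p->pte_dirty || p->page_dirty)
		md->dirty += p->nr_pages;
	if (p->swapcache)
		md->swapcache += p->nr_pages;
	if (p->active || p->unevictable)
		md->active += p->nr_pages;
	if (p->writeback)
		md->writeback += p->nr_pages;
	if (p->anon)
		md->anon += p->nr_pages;
	if ((unsigned long)p->mapcount > md->mapcount_max)
		md->mapcount_max = p->mapcount;
	md->node[p->nid] += p->nr_pages;	/* the per-node N<n>= tally */
}

int main(void)
{
	struct md_model md;
	struct page_model p = { .nid = 1, .mapcount = 2, .anon = 1, .nr_pages = 1 };

	memset(&md, 0, sizeof(md));
	gather_model(&md, &p);
	printf("anon=%lu N1=%lu\n", md.anon, md.node[1]);
	return 0;
}
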