Lines Matching defs:page

18  * Native page reclaim
20 * Lockless page tracking & accounting
35 #include <linux/page-flags.h>
282 * This page will be uncharged in obj_cgroup_release().
523 * mem_cgroup_css_from_page - css of the memcg associated with a page
524 * @page: page of interest
527 * with @page is returned. The returned css remains associated with @page
533 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
537 memcg = page->mem_cgroup;
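As a rough illustration of how a caller could use mem_cgroup_css_from_page(), here is a minimal sketch; the wrapper name and its reporting purpose are made up, only the two calls shown are real kernel APIs:

#include <linux/memcontrol.h>
#include <linux/cgroup.h>

/* Hypothetical helper: report the cgroup inode behind a page's memcg css. */
static ino_t page_memcg_css_ino(struct page *page)
{
	struct cgroup_subsys_state *css = mem_cgroup_css_from_page(page);

	/* on a legacy (v1) hierarchy this resolves to root_mem_cgroup's css */
	return cgroup_ino(css->cgroup);
}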
546 * page_cgroup_ino - return inode number of the memcg a page is charged to
547 * @page: the page
549 * Look up the closest online ancestor of the memory cgroup @page is charged to
550 * and return its inode number or 0 if @page is not charged to any cgroup. It
551 * is safe to call this function without holding a reference to @page.
558 ino_t page_cgroup_ino(struct page *page)
564 memcg = page->mem_cgroup;
569 * In this case the page is shared and doesn't belong
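For illustration, a sketch of a debugging-style caller; the helper name is hypothetical, and it mirrors the kind of lookup /proc/kpagecgroup performs:

/* Hypothetical: log which memcg inode a page is charged to (0 = none). */
static void dbg_page_cgroup(struct page *page)
{
	ino_t ino = page_cgroup_ino(page);	/* safe without a page reference */

	pr_info("page %px -> memcg inode %lu\n", page, (unsigned long)ino);
}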
584 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
586 int nid = page_to_nid(page);
598 soft_limit_tree_from_page(struct page *page)
600 int nid = page_to_nid(page);
688 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
694 mctz = soft_limit_tree_from_page(page);
702 mz = mem_cgroup_page_nodeinfo(memcg, page);
960 struct page *page,
963 /* pagein of a big page is an event. So, ignore page size */
1003 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
1014 mem_cgroup_update_tree(memcg, page);
1022 * if it races with swapoff, page migration, etc.
1068 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
1069 * @page: page from which memcg should be extracted.
1071 * Obtain a reference on page->mem_cgroup and return it if successful. Otherwise
1074 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
1076 struct mem_cgroup *memcg = page->mem_cgroup;
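A minimal sketch of the expected pairing, assuming the caller only needs the memcg briefly (the helper name is invented):

static void with_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg = get_mem_cgroup_from_page(page);

	if (!memcg)		/* memcg accounting disabled */
		return;
	/* ... inspect or account against memcg here ... */
	css_put(&memcg->css);	/* drop the reference taken above */
}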
1353 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
1354 * @page: the page
1355 * @pgdat: pgdat of the page
1357 * This function relies on page->mem_cgroup being stable - see the
1360 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
1372 if (page_is_file_lru(page) &&
1373 !is_prot_page(page)) {
1378 memcg = page->mem_cgroup;
1386 mz = mem_cgroup_page_nodeinfo(memcg, page);
1400 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1402 * @lru: index of lru list the page is sitting on
1406 * This function must be called under lru_lock, just before a page is added
1407 * to or just after a page is removed from an lru list (that ordering being
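A sketch of the two helpers working together, assuming this kernel still uses the node-wide pgdat->lru_lock; it loosely mirrors what add_page_to_lru_list() does, and the function name is made up:

static void sketch_lru_add(struct page *page, enum lru_list lru)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	spin_lock_irq(&pgdat->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, pgdat);
	/* grow the per-memcg LRU size before linking the page */
	mem_cgroup_update_lru_size(lruvec, lru, page_zonenum(page),
				   thp_nr_pages(page));
	list_add(&page->lru, &lruvec->lists[lru]);
	spin_unlock_irq(&pgdat->lru_lock);
}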
1645 /* The above should easily fit into one page */
1966 * the task to sleep at the end of the page fault when all locks are
2011 * This has to be called at the end of a page fault if the memcg OOM
2019 * the end of the page fault to complete the OOM handling.
2142 * lock_page_memcg - lock a page->mem_cgroup binding
2143 * @page: the page
2149 * for the lifetime of the page; __unlock_page_memcg() is available
2150 * when @page might get freed inside the locked section.
2152 struct mem_cgroup *lock_page_memcg(struct page *page)
2154 struct page *head = compound_head(page); /* rmap on tail pages */
2161 * because page moving starts with an RCU grace period.
2164 * the page state that is going to change is the only thing
2165 * preventing the page itself from being freed. E.g. writeback
2166 * doesn't hold a page reference and relies on PG_writeback to
2189 * unlocked page stat updates happening concurrently. Track
2220 * unlock_page_memcg - unlock a page->mem_cgroup binding
2221 * @page: the page
2223 void unlock_page_memcg(struct page *page)
2225 struct page *head = compound_head(page);
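The usual pairing looks roughly like this sketch, loosely modeled on the dirty accounting paths; the stat chosen here is only an example and the function name is invented:

static void sketch_page_stat_update(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = lock_page_memcg(page);	/* pins the page->mem_cgroup binding */
	__inc_lruvec_page_state(page, NR_FILE_DIRTY);
	__unlock_page_memcg(memcg);	/* usable even if @page may already be freed */
}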
2548 * it was a threshold of 1 page
2649 * This is distinct from memory.max or page allocator behaviour because
2650 * memory.high is currently batched, whereas memory.max and the page
2904 static void commit_charge(struct page *page, struct mem_cgroup *memcg)
2906 VM_BUG_ON_PAGE(page->mem_cgroup, page);
2908 * Any of the following ensures page->mem_cgroup stability:
2910 * - the page lock
2915 page->mem_cgroup = memcg;
2927 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
2930 unsigned int objects = objs_per_slab_page(s, page);
2935 page_to_nid(page));
2939 if (cmpxchg(&page->obj_cgroups, NULL,
2956 struct page *page;
2961 page = virt_to_head_page(p);
2964 * If page->mem_cgroup is set, it's either a simple mem_cgroup pointer
2967 * The page->mem_cgroup pointer can be asynchronously changed
2971 if (!page->mem_cgroup)
2975 * Slab objects are accounted individually, not per-page.
2977 * the page->obj_cgroups.
2979 if (page_has_obj_cgroups(page)) {
2983 off = obj_to_index(page->slab_cache, page, p);
2984 objcg = page_obj_cgroups(page)[off];
2991 /* All other pages use page->mem_cgroup */
2992 return page->mem_cgroup;
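These lines sit inside the object-to-memcg lookup (mem_cgroup_from_obj() in this kernel); a caller-side sketch, assuming the object came from a __GFP_ACCOUNT allocation and stays allocated for the duration:

static bool obj_charged_to(void *obj, struct mem_cgroup *memcg)
{
	/* resolves via page->obj_cgroups for slab, page->mem_cgroup otherwise */
	return mem_cgroup_from_obj(obj) == memcg;
}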
3113 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3114 * @page: page to charge
3120 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3129 page->mem_cgroup = memcg;
3130 __SetPageKmemcg(page);
3139 * __memcg_kmem_uncharge_page: uncharge a kmem page
3140 * @page: page to uncharge
3143 void __memcg_kmem_uncharge_page(struct page *page, int order)
3145 struct mem_cgroup *memcg = page->mem_cgroup;
3151 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3153 page->mem_cgroup = NULL;
3157 if (PageKmemcg(page))
3158 __ClearPageKmemcg(page);
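These two hooks are normally reached through __GFP_ACCOUNT rather than called directly; a caller-level sketch with a hypothetical helper name:

static void *sketch_accounted_page(void)
{
	/* the page allocator charges this page to the current memcg ... */
	struct page *page = alloc_page(GFP_KERNEL_ACCOUNT);

	if (!page)
		return NULL;
	/* ... and __free_page() on it later uncharges it again */
	return page_address(page);
}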
3309 void split_page_memcg(struct page *head, unsigned int nr)
4677 * tracks ownership per-page while the latter per-inode. This was a
4678 * deliberate design decision because honoring per-page ownership in the
4704 * page - a page whose memcg and writeback ownerships don't match - is
4706 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
4717 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4720 struct mem_cgroup *memcg = page->mem_cgroup;
4727 trace_track_foreign_dirty(page, wb);
5183 * Swap-out records and page cache shadow entries need to store memcg
5189 * the cgroup has been destroyed, such as page cache or reclaimable
5645 struct page *page;
5656 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5659 struct page *page = vm_normal_page(vma, addr, ptent);
5661 if (!page || !page_mapped(page))
5663 if (PageAnon(page)) {
5670 if (!get_page_unless_zero(page))
5673 return page;
5677 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5680 struct page *page = NULL;
5687 * Handle MEMORY_DEVICE_PRIVATE, which are ZONE_DEVICE pages belonging to
5689 * as special swap entries in the CPU page table.
5692 page = device_private_entry_to_page(ent);
5694 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
5695 * a refcount of 1 when free (unlike a normal page)
5697 if (!page_ref_add_unless(page, 1, 1))
5699 return page;
5709 page = find_get_page(swap_address_space(ent), swp_offset(ent));
5712 return page;
5715 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5722 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5730 /* The page is moved even if it's not RSS of this task (page-faulted). */
5731 /* shmem/tmpfs may report page out on swap: account for that too. */
5737 * mem_cgroup_move_account - move account of the page
5738 * @page: the page
5739 * @compound: charge the page as compound or small page
5740 * @from: mem_cgroup which the page is moved from.
5741 * @to: mem_cgroup which the page is moved to. @from != @to.
5743 * The caller must make sure the page is not on LRU (isolate_lru_page() is useful.)
5748 static int mem_cgroup_move_account(struct page *page,
5755 unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
5759 VM_BUG_ON_PAGE(PageLRU(page), page);
5760 VM_BUG_ON(compound && !PageTransHuge(page));
5764 * page->mem_cgroup of its source page while we change it.
5767 if (!trylock_page(page))
5771 if (page->mem_cgroup != from)
5774 pgdat = page_pgdat(page);
5778 lock_page_memcg(page);
5780 if (PageAnon(page)) {
5781 if (page_mapped(page)) {
5784 if (PageTransHuge(page)) {
5794 if (PageSwapBacked(page)) {
5799 if (page_mapped(page)) {
5804 if (PageDirty(page)) {
5805 struct address_space *mapping = page_mapping(page);
5816 if (PageWriteback(page)) {
5824 * It is safe to change page->mem_cgroup here because the page
5827 * that would rely on a stable page->mem_cgroup.
5829 * Note that lock_page_memcg is a memcg lock, not a page lock,
5830 * to save space. As soon as we switch page->mem_cgroup to a
5839 page->mem_cgroup = to;
5846 mem_cgroup_charge_statistics(to, page, nr_pages);
5847 memcg_check_events(to, page);
5848 mem_cgroup_charge_statistics(from, page, -nr_pages);
5849 memcg_check_events(from, page);
5852 unlock_page(page);
5862 * @target: pointer to where the target page or swap entry will be stored (can be NULL)
5866 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5867 * move charge. If @target is not NULL, the page is stored in target->page
5872 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PRIVATE
5873 * (so ZONE_DEVICE page and thus not on the lru).
5874 * For now such a page is charged like a regular page would be, as for all
5876 * regular page.
5886 struct page *page = NULL;
5891 page = mc_handle_present_pte(vma, addr, ptent);
5893 page = mc_handle_swap_pte(vma, ptent, &ent);
5895 page = mc_handle_file_pte(vma, addr, ptent, &ent);
5897 if (!page && !ent.val)
5899 if (page) {
5902 * mem_cgroup_move_account() checks the page is valid or
5905 if (page->mem_cgroup == mc.from) {
5907 if (is_device_private_page(page))
5910 target->page = page;
5913 put_page(page);
5916 * There is a swap entry and a page doesn't exist or isn't charged.
5917 * But we cannot move a tail-page in a THP.
5919 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5937 struct page *page = NULL;
5945 page = pmd_page(pmd);
5946 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5949 if (page->mem_cgroup == mc.from) {
5952 get_page(page);
5953 target->page = page;
5978 * support transparent huge page with MEMORY_DEVICE_PRIVATE but
6172 struct page *page;
6182 page = target.page;
6183 if (!isolate_lru_page(page)) {
6184 if (!mem_cgroup_move_account(page, true,
6189 putback_lru_page(page);
6191 put_page(page);
6193 page = target.page;
6194 if (!mem_cgroup_move_account(page, true,
6199 put_page(page);
6222 page = target.page;
6229 if (PageTransCompound(page))
6231 if (!device && isolate_lru_page(page))
6233 if (!mem_cgroup_move_account(page, false,
6240 putback_lru_page(page);
6241 put: /* get_mctgt_type() gets the page */
6242 put_page(page);
6304 * additional charge, the page walk just aborts.
6868 * mem_cgroup_charge - charge a newly allocated page to a cgroup
6869 * @page: page to charge
6873 * Try to charge @page to the memcg that @mm belongs to, reclaiming
6878 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
6880 unsigned int nr_pages = thp_nr_pages(page);
6887 if (PageSwapCache(page)) {
6888 swp_entry_t ent = { .val = page_private(page), };
6892 * Every swap fault against a single page tries to charge the
6893 * page, bail as early as possible. shmem_unuse() encounters
6894 * already charged pages, too. page->mem_cgroup is protected
6895 * by the page lock, which serializes swap cache removal, which
6898 VM_BUG_ON_PAGE(!PageLocked(page), page);
6899 if (compound_head(page)->mem_cgroup)
6918 commit_charge(page, memcg);
6921 mem_cgroup_charge_statistics(memcg, page, nr_pages);
6922 memcg_check_events(memcg, page);
6927 * new swapcache page, finish the transfer by uncharging the swap
6929 * it can stick around indefinitely and we'd count the page twice
6934 * correspond 1:1 to page and swap slot lifetimes: we charge the
6935 * page to memory here, and uncharge swap when the slot is freed.
6937 if (do_memsw_account() && PageSwapCache(page)) {
6938 swp_entry_t entry = { .val = page_private(page) };
6941 * let's not wait for it. The page already received a
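Caller-side sketch, in the spirit of a fault path that instantiates a new page (the helper name is invented and error handling is trimmed):

static int sketch_charge_new_page(struct page *page, struct mm_struct *mm)
{
	if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
		put_page(page);		/* charge failed, drop the new page */
		return -ENOMEM;
	}
	/* ... map the page; the charge is released when it is finally freed ... */
	return 0;
}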
6958 struct page *dummy_page;
6989 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6993 VM_BUG_ON_PAGE(PageLRU(page), page);
6995 if (!page->mem_cgroup)
7000 * page->mem_cgroup at this point, we have fully
7001 * exclusive access to the page.
7004 if (ug->memcg != page->mem_cgroup) {
7009 ug->memcg = page->mem_cgroup;
7015 nr_pages = compound_nr(page);
7018 if (!PageKmemcg(page)) {
7022 __ClearPageKmemcg(page);
7025 ug->dummy_page = page;
7026 page->mem_cgroup = NULL;
7038 * Note that the list can be a single page->lru; hence the
7043 struct page *page;
7045 page = list_entry(next, struct page, lru);
7046 next = page->lru.next;
7048 uncharge_page(page, &ug);
7056 * mem_cgroup_uncharge - uncharge a page
7057 * @page: page to uncharge
7059 * Uncharge a page previously charged with mem_cgroup_charge().
7061 void mem_cgroup_uncharge(struct page *page)
7068 /* Don't touch page->lru of any random page, pre-check: */
7069 if (!page->mem_cgroup)
7073 uncharge_page(page, &ug);
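A sketch of where the uncharge sits in a page's life, roughly what the single-page release path does once the last reference is gone (note free_unref_page() is mm-internal):

static void sketch_release_page(struct page *page)
{
	if (!put_page_testzero(page))
		return;
	mem_cgroup_uncharge(page);	/* no-op if the page was never charged */
	free_unref_page(page);		/* hand the page back to the allocator */
}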
7078 * mem_cgroup_uncharge_list - uncharge a list of pages
7094 * mem_cgroup_migrate - charge a page's replacement
7095 * @oldpage: currently circulating page
7096 * @newpage: replacement page
7098 * Charge @newpage as a replacement page for @oldpage. @oldpage will
7103 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
7118 /* Page cache replacement: new page already charged? */
7127 /* Force-charge the new page. The old one will be freed soon */
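Sketch of the replacement pattern, in the spirit of page cache replacement: the new page inherits the charge before the old one is dropped (hypothetical helper):

static void sketch_replace_page(struct page *old, struct page *new)
{
	mem_cgroup_migrate(old, new);	/* charge @new as @old's replacement */
	/* ... switch the mapping over to @new ... */
	put_page(old);
}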
7303 * @page: page whose memsw charge to transfer
7306 * Transfer the memsw charge of @page to @entry.
7308 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
7314 VM_BUG_ON_PAGE(PageLRU(page), page);
7315 VM_BUG_ON_PAGE(page_count(page), page);
7320 memcg = page->mem_cgroup;
7322 /* Readahead page, never charged */
7332 nr_entries = thp_nr_pages(page);
7338 VM_BUG_ON_PAGE(oldid, page);
7341 page->mem_cgroup = NULL;
7359 mem_cgroup_charge_statistics(memcg, page, -nr_entries);
7360 memcg_check_events(memcg, page);
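Reclaim-side sketch: just before a swap-cache page is torn down, the memory charge is converted into a swap record (helper name invented):

#include <linux/swap.h>

static void sketch_swapout(struct page *page)
{
	swp_entry_t swap = { .val = page_private(page) };

	mem_cgroup_swapout(page, swap);
	/* ... then delete the page from the swap cache and free it ... */
}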
7366 * mem_cgroup_try_charge_swap - try charging swap space for a page
7367 * @page: page being added to swap
7370 * Try to charge @page's memcg for the swap space at @entry.
7374 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
7376 unsigned int nr_pages = thp_nr_pages(page);
7384 memcg = page->mem_cgroup;
7386 /* Readahead page, never charged */
7409 VM_BUG_ON_PAGE(oldid, page);
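Allocation-side sketch, following the pattern used when a swap slot is handed out: the slot is given back if the memcg's swap limit is hit (helper name invented):

static bool sketch_claim_swap_slot(struct page *page, swp_entry_t entry)
{
	if (mem_cgroup_try_charge_swap(page, entry)) {
		put_swap_page(page, entry);	/* swap limit exceeded */
		return false;
	}
	return true;
}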
7454 bool mem_cgroup_swap_full(struct page *page)
7458 VM_BUG_ON_PAGE(!PageLocked(page), page);
7465 memcg = page->mem_cgroup;
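A sketch of how the fullness check is typically used after swapin: drop the swap copy early when the memcg's swap is nearly full (helper name invented):

static void sketch_after_swapin(struct page *page)
{
	if (PageSwapCache(page) && mem_cgroup_swap_full(page))
		try_to_free_swap(page);
}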