Lines Matching defs:page

13 * Handles page cache pages in various states. The tricky part
14 * here is that we can access any page asynchronously with respect to
26 * - The case actually shows up as a frequent (top 10) page state in
27 * tools/mm/page-types when running a real workload.
41 #include <linux/page-flags.h>
149 * 1: the page is dissolved (if needed) and taken off from buddy,
150 * 0: the page is dissolved (if needed) and not taken off from buddy,
153 static int __page_handle_poison(struct page *page)
157 zone_pcp_disable(page_zone(page));
158 ret = dissolve_free_huge_page(page);
160 ret = take_page_off_buddy(page);
161 zone_pcp_enable(page_zone(page));
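
The fragments above outline __page_handle_poison(). A minimal reassembly is sketched below as it might sit in mm/memory-failure.c; only the matched lines are certain, and the "if (!ret)" fall-through from dissolve_free_huge_page() to take_page_off_buddy() plus the final return are assumptions.

static int __page_handle_poison(struct page *page)
{
	int ret;

	zone_pcp_disable(page_zone(page));	/* keep the page off per-cpu free lists */
	ret = dissolve_free_huge_page(page);	/* returns 0 for non-hugetlb pages */
	if (!ret)				/* assumed branch, not in the matched lines */
		ret = take_page_off_buddy(page);	/* 1 if taken off buddy, 0 otherwise */
	zone_pcp_enable(page_zone(page));

	return ret;
}
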
166 static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
173 if (__page_handle_poison(page) <= 0)
175 * We could fail to take off the target page from buddy
176 * for example due to racy page allocation, but that's
177 * acceptable because a soft-offlined page is not broken
184 SetPageHWPoison(page);
186 put_page(page);
187 page_ref_inc(page);
188 num_poisoned_pages_inc(page_to_pfn(page));
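
Similarly, the page_handle_poison() fragments above assemble into roughly the sketch below; the hugepage_or_freepage guard and the "if (release)" test are assumptions used to connect the matched statements.

static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
{
	if (hugepage_or_freepage) {
		/*
		 * We could fail to take the target page off buddy, e.g. due to
		 * racy page allocation; that's acceptable because a
		 * soft-offlined page is not broken.
		 */
		if (__page_handle_poison(page) <= 0)
			return false;
	}

	SetPageHWPoison(page);
	if (release)		/* assumed condition around the matched put_page() */
		put_page(page);
	page_ref_inc(page);
	num_poisoned_pages_inc(page_to_pfn(page));

	return true;
}
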
206 static int hwpoison_filter_dev(struct page *p)
230 static int hwpoison_filter_flags(struct page *p)
249 * can only guarantee that the page either belongs to the memcg tasks, or is
250 * a freed page.
255 static int hwpoison_filter_task(struct page *p)
266 static int hwpoison_filter_task(struct page *p) { return 0; }
269 int hwpoison_filter(struct page *p)
286 int hwpoison_filter(struct page *p)
295 * Kill all processes that have a poisoned page mapped and then isolate
296 * the page.
299 * Find all processes having the page mapped and kill them.
300 * But we keep a page reference around so that the page is not
302 * Then stash the page away
324 * Send a signal to all the processes that have the page mapped.
342 * Signal other processes sharing the page if they have
358 * Unknown page type encountered. Try to check whether it can turn PageLRU by
361 void shake_page(struct page *p)
425 * Note: @fsdax_pgoff is used only when @p is an fsdax page and a
427 * memory_failure event. In all other cases, page->index and
428 * page->mapping are sufficient for mapping the page back to its
431 static void __add_to_kill(struct task_struct *tsk, struct page *p,
455 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
457 * contain mappings for the page, but at least one VMA does.
459 * has a mapping for the page.
474 static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
494 void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
523 pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
571 * to be signaled when some page under the process is hwpoisoned.
578 * processes sharing the same error page, if the process is "early kill", the
596 * Collect processes when the error hits an anonymous page.
598 static void collect_procs_anon(struct folio *folio, struct page *page,
610 pgoff = page_to_pgoff(page);
623 if (!page_mapped_in_vma(page, vma))
625 add_to_kill_anon_file(t, page, vma, to_kill);
633 * Collect processes when the error hits a file-mapped page.
635 static void collect_procs_file(struct folio *folio, struct page *page,
645 pgoff = page_to_pgoff(page);
655 * the page but the corrupted page is not necessarily
661 add_to_kill_anon_file(t, page, vma, to_kill);
669 static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p,
677 * Collect processes when the error hits an fsdax page.
679 static void collect_procs_fsdax(struct page *page,
695 add_to_kill_fsdax(t, page, vma, to_kill, pgoff);
704 * Collect the processes that have the corrupted page mapped, in order to kill them.
706 static void collect_procs(struct folio *folio, struct page *page,
711 if (unlikely(PageKsm(page)))
712 collect_procs_ksm(page, tokill, force_early);
713 else if (PageAnon(page))
714 collect_procs_anon(folio, page, tokill, force_early);
716 collect_procs_file(folio, page, tokill, force_early);
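
The three collect_procs_*() callers above dispatch on the page type; a hedged sketch of that dispatcher follows (the early return on a NULL folio->mapping is an assumption, everything else comes from the matched lines).

static void collect_procs(struct folio *folio, struct page *page,
			  struct list_head *tokill, int force_early)
{
	if (!folio->mapping)	/* assumed guard: nothing to map the error back to */
		return;
	if (unlikely(PageKsm(page)))
		collect_procs_ksm(page, tokill, force_early);
	else if (PageAnon(page))
		collect_procs_anon(folio, page, tokill, force_early);
	else
		collect_procs_file(folio, page, tokill, force_early);
}
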
837 * memory_failure() failed to unmap the error page at the first call, or
841 * so this function walks the page table to find it. The returned virtual address
843 * process has multiple entries mapping the error page.
876 [MF_MSG_KERNEL] = "reserved kernel page",
877 [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page",
878 [MF_MSG_SLAB] = "kernel slab page",
879 [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
880 [MF_MSG_HUGE] = "huge page",
881 [MF_MSG_FREE_HUGE] = "free huge page",
882 [MF_MSG_UNMAP_FAILED] = "unmapping failed page",
883 [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
884 [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
885 [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page",
886 [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page",
887 [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page",
888 [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page",
889 [MF_MSG_DIRTY_LRU] = "dirty LRU page",
890 [MF_MSG_CLEAN_LRU] = "clean LRU page",
891 [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page",
892 [MF_MSG_BUDDY] = "free buddy page",
893 [MF_MSG_DAX] = "dax page",
895 [MF_MSG_UNKNOWN] = "unknown page",
899 * XXX: It is possible that a page is isolated from the LRU cache,
900 * and then kept in the swap cache or fails to be removed from the page cache.
901 * The page count will stop it from being freed by unpoison.
904 static int delete_from_lru_cache(struct page *p)
908 * Clear sensible page flags, so that the buddy system won't
909 * complain when the page is unpoisoned and freed.
915 * A poisoned page might never drop its ref count to 0, so we have
921 * drop the page count elevated by isolate_lru_page()
929 static int truncate_error_page(struct page *p, unsigned long pfn,
939 pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
963 /* Callback ->action() has to unlock the relevant page inside it. */
964 int (*action)(struct page_state *ps, struct page *p);
968 * Return true if page is still referenced by others, otherwise return
973 static bool has_extra_refcount(struct page_state *ps, struct page *p,
991 * Error hit kernel page.
995 static int me_kernel(struct page_state *ps, struct page *p)
1004 static int me_unknown(struct page_state *ps, struct page *p)
1006 pr_err("%#lx: Unknown page state\n", page_to_pfn(p));
1012 * Clean (or cleaned) page cache page.
1014 static int me_pagecache_clean(struct page_state *ps, struct page *p)
1032 * Now truncate the page in the page cache. This is really
1048 * The shmem page is kept in page cache instead of truncating
1069 * Dirty pagecache page
1070 * Issues: when the error hits a hole page, the error is not properly
1073 static int me_pagecache_dirty(struct page_state *ps, struct page *p)
1092 * and then through the PageError flag in the page.
1099 * when the page is reread or dropped. If an
1102 * and the page is dropped between then the error
1107 * report through AS_EIO) or when the page is dropped
1123 * A dirty swap cache page is tricky to handle. The page could live both in the page
1124 * cache and the swap cache (i.e., the page is freshly swapped in). So it could be
1132 * a later page fault, we know the application is accessing
1136 * Clean swap cache pages can be directly isolated. A later page fault will
1139 static int me_swapcache_dirty(struct page_state *ps, struct page *p)
1160 static int me_swapcache_clean(struct page_state *ps, struct page *p)
1179 * - An error on a hugepage is contained in the hugepage unit (not in raw page units).
1180 * To narrow down the kill region to one page, we need to break up the pmd.
1182 static int me_huge_page(struct page_state *ps, struct page *p)
1185 struct page *hpage = compound_head(p);
1192 /* The page is kept in page cache. */
1218 * Various page states we can handle.
1220 * A page state is defined by its current page->flags bits.
1223 * This is quite tricky because we can access the page at any time
1247 * Could in theory check if slab page is free or if we can drop
1317 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
1334 static int page_action(struct page_state *ps, struct page *p,
1339 /* page p should be unlocked after returning from ps->action(). */
1342 /* Could do more checks here if page looks ok */
1344 * Could adjust zone counters here to correct for the missing page.
1350 static inline bool PageHWPoisonTakenOff(struct page *page)
1352 return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON;
1355 void SetPageHWPoisonTakenOff(struct page *page)
1357 set_page_private(page, MAGIC_HWPOISON);
1360 void ClearPageHWPoisonTakenOff(struct page *page)
1362 if (PageHWPoison(page))
1363 set_page_private(page, 0);
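
The three "taken off" helpers above are nearly complete in the match; assembled, they read as below (only the braces are filled in). MAGIC_HWPOISON is a page_private() marker defined elsewhere in the file.

static inline bool PageHWPoisonTakenOff(struct page *page)
{
	return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON;
}

void SetPageHWPoisonTakenOff(struct page *page)
{
	set_page_private(page, MAGIC_HWPOISON);
}

void ClearPageHWPoisonTakenOff(struct page *page)
{
	if (PageHWPoison(page))
		set_page_private(page, 0);
}
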
1367 * Return true if the page type of a given page is supported by hwpoison
1372 static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
1375 if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page))
1378 return PageLRU(page) || is_free_buddy_page(page);
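
Assembled from the two matched conditions, HWPoisonHandlable() reads roughly as below; the early "return true" is the assumed body of the MF_SOFT_OFFLINE branch.

static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
{
	/* soft offline can also migrate non-LRU movable pages */
	if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page))
		return true;

	return PageLRU(page) || is_free_buddy_page(page);
}
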
1381 static int __get_hwpoison_page(struct page *page, unsigned long flags)
1383 struct folio *folio = page_folio(page);
1390 if (folio == page_folio(page))
1394 folio = page_folio(page);
1403 if (!HWPoisonHandlable(&folio->page, flags))
1407 if (folio == page_folio(page))
1410 pr_info("%#lx cannot catch tail\n", page_to_pfn(page));
1417 static int get_any_page(struct page *p, unsigned long flags)
1444 * page, retry.
1459 * A page we cannot handle. Check whether we can turn
1473 pr_err("%#lx: unhandlable page.\n", page_to_pfn(p));
1478 static int __get_unpoison_page(struct page *page)
1480 struct folio *folio = page_folio(page);
1487 if (folio == page_folio(page))
1498 if (PageHWPoisonTakenOff(page))
1501 return get_page_unless_zero(page) ? 1 : 0;
1506 * @p: Raw error page (hit by memory error)
1509 * get_hwpoison_page() takes a page refcount of an error page to handle memory
1510 * error on it, after checking that the error page is in a well-defined state
1511 * (defined as a page type on which we can successfully handle the memory error,
1512 * such as an LRU page or a hugetlb page).
1514 * Memory error handling could be triggered at any time on any type of page,
1517 * extra care for the error page's state (as done in __get_hwpoison_page()),
1521 * the given page has PG_hwpoison. So it's never reused for other page
1527 * -EBUSY when get_hwpoison_page() has raced with page lifecycle
1529 * -EHWPOISON when the page is hwpoisoned and taken off from buddy.
1531 static int get_hwpoison_page(struct page *p, unsigned long flags)
1549 static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
1550 int flags, struct page *hpage)
1577 pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
1582 * Propagate the dirty bit from PTEs to struct page first, because we
1583 * need this to decide if we should kill or just drop the page.
1585 * be called inside page lock (it's recommended but not enforced).
1594 pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
1600 * First collect all the processes that have the page
1619 pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
1626 pr_err("%#lx: failed to unmap page (mapcount=%d)\n",
1630 * try_to_unmap() might put mlocked page in lru cache, so call
1638 * struct page and all unmaps done we can decide if
1639 * killing is needed or not. Only kill when the page
1653 static int identify_page_state(unsigned long pfn, struct page *p,
1659 * The first check uses the current page flags which may not have any
1660 * relevant information. The second check with the saved page flags is
1661 * carried out only if the first check can't determine the page status.
1676 static int try_to_split_thp_page(struct page *page)
1680 lock_page(page);
1681 ret = split_huge_page(page);
1682 unlock_page(page);
1685 put_page(page);
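
The lock/split/unlock/put fragments above suggest the following shape for try_to_split_thp_page(); the "if (ret)" guard around put_page() and the final return are assumptions.

static int try_to_split_thp_page(struct page *page)
{
	int ret;

	lock_page(page);
	ret = split_huge_page(page);
	unlock_page(page);

	if (ret)		/* assumed: drop our ref only when the split failed */
		put_page(page);

	return ret;
}
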
1718 * The fsdax pages are initialized per base page, and the devdax pages
1721 * hwpoison, such that, if a subpage of a compound page is poisoned,
1722 * simply marking the compound head page is by far sufficient.
1735 * lock_page(), but dax pages do not use the page lock. This
1743 if (hwpoison_filter(&folio->page)) {
1762 * Use this flag as an indication that the dax page has been
1765 SetPageHWPoison(&folio->page);
1769 * different physical page at a given virtual address, so all
1774 collect_procs(folio, &folio->page, &to_kill, true);
1795 struct page *page;
1801 page = NULL;
1802 cookie = dax_lock_mapping_entry(mapping, index, &page);
1805 if (!page)
1808 SetPageHWPoison(page);
1810 collect_procs_fsdax(page, mapping, index, &to_kill);
1811 unmap_and_kill(&to_kill, page_to_pfn(page), mapping,
1824 * Struct raw_hwp_page represents information about "raw error page",
1829 struct page *page;
1837 bool is_raw_hwpoison_page_in_hugepage(struct page *page)
1841 struct folio *folio = page_folio(page);
1848 return PageHWPoison(page);
1861 if (page == p->page) {
1881 SetPageHWPoison(p->page);
1883 num_poisoned_pages_sub(page_to_pfn(p->page), 1);
1890 static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page)
1906 if (p->page == page)
1912 raw_hwp->page = page;
1916 num_poisoned_pages_inc(page_to_pfn(page));
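
A sketch of the per-hugepage bookkeeping implied by the raw_hwp fragments above; the llist_node member is an assumption, only the page pointer appears in the matched lines.

struct raw_hwp_page {
	struct llist_node node;	/* assumed linkage into the folio's raw_hwp list */
	struct page *page;	/* the raw (base) error page inside the hugepage */
};
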
1975 struct page *page = pfn_to_page(pfn);
1976 struct folio *folio = page_folio(page);
1977 int ret = 2; /* fallback to normal page handling */
1998 if (folio_set_hugetlb_hwpoison(folio, page)) {
2028 struct page *p = pfn_to_page(pfn);
2036 if (res == 2) { /* fallback to normal page handling */
2084 if (!hwpoison_user_mappings(p, pfn, flags, &folio->page)) {
2107 struct page *page;
2112 page = pfn_to_page(pfn);
2113 if (page)
2114 put_page(page);
2150 * memory_failure - Handle memory failure of a page.
2151 * @pfn: Page Number of the corrupted page
2156 * of a page. It tries its best to recover, which includes
2172 struct page *p;
2173 struct page *hpage;
2181 panic("Memory failure on page %lx", pfn);
2225 * 1) it's a free page, and therefore in safe hands:
2227 * 2) it's part of a non-compound high order page.
2229 * R/W the page; let's pray that the page has been
2231 * In fact it's dangerous to directly bump up page count from 0,
2272 * get_hwpoison_page() since they handle either a free page
2273 * or an unhandlable page. The refcount is bumped iff the
2274 * page is a valid handlable page.
2290 * walked by the page reclaim code; however, that's not a big loss.
2297 * We only intend to deal with the non-compound page here.
2298 * However, the page could have changed compound pages due to
2300 * handle the page next round.
2316 * We use page flags to determine what action should be taken, but
2318 * example is an mlocked page, where PG_mlocked is cleared by
2319 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
2320 * correctly, we save a copy of the page flags at this time.
2333 * __munlock_folio() may clear a writeback page's LRU flag without
2334 * page_lock. We need to wait for writeback completion for this page, or it
2348 * Abort on fail: __filemap_remove_folio() assumes unmapped page.
2393 * memory_failure_queue - Schedule handling memory failure of a page.
2394 * @pfn: Page Number of the corrupted page
2398 * when it detects hardware memory corruption of a page. It schedules
2399 * the recovery of the error page, including dropping pages, killing
2490 * unpoison_memory - Unpoison a previously poisoned page
2491 * @pfn: Page number of the to be unpoisoned page
2493 * Software-unpoison a page that has been poisoned by
2504 struct page *p;
2533 unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
2538 if (folio_test_slab(folio) || PageTable(&folio->page) ||
2539 folio_test_reserved(folio) || PageOffline(&folio->page))
2547 unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
2553 unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
2572 unpoison_pr_info("Unpoison: failed to grab page %#lx\n",
2597 unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
2604 static bool isolate_page(struct page *page, struct list_head *pagelist)
2608 if (PageHuge(page)) {
2609 isolated = isolate_hugetlb(page_folio(page), pagelist);
2611 bool lru = !__PageMovable(page);
2614 isolated = isolate_lru_page(page);
2616 isolated = isolate_movable_page(page,
2620 list_add(&page->lru, pagelist);
2622 inc_node_page_state(page, NR_ISOLATED_ANON +
2623 page_is_file_lru(page));
2628 * If we succeed in isolating the page, we grabbed another refcount on
2629 * the page, so we can safely drop the one we got from get_any_page().
2630 * If we failed to isolate the page, it means that we cannot go further
2634 put_page(page);
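
The isolation fragments above assemble into roughly the sketch below; the ISOLATE_UNEVICTABLE argument and the exact success handling are assumptions, the rest follows the matched lines, including the unconditional put_page() that balances the reference taken by get_any_page().

static bool isolate_page(struct page *page, struct list_head *pagelist)
{
	bool isolated = false;

	if (PageHuge(page)) {
		isolated = isolate_hugetlb(page_folio(page), pagelist);
	} else {
		bool lru = !__PageMovable(page);

		if (lru)
			isolated = isolate_lru_page(page);
		else
			isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);

		if (isolated) {
			list_add(&page->lru, pagelist);
			if (lru)
				inc_node_page_state(page, NR_ISOLATED_ANON +
						    page_is_file_lru(page));
		}
	}

	put_page(page);	/* drop the reference from get_any_page() either way */
	return isolated;
}
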
2640 * If the page is a non-dirty unmapped page-cache page, it is simply invalidated.
2641 * If the page is mapped, it migrates the contents over.
2643 static int soft_offline_in_use_page(struct page *page)
2646 unsigned long pfn = page_to_pfn(page);
2647 struct page *hpage = compound_head(page);
2648 char const *msg_page[] = {"page", "hugepage"};
2649 bool huge = PageHuge(page);
2657 if (try_to_split_thp_page(page)) {
2661 hpage = page;
2664 lock_page(page);
2666 wait_on_page_writeback(page);
2667 if (PageHWPoison(page)) {
2668 unlock_page(page);
2669 put_page(page);
2670 pr_info("soft offline: %#lx page already poisoned\n", pfn);
2674 if (!huge && PageLRU(page) && !PageSwapCache(page))
2677 * non-dirty unmapped page cache pages.
2679 ret = invalidate_inode_page(page);
2680 unlock_page(page);
2684 page_handle_poison(page, false, true);
2694 if (!page_handle_poison(page, huge, release))
2701 pfn, msg_page[huge], ret, &page->flags);
2706 pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n",
2707 pfn, msg_page[huge], page_count(page), &page->flags);
2714 * soft_offline_page - Soft offline a page.
2722 * Soft offline a page, by migration or invalidation,
2724 * a page is not corrupted yet (so it's still valid to access),
2741 struct page *page;
2749 page = pfn_to_online_page(pfn);
2750 if (!page) {
2757 if (PageHWPoison(page)) {
2758 pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
2766 ret = get_hwpoison_page(page, flags | MF_SOFT_OFFLINE);
2769 if (hwpoison_filter(page)) {
2771 put_page(page);
2778 ret = soft_offline_in_use_page(page);
2780 if (!page_handle_poison(page, true, false)) {