Lines matching defs: page (mm/swapfile.c)

127  * corresponding page
185 /* Do not discard the swap header page! */
231 sector_t swap_page_sector(struct page *page)
233 struct swap_info_struct *sis = page_swap_info(page);
238 offset = __page_file_index(page);
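
The swap_page_sector() fragments above look up which disk sector backs a swapped page: page_swap_info() finds the device, __page_file_index() gives the page's offset into the swap area, and that offset is translated through the device's extent list and scaled from page units to 512-byte sectors. A minimal userspace model of that translation, with a flat array standing in for the kernel's rb-tree of struct swap_extent; every name in it is illustrative, not kernel API:

/* Simplified userspace model of mapping a swap page offset to a disk
 * sector.  The kernel keeps swap extents in an rb-tree and scales by
 * (PAGE_SHIFT - 9); here a flat array stands in for the extent tree.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define SECTOR_SHIFT 9

struct extent {                 /* models struct swap_extent */
	uint64_t start_page;    /* first swap page offset covered   */
	uint64_t nr_pages;      /* number of pages in this extent   */
	uint64_t start_block;   /* first disk block (in page units) */
};

/* Find the extent covering @offset and return the 512-byte sector
 * where that swap page starts, or 0 if the offset is not mapped. */
static uint64_t swap_offset_to_sector(const struct extent *ext, int n,
				      uint64_t offset)
{
	for (int i = 0; i < n; i++) {
		if (offset >= ext[i].start_page &&
		    offset < ext[i].start_page + ext[i].nr_pages) {
			uint64_t block = ext[i].start_block +
					 (offset - ext[i].start_page);
			return block << (PAGE_SHIFT - SECTOR_SHIFT);
		}
	}
	return 0;
}

int main(void)
{
	struct extent ext[] = {
		{ .start_page = 0,  .nr_pages = 16, .start_block = 100 },
		{ .start_page = 16, .nr_pages = 32, .start_block = 500 },
	};

	printf("offset 20 -> sector %llu\n",
	       (unsigned long long)swap_offset_to_sector(ext, 2, 20));
	return 0;
}
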
580 * counter becomes 0, which means no page in the cluster is in use, we can
1011 * page swap is disabled. Warn and fail the allocation.
1110 * multiple callers probably all tried to get a page from the
1224 * prevent swapoff, such as the folio in swap cache is locked, page
1258 * the page is read from the swap device, the PTE is verified not
1259 * changed with the page table locked to check whether the swap device
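
The comments above describe the swap-in discipline: something must pin the device against swapoff (a locked swap-cache folio, a held page table lock, ...), and after the page has been read the PTE is re-checked under the page table lock before the result is used. As a loose userspace analogy only (not the kernel's locking), the same "do the slow work unlocked, then revalidate under the lock before committing" pattern looks like this; compile with -pthread:

/* Userspace analogy of "work unlocked, then revalidate under the lock
 * before committing", as the comments above describe for re-checking
 * the PTE after a swap-in read.  Not kernel code.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pte = 42;            /* stands in for the page table entry  */
static int result;              /* stands in for the installed mapping */

static void *swap_in(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		int snapshot = pte;            /* value the work is based on */
		pthread_mutex_unlock(&lock);

		usleep(1000);                  /* "read the page from disk"  */
		int prepared = snapshot * 2;   /* work done without the lock */

		pthread_mutex_lock(&lock);
		if (pte == snapshot) {         /* revalidate before commit   */
			result = prepared;
			pthread_mutex_unlock(&lock);
			return NULL;
		}
		pthread_mutex_unlock(&lock);   /* changed under us: retry    */
	}
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, swap_in, NULL);
	pthread_join(t, NULL);
	printf("committed %d\n", result);
	return 0;
}
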
1479 struct page *page;
1498 page = vmalloc_to_page(p->swap_map + offset);
1500 VM_BUG_ON(page_private(page) != SWP_CONTINUED);
1503 page = list_next_entry(page, lru);
1504 map = kmap_atomic(page);
1585 * another page of the image. On waking from hibernation, the
1602 * free the page cache entry if it is the last user.
1756 struct page *page = folio_file_page(folio, swp_offset(entry));
1757 struct page *swapcache;
1760 bool hwpoisoned = PageHWPoison(page);
1763 swapcache = page;
1764 page = ksm_might_need_to_copy(page, vma, addr);
1765 if (unlikely(!page))
1767 else if (unlikely(PTR_ERR(page) == -EHWPOISON))
1779 if (unlikely(hwpoisoned || !PageUptodate(page))) {
1785 page = swapcache;
1795 * Some architectures may have to restore extra metadata to the page
1799 arch_swap_restore(entry, page_folio(page));
1802 BUG_ON(!PageAnon(page) && PageMappedToDisk(page));
1803 BUG_ON(PageAnon(page) && PageAnonExclusive(page));
1807 get_page(page);
1808 if (page == swapcache) {
1814 * call and have the page locked.
1816 VM_BUG_ON_PAGE(PageWriteback(page), page);
1820 page_add_anon_rmap(page, vma, addr, rmap_flags);
1822 page_add_new_anon_rmap(page, vma, addr);
1823 lru_cache_add_inactive_or_unevictable(page, vma);
1825 new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
1836 if (page != swapcache) {
1837 unlock_page(page);
1838 put_page(page);
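
In the unuse_pte() fragments above, ksm_might_need_to_copy() can return a usable page (possibly a fresh copy), NULL when the copy cannot be allocated, or an error folded into the pointer itself, which is why the code compares PTR_ERR(page) with -EHWPOISON. A standalone re-implementation of that error-pointer convention; the helpers mimic the kernel's ERR_PTR()/PTR_ERR()/IS_ERR(), but everything here is a userspace sketch:

/* Standalone model of the ERR_PTR/PTR_ERR/IS_ERR convention used when
 * checking the return value of ksm_might_need_to_copy().  Errno values
 * are squeezed into the top of the address space, where no valid
 * pointer can live.
 */
#include <errno.h>
#include <stdio.h>

#ifndef EHWPOISON
#define EHWPOISON 133           /* Linux's value, for non-Linux libcs */
#endif

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Illustrative stand-in: return a buffer, NULL, or an encoded error. */
static void *maybe_copy(int what)
{
	static char buf[64];

	if (what == 0)
		return buf;              /* success: a real pointer */
	if (what == 1)
		return NULL;             /* allocation failure      */
	return ERR_PTR(-EHWPOISON);      /* poisoned source page    */
}

int main(void)
{
	for (int what = 0; what < 3; what++) {
		void *p = maybe_copy(what);

		if (!p)
			printf("case %d: NULL (allocation failed)\n", what);
		else if (IS_ERR(p))
			printf("case %d: error %ld\n", what, PTR_ERR(p));
		else
			printf("case %d: got a page\n", what);
	}
	return 0;
}
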
1880 struct page *page;
1888 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
1890 if (page)
1891 folio = page_folio(page);
2113 * swap cache just before we acquired the page lock. The folio
2190 * Add a block range (and the corresponding page range) into this swapdev's
2193 * This function rather assumes that it is called in ascending page order.
2205 * function is called in ascending page order.
2240 * time for locating where on disk a page belongs.
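
The comments above (from around add_swap_extent()/setup_swap_extents()) explain that block ranges are added in ascending page order so that locating where a page lives on disk stays cheap. Under that ordering, a new range either extends the previous extent, when both its page range and its block range continue it, or starts a fresh extent. A self-contained sketch of that merge rule; the struct and function are invented for illustration:

/* Userspace sketch of building an extent list in ascending page order:
 * a new (start_page, nr_pages, start_block) range either extends the
 * last extent, when both the page range and the block range continue
 * it, or is appended as a new extent.  The kernel keeps these in an
 * rb-tree of struct swap_extent.
 */
#include <stdint.h>
#include <stdio.h>

struct extent {
	uint64_t start_page;
	uint64_t nr_pages;
	uint64_t start_block;
};

static int add_extent(struct extent *ext, int n, uint64_t start_page,
		      uint64_t nr_pages, uint64_t start_block)
{
	if (n > 0) {
		struct extent *last = &ext[n - 1];

		/* Contiguous in both page space and block space: merge. */
		if (last->start_page + last->nr_pages == start_page &&
		    last->start_block + last->nr_pages == start_block) {
			last->nr_pages += nr_pages;
			return n;
		}
	}
	ext[n] = (struct extent){ start_page, nr_pages, start_block };
	return n + 1;
}

int main(void)
{
	struct extent ext[8];
	int n = 0;

	n = add_extent(ext, n, 0, 16, 100);
	n = add_extent(ext, n, 16, 16, 116);	/* merges with the first */
	n = add_extent(ext, n, 32, 8, 900);	/* block gap: new extent */

	for (int i = 0; i < n; i++)
		printf("pages %llu+%llu at block %llu\n",
		       (unsigned long long)ext[i].start_page,
		       (unsigned long long)ext[i].nr_pages,
		       (unsigned long long)ext[i].start_block);
	return 0;
}
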
2923 nr_good_pages = maxpages - 1; /* omit header page */
3005 struct page *page = NULL;
3063 page = read_mapping_page(mapping, 0, swap_file);
3064 if (IS_ERR(page)) {
3065 error = PTR_ERR(page);
3068 swap_header = kmap(page);
3159 * perform discards for released swap page-clusters.
3239 if (page && !IS_ERR(page)) {
3240 kunmap(page);
3241 put_page(page);
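
The swapon() fragments above read page 0 of the swap file with read_mapping_page(), kmap() it as the swap header, and release it with kunmap()/put_page() on the way out. On disk, that first page ends with a magic string in its last 10 bytes ("SWAPSPACE2" for the current format, "SWAP-SPACE" for the historic one). A small userspace check of just that signature, assuming a 4 KiB page size; the kernel additionally validates the header's version, size and bad-page fields:

/* Check a swap area's header magic: the last 10 bytes of the first
 * page hold "SWAPSPACE2" (or "SWAP-SPACE" for the old format).
 * Assumes a 4 KiB page; the kernel uses the runtime PAGE_SIZE.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

int main(int argc, char **argv)
{
	unsigned char page[PAGE_SIZE];
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <swapfile>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f || fread(page, 1, PAGE_SIZE, f) != PAGE_SIZE) {
		perror("read");
		return 1;
	}
	fclose(f);

	if (!memcmp(page + PAGE_SIZE - 10, "SWAPSPACE2", 10))
		puts("new-style swap signature found");
	else if (!memcmp(page + PAGE_SIZE - 10, "SWAP-SPACE", 10))
		puts("old-style swap signature found");
	else
		puts("no swap signature");
	return 0;
}
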
3375 * might occur if a page table entry has got corrupted.
3417 struct swap_info_struct *page_swap_info(struct page *page)
3419 swp_entry_t entry = page_swap_entry(page);
3428 return page_swap_info(&folio->page)->swap_file->f_mapping;
3432 pgoff_t __page_file_index(struct page *page)
3434 swp_entry_t swap = page_swap_entry(page);
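
page_swap_info() and __page_file_index() above both start from page_swap_entry(), which packs the swap device index (the "type") and the page's offset within that device into a single swp_entry_t; swp_type() and swp_offset() take it apart again. A self-contained model of that packing, using an illustrative 5-bit type field in the top bits; the kernel's real shifts and masks come from include/linux/swapops.h and vary by configuration:

/* Simplified model of a swp_entry_t: the swap device index ("type") in
 * the high bits, the page offset within that device in the low bits.
 * The 5-bit type width is illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define TYPE_BITS   5
#define TYPE_SHIFT  (64 - TYPE_BITS)
#define OFFSET_MASK ((UINT64_C(1) << TYPE_SHIFT) - 1)

typedef struct { uint64_t val; } swp_entry;

static swp_entry swp_entry_make(unsigned type, uint64_t offset)
{
	return (swp_entry){ ((uint64_t)type << TYPE_SHIFT) |
			    (offset & OFFSET_MASK) };
}

static unsigned swp_type_of(swp_entry e)   { return e.val >> TYPE_SHIFT; }
static uint64_t swp_offset_of(swp_entry e) { return e.val & OFFSET_MASK; }

int main(void)
{
	swp_entry e = swp_entry_make(2, 12345);   /* device 2, page 12345 */

	/* page_swap_info() maps the type to a swap_info_struct;
	 * __page_file_index() returns the offset. */
	printf("type=%u offset=%llu\n", swp_type_of(e),
	       (unsigned long long)swp_offset_of(e));
	return 0;
}
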
3441 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
3442 * page of the original vmalloc'ed swap_map, to hold the continuation count
3447 * on the original swap_map, only referring to a continuation page when the
3451 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
3458 struct page *head;
3459 struct page *page;
3460 struct page *list_page;
3467 * for latency not to zero a page while GFP_ATOMIC and holding locks.
3469 page = alloc_page(gfp_mask | __GFP_HIGHMEM);
3496 if (!page) {
3506 * Page allocation does not initialize the page's lru field,
3521 * a continuation page, free our allocation and use this one.
3538 list_add_tail(&page->lru, &head->lru);
3539 page = NULL; /* now it's attached, don't free it */
3547 if (page)
3548 __free_page(page);
3554 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
3555 * into, carry if so, or else fail until a new continuation page is allocated;
3564 struct page *head;
3565 struct page *page;
3577 page = list_next_entry(head, lru);
3578 map = kmap_atomic(page) + offset;
3589 page = list_next_entry(page, lru);
3590 BUG_ON(page == head);
3591 map = kmap_atomic(page) + offset;
3595 page = list_next_entry(page, lru);
3596 if (page == head) {
3600 map = kmap_atomic(page) + offset;
3601 init_map: *map = 0; /* we didn't zero the page */
3605 while ((page = list_prev_entry(page, lru)) != head) {
3606 map = kmap_atomic(page) + offset;
3619 page = list_next_entry(page, lru);
3620 BUG_ON(page == head);
3621 map = kmap_atomic(page) + offset;
3628 while ((page = list_prev_entry(page, lru)) != head) {
3629 map = kmap_atomic(page) + offset;
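
The add_swap_count_continuation() and swap_count_continued() fragments above implement a multi-digit reference count: the byte in swap_map is the low digit, each continuation page holds one higher-order digit for the same offset, and increments and decrements carry or borrow between digits (the kernel's own comment says to "think of how you add 1 to 999"). A userspace model of that counter; the digit limits mirror SWAP_MAP_MAX and SWAP_CONT_MAX from include/linux/swap.h, but the layout is purely illustrative:

/* Model of the swap-count continuation scheme: digit[0] is the count
 * byte in swap_map (saturating at MAP_MAX), digit[1..] live one per
 * continuation page (saturating at CONT_MAX).  Increment carries up,
 * decrement borrows down, like swap_count_continued().
 */
#include <stdio.h>

#define MAP_MAX  0x3e   /* max value of the low digit */
#define CONT_MAX 0x7f   /* max value of higher digits */
#define NDIGITS  4

static unsigned limit(int i) { return i == 0 ? MAP_MAX : CONT_MAX; }

static int inc(unsigned *digit)
{
	for (int i = 0; i < NDIGITS; i++) {
		if (digit[i] < limit(i)) {
			digit[i]++;
			return 0;
		}
		digit[i] = 0;	/* carry into the next "continuation page" */
	}
	return -1;		/* would need yet another continuation page */
}

static void dec(unsigned *digit)	/* assumes the count is non-zero */
{
	for (int i = 0; i < NDIGITS; i++) {
		if (digit[i] > 0) {
			digit[i]--;
			return;
		}
		digit[i] = limit(i);	/* borrow from the next digit up */
	}
}

int main(void)
{
	unsigned digit[NDIGITS] = { 0 };
	unsigned long total = 0;

	/* Take ~100k references, drop one, and show the digits. */
	for (int i = 0; i < 100000; i++)
		if (inc(digit) == 0)
			total++;
	dec(digit);
	total--;

	printf("count=%lu digits (low to high):", total);
	for (int i = 0; i < NDIGITS; i++)
		printf(" %#x", digit[i]);
	printf("\n");
	return 0;
}
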
3650 struct page *head;
3653 struct page *page, *next;
3655 list_for_each_entry_safe(page, next, &head->lru, lru) {
3656 list_del(&page->lru);
3657 __free_page(page);
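
free_swap_count_continuations() above walks each head page's list with list_for_each_entry_safe(), which caches the successor so the current continuation page can be unlinked and freed mid-walk. The same idea on a plain userspace singly linked list; nothing here comes from <linux/list.h>:

/* Why the "_safe" iteration matters: each node is freed during the
 * walk, so its successor must be saved before the node goes away.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int payload;
};

static void free_all(struct node *head)
{
	struct node *cur = head, *next;

	while (cur) {
		next = cur->next;	/* save successor before freeing */
		printf("freeing %d\n", cur->payload);
		free(cur);
		cur = next;
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->payload = i;
		n->next = head;
		head = n;
	}
	free_all(head);
	return 0;
}
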