Lines Matching refs:page
30 * Regular page slots are stabilized by the page lock even without the tree
83 struct page *page = pvec->pages[i];
86 if (!xa_is_value(page)) {
87 pvec->pages[j++] = page;
99 __clear_shadow_entry(mapping, index, page);
138 * do_invalidatepage - invalidate part or all of a page
139 * @page: the page which is affected
143 * do_invalidatepage() is called when all or part of the page has become
152 void do_invalidatepage(struct page *page, unsigned int offset,
155 void (*invalidatepage)(struct page *, unsigned int, unsigned int);
157 invalidatepage = page->mapping->a_ops->invalidatepage;
163 (*invalidatepage)(page, offset, length);
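Read together, the do_invalidatepage() fragments above are a plain indirect dispatch through the address_space operations. A minimal sketch of the whole function, assuming the usual CONFIG_BLOCK fallback to block_invalidatepage() on the lines that did not match:

void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	/* Let the owning filesystem drop its private state for the range. */
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;	/* assumed fallback */
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}

An (offset, length) of (0, thp_size(page)), as used at 182 below, tells the filesystem to drop everything it has attached to the page.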
167 * If truncate cannot remove the fs-private metadata from the page, the page
171 * We need to bail out if page->mapping is no longer equal to the original
172 * mapping. This happens a) when the VM reclaimed the page while we waited on
174 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
176 static void truncate_cleanup_page(struct page *page)
178 if (page_mapped(page))
179 unmap_mapping_page(page);
181 if (page_has_private(page))
182 do_invalidatepage(page, 0, thp_size(page));
185 * Some filesystems seem to re-dirty the page even after
189 cancel_dirty_page(page);
190 ClearPageMappedToDisk(page);
199 * Returns non-zero if the page was successfully invalidated.
202 invalidate_complete_page(struct address_space *mapping, struct page *page)
206 if (page->mapping != mapping)
209 if (page_has_private(page) && !try_to_release_page(page, 0))
212 ret = remove_mapping(mapping, page);
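invalidate_complete_page() succeeds only when the page still belongs to this mapping and any fs-private data can be released. A sketch of the full function, assuming the early "return 0" paths on the non-matching lines:

static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;	/* page was reclaimed or re-used meanwhile */

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;	/* filesystem would not release its buffers */

	ret = remove_mapping(mapping, page);

	return ret;
}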
217 int truncate_inode_page(struct address_space *mapping, struct page *page)
219 VM_BUG_ON_PAGE(PageTail(page), page);
221 if (page->mapping != mapping)
224 truncate_cleanup_page(page);
225 delete_from_page_cache(page);
232 int generic_error_remove_page(struct address_space *mapping, struct page *page)
242 return truncate_inode_page(mapping, page);
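The two helpers above chain the cleanup and the page-cache removal. A sketch, assuming the non-matching lines are the usual error returns and the S_ISREG() restriction of this era:

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	if (page->mapping != mapping)
		return -EIO;		/* assumed early return */

	truncate_cleanup_page(page);
	delete_from_page_cache(page);
	return 0;
}

int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;		/* assumed */
	/* Assumed check: only punch out regular-file data pages. */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}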
247 * Safely invalidate one page from its pagecache mapping.
248 * It only drops clean, unused pages. The page must be locked.
250 * Returns 1 if the page is successfully invalidated, otherwise 0.
252 int invalidate_inode_page(struct page *page)
254 struct address_space *mapping = page_mapping(page);
257 if (PageDirty(page) || PageWriteback(page))
259 if (page_mapped(page))
261 return invalidate_complete_page(mapping, page);
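invalidate_inode_page() is a row of cheap bail-outs in front of invalidate_complete_page(); only clean, unmapped, non-writeback pages get dropped. A sketch, assuming the elided lines are the corresponding "return 0" paths:

int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (!mapping)
		return 0;	/* anonymous, or already detached */
	if (PageDirty(page) || PageWriteback(page))
		return 0;	/* never throw away data or racing I/O */
	if (page_mapped(page))
		return 0;	/* still mapped into some address space */
	return invalidate_complete_page(mapping, page);
}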
270 * Truncate the page cache, removing the pages that are between
272 * if lstart or lend + 1 is not page aligned).
275 * block on page locks and it will not block on writeback. The second pass
280 * We pass down the cache-hot hint to the page freeing code. Even if the
286 * page aligned properly.
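For orientation, the common entry point truncate_inode_pages() is, in this era, a thin wrapper that truncates from lstart to end-of-file; a sketch of that assumed wrapper:

void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}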
338 struct page *page = pvec.pages[i];
340 /* We rely upon deletion not changing page->index */
345 if (xa_is_value(page))
348 if (!trylock_page(page))
350 WARN_ON(page_to_index(page) != index);
351 if (PageWriteback(page)) {
352 unlock_page(page);
355 if (page->mapping != mapping) {
356 unlock_page(page);
359 pagevec_add(&locked_pvec, page);
372 struct page *page = find_lock_page(mapping, start - 1);
373 if (page) {
376 /* Truncation within a single page */
380 wait_on_page_writeback(page);
381 zero_user_segment(page, partial_start, top);
382 cleancache_invalidate_page(mapping, page);
383 if (page_has_private(page))
384 do_invalidatepage(page, partial_start,
386 unlock_page(page);
387 put_page(page);
391 struct page *page = find_lock_page(mapping, end);
392 if (page) {
393 wait_on_page_writeback(page);
394 zero_user_segment(page, 0, partial_end);
395 cleancache_invalidate_page(mapping, page);
396 if (page_has_private(page))
397 do_invalidatepage(page, 0,
399 unlock_page(page);
400 put_page(page);
404 * If the truncation happened within a single page no pages
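The partial-page handling above is driven by byte offsets inside the first and last page of the range. A sketch of how those offsets and the full-page bounds are typically derived in this era (the exact declarations did not match "page"):

	/* Byte offsets within the first and last (partial) pages. */
	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start'/'end' cover the pages truncated in full; the pages
	 * straddling lstart/lend are only zeroed and invalidated above.
	 */
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend == -1) ? -1 : (lend + 1) >> PAGE_SHIFT;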
430 struct page *page = pvec.pages[i];
432 /* We rely upon deletion not changing page->index */
440 if (xa_is_value(page))
443 lock_page(page);
444 WARN_ON(page_to_index(page) != index);
445 wait_on_page_writeback(page);
446 truncate_inode_page(mapping, page);
447 unlock_page(page);
466 * Note: When this function returns, there can be a page in the process of
543 struct page *page = pvec.pages[i];
545 /* We rely upon deletion not changing page->index */
550 if (xa_is_value(page)) {
552 page);
556 if (!trylock_page(page))
559 WARN_ON(page_to_index(page) != index);
562 if (PageTransTail(page)) {
563 unlock_page(page);
565 } else if (PageTransHuge(page)) {
570 * invalidate the page as the part outside of
574 unlock_page(page);
579 get_page(page);
583 * the huge page.
589 ret = invalidate_inode_page(page);
590 unlock_page(page);
592 * Invalidation is a hint that the page is no longer
596 deactivate_file_page(page);
602 if (PageTransHuge(page))
603 put_page(page);
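A typical caller of invalidate_mapping_pages() asks for the whole file and treats the return value as a best-effort count of dropped pages. A hypothetical usage sketch (example_drop_clean_cache is illustrative, not from the kernel):

/* Best-effort: drop clean, unmapped pagecache for the whole inode. */
static void example_drop_clean_cache(struct inode *inode)
{
	unsigned long nr;

	nr = invalidate_mapping_pages(inode->i_mapping, 0, -1);
	pr_debug("dropped %lu clean pagecache pages\n", nr);
}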
648 * This is like invalidate_complete_page(), except it ignores the page's
655 invalidate_complete_page2(struct address_space *mapping, struct page *page)
659 if (page->mapping != mapping)
662 if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
666 if (PageDirty(page))
669 BUG_ON(page_has_private(page));
670 __delete_from_page_cache(page, NULL);
674 mapping->a_ops->freepage(page);
676 put_page(page); /* pagecache ref */
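invalidate_complete_page2() removes the page directly under the i_pages lock instead of going through remove_mapping(), and bails out if the page turned dirty. A sketch, with the locking and the "failed" exit assumed on the lines that did not match:

static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	unsigned long flags;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	xa_lock_irqsave(&mapping->i_pages, flags);	/* assumed locking */
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	xa_unlock_irqrestore(&mapping->i_pages, flags);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	put_page(page);	/* pagecache ref */
	return 1;
failed:
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	return 0;
}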
683 static int do_launder_page(struct address_space *mapping, struct page *page)
685 if (!PageDirty(page))
687 if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
689 return mapping->a_ops->launder_page(page);
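do_launder_page() only forwards still-dirty pages to the filesystem's launder_page hook; everything else is a no-op. Reconstructed as one piece (the "return 0" lines are assumed):

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;	/* nothing left to write back */
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;	/* page moved away, or no hook provided */
	return mapping->a_ops->launder_page(page);
}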
695 * @start: the page offset 'from' which to invalidate
696 * @end: the page offset 'to' which to invalidate (inclusive)
723 struct page *page = pvec.pages[i];
725 /* We rely upon deletion not changing page->index */
730 if (xa_is_value(page)) {
732 index, page))
737 if (!did_range_unmap && page_mapped(page)) {
739 * If page is mapped, before taking its lock,
747 lock_page(page);
748 WARN_ON(page_to_index(page) != index);
749 if (page->mapping != mapping) {
750 unlock_page(page);
753 wait_on_page_writeback(page);
755 if (page_mapped(page))
756 unmap_mapping_page(page);
757 BUG_ON(page_mapped(page));
759 ret2 = do_launder_page(mapping, page);
761 if (!invalidate_complete_page2(mapping, page))
766 unlock_page(page);
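The tail of the loop above launders the page first and only then tries the stronger invalidation, folding a failure into the return value. A condensed sketch of that sequence (the -EBUSY propagation is assumed for this era):

		ret2 = do_launder_page(mapping, page);
		if (ret2 == 0) {
			if (!invalidate_complete_page2(mapping, page))
				ret2 = -EBUSY;	/* assumed error code */
		}
		if (ret2 < 0)
			ret = ret2;
		unlock_page(page);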
774 * For DAX we invalidate page tables after invalidating page cache. We
775 * could invalidate page tables while invalidating each entry however
777 * work as we have no cheap way to find whether page cache entry didn't
816 * situations such as writepage being called for a page that has already
827 * single-page unmaps. However after this first call, and
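The comment at 827 belongs to truncate_pagecache(), where unmap_mapping_range() brackets the truncation: once up front for efficiency, and once afterwards to catch private COWed pages created in the window. A sketch of that assumed caller:

void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	unmap_mapping_range(mapping, holebegin, 0, 1);	/* cheap bulk unmap */
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);	/* catch COWed pages */
}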
870 * write starting after current i_size. We mark the page straddling current
872 * the page. This way filesystem can be sure that page_mkwrite() is called on
873 * the page before user writes to the page via mmap after the i_size has been
876 * The function must be called after i_size is updated so that page fault
877 * coming after we unlock the page will already see the new i_size.
886 struct page *page;
899 page = find_lock_page(inode->i_mapping, index);
901 if (!page)
907 if (page_mkclean(page))
908 set_page_dirty(page);
909 unlock_page(page);
910 put_page(page);
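pagecache_isize_extended() simply write-protects the page straddling the old i_size so that the next mmap store faults into page_mkwrite() and the filesystem can set up the newly exposed blocks. A sketch of the whole function, with the block-size early returns assumed for this era:

void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	/* Nothing to do for whole-page block sizes or an empty extension. */
	if (from >= to || bsize == PAGE_SIZE)
		return;
	/* Assumed check: the straddling page gets no new hole blocks. */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do. */
	if (!page)
		return;
	/* Write-protect all mappings; redirty if any PTE was writable. */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}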
924 * situations such as writepage being called for a page that has already
936 * doing their own page rounding first. Note that unmap_mapping_range