Lines matching refs:mapping (each matched source line is shown with its line number in the file on the left)
34 static inline void __clear_shadow_entry(struct address_space *mapping,
37 XA_STATE(xas, &mapping->i_pages, index);
43 mapping->nrexceptional--;
46 static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
49 xa_lock_irq(&mapping->i_pages);
50 __clear_shadow_entry(mapping, index, entry);
51 xa_unlock_irq(&mapping->i_pages);
59 static void truncate_exceptional_pvec_entries(struct address_space *mapping,
67 if (shmem_mapping(mapping))
77 dax = dax_mapping(mapping);
80 xa_lock_irq(&mapping->i_pages);
95 dax_delete_mapping_entry(mapping, index);
99 __clear_shadow_entry(mapping, index, page);
103 xa_unlock_irq(&mapping->i_pages);
111 static int invalidate_exceptional_entry(struct address_space *mapping,
115 if (shmem_mapping(mapping) || dax_mapping(mapping))
117 clear_shadow_entry(mapping, index, entry);
125 static int invalidate_exceptional_entry2(struct address_space *mapping,
129 if (shmem_mapping(mapping))
131 if (dax_mapping(mapping))
132 return dax_invalidate_mapping_entry_sync(mapping, index);
133 clear_shadow_entry(mapping, index, entry);
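The helpers above only act on "exceptional" page cache slots: value entries that encode a workingset shadow or a DAX entry rather than pointing at a real struct page. A minimal sketch of the distinction they rely on, using a hypothetical helper name (the real code open-codes the same xa_is_value() test while walking the pagevec):

static bool example_slot_is_exceptional(void *entry)
{
        /* Value entries carry shadow/DAX data, not a struct page pointer. */
        return xa_is_value(entry);
}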
157 invalidatepage = page->mapping->a_ops->invalidatepage;
171 * We need to bail out if page->mapping is no longer equal to the original
172 * mapping. This happens a) when the VM reclaimed the page while we waited on
202 invalidate_complete_page(struct address_space *mapping, struct page *page)
206 if (page->mapping != mapping)
212 ret = remove_mapping(mapping, page);
217 int truncate_inode_page(struct address_space *mapping, struct page *page)
221 if (page->mapping != mapping)
232 int generic_error_remove_page(struct address_space *mapping, struct page *page)
234 if (!mapping)
240 if (!S_ISREG(mapping->host->i_mode))
242 return truncate_inode_page(mapping, page);
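generic_error_remove_page() lets memory-failure handling simply truncate a poisoned page out of a regular file; a filesystem opts in by pointing its ->error_remove_page method at it. A minimal, hypothetical sketch of that wiring (the examplefs name and the otherwise-empty ops table are placeholders, not an in-tree example):

static const struct address_space_operations examplefs_aops = {
        /* readpage/writepage etc. omitted; only the hook of interest shown. */
        .error_remove_page      = generic_error_remove_page,
};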
247 * Safely invalidate one page from its pagecache mapping.
254 struct address_space *mapping = page_mapping(page);
255 if (!mapping)
261 return invalidate_complete_page(mapping, page);
266 * @mapping: mapping to truncate
281 * mapping is large, it is probably the case that the final pages are the most
288 void truncate_inode_pages_range(struct address_space *mapping,
300 if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
326 while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
355 if (page->mapping != mapping) {
363 delete_from_page_cache_batch(mapping, &locked_pvec);
366 truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
372 struct page *page = find_lock_page(mapping, start - 1);
382 cleancache_invalidate_page(mapping, page);
391 struct page *page = find_lock_page(mapping, end);
395 cleancache_invalidate_page(mapping, page);
413 if (!pagevec_lookup_entries(&pvec, mapping, index,
446 truncate_inode_page(mapping, page);
449 truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
455 cleancache_invalidate_inode(mapping);
461 * @mapping: mapping to truncate
468 * mapping->nrpages can be non-zero when this function returns even after
469 * truncation of the whole mapping.
471 void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
473 truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
479 * @mapping: mapping to truncate
486 void truncate_inode_pages_final(struct address_space *mapping)
498 mapping_set_exiting(mapping);
505 nrpages = mapping->nrpages;
507 nrexceptional = mapping->nrexceptional;
516 xa_lock_irq(&mapping->i_pages);
517 xa_unlock_irq(&mapping->i_pages);
524 truncate_inode_pages(mapping, 0);
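truncate_inode_pages_final() is meant for the inode teardown path, after which no new pages can be added to the mapping. A minimal sketch of the usual ->evict_inode() shape, assuming a hypothetical filesystem with no on-disk metadata to clean up:

static void examplefs_evict_inode(struct inode *inode)
{
        /* Drop every remaining page and exceptional entry for this inode. */
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
}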
528 static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
539 while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
551 invalidate_exceptional_entry(mapping, index,
616 * @mapping: the address_space which holds the pages to invalidate
629 unsigned long invalidate_mapping_pages(struct address_space *mapping,
632 return __invalidate_mapping_pages(mapping, start, end, NULL);
641 void invalidate_mapping_pagevec(struct address_space *mapping,
644 __invalidate_mapping_pages(mapping, start, end, nr_pagevec);
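invalidate_mapping_pages() is the best-effort variant: dirty, locked and mapped pages are skipped rather than forced out, and the return value counts the pages actually released. A minimal sketch of the common whole-file call, with a hypothetical helper name:

static unsigned long example_drop_clean_pages(struct inode *inode)
{
        /* Index range 0..-1 covers the whole file; busy pages are skipped. */
        return invalidate_mapping_pages(inode->i_mapping, 0, -1);
}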
655 invalidate_complete_page2(struct address_space *mapping, struct page *page)
659 if (page->mapping != mapping)
665 xa_lock_irqsave(&mapping->i_pages, flags);
671 xa_unlock_irqrestore(&mapping->i_pages, flags);
673 if (mapping->a_ops->freepage)
674 mapping->a_ops->freepage(page);
679 xa_unlock_irqrestore(&mapping->i_pages, flags);
683 static int do_launder_page(struct address_space *mapping, struct page *page)
687 if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
689 return mapping->a_ops->launder_page(page);
694 * @mapping: the address_space
703 int invalidate_inode_pages2_range(struct address_space *mapping,
714 if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
719 while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
731 if (!invalidate_exceptional_entry2(mapping,
742 unmap_mapping_pages(mapping, index,
749 if (page->mapping != mapping) {
759 ret2 = do_launder_page(mapping, page);
761 if (!invalidate_complete_page2(mapping, page))
780 if (dax_mapping(mapping)) {
781 unmap_mapping_pages(mapping, start, end - start + 1, false);
784 cleancache_invalidate_inode(mapping);
791 * @mapping: the address_space
798 int invalidate_inode_pages2(struct address_space *mapping)
800 return invalidate_inode_pages2_range(mapping, 0, -1);
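Unlike the best-effort helper above, the pages2 variants are not allowed to leave stale pages behind silently, so callers check the return value; a failure means some page could not be invalidated. A minimal sketch of the pattern used around direct writes, with hypothetical names and count assumed non-zero:

static int example_invalidate_for_dio(struct inode *inode, loff_t pos,
                                      size_t count)
{
        pgoff_t first = pos >> PAGE_SHIFT;
        pgoff_t last = (pos + count - 1) >> PAGE_SHIFT;

        /* Any cached page overlapping the write range must go. */
        return invalidate_inode_pages2_range(inode->i_mapping, first, last);
}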
821 struct address_space *mapping = inode->i_mapping;
833 unmap_mapping_range(mapping, holebegin, 0, 1);
834 truncate_inode_pages(mapping, newsize);
835 unmap_mapping_range(mapping, holebegin, 0, 1);
929 struct address_space *mapping = inode->i_mapping;
946 unmap_mapping_range(mapping, unmap_start,
948 truncate_inode_pages_range(mapping, lstart, lend);
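The last two blocks above show the unmap-then-truncate sequence used when a file shrinks or a range is punched out: unmap_mapping_range() tears down user mappings over the affected range, and truncate_inode_pages()/truncate_inode_pages_range() then drop the cached pages. A minimal sketch of a shrink-side caller with a hypothetical name, assuming i_size has already been updated to newsize:

static void example_shrink_pagecache(struct inode *inode, loff_t newsize)
{
        struct address_space *mapping = inode->i_mapping;
        loff_t holebegin = round_up(newsize, PAGE_SIZE);

        /* First unmap is an efficiency measure: fewer per-page unmaps below. */
        unmap_mapping_range(mapping, holebegin, 0, 1);
        truncate_inode_pages(mapping, newsize);
        /*
         * Second unmap is for correctness: private COWed pages may have
         * appeared between the first pass and the end of the truncate.
         */
        unmap_mapping_range(mapping, holebegin, 0, 1);
}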