Lines matching refs:mapping in mm/truncate.c
31 static inline void __clear_shadow_entry(struct address_space *mapping,
34 XA_STATE(xas, &mapping->i_pages, index);
42 static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
45 spin_lock(&mapping->host->i_lock);
46 xa_lock_irq(&mapping->i_pages);
47 __clear_shadow_entry(mapping, index, entry);
48 xa_unlock_irq(&mapping->i_pages);
49 if (mapping_shrinkable(mapping))
50 inode_add_lru(mapping->host);
51 spin_unlock(&mapping->host->i_lock);
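
For context, the matches at lines 42-51 cover essentially all of clear_shadow_entry(), and lines 31/34 its helper. Reconstructed around the matched lines (the xarray details inside __clear_shadow_entry are recalled from this kernel generation, so verify against the tree), the pair looks roughly like:

        static inline void __clear_shadow_entry(struct address_space *mapping,
                                                pgoff_t index, void *entry)
        {
                XA_STATE(xas, &mapping->i_pages, index);

                xas_set_update(&xas, workingset_update_node);
                if (xas_load(&xas) != entry)    /* entry already replaced? */
                        return;
                xas_store(&xas, NULL);          /* drop the shadow entry */
        }

        static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
                                       void *entry)
        {
                spin_lock(&mapping->host->i_lock);
                xa_lock_irq(&mapping->i_pages);
                __clear_shadow_entry(mapping, index, entry);
                xa_unlock_irq(&mapping->i_pages);
                if (mapping_shrinkable(mapping))        /* nothing pins the inode now */
                        inode_add_lru(mapping->host);
                spin_unlock(&mapping->host->i_lock);
        }

Note the lock ordering visible in the matches: host->i_lock is taken outside the i_pages lock, and the inode-LRU update happens after the xarray lock is dropped but still under i_lock.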
59 static void truncate_folio_batch_exceptionals(struct address_space *mapping,
66 if (shmem_mapping(mapping))
76 dax = dax_mapping(mapping);
78 spin_lock(&mapping->host->i_lock);
79 xa_lock_irq(&mapping->i_pages);
92 dax_delete_mapping_entry(mapping, index);
96 __clear_shadow_entry(mapping, index, folio);
100 xa_unlock_irq(&mapping->i_pages);
101 if (mapping_shrinkable(mapping))
102 inode_add_lru(mapping->host);
103 spin_unlock(&mapping->host->i_lock);
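
The matches at lines 59-103 trace truncate_folio_batch_exceptionals(). Its control flow in this kernel generation is roughly the skeleton below; this is a hedged reconstruction around the matched lines, with the batch-compaction bookkeeping elided, so treat it as a reading aid rather than the verbatim source:

        static void truncate_folio_batch_exceptionals(struct address_space *mapping,
                                        struct folio_batch *fbatch, pgoff_t *indices)
        {
                bool dax;
                int i;

                if (shmem_mapping(mapping))     /* shmem manages its own swap entries */
                        return;

                dax = dax_mapping(mapping);
                if (!dax) {
                        /* non-DAX shadow entries are cleared under both locks */
                        spin_lock(&mapping->host->i_lock);
                        xa_lock_irq(&mapping->i_pages);
                }

                for (i = 0; i < folio_batch_count(fbatch); i++) {
                        struct folio *folio = fbatch->folios[i];
                        pgoff_t index = indices[i];

                        if (!xa_is_value(folio))
                                continue;       /* a real folio, left for the caller */

                        if (dax)
                                dax_delete_mapping_entry(mapping, index);
                        else
                                __clear_shadow_entry(mapping, index, folio);
                }

                if (!dax) {
                        xa_unlock_irq(&mapping->i_pages);
                        if (mapping_shrinkable(mapping))
                                inode_add_lru(mapping->host);
                        spin_unlock(&mapping->host->i_lock);
                }
        }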
112 static int invalidate_exceptional_entry(struct address_space *mapping,
116 if (shmem_mapping(mapping) || dax_mapping(mapping))
118 clear_shadow_entry(mapping, index, entry);
126 static int invalidate_exceptional_entry2(struct address_space *mapping,
130 if (shmem_mapping(mapping))
132 if (dax_mapping(mapping))
133 return dax_invalidate_mapping_entry_sync(mapping, index);
134 clear_shadow_entry(mapping, index, entry);
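
Lines 112-134 show the two invalidation helpers for exceptional (value) entries. Filling in the unmatched return statements, they are approximately:

        static int invalidate_exceptional_entry(struct address_space *mapping,
                                                pgoff_t index, void *entry)
        {
                /* shmem handles its own entries; DAX entries are left alone here */
                if (shmem_mapping(mapping) || dax_mapping(mapping))
                        return 1;
                clear_shadow_entry(mapping, index, entry);
                return 1;
        }

        static int invalidate_exceptional_entry2(struct address_space *mapping,
                                                 pgoff_t index, void *entry)
        {
                /* shmem handles its own entries */
                if (shmem_mapping(mapping))
                        return 1;
                if (dax_mapping(mapping))
                        return dax_invalidate_mapping_entry_sync(mapping, index);
                clear_shadow_entry(mapping, index, entry);
                return 1;
        }

The difference is that the *2 variant, used by invalidate_inode_pages2_range(), must synchronously invalidate DAX entries rather than skip them.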
155 const struct address_space_operations *aops = folio->mapping->a_ops;
167 * We need to bail out if page->mapping is no longer equal to the original
168 * mapping. This happens a) when the VM reclaimed the page while we waited on
189 int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
191 if (folio->mapping != mapping)
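
The comment at lines 167-168 explains why truncate_inode_folio() (line 189) rechecks folio->mapping once the folio is locked. Reconstructed around the matched lines, the function is roughly:

        int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
        {
                /*
                 * Bail out if the folio was reclaimed, invalidated or moved to
                 * a different mapping while we slept waiting for the folio lock.
                 */
                if (folio->mapping != mapping)
                        return -EIO;

                truncate_cleanup_folio(folio);
                filemap_remove_folio(folio);
                return 0;
        }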
227 truncate_inode_folio(folio->mapping, folio);
246 truncate_inode_folio(folio->mapping, folio);
253 int generic_error_remove_page(struct address_space *mapping, struct page *page)
257 if (!mapping)
263 if (!S_ISREG(mapping->host->i_mode))
265 return truncate_inode_folio(mapping, page_folio(page));
269 static long mapping_evict_folio(struct address_space *mapping,
281 return remove_mapping(mapping, folio);
288 * Safely invalidate one page from its pagecache mapping.
297 struct address_space *mapping = folio_mapping(folio);
300 if (!mapping)
302 return mapping_evict_folio(mapping, folio);
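
Lines 288-302 are the kerneldoc and body of the single-page invalidation entry point that wraps mapping_evict_folio() (line 269). A hedged reconstruction, assuming the shape it has in this kernel generation:

        long invalidate_inode_page(struct page *page)
        {
                struct folio *folio = page_folio(page);
                struct address_space *mapping = folio_mapping(folio);

                /* The page may have been truncated before it was locked */
                if (!mapping)
                        return 0;
                return mapping_evict_folio(mapping, folio);
        }

mapping_evict_folio() itself (lines 269-281) refuses to evict dirty, under-writeback, or extra-referenced folios and otherwise hands the folio to remove_mapping().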
307 * @mapping: mapping to truncate
322 * mapping is large, it is probably the case that the final pages are the most
329 void truncate_inode_pages_range(struct address_space *mapping,
341 if (mapping_empty(mapping))
363 while (index < end && find_lock_entries(mapping, &index, end - 1,
365 truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
368 delete_from_page_cache_batch(mapping, &fbatch);
376 folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
390 folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
403 if (!find_get_entries(mapping, &index, end - 1, &fbatch,
424 truncate_inode_folio(mapping, folio);
427 truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
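
Lines 307-427 belong to truncate_inode_pages_range(): a batched find_lock_entries() pass (363-368), special handling of partially covered first/last folios (376, 390), then a find_get_entries() pass that waits out stragglers (403-427). A minimal hypothetical caller, assuming the locking described in the kerneldoc (the helper name is invented for illustration):

        /*
         * Hypothetical helper: drop the cached pages backing an inclusive byte
         * range, without touching user-space mappings (compare
         * truncate_pagecache_range() near the end of this listing, which also
         * unmaps).
         */
        static void example_drop_pagecache_range(struct inode *inode,
                                                 loff_t lstart, loff_t lend)
        {
                struct address_space *mapping = inode->i_mapping;

                filemap_invalidate_lock(mapping);
                truncate_inode_pages_range(mapping, lstart, lend);
                filemap_invalidate_unlock(mapping);
        }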
435 * @mapping: mapping to truncate
439 * mapping->invalidate_lock.
443 * mapping->nrpages can be non-zero when this function returns even after
444 * truncation of the whole mapping.
446 void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
448 truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
454 * @mapping: mapping to truncate
461 void truncate_inode_pages_final(struct address_space *mapping)
470 mapping_set_exiting(mapping);
472 if (!mapping_empty(mapping)) {
479 xa_lock_irq(&mapping->i_pages);
480 xa_unlock_irq(&mapping->i_pages);
483 truncate_inode_pages(mapping, 0);
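
Lines 461-483 are truncate_inode_pages_final(), called while an inode is being evicted. Reconstructed around the matches (the in-tree comments are longer), it is roughly:

        void truncate_inode_pages_final(struct address_space *mapping)
        {
                /*
                 * No new pages can appear once eviction has begun, but shadow
                 * entries still can; marking the mapping as exiting makes the
                 * insertion side back off.
                 */
                mapping_set_exiting(mapping);

                if (!mapping_empty(mapping)) {
                        /*
                         * Cycling the i_pages lock pairs with the insertion
                         * side: any racing shadow insertion has either
                         * completed (and is truncated below) or will observe
                         * the exiting flag.
                         */
                        xa_lock_irq(&mapping->i_pages);
                        xa_unlock_irq(&mapping->i_pages);
                }

                truncate_inode_pages(mapping, 0);
        }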
489 * @mapping: the address_space which holds the folios to invalidate
497 unsigned long mapping_try_invalidate(struct address_space *mapping,
508 while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
515 count += invalidate_exceptional_entry(mapping,
520 ret = mapping_evict_folio(mapping, folio);
543 * @mapping: the address_space which holds the cache to invalidate
555 unsigned long invalidate_mapping_pages(struct address_space *mapping,
558 return mapping_try_invalidate(mapping, start, end, NULL);
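
invalidate_mapping_pages() (lines 543-558) is just the exported wrapper around mapping_try_invalidate() with no residual-count pointer. A hypothetical use, dropping whatever clean, unmapped, unlocked cache an inode still holds (helper name invented for illustration):

        /* Hypothetical helper: best-effort drop of an inode's clean page cache */
        static unsigned long example_drop_clean_cache(struct inode *inode)
        {
                /* end == -1 means "up to the last possible index" */
                return invalidate_mapping_pages(inode->i_mapping, 0, -1);
        }

Dirty, locked, or mapped pages are skipped rather than written back, which is what distinguishes this from the invalidate_inode_pages2* family further down.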
569 static int invalidate_complete_folio2(struct address_space *mapping,
572 if (folio->mapping != mapping)
578 spin_lock(&mapping->host->i_lock);
579 xa_lock_irq(&mapping->i_pages);
585 xa_unlock_irq(&mapping->i_pages);
586 if (mapping_shrinkable(mapping))
587 inode_add_lru(mapping->host);
588 spin_unlock(&mapping->host->i_lock);
590 filemap_free_folio(mapping, folio);
593 xa_unlock_irq(&mapping->i_pages);
594 spin_unlock(&mapping->host->i_lock);
598 static int folio_launder(struct address_space *mapping, struct folio *folio)
602 if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
604 return mapping->a_ops->launder_folio(folio);
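
folio_launder() (lines 598-604) writes a dirty folio back through the owner's launder_folio() method before invalidate_inode_pages2_range() tries to evict it. With the unmatched dirty check filled in, it is approximately:

        static int folio_launder(struct address_space *mapping, struct folio *folio)
        {
                if (!folio_test_dirty(folio))
                        return 0;       /* nothing to write back */
                if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
                        return 0;       /* folio moved, or fs has no launder method */
                return mapping->a_ops->launder_folio(folio);
        }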
609 * @mapping: the address_space
618 int invalidate_inode_pages2_range(struct address_space *mapping,
629 if (mapping_empty(mapping))
634 while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
641 if (!invalidate_exceptional_entry2(mapping,
652 unmap_mapping_pages(mapping, indices[i],
658 if (unlikely(folio->mapping != mapping)) {
669 ret2 = folio_launder(mapping, folio);
671 if (!invalidate_complete_folio2(mapping, folio))
689 if (dax_mapping(mapping)) {
690 unmap_mapping_pages(mapping, start, end - start + 1, false);
698 * @mapping: the address_space
705 int invalidate_inode_pages2(struct address_space *mapping)
707 return invalidate_inode_pages2_range(mapping, 0, -1);
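
invalidate_inode_pages2_range() (lines 609-690) is the strict variant: it launders dirty folios, unmaps them from user space, and returns -EBUSY if something cannot be evicted; invalidate_inode_pages2() (line 707) applies that to the whole mapping. A hypothetical sketch of the classic direct-I/O use, where the cached copy of a just-written range must not survive (helper name invented for illustration):

        /*
         * Hypothetical: after a direct write of [pos, pos + len), evict any
         * stale cached pages covering that range.
         */
        static int example_dio_invalidate(struct inode *inode, loff_t pos, size_t len)
        {
                if (!len)
                        return 0;
                return invalidate_inode_pages2_range(inode->i_mapping,
                                                     pos >> PAGE_SHIFT,
                                                     (pos + len - 1) >> PAGE_SHIFT);
        }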
728 struct address_space *mapping = inode->i_mapping;
740 unmap_mapping_range(mapping, holebegin, 0, 1);
741 truncate_inode_pages(mapping, newsize);
742 unmap_mapping_range(mapping, holebegin, 0, 1);
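
Lines 728-742 are the heart of truncate_pagecache(), the helper filesystems call from their setattr path once i_size has been updated. Reconstructed around the matched lines:

        void truncate_pagecache(struct inode *inode, loff_t newsize)
        {
                struct address_space *mapping = inode->i_mapping;
                loff_t holebegin = round_up(newsize, PAGE_SIZE);

                /*
                 * unmap_mapping_range() runs twice: the first pass is an
                 * efficiency measure so truncate_inode_pages() has fewer
                 * per-page unmaps to do; the second is required for
                 * correctness, because private COWed pages can reappear in
                 * the range while the truncate is in progress.
                 */
                unmap_mapping_range(mapping, holebegin, 0, 1);
                truncate_inode_pages(mapping, newsize);
                unmap_mapping_range(mapping, holebegin, 0, 1);
        }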
836 struct address_space *mapping = inode->i_mapping;
853 unmap_mapping_range(mapping, unmap_start,
855 truncate_inode_pages_range(mapping, lstart, lend);
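
Lines 836-855 are from truncate_pagecache_range(), which unmaps only the pages fully inside the range (and leaves private COWed pages alone) before calling truncate_inode_pages_range(). A hypothetical hole-punch caller, with lend inclusive and the helper name invented for illustration:

        /*
         * Hypothetical filesystem hole-punch step: drop cached pages and user
         * mappings for the punched byte range before updating on-disk extents.
         */
        static void example_punch_hole_pagecache(struct inode *inode,
                                                 loff_t offset, loff_t len)
        {
                filemap_invalidate_lock(inode->i_mapping);
                truncate_pagecache_range(inode, offset, offset + len - 1);
                filemap_invalidate_unlock(inode->i_mapping);
        }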