Lines matching refs: mapping (cross-reference listing over mm/filemap.c; the number at the start of each match is its line number in that file, and non-matching lines are omitted)

120 static void page_cache_delete(struct address_space *mapping,
123 XA_STATE(xas, &mapping->i_pages, page->index);
126 mapping_set_update(&xas, mapping);
141 page->mapping = NULL;
145 mapping->nrexceptional += nr;
154 mapping->nrpages -= nr;
157 static void unaccount_page_cache_page(struct address_space *mapping,
170 cleancache_invalidate_page(mapping, page);
184 if (mapping_exiting(mapping) &&
210 filemap_nr_thps_dec(mapping);
224 account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
234 struct address_space *mapping = page->mapping;
238 unaccount_page_cache_page(mapping, page);
239 page_cache_delete(mapping, page, shadow);
242 static void page_cache_free_page(struct address_space *mapping,
247 freepage = mapping->a_ops->freepage;
269 struct address_space *mapping = page_mapping(page);
273 xa_lock_irqsave(&mapping->i_pages, flags);
275 xa_unlock_irqrestore(&mapping->i_pages, flags);
277 page_cache_free_page(mapping, page);
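The matches at 234-277 outline the page-cache delete path: unaccount the page, remove it from the xarray, then hand it to the filesystem's freepage callback after dropping the lock. A reconstructed sketch; lines not present in the listing are filled in from context, so treat the details as assumptions:

void __delete_from_page_cache(struct page *page, void *shadow)
{
        struct address_space *mapping = page->mapping;

        unaccount_page_cache_page(mapping, page);
        page_cache_delete(mapping, page, shadow);
}

void delete_from_page_cache(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        unsigned long flags;

        BUG_ON(!PageLocked(page));              /* caller holds the page lock */
        xa_lock_irqsave(&mapping->i_pages, flags);
        __delete_from_page_cache(page, NULL);   /* NULL: keep no shadow entry */
        xa_unlock_irqrestore(&mapping->i_pages, flags);

        page_cache_free_page(mapping, page);    /* ->freepage runs unlocked */
}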
283 * @mapping: the mapping to which pages belong
286 * The function walks over mapping->i_pages and removes pages passed in @pvec
287 * from the mapping. The function expects @pvec to be sorted by page index
289 * It tolerates holes in @pvec (mapping entries at those indices are not
295 static void page_cache_delete_batch(struct address_space *mapping,
298 XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
303 mapping_set_update(&xas, mapping);
327 page->mapping = NULL;
340 mapping->nrpages -= total_pages;
343 void delete_from_page_cache_batch(struct address_space *mapping,
352 xa_lock_irqsave(&mapping->i_pages, flags);
356 unaccount_page_cache_page(mapping, pvec->pages[i]);
358 page_cache_delete_batch(mapping, pvec);
359 xa_unlock_irqrestore(&mapping->i_pages, flags);
362 page_cache_free_page(mapping, pvec->pages[i]);
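The batch variant (lines 283-362) amortises the xarray lock over a whole pagevec: every page is unaccounted and the batch deleted under a single xa_lock_irqsave, and the pages are released only after the lock is dropped. Reconstructed under the same caveat:

void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct pagevec *pvec)
{
        int i;
        unsigned long flags;

        if (!pagevec_count(pvec))
                return;

        xa_lock_irqsave(&mapping->i_pages, flags);
        for (i = 0; i < pagevec_count(pvec); i++)
                unaccount_page_cache_page(mapping, pvec->pages[i]);
        page_cache_delete_batch(mapping, pvec);         /* one xarray walk */
        xa_unlock_irqrestore(&mapping->i_pages, flags);

        for (i = 0; i < pagevec_count(pvec); i++)
                page_cache_free_page(mapping, pvec->pages[i]);
}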
365 int filemap_check_errors(struct address_space *mapping)
369 if (test_bit(AS_ENOSPC, &mapping->flags) &&
370 test_and_clear_bit(AS_ENOSPC, &mapping->flags))
372 if (test_bit(AS_EIO, &mapping->flags) &&
373 test_and_clear_bit(AS_EIO, &mapping->flags))
379 static int filemap_check_and_keep_errors(struct address_space *mapping)
382 if (test_bit(AS_EIO, &mapping->flags))
384 if (test_bit(AS_ENOSPC, &mapping->flags))
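Lines 365-384 show the two flavours of write-error reporting on a mapping: filemap_check_errors() consumes the AS_ENOSPC/AS_EIO bits with test_and_clear_bit(), while the _keep_errors variant only peeks so a later fsync still sees them. Sketch filled in from the matches:

int filemap_check_errors(struct address_space *mapping)
{
        int ret = 0;

        /* AS_EIO, tested last, takes precedence over AS_ENOSPC */
        if (test_bit(AS_ENOSPC, &mapping->flags) &&
            test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                ret = -ENOSPC;
        if (test_bit(AS_EIO, &mapping->flags) &&
            test_and_clear_bit(AS_EIO, &mapping->flags))
                ret = -EIO;
        return ret;
}

static int filemap_check_and_keep_errors(struct address_space *mapping)
{
        /* peek only: the bits stay set for a later fsync to report */
        if (test_bit(AS_EIO, &mapping->flags))
                return -EIO;
        if (test_bit(AS_ENOSPC, &mapping->flags))
                return -ENOSPC;
        return 0;
}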
390 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
391 * @mapping: address space structure to write
396 * Start writeback against all of a mapping's dirty pages that lie
406 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
417 if (!mapping_can_writeback(mapping) ||
418 !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
421 wbc_attach_fdatawrite_inode(&wbc, mapping->host);
422 ret = do_writepages(mapping, &wbc);
427 static inline int __filemap_fdatawrite(struct address_space *mapping,
430 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
433 int filemap_fdatawrite(struct address_space *mapping)
435 return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
439 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
442 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
448 * @mapping: target address_space
455 int filemap_flush(struct address_space *mapping)
457 return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
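Lines 390-457 are the writeback kick-off family; everything funnels into __filemap_fdatawrite_range(), which builds a writeback_control and calls do_writepages() only if the mapping is capable of writeback and actually has dirty-tagged pages. Reconstructed sketch, unmatched lines filled in from context:

int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                               loff_t end, int sync_mode)
{
        int ret;
        struct writeback_control wbc = {
                .sync_mode  = sync_mode,        /* WB_SYNC_ALL or WB_SYNC_NONE */
                .nr_to_write = LONG_MAX,
                .range_start = start,
                .range_end   = end,
        };

        if (!mapping_can_writeback(mapping) ||
            !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
                return 0;

        wbc_attach_fdatawrite_inode(&wbc, mapping->host);
        ret = do_writepages(mapping, &wbc);
        wbc_detach_inode(&wbc);
        return ret;
}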
463 * @mapping: address space within which to check
473 bool filemap_range_has_page(struct address_space *mapping,
477 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
504 static void __filemap_fdatawait_range(struct address_space *mapping,
519 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
537 * @mapping: address space structure to wait for
551 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
554 __filemap_fdatawait_range(mapping, start_byte, end_byte);
555 return filemap_check_errors(mapping);
561 * @mapping: address space structure to wait for
573 int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
576 __filemap_fdatawait_range(mapping, start_byte, end_byte);
577 return filemap_check_and_keep_errors(mapping);
599 struct address_space *mapping = file->f_mapping;
601 __filemap_fdatawait_range(mapping, start_byte, end_byte);
608 * @mapping: address space structure to wait for
620 int filemap_fdatawait_keep_errors(struct address_space *mapping)
622 __filemap_fdatawait_range(mapping, 0, LLONG_MAX);
623 return filemap_check_and_keep_errors(mapping);
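The wait side (lines 504-623) loops over writeback-tagged pages with pagevec_lookup_range_tag() and waits on each; the exported callers differ only in whether they consume or keep the mapping's error bits afterwards. Sketch of the core loop, with unmatched lines filled in:

static void __filemap_fdatawait_range(struct address_space *mapping,
                                      loff_t start_byte, loff_t end_byte)
{
        pgoff_t index = start_byte >> PAGE_SHIFT;
        pgoff_t end = end_byte >> PAGE_SHIFT;
        struct pagevec pvec;
        unsigned nr_pages;

        if (end_byte < start_byte)
                return;

        pagevec_init(&pvec);
        while (index <= end) {
                unsigned i;

                nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
                                end, PAGECACHE_TAG_WRITEBACK);
                if (!nr_pages)
                        break;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        wait_on_page_writeback(page);
                        ClearPageError(page);   /* errors reported via the
                                                   mapping flags/wb_err */
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}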
628 static bool mapping_needs_writeback(struct address_space *mapping)
630 if (dax_mapping(mapping))
631 return mapping->nrexceptional;
633 return mapping->nrpages;
638 * @mapping: the address_space for the pages
649 int filemap_write_and_wait_range(struct address_space *mapping,
654 if (mapping_needs_writeback(mapping)) {
655 err = __filemap_fdatawrite_range(mapping, lstart, lend,
664 int err2 = filemap_fdatawait_range(mapping,
670 filemap_check_errors(mapping);
673 err = filemap_check_errors(mapping);
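filemap_write_and_wait_range() (lines 638-673) composes the two halves, with one subtlety visible in the matches: after a write error other than -EIO it still waits, because pages may have been written partially; on -EIO it skips the wait and only consumes any stored error bits. Reconstructed sketch:

int filemap_write_and_wait_range(struct address_space *mapping,
                                 loff_t lstart, loff_t lend)
{
        int err = 0;

        if (mapping_needs_writeback(mapping)) {
                err = __filemap_fdatawrite_range(mapping, lstart, lend,
                                                 WB_SYNC_ALL);
                /* even on error the pages may be partially written, so
                 * wait anyway; -EIO is the exception */
                if (err != -EIO) {
                        int err2 = filemap_fdatawait_range(mapping,
                                                           lstart, lend);
                        if (!err)
                                err = err2;
                } else {
                        /* just clear any previously stored errors */
                        filemap_check_errors(mapping);
                }
        } else {
                err = filemap_check_errors(mapping);
        }
        return err;
}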
679 void __filemap_set_wb_err(struct address_space *mapping, int err)
681 errseq_t eseq = errseq_set(&mapping->wb_err, err);
683 trace_filemap_set_wb_err(mapping, eseq);
696 * Grab the wb_err from the mapping. If it matches what we have in the file,
699 * If it doesn't match, then take the mapping value, set the "seen" flag in
705 * While we handle mapping->wb_err with atomic operations, the f_wb_err
715 struct address_space *mapping = file->f_mapping;
718 if (errseq_check(&mapping->wb_err, old)) {
722 err = errseq_check_and_advance(&mapping->wb_err,
733 clear_bit(AS_EIO, &mapping->flags);
734 clear_bit(AS_ENOSPC, &mapping->flags);
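Lines 679-734 are the errseq_t-based error tracking: each struct file samples mapping->wb_err into f_wb_err, and file_check_and_advance_wb_err() advances that sample under f_lock only when the lockless errseq_check() sees a change. A reconstruction with tracing and a relock detail elided:

int file_check_and_advance_wb_err(struct file *file)
{
        int err = 0;
        errseq_t old = READ_ONCE(file->f_wb_err);
        struct address_space *mapping = file->f_mapping;

        /* lockless fast path: nothing changed since we last sampled */
        if (errseq_check(&mapping->wb_err, old)) {
                spin_lock(&file->f_lock);
                err = errseq_check_and_advance(&mapping->wb_err,
                                               &file->f_wb_err);
                spin_unlock(&file->f_lock);
        }

        /* keep the legacy AS_* flags coherent for mixed users */
        clear_bit(AS_EIO, &mapping->flags);
        clear_bit(AS_ENOSPC, &mapping->flags);
        return err;
}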
758 struct address_space *mapping = file->f_mapping;
760 if (mapping_needs_writeback(mapping)) {
761 err = __filemap_fdatawrite_range(mapping, lstart, lend,
765 __filemap_fdatawait_range(mapping, lstart, lend);
792 struct address_space *mapping = old->mapping;
793 void (*freepage)(struct page *) = mapping->a_ops->freepage;
795 XA_STATE(xas, &mapping->i_pages, offset);
800 VM_BUG_ON_PAGE(new->mapping, new);
803 new->mapping = mapping;
811 old->mapping = NULL;
831 struct address_space *mapping,
835 XA_STATE(xas, &mapping->i_pages, offset);
842 mapping_set_update(&xas, mapping);
845 page->mapping = mapping;
889 mapping->nrexceptional--;
890 mapping->nrpages++;
909 page->mapping = NULL;
919 * @mapping: the page's address_space
928 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
931 return __add_to_page_cache_locked(page, mapping, offset,
936 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
943 ret = __add_to_page_cache_locked(page, mapping, offset,
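Insertion (lines 792-943) goes through __add_to_page_cache_locked(), which may replace a shadow entry left behind by reclaim (note nrexceptional-- / nrpages++ at 889-890); add_to_page_cache_lru() feeds that shadow to workingset_refault() before putting the page on the LRU. Reconstructed sketch:

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                          pgoff_t offset, gfp_t gfp_mask)
{
        void *shadow = NULL;
        int ret;

        __SetPageLocked(page);
        ret = __add_to_page_cache_locked(page, mapping, offset,
                                         gfp_mask, &shadow);
        if (unlikely(ret))
                __ClearPageLocked(page);
        else {
                /* a shadow entry means the page was only recently
                 * evicted: let the workingset code re-activate it */
                if (!(gfp_mask & __GFP_WRITE) && shadow)
                        workingset_refault(page, shadow);
                lru_cache_add(page);
        }
        return ret;
}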
1523 struct address_space *mapping;
1526 mapping = page_mapping(page);
1527 if (mapping)
1528 mapping_set_error(mapping, err);
1607 * @mapping: Mapping.
1624 pgoff_t page_cache_next_miss(struct address_space *mapping,
1627 XA_STATE(xas, &mapping->i_pages, index);
1643 * @mapping: Mapping.
1660 pgoff_t page_cache_prev_miss(struct address_space *mapping,
1663 XA_STATE(xas, &mapping->i_pages, index);
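page_cache_next_miss()/page_cache_prev_miss() (lines 1607-1663) walk the xarray looking for the first hole; a value entry (shadow) counts as a miss, and the scan also stops if the index wraps. Sketch of the forward walk:

pgoff_t page_cache_next_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan)
{
        XA_STATE(xas, &mapping->i_pages, index);

        while (max_scan--) {
                void *entry = xas_next(&xas);

                if (!entry || xa_is_value(entry))
                        break;                  /* hole or shadow: a miss */
                if (xas.xa_index == 0)
                        break;                  /* wrapped past ULONG_MAX */
        }

        return xas.xa_index;
}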
1679 * @mapping: the address_space to search
1682 * Looks up the page cache slot at @mapping & @offset. If there is a
1690 struct page *find_get_entry(struct address_space *mapping, pgoff_t index)
1692 XA_STATE(xas, &mapping->i_pages, index);
1728 * @mapping: The address_space to search.
1731 * Looks up the page at @mapping & @index. If there is a page in the
1740 struct page *find_lock_entry(struct address_space *mapping, pgoff_t index)
1745 page = find_get_entry(mapping, index);
1749 if (unlikely(page->mapping != mapping)) {
1761 * @mapping: The address_space to search.
1766 * Looks up the page cache entry at @mapping & @index.
1791 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
1797 page = find_get_entry(mapping, index);
1814 if (unlikely(page->mapping != mapping)) {
1835 if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
1851 err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
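The single-page lookups (lines 1679-1851) share the idiom visible at 1749 and 1814: after locking a page that was found without the xarray lock held, re-check page->mapping, since truncation may have removed the page in the window. find_lock_entry() reconstructed:

struct page *find_lock_entry(struct address_space *mapping, pgoff_t index)
{
        struct page *page;

repeat:
        page = find_get_entry(mapping, index);
        if (page && !xa_is_value(page)) {
                lock_page(page);
                /* has the page been truncated while unlocked? */
                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
                        put_page(page);
                        goto repeat;
                }
                VM_BUG_ON_PAGE(!thp_contains(page, index), page);
        }
        return page;
}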
1873 * @mapping: The address_space to search
1880 * @nr_entries entries in the mapping. The entries are placed at
1884 * The search returns a group of mapping-contiguous page cache entries
1898 unsigned find_get_entries(struct address_space *mapping,
1902 XA_STATE(xas, &mapping->i_pages, start);
1953 * @mapping: The address_space to search
1960 * pages in the mapping starting at index @start and up to index @end
1964 * The search returns a group of mapping-contiguous pages with ascending
1972 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
1976 XA_STATE(xas, &mapping->i_pages, *start);
2028 * @mapping: The address_space to search
2038 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
2041 XA_STATE(xas, &mapping->i_pages, index);
2082 * @mapping: the address_space to search
2094 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
2098 XA_STATE(xas, &mapping->i_pages, *index);
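Lines 1873-2098 are the gang lookups. A hypothetical caller sketch (walk_dirty_pages is not from the source) showing the usual pattern around pagevec_lookup_range_tag(), the pagevec wrapper for find_get_pages_range_tag():

#include <linux/pagemap.h>
#include <linux/pagevec.h>

static void walk_dirty_pages(struct address_space *mapping,
                             pgoff_t index, pgoff_t end)
{
        struct pagevec pvec;
        unsigned nr, i;

        pagevec_init(&pvec);
        /* the lookup advances @index past the last page it returned */
        while ((nr = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
                                              PAGECACHE_TAG_DIRTY))) {
                for (i = 0; i < nr; i++) {
                        struct page *page = pvec.pages[i];

                        /* each returned page carries a reference */
                        /* ... examine page here ... */
                        (void)page;
                }
                pagevec_release(&pvec);         /* drops the references */
                cond_resched();
        }
}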
2180 * mapping->a_ops->readpage() function for the actual low-level stuff.
2193 struct address_space *mapping = filp->f_mapping;
2194 struct inode *inode = mapping->host;
2238 page = find_get_page(mapping, index);
2242 page_cache_sync_readahead(mapping,
2245 page = find_get_page(mapping, index);
2254 page_cache_async_readahead(mapping,
2284 !mapping->a_ops->is_partially_uptodate)
2292 if (!page->mapping)
2294 if (!mapping->a_ops->is_partially_uptodate(page,
2331 if (mapping_writably_mapped(mapping))
2379 if (!page->mapping) {
2404 error = mapping->a_ops->readpage(filp, page);
2429 if (page->mapping == NULL) {
2457 page = page_cache_alloc(mapping);
2462 error = add_to_page_cache_lru(page, mapping, index,
2463 mapping_gfp_constraint(mapping, GFP_KERNEL));
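In the buffered read path (lines 2180-2463), a page missing from the cache is allocated, inserted, and read via mapping->a_ops->readpage(). A hypothetical helper (read_one_page is not in the source) condensing that slow path:

static struct page *read_one_page(struct file *filp, pgoff_t index)
{
        struct address_space *mapping = filp->f_mapping;
        struct page *page;
        int error;

        page = page_cache_alloc(mapping);
        if (!page)
                return ERR_PTR(-ENOMEM);

        error = add_to_page_cache_lru(page, mapping, index,
                        mapping_gfp_constraint(mapping, GFP_KERNEL));
        if (error) {
                /* -EEXIST: another task inserted first; the real loop
                 * retries the find_get_page() lookup in that case */
                put_page(page);
                return ERR_PTR(error);
        }

        /* page is locked; ->readpage() unlocks it when I/O completes */
        error = mapping->a_ops->readpage(filp, page);
        if (error) {
                put_page(page);
                return ERR_PTR(error);
        }

        wait_on_page_locked(page);
        if (!PageUptodate(page)) {
                put_page(page);
                return ERR_PTR(-EIO);
        }
        return page;
}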
2520 struct address_space *mapping = file->f_mapping;
2521 struct inode *inode = mapping->host;
2526 if (filemap_range_has_page(mapping, iocb->ki_pos,
2530 retval = filemap_write_and_wait_range(mapping,
2539 retval = mapping->a_ops->direct_IO(iocb, iter);
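For O_DIRECT reads (lines 2520-2539), IOCB_NOWAIT bails with -EAGAIN if any cached page overlaps the range; otherwise dirty pages are flushed before ->direct_IO() is issued. A hypothetical condensation (direct_read_gate is not in the source):

static ssize_t direct_read_gate(struct kiocb *iocb, struct iov_iter *iter)
{
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        size_t count = iov_iter_count(iter);
        ssize_t retval;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                /* cannot wait: any overlapping cached page means -EAGAIN */
                if (filemap_range_has_page(mapping, iocb->ki_pos,
                                           iocb->ki_pos + count - 1))
                        return -EAGAIN;
        } else {
                retval = filemap_write_and_wait_range(mapping, iocb->ki_pos,
                                iocb->ki_pos + count - 1);
                if (retval < 0)
                        return retval;
        }

        return mapping->a_ops->direct_IO(iocb, iter);
}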
2623 struct address_space *mapping = file->f_mapping;
2624 DEFINE_READAHEAD(ractl, file, mapping, vmf->pgoff);
2674 struct address_space *mapping = file->f_mapping;
2687 page_cache_async_readahead(mapping, ra, file,
2721 struct address_space *mapping = file->f_mapping;
2723 struct inode *inode = mapping->host;
2736 page = find_get_page(mapping, offset);
2750 page = pagecache_get_page(mapping, offset,
2764 if (unlikely(compound_head(page)->mapping != mapping)) {
2811 error = mapping->a_ops->readpage(file, page);
2845 struct address_space *mapping = file->f_mapping;
2848 XA_STATE(xas, &mapping->i_pages, start_pgoff);
2880 if (head->mapping != mapping || !PageUptodate(head))
2883 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
2921 if (page->mapping != inode->i_mapping) {
2948 struct address_space *mapping = file->f_mapping;
2950 if (!mapping->a_ops->readpage)
2997 static struct page *do_read_cache_page(struct address_space *mapping,
3006 page = find_get_page(mapping, index);
3011 err = add_to_page_cache_lru(page, mapping, index, gfp);
3024 err = mapping->a_ops->readpage(data, page);
3055 * otherwise serialising on page lock to stabilise the mapping gives
3059 * will be a race with remove_mapping that determines if the mapping
3078 if (!page->mapping) {
3106 * @mapping: the page's address_space
3118 struct page *read_cache_page(struct address_space *mapping,
3123 return do_read_cache_page(mapping, index, filler, data,
3124 mapping_gfp_mask(mapping));
3130 * @mapping: the page's address_space
3134 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
3141 struct page *read_cache_page_gfp(struct address_space *mapping,
3145 return do_read_cache_page(mapping, index, NULL, NULL, gfp);
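do_read_cache_page() (lines 2997-3145) is lookup-or-insert with a filler: find_get_page(), else allocate and add_to_page_cache_lru(), then run the filler or ->readpage(); -EEXIST from insertion means a racing task won, so the lookup is retried. Simplified sketch, with the truncation/locking dance discussed around line 3055 elided:

static struct page *do_read_cache_page(struct address_space *mapping,
                pgoff_t index, int (*filler)(void *, struct page *),
                void *data, gfp_t gfp)
{
        struct page *page;
        int err;

repeat:
        page = find_get_page(mapping, index);
        if (!page) {
                page = __page_cache_alloc(gfp);
                if (!page)
                        return ERR_PTR(-ENOMEM);
                err = add_to_page_cache_lru(page, mapping, index, gfp);
                if (unlikely(err)) {
                        put_page(page);
                        if (err == -EEXIST)
                                goto repeat;    /* lost the insertion race */
                        return ERR_PTR(err);
                }
                if (filler)
                        err = filler(data, page);
                else    /* no filler: @data is really the struct file * */
                        err = mapping->a_ops->readpage(data, page);
                if (err < 0) {
                        put_page(page);
                        return ERR_PTR(err);
                }
                wait_on_page_locked(page);      /* filler unlocks on I/O end */
        }
        if (!PageUptodate(page)) {
                put_page(page);
                return ERR_PTR(-EIO);
        }
        return page;
}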
3149 int pagecache_write_begin(struct file *file, struct address_space *mapping,
3153 const struct address_space_operations *aops = mapping->a_ops;
3155 return aops->write_begin(file, mapping, pos, len, flags,
3160 int pagecache_write_end(struct file *file, struct address_space *mapping,
3164 const struct address_space_operations *aops = mapping->a_ops;
3166 return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
3195 struct address_space *mapping = file->f_mapping;
3196 struct inode *inode = mapping->host;
3211 written = filemap_write_and_wait_range(mapping, pos,
3223 written = invalidate_inode_pages2_range(mapping,
3235 written = mapping->a_ops->direct_IO(iocb, from);
3252 * Skip invalidation for async writes or if mapping has no pages.
3254 if (written > 0 && mapping->nrpages &&
3255 invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end))
3277 struct page *grab_cache_page_write_begin(struct address_space *mapping,
3286 page = pagecache_get_page(mapping, index, fgp_flags,
3287 mapping_gfp_mask(mapping));
3298 struct address_space *mapping = file->f_mapping;
3299 const struct address_space_operations *a_ops = mapping->a_ops;
3336 status = a_ops->write_begin(file, mapping, pos, bytes, flags,
3341 if (mapping_writably_mapped(mapping))
3347 status = a_ops->write_end(file, mapping, pos, bytes, copied,
3372 balance_dirty_pages_ratelimited(mapping);
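generic_perform_write() (lines 3298-3372) loops write_begin -> copy from the iterator -> write_end, throttling through balance_dirty_pages_ratelimited(). A simplified sketch under stated assumptions: perform_write_sketch is not the source's name, copy_page_from_iter() stands in for the source's atomic usercopy, and the fault-in-and-retry handling is reduced to a bail-out:

static ssize_t perform_write_sketch(struct file *file, struct iov_iter *i,
                                    loff_t pos)
{
        struct address_space *mapping = file->f_mapping;
        const struct address_space_operations *a_ops = mapping->a_ops;
        ssize_t written = 0;
        long status = 0;

        do {
                struct page *page;
                void *fsdata;
                unsigned long offset = pos & (PAGE_SIZE - 1);
                unsigned long bytes = min_t(unsigned long, PAGE_SIZE - offset,
                                            iov_iter_count(i));
                size_t copied;

                status = a_ops->write_begin(file, mapping, pos, bytes, 0,
                                            &page, &fsdata);
                if (unlikely(status < 0))
                        break;

                if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page); /* vs. concurrent mmap stores */

                copied = copy_page_from_iter(page, offset, bytes, i);
                flush_dcache_page(page);

                status = a_ops->write_end(file, mapping, pos, bytes, copied,
                                          page, fsdata);
                if (unlikely(status < 0))
                        break;
                if (unlikely(copied == 0)) {
                        /* the source faults the user pages in and retries;
                         * this sketch just gives up */
                        status = -EFAULT;
                        break;
                }
                pos += status;
                written += status;

                balance_dirty_pages_ratelimited(mapping);
        } while (iov_iter_count(i));

        return written ? written : status;
}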
3403 struct address_space * mapping = file->f_mapping;
3404 struct inode *inode = mapping->host;
3451 err = filemap_write_and_wait_range(mapping, pos, endbyte);
3455 invalidate_mapping_pages(mapping,
3525 struct address_space * const mapping = page->mapping;
3531 if (mapping && mapping->a_ops->releasepage)
3532 return mapping->a_ops->releasepage(page, gfp_mask);
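Finally, try_to_release_page() (lines 3525-3532) defers to the filesystem's releasepage() when one exists and otherwise falls back to stripping buffer heads; pages under writeback are never released. Reconstruction:

int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
        struct address_space * const mapping = page->mapping;

        BUG_ON(!PageLocked(page));
        if (PageWriteback(page))
                return 0;

        if (mapping && mapping->a_ops->releasepage)
                return mapping->a_ops->releasepage(page, gfp_mask);
        return try_to_free_buffers(page);
}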