Lines matching refs: page

39  * If a page does not map to a contiguous run of blocks then it simply falls
42 * Why is this? If a page's completion depends on a number of different BIOs
44 * status of that page is hard. See end_buffer_async_read() for the details.
53 struct page *page = bv->bv_page;
54 page_endio(page, bio_op(bio),
77 /* Restrict the given (page cache) mask for slab allocations */
96 * the page, which allows readpage to avoid triggering a duplicate call
100 * them. So when the buffer is up to date and the page size == block size,
101 * this marks the page up to date instead of adding new buffers.
104 map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
106 struct inode *inode = page->mapping->host;
110 if (!page_has_buffers(page)) {
113 * the page and the page just needs to be set up to date
117 SetPageUptodate(page);
120 create_empty_buffers(page, i_blocksize(inode), 0);
122 head = page_buffers(page);
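
The comment at lines 96-101 boils down to one shortcut: when the page has no buffers and the single buffer handed back by get_block is already up to date and covers the whole page, the page itself is marked up to date instead of having buffer_heads attached. A simplified userspace model of that decision follows; the demo_* names are hypothetical stand-ins for struct page and struct buffer_head, not the kernel code itself.

/* Hypothetical userspace model of the shortcut in map_buffer_to_page(). */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12		/* assumes 4 KiB pages */

struct demo_page { bool has_buffers; bool uptodate; };
struct demo_buffer { bool uptodate; unsigned block_bits; };

static void map_buffer_to_page_model(struct demo_page *page,
				     const struct demo_buffer *bh)
{
	if (!page->has_buffers) {
		/* One up-to-date buffer covering the whole page: mark the
		 * page up to date instead of attaching buffers. */
		if (bh->block_bits == DEMO_PAGE_SHIFT && bh->uptodate) {
			page->uptodate = true;
			return;
		}
		page->has_buffers = true;	/* create_empty_buffers() here */
	}
	/* ... otherwise walk the buffers and mark the matching one ... */
}

int main(void)
{
	struct demo_page p = { .has_buffers = false, .uptodate = false };
	struct demo_buffer b = { .uptodate = true, .block_bits = 12 };

	map_buffer_to_page_model(&p, &b);
	printf("page uptodate: %d, has buffers: %d\n", p.uptodate, p.has_buffers);
	return 0;
}
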
138 struct page *page;
158 struct page *page = args->page;
159 struct inode *inode = page->mapping->host;
180 gfp = readahead_gfp_mask(page->mapping);
183 gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
186 if (page_has_buffers(page))
189 block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
222 * Then do more get_blocks calls until we are done with this page.
224 map_bh->b_page = page;
245 /* some filesystems will copy data into the page during
248 * we just collected from get_block into the page's buffers
252 map_buffer_to_page(page, map_bh, page_block);
277 zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
279 SetPageUptodate(page);
280 unlock_page(page);
284 SetPageMappedToDisk(page);
287 if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
288 cleancache_get_page(page) == 0) {
289 SetPageUptodate(page);
294 * This page will go to BIO. Do we need to send this BIO off first?
303 page))
315 if (bio_add_page(args->bio, page, length, 0) < length) {
333 if (!PageUptodate(page))
334 block_read_full_page(page, args->get_block);
336 unlock_page(page);
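
The shift at line 189 turns a page index into the index of the first file block that page covers, and zero_user_segment() at line 277 clears whatever tail of the page falls into a hole. A worked example of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12) and 1 KiB filesystem blocks (blkbits = 10); plain userspace code, not kernel code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned page_shift = 12;	/* assumed 4 KiB pages */
	const unsigned blkbits = 10;	/* assumed 1 KiB blocks */
	const uint64_t page_index = 5;	/* sixth page of the file */

	/* block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits) */
	uint64_t block_in_file = page_index << (page_shift - blkbits);
	printf("page %llu starts at file block %llu\n",
	       (unsigned long long)page_index,
	       (unsigned long long)block_in_file);	/* 5 * 4 = 20 */

	/* If get_block reports a hole from the third block of the page
	 * onwards, the byte range [first_hole << blkbits, PAGE_SIZE)
	 * is zeroed, as at line 277. */
	unsigned first_hole = 2;
	printf("zero bytes [%u, %u) of the page\n",
	       first_hole << blkbits, 1u << page_shift);
	return 0;
}
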
345 * This function walks the pages and the blocks within each page, building and
350 * - encountering a page which has buffers
351 * - encountering a page which has a non-hole after a hole
352 * - encountering a page with non-contiguous blocks
355 * It does handle a page which has holes at the end - that is a common case:
381 struct page *page;
387 while ((page = readahead_page(rac))) {
388 prefetchw(&page->flags);
389 args.page = page;
392 put_page(page);
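
The give-up conditions listed at lines 350-352 are all ways a page can break the one-large-contiguous-BIO assumption. The batching pattern itself is simple: keep appending pages while their blocks continue the current run, submit the run when contiguity (or the size cap) breaks, and submit the leftover run after the loop. A userspace sketch of that pattern with hypothetical names, identifying blocks by plain integers:

#include <stdio.h>

#define BATCH_MAX 16	/* mirrors the "full-size (16-page) BIOs" remark */

struct batch { int start; int len; };

static void submit(struct batch *b)
{
	if (b->len)
		printf("submit blocks [%d, %d)\n", b->start, b->start + b->len);
	b->len = 0;
}

int main(void)
{
	/* Disk block each successive page maps to. */
	int page_to_block[] = { 8, 9, 10, 40, 41, 42, 43 };
	struct batch b = { 0, 0 };
	size_t i;

	for (i = 0; i < sizeof(page_to_block) / sizeof(*page_to_block); i++) {
		int blk = page_to_block[i];

		/* Run broken (or full): flush it before starting a new one. */
		if (b.len && (blk != b.start + b.len || b.len == BATCH_MAX))
			submit(&b);
		if (!b.len)
			b.start = blk;
		b.len++;
	}
	submit(&b);	/* like the final mpage_bio_submit() after the loop */
	return 0;
}
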
402 int mpage_readpage(struct page *page, get_block_t get_block)
405 .page = page,
420 * If the page has buffers then they will be used for obtaining the disk
424 * If the page has no buffers (preferred) then the page is mapped here.
426 * If all blocks are found to be contiguous then the page can go into the
431 * just allocate full-size (16-page) BIOs.
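
For context on how these helpers are normally reached: a filesystem wires them into its address_space_operations and supplies its own get_block callback. The sketch below follows the pattern ext2 uses in kernels of roughly this vintage; ext2_get_block and the ext2_* wrappers belong to ext2, not to this file, so treat the exact signatures as an assumption.

static int ext2_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext2_get_block);
}

static void ext2_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, ext2_get_block);
}

static int ext2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, ext2_get_block);
}
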
445 static void clean_buffers(struct page *page, unsigned first_unmapped)
449 if (!page_has_buffers(page))
451 head = page_buffers(page);
462 * we cannot drop the bh if the page is not uptodate or a concurrent
466 if (buffer_heads_over_limit && PageUptodate(page))
467 try_to_free_buffers(page);
471 * For situations where we want to clean all buffers attached to a page.
472 * We don't need to calculate how many buffers are attached to the page,
475 void clean_page_buffers(struct page *page)
477 clean_buffers(page, ~0U);
480 static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
485 struct address_space *mapping = page->mapping;
486 struct inode *inode = page->mapping->host;
505 if (page_has_buffers(page)) {
506 struct buffer_head *head = page_buffers(page);
547 * Page has buffers, but they are all unmapped. The page was
556 * The page has no buffers: map it to disk
558 BUG_ON(!PageUptodate(page));
559 block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
561 map_bh.b_page = page;
591 if (page->index >= end_index) {
593 * The page straddles i_size. It must be zeroed out on each
595 * "A file is mapped in multiples of the page size. For a file
596 * that is not a multiple of the page size, the remaining memory
602 if (page->index > end_index || !offset)
604 zero_user_segment(page, offset, PAGE_SIZE);
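
The comment at lines 591-604 covers the page that straddles i_size: the bytes past end of file must be zeroed on every writepage because that tail may be mmapped. A worked example of the index/offset arithmetic, assuming 4 KiB pages and an example i_size of 10000 bytes; illustrative userspace code only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned page_shift = 12;			/* assumed 4 KiB pages */
	const uint64_t page_size = 1ull << page_shift;
	const uint64_t i_size = 10000;			/* example file size */

	uint64_t end_index = i_size >> page_shift;	/* 10000 / 4096 = 2 */
	uint64_t offset = i_size & (page_size - 1);	/* 10000 % 4096 = 1808 */
	uint64_t index;

	for (index = 0; index <= end_index; index++) {
		if (index < end_index)
			printf("page %llu: wholly inside i_size, written as is\n",
			       (unsigned long long)index);
		else if (offset)
			printf("page %llu: straddles i_size, zero bytes [%llu, %llu)\n",
			       (unsigned long long)index,
			       (unsigned long long)offset,
			       (unsigned long long)page_size);
		else
			printf("page %llu: wholly beyond i_size, not written\n",
			       (unsigned long long)index);
	}
	return 0;
}
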
608 * This page will go to BIO. Do we need to send this BIO off first?
617 page, wbc))
630 * Must try to add the page before marking the buffer clean or
634 wbc_account_cgroup_owner(wbc, page, PAGE_SIZE);
636 if (bio_add_page(bio, page, length, 0) < length) {
641 clean_buffers(page, first_unmapped);
643 BUG_ON(PageWriteback(page));
644 set_page_writeback(page);
645 unlock_page(page);
662 ret = mapping->a_ops->writepage(page, wbc);
687 * If a page is already under I/O, generic_writepages() skips it, even
726 int mpage_writepage(struct page *page, get_block_t get_block,
735 int ret = __mpage_writepage(page, wbc, &mpd);