Lines Matching refs:page

42  * see if a page needs releasing upon read_cache_pages() failure
49 struct page *page)
51 if (page_has_private(page)) {
52 if (!trylock_page(page))
54 page->mapping = mapping;
55 do_invalidatepage(page, 0, PAGE_SIZE);
56 page->mapping = NULL;
57 unlock_page(page);
59 put_page(page);
68 struct page *victim;
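
(The matches above and below appear to come from mm/readahead.c in a pre-folio v5.x kernel; the sketches that follow are reconstructions from such a tree, so exact lines and names may differ by version.) Lines 42-59 and 68 are the failure-path helpers: if the caller of read_cache_pages() attached private data to the pages (e.g. NFS marking pages that are cached locally on disk), the filesystem must be given a chance to clean up before each page is released. Note how ->mapping is restored for the duration of the call so do_invalidatepage() can reach the filesystem:

    static void read_cache_pages_invalidate_page(struct address_space *mapping,
                                                 struct page *page)
    {
            if (page_has_private(page)) {
                    if (!trylock_page(page))
                            BUG();  /* nobody else has seen this page yet */
                    page->mapping = mapping;        /* restore fs context */
                    do_invalidatepage(page, 0, PAGE_SIZE);
                    page->mapping = NULL;
                    unlock_page(page);
            }
            put_page(page);
    }

    /* release a list of pages, invalidating each first if need be */
    static void read_cache_pages_invalidate_pages(struct address_space *mapping,
                                                  struct list_head *pages)
    {
            struct page *victim;

            while (!list_empty(pages)) {
                    victim = lru_to_page(pages);
                    list_del(&victim->lru);
                    read_cache_pages_invalidate_page(mapping, victim);
            }
    }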
82 * @filler: callback routine for filling a single page.
90 int (*filler)(void *, struct page *), void *data)
92 struct page *page;
96 page = lru_to_page(pages);
97 list_del(&page->lru);
98 if (add_to_page_cache_lru(page, mapping, page->index,
100 read_cache_pages_invalidate_page(mapping, page);
103 put_page(page);
105 ret = filler(data, page);
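
Lines 82-105 are read_cache_pages() itself: drain the caller's list, insert each page into the page cache and LRU, drop the list's reference, and call @filler to start I/O on it; the first filler error aborts the walk and invalidates everything still queued. A sketch of the loop:

    while (!list_empty(pages)) {
            page = lru_to_page(pages);
            list_del(&page->lru);
            if (add_to_page_cache_lru(page, mapping, page->index,
                            readahead_gfp_mask(mapping))) {
                    /* insertion failed: let the fs clean up, drop the page */
                    read_cache_pages_invalidate_page(mapping, page);
                    continue;
            }
            /* page cache and LRU hold their own references now */
            put_page(page);

            ret = filler(data, page);
            if (unlikely(ret)) {
                    read_cache_pages_invalidate_pages(mapping, pages);
                    break;
            }
            task_io_account_read(PAGE_SIZE);
    }

The put_page() after a successful insertion is not a bug: add_to_page_cache_lru() takes its own references, and the reference inherited from the list is no longer needed.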
121 struct page *page;
132 while ((page = readahead_page(rac))) {
133 unlock_page(page);
134 put_page(page);
144 while ((page = readahead_page(rac))) {
145 aops->readpage(rac->file, page);
146 put_page(page);
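
Lines 121-146 are read_pages(), which hands one batch to the filesystem. It prefers the newer ->readahead address_space operation, falls back to the legacy ->readpages, and as a last resort issues ->readpage one page at a time. A condensed sketch (block plugging and the empty-batch check omitted):

    const struct address_space_operations *aops = rac->mapping->a_ops;
    struct page *page;

    if (aops->readahead) {
            aops->readahead(rac);
            /* clean up pages the fs failed to consume */
            while ((page = readahead_page(rac))) {
                    unlock_page(page);
                    put_page(page);
            }
    } else if (aops->readpages) {
            aops->readpages(rac->file, rac->mapping, pages,
                            readahead_count(rac));
            /* clean up the remaining pages */
            put_pages_list(pages);
    } else {
            while ((page = readahead_page(rac))) {
                    aops->readpage(rac->file, page);
                    put_page(page);
            }
    }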
185 * locked pages to the page cache, but will not yet have submitted
186 * them for I/O. Adding another page may need to allocate memory,
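
The comment at lines 185-186 (from page_cache_ra_unbounded()) motivates running the whole batch in a nofs scope: the loop below adds locked pages to the page cache before any I/O is submitted, so an allocation that recursed into filesystem reclaim could deadlock on those very locks. The pattern, as a sketch:

    unsigned int nofs = memalloc_nofs_save();   /* no fs-reclaim from here */

    /* ... allocate pages, add them locked to the page cache, start I/O ... */

    memalloc_nofs_restore(nofs);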
199 struct page *page = xa_load(&mapping->i_pages, index + i);
203 if (page && !xa_is_value(page)) {
207 * next batch. This page may be the one we would
209 * have a stable reference to this page, and it's
216 page = __page_cache_alloc(gfp_mask);
217 if (!page)
220 page->index = index + i;
221 list_add(&page->lru, &page_pool);
222 } else if (add_to_page_cache_lru(page, mapping, index + i,
224 put_page(page);
229 SetPageReadahead(page);
234 * Now start the IO. We ignore I/O errors - if the page is not
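
Lines 199-234 are scattered pieces of the preallocation loop in page_cache_ra_unbounded(). Assembled (a sketch; index, nr_to_read and lookahead_size are the function's arguments): an already-cached index flushes the batch built so far, fresh pages are queued on a private list for ->readpages or inserted directly otherwise, and the page lookahead_size pages before the end is flagged as the next asynchronous trigger:

    for (i = 0; i < nr_to_read; i++) {
            struct page *page = xa_load(&mapping->i_pages, index + i);

            if (page && !xa_is_value(page)) {
                    /*
                     * Page already cached: kick off the current batch of
                     * contiguous pages, then continue with the next batch.
                     */
                    read_pages(ractl, &page_pool, true);
                    continue;
            }

            page = __page_cache_alloc(gfp_mask);
            if (!page)
                    break;
            if (mapping->a_ops->readpages) {
                    /* legacy interface wants the pages on a list */
                    page->index = index + i;
                    list_add(&page->lru, &page_pool);
            } else if (add_to_page_cache_lru(page, mapping, index + i,
                                    gfp_mask) < 0) {
                    put_page(page);
                    read_pages(ractl, &page_pool, true);
                    continue;
            }
            if (i == nr_to_read - lookahead_size)
                    SetPageReadahead(page); /* async readahead trigger */
            ractl->_nr_pages++;
    }

    /* now start the I/O: errors surface later via !PageUptodate */
    read_pages(ractl, &page_pool, false);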
246 * behaviour which would occur if page allocations are causing VM writeback.
255 pgoff_t end_index; /* The last page we want to read */
263 /* Don't read past the page containing the last byte of the file */
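
Lines 255-263 belong to do_page_cache_ra(), the checked wrapper around the unbounded helper: it clamps nr_to_read so readahead never runs past EOF. A sketch of the clamp:

    struct inode *inode = ractl->mapping->host;
    unsigned long index = readahead_index(ractl);
    loff_t isize = i_size_read(inode);
    pgoff_t end_index;      /* the last page we want to read */

    if (isize == 0)
            return;

    end_index = (isize - 1) >> PAGE_SHIFT;
    if (index > end_index)
            return;
    /* don't read past the page containing the last byte of the file */
    if (nr_to_read > end_index - index)
            nr_to_read = end_index - index + 1;

    page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);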
308 * for a 128k (32-page) max readahead window
309 * 1-8 page requests get a 32k initial window, > 8 pages get 128k
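
A worked example of that rule, assuming 4k pages and the default 128k (32-page) maximum: a 3-page request rounds up to 4 pages and takes the small-read path, getting an 8-page (32k) initial window; a 12-page request is over 8 pages and gets the full 32-page (128k) window. A hypothetical helper implementing just the documented mapping (not a verbatim copy of the kernel's get_init_ra_size()):

    /* hypothetical sketch of the policy stated in the comment above */
    static unsigned long init_window_pages(unsigned long req_pages,
                                           unsigned long max /* 32 here */)
    {
            unsigned long newsize = roundup_pow_of_two(req_pages);

            if (newsize <= 8)       /* 1-8 page request ... */
                    return 8;       /* ... 32k (8-page) initial window */
            return max;             /* larger: full 128k (32-page) window */
    }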
350 * ^start ^page marked with PG_readahead
354 * readahead pages and stalled on the missing page at readahead_index;
361 * page at (start+size-async_size) with PG_readahead, and use it as readahead
363 * readahead-for-nothing fuss, saving pointless page cache lookups.
371 * There is a special-case: if the first page which the application tries to
372 * read happens to be the first page of the file, it is assumed that a linear
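
Lines 350-372 are fragments of the on-demand readahead design comment. Line 350 is the bottom row of its window diagram; restored (spacing approximate), together with a sketch of the ramp-up branch of a v5.x ondemand_readahead() that the pipelining text describes:

    /*
     *                        |<----- async_size ---------|
     *     |------------------- size -------------------->|
     *     |==================#===========================|
     *     ^start             ^page marked with PG_readahead
     */

    /* sequential hit on the marker (or just past the window): push forward */
    if (index == ra->start + ra->size - ra->async_size ||
        index == ra->start + ra->size) {
            ra->start += ra->size;
            ra->size = get_next_ra_size(ra, max_pages);
            ra->async_size = ra->size;      /* maximum pipelining */
            goto readit;
    }

With async_size equal to size, the next window is submitted as soon as the application touches the marked page, overlapping its think time with the disk I/O for the rest of the window.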
399 * page cache context based read-ahead
471 * Hit a marked page without valid readahead state.
511 * Query the page cache and look for the traces (cached history pages)
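
Lines 399-511 all concern the interleaved-read fallback: file_ra_state holds only one stream's state, so when a marked page is hit without matching state, the only evidence of a sequential reader is the history it left in the page cache. The probe counts consecutive cached pages behind the current index; a sketch using the v5.x helper page_cache_prev_miss(), which scans backwards for the first hole:

    static pgoff_t count_history_pages(struct address_space *mapping,
                                       pgoff_t index, unsigned long max)
    {
            pgoff_t head;

            rcu_read_lock();
            head = page_cache_prev_miss(mapping, index - 1, max);
            rcu_read_unlock();

            return index - 1 - head;    /* cached pages just behind us */
    }

If the history is longer than the request, a sequential stream is assumed and a new window is opened; otherwise the read is treated as random.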
561 * requested range, which we'll set to 1 page for this case.
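
Line 561 is from the synchronous entry point, page_cache_sync_ra(): even with readahead disabled (ra->ra_pages == 0) or the blk cgroup congested, the request is still pushed through as a forced readahead clamped to 1 page, so the read that needs the data is satisfied. A sketch from a v5.11-era tree (names per that version):

    bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);

    if (!ra->ra_pages || blk_cgroup_congested()) {
            if (!ractl->file)
                    return;
            req_count = 1;          /* limit the read to the requested range */
            do_forced_ra = true;
    }

    if (do_forced_ra) {
            force_page_cache_ra(ractl, ra, req_count);
            return;
    }

    ondemand_readahead(ractl, ra, false, req_count);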
582 struct file_ra_state *ra, struct page *page,
592 if (PageWriteback(page))
595 ClearPageReadahead(page);
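
Lines 582-595 are the asynchronous entry point, page_cache_async_ra(), called when a reader hits a PG_readahead-marked page. PG_readahead shares its page-flag bit with PG_reclaim, so a page under writeback may carry a stale bit rather than a genuine marker and is skipped; otherwise the marker is cleared and the next window submitted. A sketch (v5.x era):

    void page_cache_async_ra(struct readahead_control *ractl,
                    struct file_ra_state *ra, struct page *page,
                    unsigned long req_count)
    {
            if (!ra->ra_pages)
                    return;         /* readahead disabled */

            /* same bit is used for PG_readahead and PG_reclaim */
            if (PageWriteback(page))
                    return;

            ClearPageReadahead(page);

            /* defer asynchronous readahead on I/O congestion */
            if (inode_read_congested(ractl->mapping->host))
                    return;
            if (blk_cgroup_congested())
                    return;

            ondemand_readahead(ractl, ra, true, req_count);
    }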