Lines matching references to "page"

2136 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
2138 struct address_space *mapping = page->mapping;
2139 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
2149 inode = page->mapping->host;
2152 write_data = kmap(page);
2156 kunmap(page);
2162 kunmap(page);
2185 cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
2190 kunmap(page);
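The matches at 2136-2190 cover cifs_partialpagewrite(), which maps the cached page and pushes the byte range [from, to) out through an open handle. A minimal sketch of the kmap()/write/kunmap() pattern follows; backend_write() is a hypothetical stand-in for the filesystem's write path, not a kernel or CIFS API.

#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Hypothetical backend writer; stands in for the filesystem write path. */
ssize_t backend_write(struct inode *inode, loff_t pos,
		      const char *data, size_t len);

static int partial_page_write(struct page *page, unsigned int from,
			      unsigned int to)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = page_offset(page) + from;	/* byte offset in the file */
	char *kaddr;
	ssize_t written;

	kaddr = kmap(page);			/* get a kernel mapping */
	written = backend_write(inode, pos, kaddr + from, to - from);
	kunmap(page);				/* always undo the mapping */

	return written < 0 ? (int)written : 0;
}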
2218 struct page *page;
2221 page = wdata->pages[i];
2224 * page lock: the page may be truncated or invalidated
2225 * (changing page->mapping to NULL), or even swizzled
2230 lock_page(page);
2231 else if (!trylock_page(page))
2234 if (unlikely(page->mapping != mapping)) {
2235 unlock_page(page);
2239 if (!wbc->range_cyclic && page->index > end) {
2241 unlock_page(page);
2245 if (*next && (page->index != *next)) {
2246 /* Not next consecutive page */
2247 unlock_page(page);
2252 wait_on_page_writeback(page);
2254 if (PageWriteback(page) ||
2255 !clear_page_dirty_for_io(page)) {
2256 unlock_page(page);
2264 set_page_writeback(page);
2265 if (page_offset(page) >= i_size_read(mapping->host)) {
2267 unlock_page(page);
2268 end_page_writeback(page);
2272 wdata->pages[i] = page;
2273 *next = page->index + 1;
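Lines 2218-2273 are the loop that gathers a run of contiguous dirty pages for one write request: each candidate is (try)locked, re-checked against the mapping and the writeback range, cleared dirty, and marked under writeback. A condensed sketch of that lock-and-validate step, assuming the caller tracks the next expected index:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/*
 * Sketch only: decide whether one candidate page can join the current
 * write request.  Returns true if the page was locked, cleaned and
 * tagged for writeback; false if it should be skipped.
 */
static bool claim_page_for_write(struct address_space *mapping,
				 struct writeback_control *wbc,
				 struct page *page, pgoff_t end,
				 pgoff_t *next)
{
	if (wbc->sync_mode != WB_SYNC_NONE)
		lock_page(page);		/* must wait when syncing */
	else if (!trylock_page(page))
		return false;			/* busy, skip it */

	/* The page may have been truncated or invalidated while unlocked. */
	if (unlikely(page->mapping != mapping) ||
	    (!wbc->range_cyclic && page->index > end) ||
	    (*next && page->index != *next)) {	/* not consecutive */
		unlock_page(page);
		return false;
	}

	if (wbc->sync_mode != WB_SYNC_NONE)
		wait_on_page_writeback(page);

	if (PageWriteback(page) || !clear_page_dirty_for_io(page)) {
		unlock_page(page);
		return false;
	}

	set_page_writeback(page);
	if (page_offset(page) >= i_size_read(mapping->host)) {
		unlock_page(page);		/* beyond EOF, nothing to write */
		end_page_writeback(page);
		return false;
	}

	*next = page->index + 1;		/* next expected index */
	return true;
}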
2334 * If wsize is smaller than the page cache size, default to writing
2335 * one page at a time via cifs_writepage
2462 * We hit the last page and there is more work to be done: wrap
2483 cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
2490 get_page(page);
2491 if (!PageUptodate(page))
2492 cifs_dbg(FYI, "ppw - page not up to date\n");
2498 * or re-dirty the page with "redirty_page_for_writepage()" in
2501 * Just unlocking the page will cause the radix tree tag-bits
2502 * to fail to update with the state of the page correctly.
2504 set_page_writeback(page);
2506 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
2510 redirty_page_for_writepage(wbc, page);
2512 SetPageError(page);
2513 mapping_set_error(page->mapping, rc);
2515 SetPageUptodate(page);
2517 end_page_writeback(page);
2518 put_page(page);
2523 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2525 int rc = cifs_writepage_locked(page, wbc);
2526 unlock_page(page);
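2483-2526 show the single-page writepage path: mark the page under writeback, write it out, then either redirty it on a transient failure, record the error, or mark it uptodate, and finally end writeback. A trimmed sketch, reusing the illustrative partial_page_write() helper from the first example:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Sketch of the locked-page write path; partial_page_write() is the
 * earlier illustrative helper, not a kernel API. */
static int writepage_locked_sketch(struct page *page,
				   struct writeback_control *wbc)
{
	int rc;

	get_page(page);			/* keep the page across the write */
	set_page_writeback(page);

	rc = partial_page_write(page, 0, PAGE_SIZE);
	if (rc == -EAGAIN) {
		/* Transient failure: hand it back to the flusher. */
		redirty_page_for_writepage(wbc, page);
	} else if (rc != 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, rc);
	} else {
		SetPageUptodate(page);
	}

	end_page_writeback(page);
	put_page(page);
	return rc;
}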
2532 struct page *page, void *fsdata)
2545 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
2546 page, pos, copied);
2548 if (PageChecked(page)) {
2550 SetPageUptodate(page);
2551 ClearPageChecked(page);
2552 } else if (!PageUptodate(page) && copied == PAGE_SIZE)
2553 SetPageUptodate(page);
2555 if (!PageUptodate(page)) {
2566 page_data = kmap(page);
2569 kunmap(page);
2575 set_page_dirty(page);
2585 unlock_page(page);
2586 put_page(page);
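2532-2586 are ->write_end(): if ->write_begin() flagged the page PageChecked, the parts not written are already considered valid; a full-page copy also makes the page uptodate; otherwise a short copy into a non-uptodate page has to be written through synchronously. The uptodate/dirty bookkeeping can be sketched as (the real code also checks the copied length against the requested length):

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Sketch of the write_end() uptodate/dirty bookkeeping only. */
static void write_end_bookkeeping(struct page *page, size_t copied)
{
	if (PageChecked(page)) {
		/* write_begin() promised the untouched parts are valid. */
		SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE) {
		SetPageUptodate(page);
	}

	if (PageUptodate(page))
		set_page_dirty(page);	/* let writeback push it out later */
	/* else: the real code writes the copied bytes through immediately */

	unlock_page(page);
	put_page(page);
}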
2709 cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2910 struct page **pagevec;
3348 cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
3367 struct page **pages =
3368 kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
3402 struct page *page;
3406 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3407 if (!page) {
3411 rdata->pages[i] = page;
3455 struct page *page = rdata->pages[i];
3460 void *addr = kmap_atomic(page);
3465 written = copy_page_to_iter(page, 0, copy, iter);
3500 struct page *page = rdata->pages[i];
3511 /* no need to hold page hostage */
3514 put_page(page);
3520 /* enough data to fill the page */
3528 page, page_offset, n, iter);
3535 server, page, page_offset, n);
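3348-3535 are the uncached/direct read plumbing: a kcalloc()'d array of page pointers, a loop of alloc_page(GFP_KERNEL|__GFP_HIGHMEM) calls to fill it, and copy_page_to_iter() to hand the received data back to the caller's iov_iter. A minimal sketch of the copy-out step; the helper name is illustrative:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uio.h>

/* Sketch: hand the contents of an array of freshly read pages to the
 * caller's iov_iter, 'remaining' bytes in total. */
static ssize_t pages_to_iter(struct page **pages, unsigned int nr_pages,
			     size_t remaining, struct iov_iter *iter)
{
	ssize_t total = 0;
	unsigned int i;

	for (i = 0; i < nr_pages && remaining; i++) {
		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
		size_t written = copy_page_to_iter(pages[i], 0, copy, iter);

		if (!written)
			return total ? total : -EFAULT;	/* iter exhausted */
		total += written;
		remaining -= written;
	}
	return total;
}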
3646 struct page **pagevec;
4135 * If the page is mmap'ed into a process' page tables, then we need to make
4141 struct page *page = vmf->page;
4143 lock_page(page);
4199 struct page *page = rdata->pages[i];
4201 lru_cache_add(page);
4205 flush_dcache_page(page);
4206 SetPageUptodate(page);
4209 unlock_page(page);
4213 cifs_readpage_to_fscache(rdata->mapping->host, page);
4217 put_page(page);
4243 struct page *page = rdata->pages[i];
4257 /* enough for partial page, fill and zero the rest */
4258 zero_user(page, len + page_offset, to_read - len);
4261 } else if (page->index > eof_index) {
4270 zero_user(page, 0, PAGE_SIZE);
4271 lru_cache_add(page);
4272 flush_dcache_page(page);
4273 SetPageUptodate(page);
4274 unlock_page(page);
4275 put_page(page);
4280 /* no need to hold page hostage */
4281 lru_cache_add(page);
4282 unlock_page(page);
4283 put_page(page);
4291 page, page_offset, n, iter);
4298 server, page, page_offset, n);
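4243-4298 fill pagecache pages from the read response: a short read gets the tail of the last page zeroed with zero_user(), pages beyond the server-reported EOF are zeroed outright and completed, and pages that are no longer needed are released early. Roughly, for one page that received 'valid' bytes of real data at offset 0:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

/* Sketch: complete one pagecache page after a (possibly short) read. */
static void finish_partial_page(struct page *page, unsigned int valid)
{
	if (valid < PAGE_SIZE)
		zero_user(page, valid, PAGE_SIZE - valid); /* zero the tail */

	flush_dcache_page(page);
	SetPageUptodate(page);
	lru_cache_add(page);		/* make it visible on the LRU */
	unlock_page(page);
	put_page(page);			/* drop the read's reference */
}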
4329 struct page *page, *tpage;
4336 page = lru_to_page(page_list);
4339 * Lock the page and put it in the cache. Since no one else
4340 * should have access to this page, we're safe to simply set
4343 __SetPageLocked(page);
4344 rc = add_to_page_cache_locked(page, mapping,
4345 page->index, gfp);
4349 __ClearPageLocked(page);
4353 /* move first page to the tmplist */
4354 *offset = (loff_t)page->index << PAGE_SHIFT;
4357 list_move_tail(&page->lru, tmplist);
4360 expected_index = page->index + 1;
4361 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
4363 if (page->index != expected_index)
4366 /* would this page push the read over the rsize? */
4370 __SetPageLocked(page);
4371 rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
4373 __ClearPageLocked(page);
4376 list_move_tail(&page->lru, tmplist);
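4329-4376 peel consecutive pages off the readahead list and insert them into the page cache before issuing one large read: the first page fixes the starting offset, and the reverse walk stops at the first index gap or when the batch would push the read over rsize. A sketch of the per-page insertion step:

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Sketch: take one page from the readahead list, insert it (locked) into
 * the page cache and move it to the batch list.  Returns 0 or an errno.
 */
static int add_readahead_page(struct page *page,
			      struct address_space *mapping,
			      struct list_head *batch, gfp_t gfp)
{
	__SetPageLocked(page);		/* no one else can see it yet */
	if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
		__ClearPageLocked(page);
		return -EEXIST;		/* e.g. raced with another insert */
	}

	list_move_tail(&page->lru, batch);
	return 0;
}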
4401 * After this point, every page in the list might have PG_fscache set,
4402 * so we will need to clean that up off of every page we don't use.
4423 * Start with the page at end of list and move it to private
4436 struct page *page, *tpage;
4456 * page. The VFS will fall back to readpage. We should never
4458 * rsize is smaller than a cache page.
4477 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4478 list_del(&page->lru);
4479 lru_cache_add(page);
4480 unlock_page(page);
4481 put_page(page);
4500 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4501 list_del(&page->lru);
4502 rdata->pages[rdata->nr_pages++] = page;
4517 page = rdata->pages[i];
4518 lru_cache_add(page);
4519 unlock_page(page);
4520 put_page(page);
4540 * cifs_readpage_worker must be called with the page pinned
4542 static int cifs_readpage_worker(struct file *file, struct page *page,
4548 /* Is the page cached? */
4549 rc = cifs_readpage_from_fscache(file_inode(file), page);
4553 read_data = kmap(page);
4573 flush_dcache_page(page);
4574 SetPageUptodate(page);
4576 /* send this page to the cache */
4577 cifs_readpage_to_fscache(file_inode(file), page);
4582 kunmap(page);
4585 unlock_page(page);
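4540-4585 are the single-page read worker: try fscache first, otherwise read the data into the kmap()'d page, mark it uptodate, and optionally push it back into fscache; the caller holds a reference on the page. A sketch with a hypothetical backend_read() helper in place of the real read path:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/string.h>

/* Hypothetical backend reader; stands in for the filesystem read path. */
ssize_t backend_read(struct file *file, loff_t pos, char *buf, size_t len);

/* Sketch of a single-page ->readpage worker (page is locked and pinned). */
static int readpage_worker_sketch(struct file *file, struct page *page)
{
	loff_t pos = page_offset(page);
	char *read_data;
	ssize_t n;
	int rc = 0;

	read_data = kmap(page);
	n = backend_read(file, pos, read_data, PAGE_SIZE);
	if (n < 0) {
		rc = (int)n;
	} else {
		if (n < PAGE_SIZE)
			memset(read_data + n, 0, PAGE_SIZE - n);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}
	kunmap(page);

	unlock_page(page);
	return rc;
}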
4589 static int cifs_readpage(struct file *file, struct page *page)
4591 loff_t offset = page_file_offset(page);
4604 page, (int)offset, (int)offset);
4606 rc = cifs_readpage_worker(file, page, &offset);
4632 page caching in the current Linux kernel design */
4644 /* since no page cache to corrupt on directio
4659 struct page **pagep, void **fsdata)
4666 struct page *page;
4672 page = grab_cache_page_write_begin(mapping, index, flags);
4673 if (!page) {
4678 if (PageUptodate(page))
4682 * If we write a full page it will be up to date, no need to read from
4692 * is, when the page lies beyond the EOF, or straddles the EOF
4699 zero_user_segments(page, 0, offset,
4703 * PageChecked means that the parts of the page
4706 * page, it can be set uptodate.
4708 SetPageChecked(page);
4715 * might as well read a page, it is fast enough. If we get
4719 cifs_readpage_worker(file, page, &page_start);
4720 put_page(page);
4730 *pagep = page;
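4659-4730 are ->write_begin(): grab and lock the cache page, and if it is not uptodate decide whether the read can be skipped: a full-page overwrite needs nothing, a write into a page at or beyond EOF only needs the untouched segments zeroed plus PageChecked set (so ->write_end() knows they are valid), and only the remaining cases read the page in first. A rough sketch of those fast paths, with the oplock check and error handling of the real code omitted:

#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Sketch of the no-read-needed fast paths in ->write_begin(). */
static struct page *write_begin_sketch(struct address_space *mapping,
				       loff_t pos, unsigned int len,
				       unsigned int flags)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned int offset = pos & (PAGE_SIZE - 1);
	loff_t page_start = (loff_t)index << PAGE_SHIFT;
	struct page *page;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return NULL;

	if (PageUptodate(page) || len == PAGE_SIZE)
		return page;		/* nothing to read first */

	if (page_start >= i_size_read(mapping->host)) {
		/* Page lies beyond EOF: zero the parts we will not
		 * overwrite and remember that via PageChecked. */
		zero_user_segments(page, 0, offset,
				   offset + len, PAGE_SIZE);
		SetPageChecked(page);
		return page;
	}

	/* Otherwise the real code reads the page in before the copy. */
	return page;
}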
4734 static int cifs_release_page(struct page *page, gfp_t gfp)
4736 if (PagePrivate(page))
4739 return cifs_fscache_release_page(page, gfp);
4742 static void cifs_invalidate_page(struct page *page, unsigned int offset,
4745 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
4748 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
4751 static int cifs_launder_page(struct page *page)
4754 loff_t range_start = page_offset(page);
4763 cifs_dbg(FYI, "Launder page: %p\n", page);
4765 if (clear_page_dirty_for_io(page))
4766 rc = cifs_writepage_locked(page, &wbc);
4768 cifs_fscache_invalidate_page(page, page->mapping->host);
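4734-4768 are the releasepage/invalidatepage/launder_page trio; ->launder_page() re-cleans a dirty page that is about to be invalidated by writing it out synchronously through the locked writepage path. Sketch, reusing the illustrative writepage_locked_sketch() helper from the earlier example:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Sketch of ->launder_page(): synchronously write out one dirty page
 * before it is invalidated. */
static int launder_page_sketch(struct page *page)
{
	int rc = 0;
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= 0,
		.range_start	= page_offset(page),
		.range_end	= page_offset(page) + PAGE_SIZE - 1,
	};

	if (clear_page_dirty_for_io(page))
		rc = writepage_locked_sketch(page, &wbc);

	return rc;
}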
4929 * contain the header plus one complete page of data. Otherwise, we need