Lines matching refs: page

17  * mark a page as having been made dirty and thus needing writeback
19 int afs_set_page_dirty(struct page *page)
22 return __set_page_dirty_nobuffers(page);
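
afs_set_page_dirty() above does nothing beyond calling the generic helper, because AFS keeps its dirty-byte-range bookkeeping in page->private rather than in buffer heads. A minimal sketch of such an ->set_page_dirty() address_space operation, written against the pre-folio struct page API this file uses (the example_ name is illustrative):

#include <linux/mm.h>
#include <linux/pagemap.h>

static int example_set_page_dirty(struct page *page)
{
	/* No buffer heads to tag: just mark the page and its inode dirty. */
	return __set_page_dirty_nobuffers(page);
}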
26 * partly or wholly fill a page that's under preparation for writing
29 loff_t pos, unsigned int len, struct page *page)
41 data = kmap(page);
43 kunmap(page);
56 req->pages[0] = page;
57 get_page(page);
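
The afs_fill_page() lines above show the page being mapped for a partial fill and then handed to a read request with its own reference taken. A hedged sketch of that hand-off; the request structure and names are illustrative, but the get_page()/put_page() pairing is the real rule: anything that keeps a pointer to a page cache page must hold a reference until it is done with it.

#include <linux/pagemap.h>

struct example_read_req {
	struct page	*pages[1];	/* pages the reply will be copied into */
	loff_t		pos;
	size_t		len;
};

static void example_attach_page(struct example_read_req *req, struct page *page)
{
	req->pages[0] = page;
	get_page(page);		/* dropped again when the request completes */
}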
75 * prepare to perform part of a write to a page
79 struct page **_page, void **fsdata)
82 struct page *page;
93 page = grab_cache_page_write_begin(mapping, index, flags);
94 if (!page)
97 if (!PageUptodate(page) && len != PAGE_SIZE) {
98 ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
100 unlock_page(page);
101 put_page(page);
105 SetPageUptodate(page);
109 /* See if this page is already partially written in a way that we can
113 if (PagePrivate(page)) {
114 priv = page_private(page);
121 if (PageWriteback(page)) {
123 page->index, priv);
135 *_page = page;
140 * flush the page out.
144 ret = write_one_page(page);
148 ret = lock_page_killable(page);
154 put_page(page);
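
The afs_write_begin() lines above (75-154) follow the usual ->write_begin() shape: grab or create the page, pre-fill it from the server if the incoming write only covers part of a page that is not yet uptodate, and, if an already-recorded dirty range cannot be merged while the page is under writeback, flush the page out first (lines 140-154). A simplified sketch of the grab-and-fill half, assuming the pre-folio API; example_fill_page() is a stand-in for afs_fill_page() and the reduced argument list is illustrative:

#include <linux/errno.h>
#include <linux/pagemap.h>

static int example_fill_page(struct address_space *mapping, loff_t pos,
			     unsigned int len, struct page *page);

static int example_write_begin(struct address_space *mapping, loff_t pos,
			       unsigned int len, unsigned int flags,
			       struct page **pagep)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int ret;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		/* Partial overwrite of a page we don't yet hold: read it in. */
		ret = example_fill_page(mapping, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			return ret;
		}
		SetPageUptodate(page);
	}

	*pagep = page;		/* returned locked, with a reference held */
	return 0;
}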
160 * finalise part of a write to a page
164 struct page *page, void *fsdata)
175 vnode->fid.vid, vnode->fid.vnode, page->index);
191 if (!PageUptodate(page)) {
198 len - copied, page);
202 SetPageUptodate(page);
205 if (PagePrivate(page)) {
206 priv = page_private(page);
214 set_page_private(page, priv);
216 page->index, priv);
219 attach_page_private(page, (void *)priv);
221 page->index, priv);
224 set_page_dirty(page);
225 if (PageDirty(page))
230 unlock_page(page);
231 put_page(page);
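
afs_write_end() above records which bytes of the page are dirty by packing a from/to range into page->private: updated in place with set_page_private() if the page already carries a range, or attached with attach_page_private() (which also takes a page reference) if not, before set_page_dirty() is called. A hedged sketch of that bookkeeping; the EXAMPLE_PRIV_* packing mirrors the two-halves-of-an-unsigned-long scheme this file uses through its own helpers, but the exact bit layout here is illustrative:

#include <linux/kernel.h>
#include <linux/pagemap.h>

#define EXAMPLE_PRIV_SHIFT	(BITS_PER_LONG / 2)
#define EXAMPLE_PRIV_MASK	((1UL << EXAMPLE_PRIV_SHIFT) - 1)

static void example_mark_dirty_range(struct page *page,
				     unsigned long from, unsigned long to)
{
	if (PagePrivate(page)) {
		/* Merge with the range already recorded on this page. */
		unsigned long old = page_private(page);

		from = min(from, old & EXAMPLE_PRIV_MASK);
		to = max(to, old >> EXAMPLE_PRIV_SHIFT);
		set_page_private(page, (to << EXAMPLE_PRIV_SHIFT) | from);
	} else {
		/* Sets PagePrivate and takes a reference on the page. */
		attach_page_private(page,
				    (void *)((to << EXAMPLE_PRIV_SHIFT) | from));
	}

	set_page_dirty(page);
}

Writeback later reads this range back out of page->private to decide how much of each page actually needs to be sent to the server.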
260 struct page *page = pv.pages[loop];
261 ClearPageUptodate(page);
262 SetPageError(page);
263 end_page_writeback(page);
264 if (page->index >= first)
265 first = page->index + 1;
266 lock_page(page);
267 generic_error_remove_page(mapping, page);
268 unlock_page(page);
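
The block above (afs_kill_pages(), lines 260-268) is the unrecoverable-error path: when a store to the server fails permanently, each page in the batch loses its uptodate state, is flagged in error, has its writeback completed, and is then removed from the mapping. A sketch of that per-page teardown (the example_ name is illustrative):

#include <linux/mm.h>
#include <linux/pagemap.h>

static void example_kill_page(struct address_space *mapping, struct page *page)
{
	ClearPageUptodate(page);
	SetPageError(page);
	end_page_writeback(page);	/* wake anyone waiting on writeback */
	lock_page(page);
	generic_error_remove_page(mapping, page);
	unlock_page(page);
}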
303 struct page *page = pv.pages[loop];
305 redirty_page_for_writepage(wbc, page);
306 end_page_writeback(page);
307 if (page->index >= first)
308 first = page->index + 1;
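
The shorter block above (afs_redirty_pages(), lines 303-308) is the retryable-error path: instead of discarding the data, each page is put back on the dirty list for a later attempt. A sketch, with an illustrative name:

#include <linux/pagemap.h>
#include <linux/writeback.h>

static void example_redirty_page(struct writeback_control *wbc, struct page *page)
{
	/* Keep the data; let a later writeback pass retry the store. */
	redirty_page_for_writepage(wbc, page);
	end_page_writeback(page);
}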
491 * Synchronously write back the locked page and any subsequent non-locked dirty
496 struct page *primary_page,
500 struct page *pages[8], *page;
514 * written regions, stopping when we find a page that is not
552 page = pages[loop];
556 if (page->index > final_page)
558 if (!trylock_page(page))
560 if (!PageDirty(page) || PageWriteback(page)) {
561 unlock_page(page);
565 priv = page_private(page);
570 unlock_page(page);
576 page->index, priv);
578 if (!clear_page_dirty_for_io(page))
580 if (test_set_page_writeback(page))
582 unlock_page(page);
583 put_page(page);
597 * set; the first page is still locked at this point, but all the rest
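
afs_write_back_from_locked_page() (lines 491-597) grows a single write around the locked primary page by pulling in the following pages while they remain contiguous, dirty and not already under writeback, then issues one store covering the whole run. A hedged sketch of the acceptance test applied to each candidate page in that gather loop; the function name and boolean return are illustrative, and the contiguity check on the recorded byte range (lines 565-576) is omitted:

#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static bool example_claim_for_writeback(struct page *page, pgoff_t final_index)
{
	if (page->index > final_index)
		return false;
	if (!trylock_page(page))	/* never block on a later page */
		return false;
	if (!PageDirty(page) || PageWriteback(page)) {
		unlock_page(page);
		return false;
	}

	/* Move the page from "dirty" to "under writeback". */
	if (!clear_page_dirty_for_io(page))
		BUG();
	if (test_set_page_writeback(page))
		BUG();

	/* The writeback flag now pins the page's state, so the lock and the
	 * lookup reference can both be dropped before the store is sent.
	 */
	unlock_page(page);
	put_page(page);
	return true;
}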
655 * write a page back to the server
656 * - the caller locked the page for us
658 int afs_writepage(struct page *page, struct writeback_control *wbc)
662 _enter("{%lx},", page->index);
664 ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
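
afs_writepage() above is the thin ->writepage() entry point: the VM hands over one locked dirty page, the gather helper expands the write around it, and the pages actually written are charged against wbc->nr_to_write. A simplified sketch; example_write_back_from_locked_page() stands in for the real helper, the final-index expression is only a plausible upper bound, and error handling is reduced:

#include <linux/pagemap.h>
#include <linux/writeback.h>

static int example_write_back_from_locked_page(struct address_space *mapping,
					       struct writeback_control *wbc,
					       struct page *page, pgoff_t final);

static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	/* The page arrives locked; the helper unlocks it once writeback is
	 * set on it, and returns how many pages it wrote (or an error).
	 */
	ret = example_write_back_from_locked_page(page->mapping, wbc, page,
						  wbc->range_end >> PAGE_SHIFT);
	if (ret < 0)
		return ret;

	wbc->nr_to_write -= ret;
	return 0;
}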
684 struct page *page;
691 PAGECACHE_TAG_DIRTY, 1, &page);
695 _debug("wback %lx", page->index);
699 * page lock: the page may be truncated or invalidated
700 * (changing page->mapping to NULL), or even swizzled
703 ret = lock_page_killable(page);
705 put_page(page);
710 if (page->mapping != mapping || !PageDirty(page)) {
711 unlock_page(page);
712 put_page(page);
716 if (PageWriteback(page)) {
717 unlock_page(page);
719 wait_on_page_writeback(page);
720 put_page(page);
724 if (!clear_page_dirty_for_io(page))
726 ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
727 put_page(page);
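
afs_writepages_region() (lines 684-727) walks the mapping's dirty-tagged pages one at a time. Because the tag lookup is done without holding the page lock, every page must be revalidated once locked: it may have been truncated or invalidated (mapping set to NULL), cleaned by someone else, or put under writeback in the meantime, exactly as the comment at lines 699-700 warns. A hedged sketch of that revalidation step; the function name and the "<0 error / 0 skip / 1 write it" return convention are illustrative:

#include <linux/pagemap.h>
#include <linux/writeback.h>

static int example_revalidate_for_writeback(struct address_space *mapping,
					    struct writeback_control *wbc,
					    struct page *page)
{
	int ret;

	ret = lock_page_killable(page);
	if (ret < 0) {
		put_page(page);
		return ret;
	}

	/* Truncated, invalidated or already cleaned since the lookup? */
	if (page->mapping != mapping || !PageDirty(page)) {
		unlock_page(page);
		put_page(page);
		return 0;
	}

	if (PageWriteback(page)) {
		unlock_page(page);
		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);
		put_page(page);
		return 0;
	}

	return 1;	/* still ours and still dirty: go ahead and write it */
}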
833 * notification that a previously read-only page is about to become writable
844 vnode->fid.vid, vnode->fid.vnode, vmf->page->index);
848 /* Wait for the page to be written to the cache before we allow it to
849 * be modified. We then assume the entire page will need writing back.
852 fscache_wait_on_page_write(vnode->cache, vmf->page);
855 if (PageWriteback(vmf->page) &&
856 wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
859 if (lock_page_killable(vmf->page) < 0)
862 /* We mustn't change page->private until writeback is complete as that
863 * details the portion of the page we need to write back and we might
864 * need to redirty the page if there's a problem.
866 wait_on_page_writeback(vmf->page);
871 vmf->page->index, priv);
872 if (PagePrivate(vmf->page))
873 set_page_private(vmf->page, priv);
875 attach_page_private(vmf->page, (void *)priv);
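
afs_page_mkwrite() (lines 833-875) orders a page fault's transition to writable against writeback: page->private describes the byte range currently being stored, so it must not be changed while writeback might still be reading it. The handler therefore waits for any in-flight writeback both before and after taking the page lock, and only then records the whole page as dirty. A hedged sketch of that ordering; the fscache wait at line 852 is omitted, the example_ names are illustrative, and the range packing mirrors the write_end sketch earlier rather than this file's exact helpers:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#define EXAMPLE_PRIV_SHIFT	(BITS_PER_LONG / 2)

static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	unsigned long priv;

	/* Don't let the write proceed while writeback may still be using
	 * page->private to describe what it is storing.
	 */
	if (PageWriteback(page) &&
	    wait_on_page_bit_killable(page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(page) < 0)
		return VM_FAULT_RETRY;

	/* Writeback may have begun between the wait above and the lock. */
	wait_on_page_writeback(page);

	/* Assume the faulting write will dirty the whole page. */
	priv = (unsigned long)PAGE_SIZE << EXAMPLE_PRIV_SHIFT;
	if (PagePrivate(page))
		set_page_private(page, priv);
	else
		attach_page_private(page, (void *)priv);

	return VM_FAULT_LOCKED;
}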
911 * Clean up a page during invalidation.
913 int afs_launder_page(struct page *page)
915 struct address_space *mapping = page->mapping;
921 _enter("{%lx}", page->index);
923 priv = page_private(page);
924 if (clear_page_dirty_for_io(page)) {
927 if (PagePrivate(page)) {
933 page->index, priv);
934 ret = afs_store_data(mapping, page->index, page->index, t, f, true);
937 priv = (unsigned long)detach_page_private(page);
939 page->index, priv);
942 if (PageFsCache(page)) {
943 fscache_wait_on_page_write(vnode->cache, page);
944 fscache_uncache_page(vnode->cache, page);
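
afs_launder_page() (lines 911-944) handles a dirty page that is about to be invalidated: the byte range recorded in page->private is written back synchronously, after which the private data (and the reference attach_page_private() took) is detached and any fscache state is cleaned up. A hedged sketch; example_store_data() is a stand-in for afs_store_data(), the fscache calls are omitted, and the range decoding mirrors the illustrative packing used in the earlier sketches:

#include <linux/kernel.h>
#include <linux/pagemap.h>

#define EXAMPLE_PRIV_SHIFT	(BITS_PER_LONG / 2)
#define EXAMPLE_PRIV_MASK	((1UL << EXAMPLE_PRIV_SHIFT) - 1)

static int example_store_data(struct address_space *mapping, pgoff_t index,
			      unsigned int from, unsigned int to);

static int example_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned int f = 0, t = PAGE_SIZE;
	int ret = 0;

	if (clear_page_dirty_for_io(page)) {
		if (PagePrivate(page)) {
			/* Only the recorded range needs to go to the server. */
			unsigned long priv = page_private(page);

			f = priv & EXAMPLE_PRIV_MASK;
			t = priv >> EXAMPLE_PRIV_SHIFT;
		}
		ret = example_store_data(mapping, page->index, f, t);
	}

	/* Clears PagePrivate and drops the reference it held on the page. */
	detach_page_private(page);
	return ret;
}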