Lines Matching defs:page (fs/ocfs2/aops.c)
78 /* We don't use the page cache to create symlink data, so if
219 int ocfs2_read_inline_data(struct inode *inode, struct page *page,
243 kaddr = kmap_atomic(page);
246 /* Clear the remaining part of the page */
248 flush_dcache_page(page);
251 SetPageUptodate(page);
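The hits at 243-251 are the core of ocfs2_read_inline_data(): inline data lives in the dinode itself, so the page is filled by a plain copy rather than block I/O. A minimal sketch of that pattern, assuming the caller has already validated that i_size fits in one page (the real function also checks OCFS2_INLINE_DATA_FL and errors out on corruption):

static int inline_read_sketch(struct inode *inode, struct page *page,
			      struct ocfs2_dinode *di)
{
	/* Caller verified size <= PAGE_SIZE and <= the inline maximum. */
	loff_t size = i_size_read(inode);
	void *kaddr;

	kaddr = kmap_atomic(page);
	if (size)
		memcpy(kaddr, di->id2.i_data.id_data, size);
	/* Clear the remaining part of the page */
	memset(kaddr + size, 0, PAGE_SIZE - size);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	SetPageUptodate(page);
	return 0;
}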
256 static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
261 BUG_ON(!PageLocked(page));
270 ret = ocfs2_read_inline_data(inode, page, di_bh);
272 unlock_page(page);
287 ret = ocfs2_inode_lock_with_page(inode, NULL, 0, &folio->page);
326 ret = ocfs2_readpage_inline(inode, &folio->page);
357 * Use the nonblocking flag for the dlm code to avoid page
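Lines 256-272 sketch ocfs2_readpage_inline(), and 287/326/357 show how ocfs2_read_folio() reaches it: the page is already locked when the cluster lock is requested, hence the nonblocking DLM flag to avoid inverting the page-lock/cluster-lock order. A hedged reconstruction of the inline branch (ocfs2_read_inode_block() is ocfs2's helper for fetching the dinode buffer):

static int readpage_inline_sketch(struct inode *inode, struct page *page)
{
	int ret;
	struct buffer_head *di_bh = NULL;

	BUG_ON(!PageLocked(page));
	BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));

	ret = ocfs2_read_inode_block(inode, &di_bh);
	if (ret)
		goto out;

	ret = ocfs2_read_inline_data(inode, page, di_bh);
out:
	unlock_page(page);
	brelse(di_bh);
	return ret;
}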
394 * ->writepage is called during the process of invalidating the page cache
400 static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
403 (unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
404 page->index);
406 return block_write_full_page(page, ocfs2_get_block, wbc);
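Lines 400-406 are essentially the whole of ocfs2_writepage(): a tracepoint plus delegation to the generic buffer-head writeback helper, with ocfs2_get_block supplying the block mapping. Reassembled from the hits above:

static int writepage_sketch(struct page *page, struct writeback_control *wbc)
{
	trace_ocfs2_writepage(
		(unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
		page->index);

	return block_write_full_page(page, ocfs2_get_block, wbc);
}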
534 * 'from' and 'to' are the region in the page to avoid zeroing.
541 static void ocfs2_clear_page_regions(struct page *page,
550 kaddr = kmap_atomic(page);
571 static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
574 u64 offset = page_offset(page) + block_start;
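Line 574 turns a buffer's offset within the page into an absolute file offset; ocfs2_should_read_blk() then decides whether a buffer under the write window must be read from disk before being partially overwritten. A hedged reconstruction of that test:

static int should_read_blk_sketch(struct inode *inode, struct page *page,
				  unsigned int block_start)
{
	u64 offset = page_offset(page) + block_start;

	/* Sparse volumes can have allocated data anywhere. */
	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		return 1;

	/* Otherwise only blocks below i_size hold real data. */
	if (i_size_read(inode) > offset)
		return 1;

	return 0;
}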
592 int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
601 if (!page_has_buffers(page))
602 create_empty_buffers(page, bsize, 0);
604 head = page_buffers(page);
616 if (PageUptodate(page))
622 * For an allocating write with cluster size >= page
623 * size, we always write the entire page.
633 if (PageUptodate(page)) {
637 ocfs2_should_read_blk(inode, page, block_start) &&
671 zero_user(page, block_start, bh->b_size);
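Lines 592-671 belong to ocfs2_map_page_blocks(): attach buffer_heads if the page has none (601-604), then walk the buffer ring, mapping each buffer to its disk block and zeroing freshly allocated ones (671). A heavily elided skeleton of that walk; the uptodate checks, the ocfs2_should_read_blk() read path, and error handling are omitted:

static int map_page_blocks_skeleton(struct page *page, u64 *p_blkno,
				    struct inode *inode,
				    unsigned from, unsigned to, int new)
{
	struct buffer_head *head, *bh;
	unsigned int bsize = i_blocksize(inode);
	unsigned int block_start, block_end;

	if (!page_has_buffers(page))
		create_empty_buffers(page, bsize, 0);
	head = page_buffers(page);

	for (bh = head, block_start = 0;
	     bh != head || !block_start;
	     bh = bh->b_this_page, block_start += bsize, (*p_blkno)++) {
		block_end = block_start + bsize;

		/* Skip buffers outside [from, to). */
		if (block_end <= from || block_start >= to)
			continue;

		if (!buffer_mapped(bh))
			map_bh(bh, inode->i_sb, *p_blkno);

		/* Fresh allocations carry stale on-disk contents. */
		if (new)
			zero_user(page, block_start, bh->b_size);
	}
	return 0;
}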
737 * w_target_page is the page being written to by the user.
747 struct page *w_pages[OCFS2_MAX_CTXT_PAGES];
748 struct page *w_target_page;
777 void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
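ocfs2_unlock_and_free_pages() (line 777) is the cleanup half of the write context's page bookkeeping. It is short enough to reconstruct in full, hedged as usual:

void unlock_and_free_pages_sketch(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			continue;
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		put_page(pages[i]);
	}
}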
796 * The intent is to allow us to lock the target page from write_begin()
871 * If a page has any new buffers, zero them out here, and mark them uptodate
875 static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
880 BUG_ON(!PageLocked(page));
881 if (!page_has_buffers(page))
884 bh = head = page_buffers(page);
891 if (!PageUptodate(page)) {
897 zero_user_segment(page, start, end);
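Lines 875-897 give away most of ocfs2_zero_new_buffers(): after a failed or short copy, any buffer still flagged new inside the write window is zeroed (unless the page was already uptodate) and then marked uptodate and dirty so the new allocation stays consistent. A hedged reconstruction:

static void zero_new_buffers_sketch(struct page *page,
				    unsigned from, unsigned to)
{
	unsigned int block_start = 0, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh) && block_end > from && block_start < to) {
			if (!PageUptodate(page)) {
				unsigned start = max(from, block_start);
				unsigned end = min(to, block_end);

				zero_user_segment(page, start, end);
				set_buffer_uptodate(bh);
			}
			clear_buffer_new(bh);
			mark_buffer_dirty(bh);
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}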
922 struct page *tmppage;
942 struct page *page, u32 cpos,
955 * the page boundary.
957 new = new | ((i_size_read(inode) <= page_offset(page)) &&
958 (page_offset(page) <= user_pos));
960 if (page == wc->w_target_page) {
965 ret = ocfs2_map_page_blocks(page, p_blkno, inode,
969 ret = ocfs2_map_page_blocks(page, p_blkno, inode,
984 * If we haven't allocated the new page yet, we
993 ret = ocfs2_map_page_blocks(page, p_blkno, inode,
1006 * range inside of a page needs to be written.
1008 * We can skip this if the page is up to date - it's already
1011 if (new && !PageUptodate(page))
1012 ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
1015 flush_dcache_page(page);
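Lines 942-1015 cover ocfs2_prepare_page_for_write(). Two points stand out in the hits: a page wholly at or beyond i_size is treated as new even without fresh allocation (957-958), and only the target page is mapped by the user's exact range, while the other pages of an allocating write are handled whole (965/969). A condensed sketch of that flow; 'wc' is the write context from lines 737-748, and journaling details are dropped:

static int prepare_page_sketch(struct inode *inode, struct page *page,
			       u64 *p_blkno, struct ocfs2_write_ctxt *wc,
			       u32 cpos, loff_t user_pos, unsigned user_len,
			       int new)
{
	int ret;
	unsigned from = user_pos & (PAGE_SIZE - 1);
	unsigned to = from + user_len;

	/* No old data exists past i_size, so the page counts as new. */
	new = new | ((i_size_read(inode) <= page_offset(page)) &&
		     (page_offset(page) <= user_pos));

	if (page == wc->w_target_page)
		ret = ocfs2_map_page_blocks(page, p_blkno, inode,
					    from, to, new);
	else
		/* Non-target pages of the cluster are written whole. */
		ret = ocfs2_map_page_blocks(page, p_blkno, inode,
					    0, PAGE_SIZE, new);
	if (ret)
		return ret;

	/* Zero everything outside the user's range on new pages so
	 * stale block contents never reach the file (1011-1012). */
	if (new && !PageUptodate(page))
		ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
					 cpos, from, to);
	flush_dcache_page(page);
	return 0;
}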
1028 struct page *mmap_page)
1040 * page. Otherwise, we'll need a whole cluster's worth. If we're
1042 * last page of the write.
1048 * We need the index *past* the last page we could possibly
1049 * touch. This is the page past the end of the write or
1070 * and wants us to directly use the page
1088 /* Direct write has no mapping page. */
1133 * This is safe to call with the page locks - it won't take
1190 /* This is the direct io target page. */
1263 * ocfs2_write_end() wants to know which parts of the target page it
1281 * on page size and cluster size.
1478 struct page *page;
1489 page = find_or_create_page(mapping, 0, GFP_NOFS);
1490 if (!page) {
1497 * If we don't set w_num_pages then this page won't get unlocked
1500 wc->w_pages[0] = wc->w_target_page = page;
1515 if (!PageUptodate(page)) {
1516 ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
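Lines 1478-1516 are the inline-data write_begin path: inline data always lives at file offset 0, so exactly one page (index 0) is pinned, becomes the write target, and is populated from the dinode if not yet uptodate. A sketch using the write-context fields shown at 737-748:

static int write_begin_inline_sketch(struct address_space *mapping,
				     struct inode *inode,
				     struct ocfs2_write_ctxt *wc)
{
	int ret = 0;
	struct page *page;

	page = find_or_create_page(mapping, 0, GFP_NOFS);
	if (!page)
		return -ENOMEM;

	/*
	 * If we don't set w_num_pages then this page won't get unlocked
	 * and freed when the write context is torn down.
	 */
	wc->w_pages[0] = wc->w_target_page = page;
	wc->w_num_pages = 1;

	if (!PageUptodate(page))
		ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);

	return ret;
}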
1540 unsigned len, struct page *mmap_page,
1648 struct page **pagep, void **fsdata,
1649 struct buffer_head *di_bh, struct page *mmap_page)
1729 * ocfs2_write_end() knows which range in the target page to
1795 * Fill our page array first. That way we've grabbed enough so
1804 * the target page. In this case, we exit with no error and no target
1805 * page. This will trigger the caller, page_mkwrite(), to re-try
1844 * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(),
1846 * to unlock the target page manually to prevent deadlocks when
1884 struct page **pagep, void **fsdata)
1960 struct page *tmppage;
1991 * When page is fully beyond new isize (data copy
1992 * failed), do not bother zeroing the page. Invalidate
1994 * put page & buffer dirty bits into inconsistent
2007 /* This is the direct io target page. */
2063 * this lock and will ask for the page lock when flushing the data.
2081 struct page *page, void *fsdata)
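ocfs2_write_begin() (1884), ocfs2_write_end() (2081), and ocfs2_writepage() (400) are plugged into the page cache through ocfs2_aops. A partial illustration of that table, assuming the kernel window in which write_begin/write_end still take struct page while reads already use folios, as the hits at 287/326 suggest; the exact member set varies by version:

const struct address_space_operations ocfs2_aops = {
	.read_folio	= ocfs2_read_folio,
	.writepage	= ocfs2_writepage,
	.write_begin	= ocfs2_write_begin,
	.write_end	= ocfs2_write_end,
	.bmap		= ocfs2_bmap,
	/* direct I/O, invalidation, and migration hooks elided */
};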