Lines Matching defs:page
66 * sharing, avoids the page cache, and synchronously waits for an
93 struct page *pages[ITER_GET_BVECS_PAGES];
125 * page.
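
The 66 and 93 hits come from the synchronous, page-cache-bypassing I/O path, which pins the caller's pages in fixed-size batches (ITER_GET_BVECS_PAGES at a time) before handing them to the OSD client. Below is a minimal sketch of that batching pattern, assuming the older iov_iter_get_pages() API (newer trees use iov_iter_get_pages2()); BATCH_PAGES and pin_one_batch() are illustrative names, not Ceph code, and the real code keeps the pages pinned until the OSD I/O completes rather than releasing them immediately.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uio.h>

#define BATCH_PAGES 64			/* stand-in for ITER_GET_BVECS_PAGES */

static ssize_t pin_one_batch(struct iov_iter *iter, size_t maxsize)
{
	struct page *pages[BATCH_PAGES];
	size_t page_off;
	ssize_t bytes;
	int i, npages;

	/* Pin up to BATCH_PAGES of the caller's pages; no page cache involved. */
	bytes = iov_iter_get_pages(iter, pages, maxsize, BATCH_PAGES, &page_off);
	if (bytes <= 0)
		return bytes;

	npages = DIV_ROUND_UP(page_off + bytes, PAGE_SIZE);

	/* ... hand pages[0..npages-1], starting at page_off, to the OSD I/O ... */

	iov_iter_advance(iter, bytes);
	for (i = 0; i < npages; i++)
		put_page(pages[i]);
	return bytes;
}
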
721 /* Async create can't handle more than a page of xattrs */
884 * flush any page cache pages in this range. this
897 struct page **pages;
1300 * throw out any page cache pages in this range. this
1424 struct page **pages;
1470 * write from beginning of first page,
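
The 1424 and 1470 hits belong to the synchronous write path, which copies the outgoing data into a freshly allocated kernel page vector laid out from the beginning of the first page, regardless of the caller's alignment. A hedged sketch of that pattern follows; fill_write_pages() is an illustrative name, while ceph_alloc_page_vector(), ceph_release_page_vector() and copy_page_from_iter() are existing libceph/kernel helpers.

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/uio.h>
#include <linux/ceph/libceph.h>

static struct page **fill_write_pages(struct iov_iter *from, size_t len,
				      int *num_pages)
{
	struct page **pages;
	size_t left = len;
	int i, n;

	/*
	 * Data is copied from the start of the first page, whatever the
	 * file offset, so only the length decides how many pages we need.
	 */
	n = DIV_ROUND_UP(len, PAGE_SIZE);
	pages = ceph_alloc_page_vector(n, GFP_KERNEL);
	if (IS_ERR(pages))
		return pages;

	for (i = 0; i < n; i++) {
		size_t plen = min_t(size_t, left, PAGE_SIZE);

		if (copy_page_from_iter(pages[i], 0, plen, from) != plen) {
			ceph_release_page_vector(pages, n);
			return ERR_PTR(-EFAULT);
		}
		left -= plen;
	}
	*num_pages = n;
	return pages;
}
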
1550 struct page *pinned_page = NULL;
1624 struct page *page = NULL;
1627 page = __page_cache_alloc(GFP_KERNEL);
1628 if (!page)
1632 statret = __ceph_do_getattr(inode, page,
1633 CEPH_STAT_CAP_INLINE_DATA, !!page);
1635 if (page)
1636 __free_page(page);
1653 zero_user_segment(page, statret, end);
1654 ret = copy_page_to_iter(page,
1667 __free_pages(page, 0);
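
The 1624 to 1667 hits trace the inline-data read sequence: allocate a scratch page, ask the MDS for the inline blob via the Ceph getattr helper, zero the stale tail of the page, copy it into the caller's iov_iter, and free the page. The sketch below follows that sequence, assuming the fs/ceph-internal __ceph_do_getattr() and CEPH_STAT_CAP_INLINE_DATA shown in the listing; read_inline() is an illustrative wrapper and the error handling is simplified.

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include "super.h"	/* __ceph_do_getattr(), CEPH_STAT_CAP_INLINE_DATA */

static ssize_t read_inline(struct inode *inode, struct iov_iter *to, loff_t pos)
{
	struct page *page;
	size_t off = pos & ~PAGE_MASK;	/* inline data lives in the first page */
	size_t end;
	ssize_t ret;
	int statret;

	page = __page_cache_alloc(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* Fetch the inline data; statret is its length on success. */
	statret = __ceph_do_getattr(inode, page, CEPH_STAT_CAP_INLINE_DATA,
				    !!page);
	if (statret < 0) {
		__free_page(page);
		return statret;
	}

	/* Zero the part of the page beyond the inline data before copying. */
	end = min_t(size_t, i_size_read(inode), PAGE_SIZE);
	if ((size_t)statret < end)
		zero_user_segment(page, statret, end);

	ret = off < end ? copy_page_to_iter(page, off, end - off, to) : 0;
	__free_pages(page, 0);
	return ret;
}
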
1733 /* We can write back this queue in page reclaim */
1955 struct page *page;
1958 page = find_lock_page(inode->i_mapping, index);
1959 if (page) {
1960 wait_on_page_writeback(page);
1961 zero_user(page, offset & (PAGE_SIZE - 1), size);
1962 unlock_page(page);
1963 put_page(page);
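
The 1955 to 1963 hits show the pattern for zeroing a sub-page byte range that is already in the page cache: look the page up and lock it, wait for any writeback to finish, zero the affected bytes, then unlock and drop the reference; a page that is not cached needs no zeroing here. A minimal sketch, assuming the caller keeps offset plus size within a single page; zero_partial_page() is an illustrative name for what the listing shows.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

static void zero_partial_page(struct inode *inode, loff_t offset, size_t size)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page;

	page = find_lock_page(inode->i_mapping, index);
	if (!page)
		return;		/* not cached: nothing stale to zero */

	wait_on_page_writeback(page);
	zero_user(page, offset & (PAGE_SIZE - 1), size);
	unlock_page(page);
	put_page(page);
}
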