Lines matching refs:pages (all lines referencing the identifier pages)
37 * to one zisofs block. Store the data in the @pages array with @pcount
42 struct page **pages, unsigned poffset,
68 if (!pages[i])
70 memzero_page(pages[i], 0, PAGE_SIZE);
71 SetPageUptodate(pages[i]);
121 if (pages[curpage]) {
122 stream.next_out = kmap_local_page(pages[curpage])
174 if (pages[curpage]) {
175 flush_dcache_page(pages[curpage]);
176 SetPageUptodate(pages[curpage]);
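
The matches above (lines 37-176) sit in the block-decompression helper whose kernel-doc starts at line 37: a hole in the compressed file is satisfied by zeroing the page (memzero_page plus SetPageUptodate), while real data is inflated with the zlib output window mapped onto successive pages via kmap_local_page, and each completed page gets flush_dcache_page plus SetPageUptodate. Below is a minimal user-space sketch of that streaming pattern, using plain zlib and flat 4 KiB buffers in place of struct page; the function and buffer names are illustrative, not taken from the kernel source, and it assumes every slot holds a buffer (the kernel also copes with NULL entries).

    /*
     * Sketch (assumed names, not kernel code): inflate a compressed block
     * into an array of PAGE_SIZE output buffers, driving the zlib output
     * window page by page the way the matched lines do.
     */
    #include <string.h>
    #include <zlib.h>

    #define PAGE_SIZE 4096

    static long inflate_into_pages(const unsigned char *in, unsigned in_len,
                                   unsigned char **pages, unsigned pcount)
    {
        z_stream stream;
        unsigned curpage = 0;
        long out_bytes = 0;
        int zerr;

        memset(&stream, 0, sizeof(stream));
        if (inflateInit(&stream) != Z_OK)
            return -1;

        stream.next_in = (unsigned char *)in;
        stream.avail_in = in_len;

        while (curpage < pcount && stream.avail_in > 0) {
            /* Window the output onto the current page (kmap_local_page
             * in the kernel). */
            stream.next_out = pages[curpage];
            stream.avail_out = PAGE_SIZE;

            /* Keep inflating until this page is full or input runs out. */
            do {
                zerr = inflate(&stream, Z_SYNC_FLUSH);
            } while (zerr == Z_OK && stream.avail_out && stream.avail_in);

            if (zerr != Z_OK && zerr != Z_STREAM_END) {
                inflateEnd(&stream);
                return -1;
            }

            out_bytes += PAGE_SIZE - stream.avail_out;
            curpage++;   /* page done; the kernel marks it uptodate here */

            if (zerr == Z_STREAM_END)
                break;
        }

        inflateEnd(&stream);
        return out_bytes;
    }
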
203 * Uncompress data so that pages[full_page] is fully uptodate and possibly
204 * fills in other pages if we have data for them.
207 struct page **pages)
222 BUG_ON(!pages[full_page]);
227 * pages with the data we have anyway...
229 start_off = page_offset(pages[full_page]);
268 pcount, pages, poffset, &err);
270 pages += poffset >> PAGE_SHIFT;
290 if (poffset && *pages) {
291 memzero_page(*pages, poffset, PAGE_SIZE - poffset);
292 SetPageUptodate(*pages);
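
Lines 203-292 belong to zisofs_fill_pages(), the helper called at line 353: it computes the byte range of the target page from page_offset(pages[full_page]), runs the block decompressor over the covering compressed extents, advances the pages pointer by poffset >> PAGE_SHIFT, and finally zero-fills the tail of the last page that received data before marking it uptodate. The offset arithmetic is the easy part to get wrong, so here is a small hedged sketch of the tail-zeroing step with plain buffers standing in for struct page; the names are illustrative only.

    #include <string.h>

    #define PAGE_SIZE  4096UL
    #define PAGE_SHIFT 12

    /*
     * Sketch: after decompressing out_bytes bytes into the buffer array,
     * zero the unused tail of the last partially filled buffer so stale
     * data never becomes visible, mirroring the memzero_page() call above.
     */
    static void zero_partial_tail(unsigned char **pages, unsigned long out_bytes)
    {
        unsigned long poffset = out_bytes & (PAGE_SIZE - 1); /* offset in last page */

        pages += out_bytes >> PAGE_SHIFT;   /* step to the partially filled page */
        if (poffset && *pages)
            memset(*pages + poffset, 0, PAGE_SIZE - poffset);
        /* the kernel then calls SetPageUptodate() on that page */
    }
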
299 * per reference. We inject the additional pages into the page
313 struct page **pages;
338 pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1),
339 sizeof(*pages), GFP_KERNEL);
340 if (!pages) {
344 pages[full_page] = page;
348 pages[i] = grab_cache_page_nowait(mapping, index);
349 if (pages[i])
350 ClearPageError(pages[i]);
353 err = zisofs_fill_pages(inode, full_page, pcount, pages);
355 /* Release any residual pages, do not SetPageUptodate */
357 if (pages[i]) {
358 flush_dcache_page(pages[i]);
360 SetPageError(pages[i]);
361 unlock_page(pages[i]);
363 put_page(pages[i]);
368 kfree(pages);
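
The final group (lines 299-368) is the read path that sets up and tears down the temporary page array: kcalloc() an array of page pointers sized for one compressed block, slot the page being read in at full_page, opportunistically grab the neighbouring page-cache pages with grab_cache_page_nowait() (a failure simply leaves a NULL entry), call zisofs_fill_pages(), then flush, unlock and drop every extra page before kfree()ing the array. A hedged user-space sketch of that allocate / populate / clean-up lifecycle follows, with malloc'd buffers standing in for page-cache pages; every name in it is illustrative, not from the kernel source.

    #include <stdlib.h>

    #define BUFS_PER_BLOCK 8    /* stands in for zisofs_pages_per_cblock */

    /*
     * Sketch: build a temporary array of buffers around the caller's buffer
     * at index full_idx (assumed < count), run fill() over it, then release
     * everything allocated here -- but never the caller's own buffer.
     */
    static int fill_block(unsigned char *callers_buf, unsigned full_idx,
                          int (*fill)(unsigned char **bufs, unsigned count))
    {
        unsigned count = BUFS_PER_BLOCK > 1 ? BUFS_PER_BLOCK : 1;
        unsigned char **bufs;
        unsigned i;
        int err;

        bufs = calloc(count, sizeof(*bufs));
        if (!bufs)
            return -1;

        bufs[full_idx] = callers_buf;
        for (i = 0; i < count; i++) {
            if (i == full_idx)
                continue;
            /* Allocation may fail; a NULL slot just means "no buffer here",
             * like a grab_cache_page_nowait() failure in the matched lines. */
            bufs[i] = malloc(4096);
        }

        err = fill(bufs, count);

        for (i = 0; i < count; i++) {
            if (i != full_idx)
                free(bufs[i]);      /* free(NULL) is a no-op */
        }
        free(bufs);
        return err;
    }
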