Lines Matching refs:page
84 * zero_partial_compressed_page - zero out of bounds compressed page region
86 static void zero_partial_compressed_page(struct page *page,
89 u8 *kp = page_address(page);
92 ntfs_debug("Zeroing page region outside initialized size.");
93 if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
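The listing shows only the helper's entry and its whole-page test (lines 84-93). A minimal sketch of how the body plausibly completes, assuming the partial-page case clears the tail with memset() from the offset of initialized_size within the page (kp_ofs is a hypothetical local):

static void zero_partial_compressed_page(struct page *page,
		const s64 initialized_size)
{
	u8 *kp = page_address(page);
	unsigned int kp_ofs;	/* hypothetical local, not in the listing */

	ntfs_debug("Zeroing page region outside initialized size.");
	if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
		/* Page lies entirely beyond initialized_size: zero it all. */
		clear_page(kp);
		return;
	}
	/* Otherwise zero only the tail past initialized_size. */
	kp_ofs = initialized_size & ~PAGE_MASK;
	memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs);
}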
103 * handle_bounds_compressed_page - test for & handle out of bounds compressed page
105 static inline void handle_bounds_compressed_page(struct page *page,
108 if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
110 zero_partial_compressed_page(page, initialized_size);
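Combining the matched condition at line 108 with the helper above, the wrapper plausibly reads as below; the initialized_size < i_size guard comes straight from the fragment and skips the common case of a fully initialized attribute:

static inline void handle_bounds_compressed_page(struct page *page,
		const loff_t i_size, const s64 initialized_size)
{
	/*
	 * Zeroing is only needed if the page reaches past the
	 * initialized size while the attribute extends further.
	 */
	if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
			(initialized_size < i_size))
		zero_partial_compressed_page(page, initialized_size);
}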
122 * @xpage: the target page (-1 if none) (IN)
134 * and at offset @dest_pos into the page @dest_pages[@dest_index].
136 * When the page @dest_pages[@xpage] is completed, @xpage_done is set to 1.
143 * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
152 static int ntfs_decompress(struct page *dest_pages[], int completed_pages[],
168 struct page *dp; /* Current destination page being worked on. */
213 * the out of bounds page range.
250 /* Get the current destination page. */
253 /* No page present. Skip decompression of this sub-block. */
263 /* We have a valid destination page. Setup the destination pointers. */
287 * First stage: add current page index to array of
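ntfs_decompress() walks a compression block one LZNT1 sub-block at a time: line 253 shows a sub-block being skipped outright when no destination page is present, which requires decoding the sub-block's two-byte header to know how far to advance the source pointer. The hypothetical helpers below (not in the listing) show that decode, assuming the NTFS_SB_SIZE_MASK and NTFS_SB_IS_COMPRESSED constants the NTFS driver defines for this on-disk format:

#define NTFS_SB_SIZE_MASK	0x0fff	/* assumed driver constant */
#define NTFS_SB_IS_COMPRESSED	0x8000	/* assumed driver constant */

static inline unsigned int ntfs_sb_total_size(const u8 *cb)
{
	/*
	 * The low 12 bits store (payload size - 1); adding 3 covers
	 * that bias plus the two header bytes themselves.
	 */
	return (le16_to_cpup((const __le16 *)cb) & NTFS_SB_SIZE_MASK) + 3;
}

static inline bool ntfs_sb_is_compressed(const u8 *cb)
{
	/* Bit 15 set means the payload is LZ77-compressed. */
	return le16_to_cpup((const __le16 *)cb) & NTFS_SB_IS_COMPRESSED;
}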
428 * ntfs_read_compressed_block - read a compressed block into the page cache
429 * @page: locked page in the compression block(s) we need to read
431 * When we are called, the page has already been verified to be locked and the
434 * 1. Determine which compression block(s) @page is in.
447 * of bounds remainder of the page in question and mark it as handled. At the
448 * moment we would just return -EIO on such a page. This bug will only become
462 int ntfs_read_compressed_block(struct page *page)
466 struct address_space *mapping = page->mapping;
475 unsigned long offset, index = page->index;
494 * compression blocks (cbs) overlapping @page. Due to alignment
502 struct page **pages;
506 ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
515 pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
526 unlock_page(page);
532 * We have already been given one page; this is the one we must do.
537 pages[xpage] = page;
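Lines 502-537 set up the bookkeeping: an array with one slot per page touched by the overlapping cbs, with the caller's locked page dropped in at its slot xpage. A sketch, assuming -ENOMEM is the error return when the allocation fails (the listing shows only the unlock on that path):

	pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (unlikely(!pages)) {
		unlock_page(page);
		return -ENOMEM;	/* assumed error code */
	}
	/*
	 * We have already been given one page; this is the one we must do.
	 * xpage is its index relative to the first page the cbs cover.
	 */
	pages[xpage] = page;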
539 * The remaining pages need to be allocated and inserted into the page
548 /* Is the page fully outside i_size? (truncate in progress) */
553 zero_user(page, 0, PAGE_SIZE);
555 SetPageUptodate(page);
556 unlock_page(page);
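Lines 548-556 handle a racing truncate: if the target page now lies wholly beyond i_size, it is zeroed, marked uptodate, and unlocked, and the read succeeds trivially. A sketch, assuming a max_page local (not in the listing) counting the pages still covered by i_size:

	/* Is the page fully outside i_size? (truncate in progress) */
	if (xpage >= max_page) {	/* max_page is assumed */
		kfree(pages);
		zero_user(page, 0, PAGE_SIZE);
		ntfs_debug("Compressed read outside i_size - truncated?");
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}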
564 page = pages[i];
565 if (page) {
567 * We only (re)read the page if it isn't already read
571 if (!PageDirty(page) && (!PageUptodate(page) ||
572 PageError(page))) {
573 ClearPageError(page);
574 kmap(page);
577 unlock_page(page);
578 put_page(page);
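Lines 564-578 fill in the remaining slots: each present page is kmapped for the later copy/decompress phase, but only pages that are not dirty and either not uptodate or marked with an error are kept for (re)reading; anything else is unlocked and released so valid cache contents are not clobbered. A sketch, assuming grab_cache_page_nowait() does the allocation:

	for (i = 0; i < nr_pages; i++) {
		if (i != xpage)
			pages[i] = grab_cache_page_nowait(mapping,
					offset + i);	/* assumed allocator */
		page = pages[i];
		if (page) {
			/*
			 * We only (re)read the page if it isn't already read
			 * in and uptodate, or if it previously failed.
			 */
			if (!PageDirty(page) && (!PageUptodate(page) ||
					PageError(page))) {
				ClearPageError(page);
				kmap(page);
				continue;
			}
			unlock_page(page);
			put_page(page);
			pages[i] = NULL;
		}
	}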
721 /* The last page and maximum offset within it for the current cb. */
731 /* Sparse cb; zero out the page range overlapping the cb. */
738 page = pages[cur_page];
739 if (page) {
741 clear_page(page_address(page));
743 memset(page_address(page) + cur_ofs, 0,
746 flush_dcache_page(page);
747 kunmap(page);
748 SetPageUptodate(page);
749 unlock_page(page);
753 put_page(page);
761 /* If we have a partial final page, deal with it now. */
763 page = pages[cur_page];
764 if (page)
765 memset(page_address(page) + cur_ofs, 0,
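A sparse cb (lines 731-765) decompresses to all zeroes, so every overlapping page is simply cleared: clear_page() when the cb covers the whole page, memset() from cur_ofs when it covers only part, with a trailing partial page handled after the loop. Each completed page is flushed, unmapped, marked uptodate, unlocked, and released unless it is the target page. A sketch, with cb_max_page/cb_max_ofs assumed to bound the cb as per the comment at line 721:

	/* Sparse cb; zero out the page range overlapping the cb. */
	for (; cur_page < cb_max_page; cur_page++) {
		page = pages[cur_page];
		if (page) {
			if (likely(!cur_ofs))
				clear_page(page_address(page));
			else
				memset(page_address(page) + cur_ofs, 0,
						PAGE_SIZE - cur_ofs);
			flush_dcache_page(page);
			kunmap(page);
			SetPageUptodate(page);
			unlock_page(page);
			if (cur_page == xpage)
				xpage_done = 1;
			else
				put_page(page);
			pages[cur_page] = NULL;
		}
		cur_ofs = 0;
	}
	/* If we have a partial final page, deal with it now. */
	if (cb_max_ofs) {
		page = pages[cur_page];
		if (page)
			memset(page_address(page) + cur_ofs, 0,
					cb_max_ofs - cur_ofs);
		cur_ofs = cb_max_ofs;
	}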
795 page = pages[cur_page];
796 if (page)
797 memcpy(page_address(page) + cur_ofs, cb_pos,
804 /* If we have a partial final page, deal with it now. */
806 page = pages[cur_page];
807 if (page)
808 memcpy(page_address(page) + cur_ofs, cb_pos,
817 page = pages[cur2_page];
818 if (page) {
821 * the out of bounds page range.
823 handle_bounds_compressed_page(page, i_size,
825 flush_dcache_page(page);
826 kunmap(page);
827 SetPageUptodate(page);
828 unlock_page(page);
832 put_page(page);
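An uncompressed cb (lines 795-832) is handled in two stages: first its data is memcpy()d into every overlapping page, with the trailing partial page copied separately, then a second loop finalizes those pages, calling handle_bounds_compressed_page() to zero anything past initialized_size before flushing, unmapping, and unlocking. A sketch, with cur2_page assumed to have been saved at the stage-one starting page:

	/* First stage: copy the cb data into the destination pages. */
	for (; cur_page < cb_max_page; cur_page++) {
		page = pages[cur_page];
		if (page)
			memcpy(page_address(page) + cur_ofs, cb_pos,
					PAGE_SIZE - cur_ofs);
		cb_pos += PAGE_SIZE - cur_ofs;
		cur_ofs = 0;
	}
	/* If we have a partial final page, deal with it now. */
	if (cb_max_ofs) {
		page = pages[cur_page];
		if (page)
			memcpy(page_address(page) + cur_ofs, cb_pos,
					cb_max_ofs - cur_ofs);
		cb_pos += cb_max_ofs - cur_ofs;
		cur_ofs = cb_max_ofs;
	}
	/* Second stage: finalize the pages the copy completed. */
	for (; cur2_page < cb_max_page; cur2_page++) {
		page = pages[cur2_page];
		if (page) {
			/*
			 * If the page reaches past initialized_size, zero
			 * the out of bounds page range.
			 */
			handle_bounds_compressed_page(page, i_size,
					initialized_size);
			flush_dcache_page(page);
			kunmap(page);
			SetPageUptodate(page);
			unlock_page(page);
			if (cur2_page == xpage)
				xpage_done = 1;
			else
				put_page(page);
			pages[cur2_page] = NULL;
		}
	}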
841 /* Compressed cb; decompress it into the destination page(s). */
860 page = pages[prev_cur_page];
861 if (page) {
862 flush_dcache_page(page);
863 kunmap(page);
864 unlock_page(page);
866 put_page(page);
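For a compressed cb (lines 841-866), ntfs_decompress() does the heavy lifting and advances cur_page past every page it fully completed; the caller then releases those pages. A sketch of that release loop; the decompressor's full argument list is elided in the listing, so it is left out here too:

	/* Compressed cb; decompress it into the destination page(s). */
	prev_cur_page = cur_page;
	err = ntfs_decompress(pages, completed_pages,
			/* remaining arguments elided in the listing */);
	/* Release the pages the decompressor consumed. */
	for (; prev_cur_page < cur_page; prev_cur_page++) {
		page = pages[prev_cur_page];
		if (page) {
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (prev_cur_page != xpage)
				put_page(page);
			pages[prev_cur_page] = NULL;
		}
	}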
886 page = pages[cur_page];
887 if (page) {
890 "prejudice. Inode 0x%lx, page index "
891 "0x%lx.", ni->mft_no, page->index);
892 flush_dcache_page(page);
893 kunmap(page);
894 unlock_page(page);
896 put_page(page);
905 /* If we have completed the requested page, we return success. */
938 page = pages[i];
939 if (page) {
940 flush_dcache_page(page);
941 kunmap(page);
942 unlock_page(page);
944 put_page(page);
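The final fragments (lines 938-944) apply the same release pattern once more during teardown. Combined with the success check at line 905, the function plausibly ends as below; returning -EIO when the target page was not completed is an assumption consistent with the FIXME in the function's header comment:

	/* If we have completed the requested page, we return success. */
	if (likely(xpage_done)) {
		kfree(pages);
		return 0;
	}
	/* Otherwise release whatever we still hold and report failure. */
	for (i = 0; i < nr_pages; i++) {
		page = pages[i];
		if (page) {
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (i != xpage)
				put_page(page);
		}
	}
	kfree(pages);
	return -EIO;	/* assumed error code */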