Lines Matching refs:page

42 /* Extract a hash from a hash page */
43 static void extract_hash(struct page *hpage, unsigned int hoffset,
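The signature above is cut off at the matched line; the remaining parameters do not contain the word "page" and so are not listed. A minimal sketch of what such a helper plausibly looks like, assuming it copies hsize bytes of a hash out of a temporarily mapped page (the hsize and out parameters are assumptions here, not part of the listing):

    /* Sketch only -- the hsize and out parameters are assumed from context. */
    static void extract_hash(struct page *hpage, unsigned int hoffset,
                             unsigned int hsize, u8 *out)
    {
            void *virt = kmap_atomic(hpage);

            memcpy(out, virt + hoffset, hsize);
            kunmap_atomic(virt);
    }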
70 * Verify a single data page against the file's Merkle tree.
74 * only ascend the tree until an already-verified page is seen, as indicated by
75 * the PageChecked bit being set; then verify the path to that page.
81 * Note that multiple processes may race to verify a hash page and mark it
84 * Return: true if the page is valid, else false.
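The comment block above describes the core idea: hash pages cached by the filesystem carry the PageChecked bit once verified, so the walk up the tree can stop at the first checked page instead of always going to the root. A rough sketch of that ascent, under the assumption that a helper (called hash_at_level() here) maps a data page index to the hash page index and byte offset at a given level, and that read_merkle_tree_page() stands in for the filesystem's Merkle-tree read hook:

    /* Sketch of the ascent only, not the exact kernel code. */
    for (level = 0; level < num_levels; level++) {
            unsigned long hindex;   /* hash page index in the Merkle tree */
            unsigned int hoffset;   /* byte offset of the hash within it */
            struct page *hpage;

            /* hash_at_level() is an assumed helper. */
            hash_at_level(params, index, level, &hindex, &hoffset);

            hpage = read_merkle_tree_page(inode, hindex);
            if (IS_ERR(hpage))
                    goto error;

            if (PageChecked(hpage)) {
                    /*
                     * Already-trusted ancestor: its hash becomes the
                     * expected value and the ascent stops here.
                     */
                    extract_hash(hpage, hoffset, hsize, want_hash);
                    put_page(hpage);
                    break;
            }
            /* Not verified yet: save it for the descent. */
            hpages[level] = hpage;
            hoffsets[level] = hoffset;
    }
    /* If no checked page was found, want_hash is the file's root hash. */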
87 struct ahash_request *req, struct page *data_page,
97 struct page *hpages[FS_VERITY_MAX_LEVELS];
104 pr_debug_ratelimited("Verifying data page %lu...\n", index);
115 * the way until we find a verified hash page, indicated by PageChecked;
121 struct page *hpage;
133 "Error %d reading Merkle tree page %lu",
142 pr_debug_ratelimited("Hash page already checked, want %s:%*phN\n",
147 pr_debug_ratelimited("Hash page not yet checked\n");
158 struct page *hpage = hpages[level - 1];
171 pr_debug("Verified hash page at level %d, now want %s:%*phN\n",
175 /* Finally, verify the data page */
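After the ascent, the saved path is verified top-down: each saved hash page is hashed and compared against the expected hash from the level above, marked PageChecked, and then provides the expected hash for the level below; last of all, the data page itself is checked. A sketch of that descent; fsverity_hash_page() and cmp_hashes() are names assumed from context, not shown in the listing:

    /* Sketch of the descent and the final data-page check. */
    for (; level > 0; level--) {
            struct page *hpage = hpages[level - 1];
            unsigned int hoffset = hoffsets[level - 1];

            err = fsverity_hash_page(params, inode, req, hpage, real_hash);
            if (err)
                    goto out;
            err = cmp_hashes(vi, want_hash, real_hash, index, level - 1);
            if (err)
                    goto out;

            /* Now trusted: later verifications can stop at this page. */
            SetPageChecked(hpage);
            extract_hash(hpage, hoffset, hsize, want_hash);
            put_page(hpage);
    }

    err = fsverity_hash_page(params, inode, req, data_page, real_hash);
    if (!err)
            err = cmp_hashes(vi, want_hash, real_hash, index, -1);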
188 * fsverity_verify_page() - verify a data page
189  * @page: the page to verify
191 * Verify a page that has just been read from a verity file. The page must be a
192 * pagecache page that is still locked and not yet uptodate.
194 * Return: true if the page is valid, else false.
196 bool fsverity_verify_page(struct page *page)
198 struct inode *inode = page->mapping->host;
206 valid = verify_page(inode, vi, req, page, 0);
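As the verify_page(inode, vi, req, page, 0) call above suggests, fsverity_verify_page() is essentially a thin wrapper: set up a hash request, run verify_page() with no Merkle tree readahead (the trailing 0), and clean up. A sketch of that shape; the request alloc/free helpers are assumptions here, since only the verify_page() call appears in the listing:

    bool fsverity_verify_page(struct page *page)
    {
            struct inode *inode = page->mapping->host;
            const struct fsverity_info *vi = inode->i_verity_info;
            struct ahash_request *req;
            bool valid;

            /* Helper names assumed; only the verify_page() call is listed. */
            req = fsverity_alloc_hash_request(vi->tree_params.hash_alg,
                                              GFP_NOFS);

            valid = verify_page(inode, vi, req, page, 0);

            fsverity_free_hash_request(vi->tree_params.hash_alg, req);

            return valid;
    }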
225 * that issue bios to read data directly into the page cache. Filesystems that
226 * populate the page cache without issuing bios (e.g. non block-based
227 * filesystems) must instead call fsverity_verify_page() directly on each page.
247 * when a Merkle tree page is read, we also try to piggy-back on
259 struct page *page = bv->bv_page;
260 unsigned long level0_index = page->index >> params->log_arity;
264 if (!PageError(page) &&
265 !verify_page(inode, vi, req, page, level0_ra_pages))
266 SetPageError(page);
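Pages that fail verification in this loop are simply flagged with PageError; it is then up to the calling filesystem not to mark them uptodate. For context, an illustrative sketch (not derived from the listing) of how a block-based filesystem's read-completion path might use the bio-level helper, fsverity_verify_bio():

    /* Illustrative caller sketch, assuming a bio-based read path. */
    static void my_fs_finish_read_bio(struct bio *bio)
    {
            struct bio_vec *bv;
            struct bvec_iter_all iter_all;

            if (!bio->bi_status)
                    fsverity_verify_bio(bio);

            bio_for_each_segment_all(bv, bio, iter_all) {
                    struct page *page = bv->bv_page;

                    if (!bio->bi_status && !PageError(page))
                            SetPageUptodate(page);
                    unlock_page(page);
            }
            bio_put(bio);
    }

In practice, filesystems typically defer this step to a workqueue rather than doing it directly in bio completion context, since hashing the pages may sleep.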