// SPDX-License-Identifier: GPL-2.0
/*
 * Data verification functions, i.e. hooks for ->readahead()
 *
 * Copyright 2019 Google LLC
 */

#include "fsverity_private.h"

#include <crypto/hash.h>
#include <linux/bio.h>

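/*
 * Workqueue on which deferred verification work runs; see
 * fsverity_enqueue_verify_work() and fsverity_init_workqueue() below.
 */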
static struct workqueue_struct *fsverity_read_workqueue;

/*
 * Returns true if the hash block with index @hblock_idx in the tree, located in
 * @hpage, has already been verified.
 */
static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage,
				   unsigned long hblock_idx)
{
	bool verified;
	unsigned int blocks_per_page;
	unsigned int i;

	/*
	 * When the Merkle tree block size and page size are the same, then the
	 * ->hash_block_verified bitmap isn't allocated, and we use PG_checked
	 * to directly indicate whether the page's block has been verified.
	 *
	 * Using PG_checked also guarantees that we re-verify hash pages that
	 * get evicted and re-instantiated from the backing storage, as new
	 * pages always start out with PG_checked cleared.
	 */
	if (!vi->hash_block_verified)
		return PageChecked(hpage);

	/*
	 * When the Merkle tree block size and page size differ, we use a bitmap
	 * to indicate whether each hash block has been verified.
	 *
	 * However, we still need to ensure that hash pages that get evicted and
	 * re-instantiated from the backing storage are re-verified. To do
	 * this, we use PG_checked again, but now it doesn't really mean
	 * "checked". Instead, now it just serves as an indicator for whether
	 * the hash page is newly instantiated or not.
	 *
	 * The first thread that sees PG_checked=0 must clear the corresponding
	 * bitmap bits, then set PG_checked=1. This requires a spinlock. To
	 * avoid having to take this spinlock in the common case of
	 * PG_checked=1, we start with an opportunistic lockless read.
	 */
	if (PageChecked(hpage)) {
		/*
		 * A read memory barrier is needed here to give ACQUIRE
		 * semantics to the above PageChecked() test.
		 */
		smp_rmb();
		return test_bit(hblock_idx, vi->hash_block_verified);
	}
	spin_lock(&vi->hash_page_init_lock);
	if (PageChecked(hpage)) {
		verified = test_bit(hblock_idx, vi->hash_block_verified);
	} else {
		blocks_per_page = vi->tree_params.blocks_per_page;
		hblock_idx = round_down(hblock_idx, blocks_per_page);
		for (i = 0; i < blocks_per_page; i++)
			clear_bit(hblock_idx + i, vi->hash_block_verified);
		/*
		 * A write memory barrier is needed here to give RELEASE
		 * semantics to the below SetPageChecked() operation.
		 */
		smp_wmb();
		SetPageChecked(hpage);
		verified = false;
	}
	spin_unlock(&vi->hash_page_init_lock);
	return verified;
}

/*
 * Verify a single data block against the file's Merkle tree.
 *
 * In principle, we need to verify the entire path to the root node. However,
 * for efficiency the filesystem may cache the hash blocks. Therefore we need
 * only ascend the tree until an already-verified hash block is seen, and then
 * verify the path to that block.
 *
 * Return: %true if the data block is valid, else %false.
 */
static bool
verify_data_block(struct inode *inode, struct fsverity_info *vi,
		  const void *data, u64 data_pos, unsigned long max_ra_pages)
{
	const struct merkle_tree_params *params = &vi->tree_params;
	const unsigned int hsize = params->digest_size;
	int level;
	u8 _want_hash[FS_VERITY_MAX_DIGEST_SIZE];
	const u8 *want_hash;
	u8 real_hash[FS_VERITY_MAX_DIGEST_SIZE];
	/* The hash blocks that are traversed, indexed by level */
	struct {
		/* Page containing the hash block */
		struct page *page;
		/* Mapped address of the hash block (will be within @page) */
		const void *addr;
		/* Index of the hash block in the tree overall */
		unsigned long index;
		/* Byte offset of the wanted hash relative to @addr */
		unsigned int hoffset;
	} hblocks[FS_VERITY_MAX_LEVELS];
	/*
	 * The index of the previous level's block within that level; also the
	 * index of that block's hash within the current level.
	 */
	u64 hidx = data_pos >> params->log_blocksize;

	/* Up to 1 + FS_VERITY_MAX_LEVELS pages may be mapped at once */
	BUILD_BUG_ON(1 + FS_VERITY_MAX_LEVELS > KM_MAX_IDX);

	if (unlikely(data_pos >= inode->i_size)) {
		/*
		 * This can happen in the data page spanning EOF when the Merkle
		 * tree block size is less than the page size. The Merkle tree
		 * doesn't cover data blocks fully past EOF. But the entire
		 * page spanning EOF can be visible to userspace via a mmap, and
		 * any part past EOF should be all zeroes. Therefore, we need
		 * to verify that any data blocks fully past EOF are all zeroes.
		 */
		if (memchr_inv(data, 0, params->block_size)) {
			fsverity_err(inode,
				     "FILE CORRUPTED! Data past EOF is not zeroed");
			return false;
		}
		return true;
	}

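	/*
	 * With CONFIG_SECURITY_CODE_SIGN, the Merkle tree may cover only the
	 * first ->verified_data_size bytes of the file. Data beyond that
	 * range isn't protected by verity, so accept it without verification.
	 */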
#ifdef CONFIG_SECURITY_CODE_SIGN
	if (data_pos >= vi->verified_data_size) {
		pr_debug_ratelimited("Data[%llu] out of verity range %llu\n",
				     data_pos, vi->verified_data_size);
		return true;
	}
#endif
	/*
	 * Starting at the leaf level, ascend the tree saving hash blocks along
	 * the way until we find a hash block that has already been verified, or
	 * until we reach the root.
	 */
	for (level = 0; level < params->num_levels; level++) {
		unsigned long next_hidx;
		unsigned long hblock_idx;
		pgoff_t hpage_idx;
		unsigned int hblock_offset_in_page;
		unsigned int hoffset;
		struct page *hpage;
		const void *haddr;

		/*
		 * The index of the block in the current level; also the index
		 * of that block's hash within the next level.
		 */
		next_hidx = hidx >> params->log_arity;

		/* Index of the hash block in the tree overall */
		hblock_idx = params->level_start[level] + next_hidx;

		/* Index of the hash page in the tree overall */
		hpage_idx = hblock_idx >> params->log_blocks_per_page;

		/* Byte offset of the hash block within the page */
		hblock_offset_in_page =
			(hblock_idx << params->log_blocksize) & ~PAGE_MASK;

		/* Byte offset of the hash within the block */
		hoffset = (hidx << params->log_digestsize) &
			  (params->block_size - 1);

		hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode,
				hpage_idx, level == 0 ? min(max_ra_pages,
					params->tree_pages - hpage_idx) : 0);
		if (IS_ERR(hpage)) {
			fsverity_err(inode,
				     "Error %ld reading Merkle tree page %lu",
				     PTR_ERR(hpage), hpage_idx);
			goto error;
		}
		haddr = kmap_local_page(hpage) + hblock_offset_in_page;
		if (is_hash_block_verified(vi, hpage, hblock_idx)) {
			memcpy(_want_hash, haddr + hoffset, hsize);
			want_hash = _want_hash;
			kunmap_local(haddr);
			put_page(hpage);
			goto descend;
		}
		hblocks[level].page = hpage;
		hblocks[level].addr = haddr;
		hblocks[level].index = hblock_idx;
		hblocks[level].hoffset = hoffset;
		hidx = next_hidx;
	}

	want_hash = vi->root_hash;
descend:
	/* Descend the tree verifying hash blocks. */
	for (; level > 0; level--) {
		struct page *hpage = hblocks[level - 1].page;
		const void *haddr = hblocks[level - 1].addr;
		unsigned long hblock_idx = hblocks[level - 1].index;
		unsigned int hoffset = hblocks[level - 1].hoffset;

		if (fsverity_hash_block(params, inode, haddr, real_hash) != 0)
			goto error;
		if (memcmp(want_hash, real_hash, hsize) != 0)
			goto corrupted;
		/*
		 * Mark the hash block as verified. This must be atomic and
		 * idempotent, as the same hash block might be verified by
		 * multiple threads concurrently.
		 */
		if (vi->hash_block_verified)
			set_bit(hblock_idx, vi->hash_block_verified);
		else
			SetPageChecked(hpage);
		memcpy(_want_hash, haddr + hoffset, hsize);
		want_hash = _want_hash;
		kunmap_local(haddr);
		put_page(hpage);
	}

	/* Finally, verify the data block. */
	if (fsverity_hash_block(params, inode, data, real_hash) != 0)
		goto error;
	if (memcmp(want_hash, real_hash, hsize) != 0)
		goto corrupted;
	return true;

corrupted:
	fsverity_err(inode,
		     "FILE CORRUPTED! pos=%llu, level=%d, want_hash=%s:%*phN, real_hash=%s:%*phN",
		     data_pos, level - 1,
		     params->hash_alg->name, hsize, want_hash,
		     params->hash_alg->name, hsize, real_hash);
error:
	for (; level > 0; level--) {
		kunmap_local(hblocks[level - 1].addr);
		put_page(hblocks[level - 1].page);
	}
	return false;
}

static bool
verify_data_blocks(struct folio *data_folio, size_t len, size_t offset,
		   unsigned long max_ra_pages)
{
	struct inode *inode = data_folio->mapping->host;
	struct fsverity_info *vi = inode->i_verity_info;
	const unsigned int block_size = vi->tree_params.block_size;
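	/* Byte position of the folio within the file */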
	u64 pos = (u64)data_folio->index << PAGE_SHIFT;

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offset, block_size)))
		return false;
	if (WARN_ON_ONCE(!folio_test_locked(data_folio) ||
			 folio_test_uptodate(data_folio)))
		return false;
	do {
		void *data;
		bool valid;

		data = kmap_local_folio(data_folio, offset);
		valid = verify_data_block(inode, vi, data, pos + offset,
					  max_ra_pages);
		kunmap_local(data);
		if (!valid)
			return false;
		offset += block_size;
		len -= block_size;
	} while (len);
	return true;
}

/**
 * fsverity_verify_blocks() - verify data in a folio
 * @folio: the folio containing the data to verify
 * @len: the length of the data to verify in the folio
 * @offset: the offset of the data to verify in the folio
 *
 * Verify data that has just been read from a verity file. The data must be
 * located in a pagecache folio that is still locked and not yet uptodate. The
 * length and offset of the data must be Merkle tree block size aligned.
 *
 * Return: %true if the data is valid, else %false.
 */
bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset)
{
	return verify_data_blocks(folio, len, offset, 0);
}
EXPORT_SYMBOL_GPL(fsverity_verify_blocks);
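
/*
 * A minimal usage sketch, with hypothetical surrounding code: a filesystem
 * that fills folios without issuing bios might call this from its
 * ->read_folio() implementation after reading the data, along the lines of:
 *
 *	if (fsverity_active(inode) &&
 *	    !fsverity_verify_blocks(folio, folio_size(folio), 0))
 *		return -EIO;
 *	folio_mark_uptodate(folio);
 */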

#ifdef CONFIG_BLOCK
/**
 * fsverity_verify_bio() - verify a 'read' bio that has just completed
 * @bio: the bio to verify
 *
 * Verify the bio's data against the file's Merkle tree. All bio data segments
 * must be aligned to the file's Merkle tree block size. If any data fails
 * verification, then bio->bi_status is set to an error status.
 *
 * This is a helper function for use by the ->readahead() method of filesystems
 * that issue bios to read data directly into the page cache. Filesystems that
 * populate the page cache without issuing bios (e.g. non block-based
 * filesystems) must instead call fsverity_verify_page() directly on each page.
 * All filesystems must also call fsverity_verify_page() on holes.
 */
void fsverity_verify_bio(struct bio *bio)
{
	struct folio_iter fi;
	unsigned long max_ra_pages = 0;

	if (bio->bi_opf & REQ_RAHEAD) {
		/*
		 * If this bio is for data readahead, then we also do readahead
		 * of the first (largest) level of the Merkle tree. Namely,
		 * when a Merkle tree page is read, we also try to piggy-back on
		 * some additional pages -- up to 1/4 the number of data pages.
		 *
		 * This improves sequential read performance, as it greatly
		 * reduces the number of I/O requests made to the Merkle tree.
		 */
		max_ra_pages = bio->bi_iter.bi_size >> (PAGE_SHIFT + 2);
	}

	bio_for_each_folio_all(fi, bio) {
		if (!verify_data_blocks(fi.folio, fi.length, fi.offset,
					max_ra_pages)) {
			bio->bi_status = BLK_STS_IOERR;
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(fsverity_verify_bio);
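
/*
 * Hypothetical sketch of a caller: a block-based filesystem's read-bio
 * completion path might run this helper and then check bi_status to decide
 * whether to mark the folios uptodate:
 *
 *	static void fs_read_end_io(struct bio *bio)
 *	{
 *		if (bio->bi_status == BLK_STS_OK)
 *			fsverity_verify_bio(bio);
 *		...
 *	}
 */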
#endif /* CONFIG_BLOCK */

/**
 * fsverity_get_verified_data_size() - get verified data size of a verity file
 * @inode: the file's inode
 *
 * Return: the size in bytes of the file data that is covered by verity
 */
u64 fsverity_get_verified_data_size(const struct inode *inode)
{
#ifdef CONFIG_SECURITY_CODE_SIGN
	return fsverity_get_info(inode)->verified_data_size;
#else
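	/* Without code-signing support, verity covers the entire file. */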
	return inode->i_size;
#endif
}

/**
 * fsverity_enqueue_verify_work() - enqueue work on the fs-verity workqueue
 * @work: the work to enqueue
 *
 * Enqueue verification work for asynchronous processing.
 */
void fsverity_enqueue_verify_work(struct work_struct *work)
{
	queue_work(fsverity_read_workqueue, work);
}
EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work);
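
/*
 * Sketch of typical use, with hypothetical names: rather than hashing in bio
 * completion context, a filesystem can package the completed bio in a work
 * item and defer verification to the workqueue:
 *
 *	INIT_WORK(&ctx->work, fs_verity_work);
 *	fsverity_enqueue_verify_work(&ctx->work);
 *
 * where fs_verity_work() calls fsverity_verify_bio() and then finishes the
 * read.
 */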

void __init fsverity_init_workqueue(void)
{
	/*
	 * Use a high-priority workqueue to prioritize verification work, which
	 * blocks reads from completing, over regular application tasks.
	 *
	 * For performance reasons, don't use an unbound workqueue. Using an
	 * unbound workqueue for crypto operations causes excessive scheduler
	 * latency on ARM64.
	 */
	fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue",
						  WQ_HIGHPRI,
						  num_online_cpus());
	if (!fsverity_read_workqueue)
		panic("failed to allocate fsverity_read_queue");
}