// SPDX-License-Identifier: GPL-2.0
/*
 * fs/ext4/verity.c: fs-verity support for ext4
 *
 * Copyright 2019 Google LLC
 */

/*
 * Implementation of fsverity_operations for ext4.
 *
 * ext4 stores the verity metadata (Merkle tree and fsverity_descriptor) past
 * the end of the file, starting at the first 64K boundary beyond i_size.  This
 * approach works because (a) verity files are readonly, and (b) pages fully
 * beyond i_size aren't visible to userspace but can be read/written internally
 * by ext4 with only some relatively small changes.  This approach avoids
 * having to depend on the EA_INODE feature and on rearchitecting ext4's xattr
 * support to page multi-gigabyte xattrs into memory and to encrypt xattrs.
 * Note that the verity metadata *must* be encrypted when the file is, since it
 * contains hashes of the plaintext data.
 *
 * Using a 64K boundary rather than a 4K one keeps things ready for
 * architectures with 64K pages, and it doesn't necessarily waste space on-disk
 * since there can be a hole between i_size and the start of the Merkle tree.
 */
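
/*
 * For example (illustrative numbers only): with the 64K boundary described
 * above, a file with i_size == 1000 has its verity metadata begin at offset
 * 65536, while a file with i_size == 70000 has it begin at
 * round_up(70000, 65536) == 131072.  On a 4K-block filesystem, the blocks
 * between i_size and that offset can simply be left as a hole.
 */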

#include <linux/quotaops.h>

#include "ext4.h"
#include "ext4_extents.h"
#include "ext4_jbd2.h"

static inline loff_t ext4_verity_metadata_pos(const struct inode *inode)
{
	return round_up(inode->i_size, 65536);
}

/*
 * Read some verity metadata from the inode.  __vfs_read() can't be used because
 * we need to read beyond i_size.
 */
static int pagecache_read(struct inode *inode, void *buf, size_t count,
			  loff_t pos)
{
	while (count) {
		size_t n = min_t(size_t, count,
				 PAGE_SIZE - offset_in_page(pos));
		struct page *page;
		void *addr;

		page = read_mapping_page(inode->i_mapping, pos >> PAGE_SHIFT,
					 NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		addr = kmap_atomic(page);
		memcpy(buf, addr + offset_in_page(pos), n);
		kunmap_atomic(addr);

		put_page(page);

		buf += n;
		pos += n;
		count -= n;
	}
	return 0;
}

/*
 * Write some verity metadata to the inode for FS_IOC_ENABLE_VERITY.
 * kernel_write() can't be used because the file descriptor is readonly.
 */
static int pagecache_write(struct inode *inode, const void *buf, size_t count,
			   loff_t pos)
{
	if (pos + count > inode->i_sb->s_maxbytes)
		return -EFBIG;

	while (count) {
		size_t n = min_t(size_t, count,
				 PAGE_SIZE - offset_in_page(pos));
		struct page *page;
		void *fsdata = NULL;
		int res;

		res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0,
					    &page, &fsdata);
		if (res)
			return res;

		memcpy_to_page(page, offset_in_page(pos), buf, n);

		res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n,
					  page, fsdata);
		if (res < 0)
			return res;
		if (res != n)
			return -EIO;

		buf += n;
		pos += n;
		count -= n;
	}
	return 0;
}

static int ext4_begin_enable_verity(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	const int credits = 2; /* superblock and inode for ext4_orphan_add() */
	handle_t *handle;
	int err;

	if (IS_DAX(inode) || ext4_test_inode_flag(inode, EXT4_INODE_DAX))
		return -EINVAL;

	if (ext4_verity_in_progress(inode))
		return -EBUSY;

	/*
	 * Since the file was opened readonly, we have to initialize the jbd
	 * inode and quotas here and not rely on ->open() doing it.  This must
	 * be done before evicting the inline data.
	 */

	err = ext4_inode_attach_jinode(inode);
	if (err)
		return err;

	err = dquot_initialize(inode);
	if (err)
		return err;

	err = ext4_convert_inline_data(inode);
	if (err)
		return err;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		ext4_warning_inode(inode,
				   "verity is only allowed on extent-based files");
		return -EOPNOTSUPP;
	}

	/*
	 * ext4 uses the last allocated block to find the verity descriptor, so
	 * we must remove any other blocks past EOF which might confuse things.
	 */
	err = ext4_truncate(inode);
	if (err)
		return err;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_orphan_add(handle, inode);
	if (err == 0)
		ext4_set_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);

	ext4_journal_stop(handle);
	return err;
}

/*
 * ext4 stores the verity descriptor beginning on the next filesystem block
 * boundary after the Merkle tree.  Then, the descriptor size is stored in the
 * last 4 bytes of the last allocated filesystem block --- which is either the
 * block in which the descriptor ends, or the next block after that if there
 * weren't at least 4 bytes remaining.
 *
 * We can't simply store the descriptor in an xattr because it *must* be
 * encrypted when ext4 encryption is used, but ext4 encryption doesn't encrypt
 * xattrs.  Also, if the descriptor includes a large signature blob it may be
 * too large to store in an xattr without the EA_INODE feature.
 */
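/*
 * Worked example (illustrative numbers only): on a 4K-block filesystem,
 * suppose the Merkle tree starts at offset 65536 and is 12288 bytes long, and
 * the descriptor is 100 bytes.  The descriptor is then written at
 * round_up(65536 + 12288, 4096) == 77824 and ends at 77924, and the 4-byte
 * descriptor size is written at round_up(77924 + 4, 4096) - 4 == 81916, i.e.
 * in the last 4 bytes of the block that ends at 81920, which becomes the last
 * allocated block of the file.
 */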
static int ext4_write_verity_descriptor(struct inode *inode, const void *desc,
					size_t desc_size, u64 merkle_tree_size)
{
	const u64 desc_pos = round_up(ext4_verity_metadata_pos(inode) +
				      merkle_tree_size, i_blocksize(inode));
	const u64 desc_end = desc_pos + desc_size;
	const __le32 desc_size_disk = cpu_to_le32(desc_size);
	const u64 desc_size_pos = round_up(desc_end + sizeof(desc_size_disk),
					   i_blocksize(inode)) -
				  sizeof(desc_size_disk);
	int err;

	err = pagecache_write(inode, desc, desc_size, desc_pos);
	if (err)
		return err;

	return pagecache_write(inode, &desc_size_disk, sizeof(desc_size_disk),
			       desc_size_pos);
}

static int ext4_end_enable_verity(struct file *filp, const void *desc,
				  size_t desc_size, u64 merkle_tree_size)
{
	struct inode *inode = file_inode(filp);
	const int credits = 2; /* superblock and inode for ext4_orphan_del() */
	handle_t *handle;
	struct ext4_iloc iloc;
	int err = 0;

	/*
	 * If an error already occurred (which fs/verity/ signals by passing
	 * desc == NULL), then only clean-up is needed.
	 */
	if (desc == NULL)
		goto cleanup;

	/* Append the verity descriptor. */
	err = ext4_write_verity_descriptor(inode, desc, desc_size,
					   merkle_tree_size);
	if (err)
		goto cleanup;

	/*
	 * Write all pages (both data and verity metadata).  Note that this must
	 * happen before clearing EXT4_STATE_VERITY_IN_PROGRESS; otherwise pages
	 * beyond i_size won't be written properly.  For crash consistency, this
	 * also must happen before the verity inode flag gets persisted.
	 */
	err = filemap_write_and_wait(inode->i_mapping);
	if (err)
		goto cleanup;

	/*
	 * Finally, set the verity inode flag and remove the inode from the
	 * orphan list (in a single transaction).
	 */

	handle = ext4_journal_start(inode, EXT4_HT_INODE, credits);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto cleanup;
	}

	err = ext4_orphan_del(handle, inode);
	if (err)
		goto stop_and_cleanup;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		goto stop_and_cleanup;

	ext4_set_inode_flag(inode, EXT4_INODE_VERITY);
	ext4_set_inode_flags(inode, false);
	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
	if (err)
		goto stop_and_cleanup;

	ext4_journal_stop(handle);

	ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
	return 0;

stop_and_cleanup:
	ext4_journal_stop(handle);
cleanup:
	/*
	 * Verity failed to be enabled, so clean up by truncating any verity
	 * metadata that was written beyond i_size (both from cache and from
	 * disk), removing the inode from the orphan list (if it wasn't done
	 * already), and clearing EXT4_STATE_VERITY_IN_PROGRESS.
	 */
	truncate_inode_pages(inode->i_mapping, inode->i_size);
	ext4_truncate(inode);
	ext4_orphan_del(NULL, inode);
	ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
	return err;
}

static int ext4_get_verity_descriptor_location(struct inode *inode,
					       size_t *desc_size_ret,
					       u64 *desc_pos_ret)
{
	struct ext4_ext_path *path;
	struct ext4_extent *last_extent;
	u32 end_lblk;
	u64 desc_size_pos;
	__le32 desc_size_disk;
	u32 desc_size;
	u64 desc_pos;
	int err;

	/*
	 * Descriptor size is in last 4 bytes of last allocated block.
	 * See ext4_write_verity_descriptor().
	 */
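	/*
	 * Illustrative numbers only, continuing the example given before
	 * ext4_write_verity_descriptor(): if the last extent covers logical
	 * blocks up through block 19 (so end_lblk == 20) on a 4K-block
	 * filesystem, then desc_size_pos ends up as (20 << 12) - 4 == 81916,
	 * which is exactly where the size field was written.
	 */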

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		EXT4_ERROR_INODE(inode, "verity file doesn't use extents");
		return -EFSCORRUPTED;
	}

	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);

	last_extent = path[path->p_depth].p_ext;
	if (!last_extent) {
		EXT4_ERROR_INODE(inode, "verity file has no extents");
		ext4_ext_drop_refs(path);
		kfree(path);
		return -EFSCORRUPTED;
	}

	end_lblk = le32_to_cpu(last_extent->ee_block) +
		   ext4_ext_get_actual_len(last_extent);
	desc_size_pos = (u64)end_lblk << inode->i_blkbits;
	ext4_ext_drop_refs(path);
	kfree(path);

	if (desc_size_pos < sizeof(desc_size_disk))
		goto bad;
	desc_size_pos -= sizeof(desc_size_disk);

	err = pagecache_read(inode, &desc_size_disk, sizeof(desc_size_disk),
			     desc_size_pos);
	if (err)
		return err;
	desc_size = le32_to_cpu(desc_size_disk);

	/*
	 * The descriptor is stored just before the desc_size_disk, but starting
	 * on a filesystem block boundary.
	 */

	if (desc_size > INT_MAX || desc_size > desc_size_pos)
		goto bad;

	desc_pos = round_down(desc_size_pos - desc_size, i_blocksize(inode));
	if (desc_pos < ext4_verity_metadata_pos(inode))
		goto bad;

	*desc_size_ret = desc_size;
	*desc_pos_ret = desc_pos;
	return 0;

bad:
	EXT4_ERROR_INODE(inode, "verity file corrupted; can't find descriptor");
	return -EFSCORRUPTED;
}

static int ext4_get_verity_descriptor(struct inode *inode, void *buf,
				      size_t buf_size)
{
	size_t desc_size = 0;
	u64 desc_pos = 0;
	int err;

	err = ext4_get_verity_descriptor_location(inode, &desc_size, &desc_pos);
	if (err)
		return err;

	if (buf_size) {
		if (desc_size > buf_size)
			return -ERANGE;
		err = pagecache_read(inode, buf, desc_size, desc_pos);
		if (err)
			return err;
	}
	return desc_size;
}

static struct page *ext4_read_merkle_tree_page(struct inode *inode,
					       pgoff_t index,
					       unsigned long num_ra_pages)
{
	struct page *page;

	index += ext4_verity_metadata_pos(inode) >> PAGE_SHIFT;

	page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
	if (!page || !PageUptodate(page)) {
		DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index);

		if (page)
			put_page(page);
		else if (num_ra_pages > 1)
			page_cache_ra_unbounded(&ractl, num_ra_pages, 0);
		page = read_mapping_page(inode->i_mapping, index, NULL);
	}
	return page;
}

static int ext4_write_merkle_tree_block(struct inode *inode, const void *buf,
					u64 index, int log_blocksize)
{
	loff_t pos = ext4_verity_metadata_pos(inode) + (index << log_blocksize);

	return pagecache_write(inode, buf, 1 << log_blocksize, pos);
}

const struct fsverity_operations ext4_verityops = {
	.begin_enable_verity	= ext4_begin_enable_verity,
	.end_enable_verity	= ext4_end_enable_verity,
	.get_verity_descriptor	= ext4_get_verity_descriptor,
	.read_merkle_tree_page	= ext4_read_merkle_tree_page,
	.write_merkle_tree_block = ext4_write_merkle_tree_block,
};