// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for a detailed
 * explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

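/*
 * Properties of the crypto modes supported by blk-crypto.  Note that the
 * 64-byte keysize of AES-256-XTS covers the two concatenated 256-bit AES
 * keys that XTS requires.
 */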
const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
};

/*
 * This number needs to be at least (the number of threads doing I/O
 * concurrently) * (the maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations.  The default is chosen to be the same
 * as the default number of post-read contexts in both ext4 and f2fs.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		"Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

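/*
 * Set an encryption context on @bio, using @key and the DUN of the first data
 * unit covered by the bio.  @gfp_mask must include __GFP_DIRECT_RECLAIM so
 * that the mempool_alloc() below can't fail.  The bio must not already have
 * an encryption context set, since this function doesn't free an existing one.
 */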
void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}

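/* Free the bio's encryption context back to the mempool and clear it. */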
void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

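/*
 * Give @dst a copy of @src's encryption context.  Returns -ENOMEM if the
 * allocation of the new context fails, which is only expected if @gfp_mask
 * doesn't include __GFP_DIRECT_RECLAIM.
 */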
int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}
EXPORT_SYMBOL_GPL(__bio_crypt_clone);

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
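/*
 * For example (illustrative values, assuming BLK_CRYPTO_DUN_ARRAY_SIZE >= 2):
 * incrementing dun = { 0xFFFFFFFFFFFFFFFF, 0x5, ... } by 1 overflows the low
 * limb, so the carry propagates and the result is { 0x0, 0x6, ... }.
 */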
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}

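/*
 * Advance the bio's encryption context by @bytes, i.e. increment the DUN by
 * the number of data units that @bytes spans (e.g. when the bio is advanced
 * during splitting or partial completion).
 */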
void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
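/*
 * For example (illustrative values): with a 512-byte data unit size, a bio
 * covering 1024 bytes (two data units) whose context has bc_dun[0] == 0x10
 * is contiguous with next_dun[0] == 0x12, provided the higher limbs match.
 */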
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

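/*
 * Returns whether the encryption context of @rq is compatible with that of
 * @bio; merging is additionally subject to the DUN contiguity check in
 * bio_crypt_ctx_mergeable().
 */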
bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}

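/*
 * Program the request's encryption key into a keyslot of the device's keyslot
 * manager, and store a reference to that keyslot in the request.
 */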
blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
{
	return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key,
					&rq->crypt_keyslot);
}

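/* Release the keyslot that was acquired for the request. */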
void __blk_crypto_rq_put_keyslot(struct request *rq)
{
	blk_ksm_put_slot(rq->crypt_keyslot);
	rq->crypt_keyslot = NULL;
}

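/* Free the request's encryption context, returning it to the mempool. */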
void __blk_crypto_free_request(struct request *rq)
{
	/* The keyslot, if one was needed, should have been released earlier. */
	if (WARN_ON_ONCE(rq->crypt_keyslot))
		__blk_crypto_rq_put_keyslot(rq);

	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	rq->crypt_ctx = NULL;
}

/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API.  When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio in two: the first continues to be
 * processed, while the second is resubmitted via submit_bio_noacct().  A
 * bounce bio is allocated to encrypt the contents of that first bio, and
 * *bio_ptr is updated to point to the bounce bio.
 *
 * Caller must ensure bio has bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if the device supports the encryption context, or if we
	 * succeeded in falling back to the crypto API.
	 */
	if (blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
					 &bc_key->crypto_cfg))
		return true;

	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}

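/*
 * Initialize the request's encryption context from @bio's, allocating
 * rq->crypt_ctx first if the request doesn't already have one.  Returns
 * -ENOMEM if that allocation fails.
 */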
int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
 *	     @crypto_mode; see blk_crypto_modes[].
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure.  The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	mode = &blk_crypto_modes[crypto_mode];
	if (mode->keysize == 0)
		return -EINVAL;

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = mode->keysize;
	memcpy(blk_key->raw, raw_key, mode->keysize);

	return 0;
}

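/*
 * Example usage (an illustrative sketch, not taken from an in-tree caller):
 * an upper layer would typically prepare and start using a key roughly as
 * follows, where "raw_key" is assumed to be a caller-provided 64-byte buffer
 * and "q" the target request_queue:
 *
 *	struct blk_crypto_key blk_key;
 *	int err;
 *
 *	err = blk_crypto_init_key(&blk_key, raw_key,
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS,
 *				  8, 4096);
 *	if (!err)
 *		err = blk_crypto_start_using_key(&blk_key, q);
 *
 * The 8-byte DUN length and 4096-byte data unit size are arbitrary example
 * values; they must satisfy the checks in blk_crypto_init_key().
 */
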
/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * request queue it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct request_queue *q,
				 const struct blk_crypto_config *cfg)
{
	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
	       blk_ksm_crypto_cfg_supported(q->ksm, cfg);
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @key: A key to use on the device
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go.  This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(const struct blk_crypto_key *key,
			       struct request_queue *q)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return 0;
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}

/**
 * blk_crypto_evict_key() - Evict a blk_crypto_key from a request_queue
 * @q: a request_queue on which I/O using the key may have been done
 * @key: the key to evict
 *
 * For a given request_queue, this function removes the given blk_crypto_key
 * from the keyslot management structures and evicts it from any underlying
 * hardware keyslot(s) or blk-crypto-fallback keyslot it may have been
 * programmed into.
 *
 * Upper layers must call this before freeing the blk_crypto_key.  It must be
 * called for every request_queue the key may have been used on.  The key must
 * no longer be in use by any I/O when this function is called.
 *
 * Context: May sleep.
 */
void blk_crypto_evict_key(struct request_queue *q,
			  const struct blk_crypto_key *key)
{
	int err;

	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		err = blk_ksm_evict_key(q->ksm, key);
	else
		err = blk_crypto_fallback_evict_key(key);
	/*
	 * An error can only occur here if the key failed to be evicted from a
	 * keyslot (due to a hardware or driver issue) or is allegedly still in
	 * use by I/O (due to a kernel bug).  Even in these cases, the key is
	 * still unlinked from the keyslot management structures, and the caller
	 * is allowed and expected to free it right away.  There's nothing
	 * callers can do to handle errors, so just log them and return void.
	 */
	if (err)
		pr_warn_ratelimited("error %d evicting key\n", err);
}