// SPDX-License-Identifier: GPL-2.0+
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2017
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *		Patrick Steuer <patrick.steuer@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

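/*
 * ctrblk is a single shared page used to feed the KMCTR instruction
 * with a run of precomputed counter blocks; concurrent users are
 * serialized by ctrblk_lock. The cpacf_mask_t variables cache the
 * function codes reported by the CPACF query instructions at module
 * init time.
 */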
static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;	/* CPACF function code, 0 if unavailable */
	union {
		struct crypto_skcipher *skcipher;
		struct crypto_cipher *cip;
	} fallback;		/* SW fallback tfm, used if fc is 0 */
};

struct s390_xts_ctx {
	u8 key[32];		/* first half of the XTS key */
	u8 pcc_key[32];		/* second half, used by PCC for the tweak */
	int key_len;		/* length of one subkey in bytes */
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

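/*
 * State for walking the src/dst scatterlists of an AEAD request. Data
 * is handed out in place where a mapped scatterlist entry is large
 * enough; otherwise partial blocks are gathered into (or scattered out
 * of) the AES_BLOCK_SIZE buffer buf.
 */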
struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
		unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}
static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-s390",
	.cra_priority		=	300,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_module		=	THIS_MODULE,
	.cra_init		=	fallback_init_cip,
	.cra_exit		=	fallback_exit_cip,
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	crypto_aes_encrypt,
			.cia_decrypt		=	crypto_aes_decrypt,
		}
	}
};

static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				    unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
}

static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
				   struct skcipher_request *req,
				   unsigned long modifier)
{
	struct skcipher_request *subreq = skcipher_request_ctx(req);

	*subreq = *req;
	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
	return (modifier & CPACF_DECRYPT) ?
		crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
}
static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int ecb_aes_encrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, 0);
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, CPACF_DECRYPT);
}

static int fallback_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(sctx->fallback.skcipher)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.skcipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
	return 0;
}

static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.skcipher);
}
static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name		=	"ecb(aes)",
	.base.cra_driver_name	=	"ecb-aes-s390",
	.base.cra_priority	=	401,	/* combo: aes + ecb + 1 */
	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.init			=	fallback_init_skcipher,
	.exit			=	fallback_exit_skcipher,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.setkey			=	ecb_aes_set_key,
	.encrypt		=	ecb_aes_encrypt,
	.decrypt		=	ecb_aes_decrypt,
};

static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;
	/* KMC parameter block: chaining value (IV) followed by the key */
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk.dst.virt.addr, walk.src.virt.addr, n);
		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, 0);
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name		=	"cbc(aes)",
	.base.cra_driver_name	=	"cbc-aes-s390",
	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.init			=	fallback_init_skcipher,
	.exit			=	fallback_exit_skcipher,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	cbc_aes_set_key,
	.encrypt		=	cbc_aes_encrypt,
	.decrypt		=	cbc_aes_decrypt,
};

static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
}

static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* In fips mode only 128 bit or 256 bit keys are valid */
	if (fips_enabled && key_len != 32 && key_len != 64)
		return -EINVAL;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}

static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int offset, nbytes, n;
	int ret;
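	/*
	 * On-stack parameter blocks: pcc_param is handed to the PCC
	 * instruction to compute the initial XTS tweak, xts_param is the
	 * key/tweak block handed to KM for the actual en-/decryption.
	 */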
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	/* XTS mode needs at least one full block */
	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
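	/*
	 * The parameter block for the XTS-128 function codes is 16 bytes
	 * shorter than for XTS-256: offsetting into the 32 byte key field
	 * right-aligns the 128 bit key so that it is directly followed by
	 * the tweak resp. the initial XTS parameter.
	 */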
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&pcc_param, sizeof(pcc_param));
	memzero_explicit(&xts_param, sizeof(xts_param));
	return ret;
}

static int xts_aes_encrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, 0);
}

static int xts_aes_decrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, CPACF_DECRYPT);
}

static int xts_fallback_init(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(xts_ctx->fallback));
	return 0;
}

static void xts_fallback_exit(struct crypto_skcipher *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct skcipher_alg xts_aes_alg = {
	.base.cra_name		=	"xts(aes)",
	.base.cra_driver_name	=	"xts-aes-s390",
	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_xts_ctx),
	.base.cra_module	=	THIS_MODULE,
	.init			=	xts_fallback_init,
	.exit			=	xts_fallback_exit,
	.min_keysize		=	2 * AES_MIN_KEY_SIZE,
	.max_keysize		=	2 * AES_MAX_KEY_SIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	xts_aes_set_key,
	.encrypt		=	xts_aes_encrypt,
	.decrypt		=	xts_aes_decrypt,
};

static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

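/*
 * Fill the ctrblk page with consecutive counter blocks derived from
 * the current IV. Returns the number of bytes (a multiple of
 * AES_BLOCK_SIZE, at most PAGE_SIZE) that can be processed with one
 * KMCTR invocation.
 */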
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

static int ctr_aes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct skcipher_walk walk;
	unsigned int n, nbytes;
	int ret, locked;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, 0);

	/* if the shared ctrblk page is busy, process one block at a time */
	locked = mutex_trylock(&ctrblk_lock);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;

		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
			    walk.src.virt.addr, n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		memset(buf, 0, AES_BLOCK_SIZE);
		memcpy(buf, walk.src.virt.addr, nbytes);
		cpacf_kmctr(sctx->fc, sctx->key, buf, buf,
			    AES_BLOCK_SIZE, walk.iv);
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, 0);
	}

	return ret;
}

static struct skcipher_alg ctr_aes_alg = {
	.base.cra_name		=	"ctr(aes)",
	.base.cra_driver_name	=	"ctr-aes-s390",
	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	=	1,
	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.init			=	fallback_init_skcipher,
	.exit			=	fallback_exit_skcipher,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	ctr_aes_set_key,
	.encrypt		=	ctr_aes_crypt,
	.decrypt		=	ctr_aes_crypt,
	.chunksize		=	AES_BLOCK_SIZE,
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

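/*
 * Accept the same tag lengths as the generic gcm(aes) implementation:
 * 32 and 64 bit truncated tags plus 96 to 128 bit tags.
 */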
static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	struct scatterlist *nextsg;

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	while (!gw->walk_bytes) {
		nextsg = sg_next(gw->walk.sg);
		if (!nextsg)
			return 0;
		scatterwalk_start(&gw->walk, nextsg);
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);
	return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes)
{
	gw->walk_bytes_remain -= nbytes;
	scatterwalk_unmap(gw->walk_ptr);
	scatterwalk_advance(&gw->walk, nbytes);
	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	gw->walk_ptr = NULL;
}

static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		_gcm_sg_unmap_and_advance(gw, n);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
	}

out:
	return gw->nbytes;
}

static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(gw->walk_ptr);
	gw->walk_ptr = NULL;

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		int n = gw->buf_bytes - bytesdone;
		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i;
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk_ptr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	/* J0 = IV || 0^31 || 1 for the fixed 96 bit IV */
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey			= gcm_aes_setkey,
	.setauthsize		= gcm_aes_setauthsize,
	.encrypt		= gcm_aes_encrypt,
	.decrypt		= gcm_aes_decrypt,

	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize		= GHASH_DIGEST_SIZE,
	.chunksize		= AES_BLOCK_SIZE,

	.base			= {
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
		.cra_priority		= 900,
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-s390",
		.cra_module		= THIS_MODULE,
	},
};

static struct crypto_alg *aes_s390_alg;
static struct skcipher_alg *aes_s390_skcipher_algs[4];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_skcipher(struct skcipher_alg *alg)
{
	int ret;

	ret = crypto_register_skcipher(alg);
	if (!ret)
		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	if (aes_s390_alg)
		crypto_unregister_alg(aes_s390_alg);
	while (aes_s390_skciphers_num--)
		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}

static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = crypto_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		aes_s390_alg = &aes_alg;
		ret = aes_s390_register_skcipher(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_skcipher(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_skcipher(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_skcipher(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");