// SPDX-License-Identifier: GPL-2.0-only
/**
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

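/*
 * Program both co-processor control blocks with the cipher key: the CCM
 * CPB used for the encrypt/decrypt passes and the CCA CPB used to hash
 * large associated data. Only 128-bit AES keys are accepted here.
 */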
static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
	memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
	memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

	return 0;
}

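/*
 * RFC 4309 keys carry a trailing 3-byte nonce (salt) after the AES key
 * material; strip it off, stash it for building per-request counter
 * blocks, and hand the remaining bytes to the plain CCM setkey.
 */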
static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

	if (key_len < 3)
		return -EINVAL;

	key_len -= 3;

	memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

	return ccm_aes_nx_set_key(tfm, in_key, key_len);
}

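/*
 * CCM (NIST SP 800-38C) allows any even tag length from 4 to 16 bytes.
 */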
static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

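/*
 * RFC 4309 restricts the ICV to 8, 12 or 16 bytes.
 */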
static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* taken from crypto/ccm.c */
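/*
 * Encode msglen big-endian into the trailing csize bytes of the block
 * (the length field of B0), returning -EOVERFLOW if it does not fit.
 */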
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

/* taken from crypto/ccm.c */
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (1 > iv[0] || iv[0] > 7)
		return -EINVAL;

	return 0;
}

/* based on code from crypto/ccm.c */
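/*
 * Build the B0 block: start from the caller's counter block (flags byte
 * plus nonce), fold the encoded tag length into the flags byte, set the
 * Adata bit when associated data is present, and append the message
 * length in the last L bytes.
 */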
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
		       unsigned int cryptlen, u8 *b0)
{
	unsigned int l, lp, m = authsize;
	int rc;

	memcpy(b0, iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (assoclen)
		*b0 |= 64;

	rc = set_msg_len(b0 + 16 - l, cryptlen, l);

	return rc;
}

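/*
 * Generate the initial Partial Authentication Tag that seeds the CCM MAC:
 * build B0 (and B1 when associated data is present) and, for AAD that does
 * not fit in B1, hash the remainder through the AES-CCA co-processor
 * operation before returning the 16-byte result for use as in_pat_or_b0.
 */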
static int generate_pat(u8                   *iv,
			struct aead_request  *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int          authsize,
			unsigned int          nbytes,
			unsigned int          assoclen,
			u8                   *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;
	unsigned int max_sg_len;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* page 78 of nx_wb.pdf has,
	 * Note: RFC3610 allows the AAD data to be up to 2^64 - 1 bytes
	 * in length. If a full message is used, the AES CCA implementation
	 * restricts the maximum AAD length to 2^32 - 1 bytes.
	 * If partial messages are used, the implementation supports
	 * 2^64 - 1 bytes maximum AAD length.
	 *
	 * However, in the cryptoapi's aead_request structure,
	 * assoclen is an unsigned int, thus it cannot hold a length
	 * value greater than 2^32 - 1.
	 * Thus the AAD is further constrained by this and is never
	 * greater than 2^32.
	 */

	if (!assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do 1 CCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = assoclen;
	} else if (assoclen <= 65280) {
		/* if associated data is less than (2^16 - 2^8), we construct
		 * B1 differently and feed in the associated data to a CCA
		 * operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
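		/* 2^16 - 2^8 bytes of AAD or more: RFC 3610 encodes the length
		 * in B1 as the 0xff,0xfe marker followed by a 32-bit value,
		 * which leaves room for only 10 bytes of AAD in B1 itself */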
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}

	/* generate B0 */
	rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;

	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (b1) {
		memset(b1, 0, 16);
		if (assoclen <= 65280) {
			*(u16 *)b1 = assoclen;
			scatterwalk_map_and_copy(b1 + 2, req->src, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = assoclen;
			scatterwalk_map_and_copy(b1 + 6, req->src, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
		}
	}

	/* now copy any remaining AAD to scatterlist and call nx... */
	if (!assoclen) {
		return rc;
	} else if (assoclen <= 14) {
		unsigned int len = 16;

		nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
					    nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		/* inlen should be negative, indicating to phyp that it's a
		 * pointer to an sg list */
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);

	} else {
		unsigned int processed = 0, to_process;

		processed += iauth_len;

		/* page_limit: number of sg entries that fit on one page */
		max_sg_len = min_t(u64, nx_ctx->ap->sglen,
				   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
		max_sg_len = min_t(u64, max_sg_len,
				   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

		do {
			to_process = min_t(u32, assoclen - processed,
					   nx_ctx->ap->databytelen);

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    nx_ctx->ap->sglen,
						    req->src, processed,
						    &to_process);

			if ((to_process + processed) < assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}

			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
					   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
			       nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
			       AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);

			processed += to_process;
		} while (processed < assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}

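/*
 * CCM decrypt: stash the received auth tag, run generate_pat() to seed the
 * MAC, then decrypt the payload, looping if the scatter/gather limits force
 * multiple passes and carrying the counter, partial tag and S0 across
 * iterations; finally compare the computed MAC against the saved tag with
 * crypto_memneq() to avoid a timing side channel.
 */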
static int ccm_nx_decrypt(struct aead_request   *req,
			  u8                    *iv,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	nbytes -= authsize;

	/* copy out the auth tag to compare with later */
	scatterwalk_map_and_copy(priv->oauth_tag,
				 req->src, nbytes + req->assoclen, authsize,
				 SCATTERWALK_FROM_SG);

	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
			   authsize) ? -EBADMSG : 0;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

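/*
 * CCM encrypt: run generate_pat() to seed the MAC, encrypt the payload
 * (looping if the scatter/gather limits force multiple passes, carrying the
 * counter, partial tag and S0 across iterations), then append the auth tag
 * after the ciphertext in the destination scatterlist.
 */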
static int ccm_nx_encrypt(struct aead_request   *req,
			  u8                    *iv,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes + req->assoclen, authsize,
				 SCATTERWALK_TO_SG);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

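/*
 * rfc4309(ccm(aes)): build the counter block from a flags byte of 3 (so
 * L = 4), the 3-byte salt saved at setkey time and the 8-byte per-request
 * IV. req->assoclen counts those 8 IV bytes as well, hence the subtraction.
 */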
static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	u8 *iv = rctx->iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	return ccm_nx_encrypt(req, iv, req->assoclen - 8);
}

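/*
 * Plain ccm(aes): the caller supplies the full 16-byte counter block as
 * the IV; validate its L' field before use.
 */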
static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	int rc;

	rc = crypto_ccm_check_iv(req->iv);
	if (rc)
		return rc;

	return ccm_nx_encrypt(req, req->iv, req->assoclen);
}

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	u8 *iv = rctx->iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	return ccm_nx_decrypt(req, iv, req->assoclen - 8);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
	int rc;

	rc = crypto_ccm_check_iv(req->iv);
	if (rc)
		return rc;

	return ccm_nx_decrypt(req, req->iv, req->assoclen);
}

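/*
 * AEAD algorithm descriptors for the ccm(aes) and rfc4309(ccm(aes)) modes
 * implemented by this file.
 */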
struct aead_alg nx_ccm_aes_alg = {
	.base = {
		.cra_name        = "ccm(aes)",
		.cra_driver_name = "ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = AES_BLOCK_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm_aes_nx_set_key,
	.setauthsize = ccm_aes_nx_setauthsize,
	.encrypt     = ccm_aes_nx_encrypt,
	.decrypt     = ccm_aes_nx_decrypt,
};

struct aead_alg nx_ccm4309_aes_alg = {
	.base = {
		.cra_name        = "rfc4309(ccm(aes))",
		.cra_driver_name = "rfc4309-ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 8,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm4309_aes_nx_set_key,
	.setauthsize = ccm4309_aes_nx_setauthsize,
	.encrypt     = ccm4309_aes_nx_encrypt,
	.decrypt     = ccm4309_aes_nx_decrypt,
};