// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx2_cptvf.h"
#include "otx2_cptvf_algs.h"
#include "otx2_cpt_reqmgr.h"

/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE 4
/* Size of IV in AES GCM mode */
#define AES_GCM_IV_SIZE 8
/* Size of ICV (Integrity Check Value) in AES GCM mode */
#define AES_GCM_ICV_SIZE 16
/* Offset of IV in AES GCM mode */
#define AES_GCM_IV_OFFSET 8
#define CONTROL_WORD_LEN 8
#define KEY2_OFFSET 48
#define DMA_MODE_FLAG(dma_mode) \
	(((dma_mode) == OTX2_CPT_DMA_MODE_SG) ? (1 << 7) : 0)

/* Truncated SHA digest size */
#define SHA1_TRUNC_DIGEST_SIZE 12
#define SHA256_TRUNC_DIGEST_SIZE 16
#define SHA384_TRUNC_DIGEST_SIZE 24
#define SHA512_TRUNC_DIGEST_SIZE 32

static DEFINE_MUTEX(mutex);
static int is_crypto_registered;

struct cpt_device_desc {
	struct pci_dev *dev;
	int num_queues;
};

struct cpt_device_table {
	atomic_t count;
	struct cpt_device_desc desc[OTX2_CPT_MAX_LFS_NUM];
};

static struct cpt_device_table se_devices = {
	.count = ATOMIC_INIT(0)
};

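/* Pick the SE device and map the current CPU to one of its queues. */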
static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
	int count;

	count = atomic_read(&se_devices.count);
	if (count < 1)
		return -ENODEV;

	*cpu_num = get_cpu();
	/*
	 * On the OcteonTX2 platform a CPT instruction queue is bound to
	 * each local function (LF), and LFs can be attached to a PF or a
	 * VF, therefore we always use the first device. We get maximum
	 * performance if one CPT queue is available for each CPU,
	 * otherwise CPT queues have to be shared between CPUs.
	 */
	if (*cpu_num >= se_devices.desc[0].num_queues)
		*cpu_num %= se_devices.desc[0].num_queues;
	*pdev = se_devices.desc[0].dev;

	put_cpu();

	return 0;
}

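/*
 * Compare the HMAC calculated by the CPT with the HMAC received in the
 * request; used when the selected cipher is NULL.
 */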
static inline int validate_hmac_cipher_null(struct otx2_cpt_req_info *cpt_req)
{
	struct otx2_cpt_req_ctx *rctx;
	struct aead_request *req;
	struct crypto_aead *tfm;

	req = container_of(cpt_req->areq, struct aead_request, base);
	tfm = crypto_aead_reqtfm(req);
	rctx = aead_request_ctx_dma(req);
	if (memcmp(rctx->fctx.hmac.s.hmac_calc,
		   rctx->fctx.hmac.s.hmac_recv,
		   crypto_aead_authsize(tfm)) != 0)
		return -EBADMSG;

	return 0;
}

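/* Completion callback for AEAD requests submitted to the CPT. */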
static void otx2_cpt_aead_callback(int status, void *arg1, void *arg2)
{
	struct otx2_cpt_inst_info *inst_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct otx2_cpt_req_info *cpt_req;
	struct pci_dev *pdev;

	if (inst_info) {
		cpt_req = inst_info->req;
		if (!status) {
			/*
			 * When the selected cipher is NULL we need to manually
			 * verify that the calculated HMAC value matches the
			 * received HMAC value.
			 */
			if (cpt_req->req_type ==
			    OTX2_CPT_AEAD_ENC_DEC_NULL_REQ &&
			    !cpt_req->is_enc)
				status = validate_hmac_cipher_null(cpt_req);
		}
		pdev = inst_info->pdev;
		otx2_cpt_info_destroy(pdev, inst_info);
	}
	if (areq)
		crypto_request_complete(areq, status);
}

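/*
 * Copy the last ciphertext block back into the request IV for CBC modes,
 * as expected by the skcipher API.
 */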
static void output_iv_copyback(struct crypto_async_request *areq)
{
	struct otx2_cpt_req_info *req_info;
	struct otx2_cpt_req_ctx *rctx;
	struct skcipher_request *sreq;
	struct crypto_skcipher *stfm;
	struct otx2_cpt_enc_ctx *ctx;
	u32 start, ivsize;

	sreq = container_of(areq, struct skcipher_request, base);
	stfm = crypto_skcipher_reqtfm(sreq);
	ctx = crypto_skcipher_ctx(stfm);
	if (ctx->cipher_type == OTX2_CPT_AES_CBC ||
	    ctx->cipher_type == OTX2_CPT_DES3_CBC) {
		rctx = skcipher_request_ctx_dma(sreq);
		req_info = &rctx->cpt_req;
		ivsize = crypto_skcipher_ivsize(stfm);
		start = sreq->cryptlen - ivsize;

		if (req_info->is_enc) {
			scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
						 ivsize, 0);
		} else {
			if (sreq->src != sreq->dst) {
				scatterwalk_map_and_copy(sreq->iv, sreq->src,
							 start, ivsize, 0);
			} else {
				memcpy(sreq->iv, req_info->iv_out, ivsize);
				kfree(req_info->iv_out);
			}
		}
	}
}

static void otx2_cpt_skcipher_callback(int status, void *arg1, void *arg2)
{
	struct otx2_cpt_inst_info *inst_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct pci_dev *pdev;

	if (areq) {
		if (!status)
			output_iv_copyback(areq);
		if (inst_info) {
			pdev = inst_info->pdev;
			otx2_cpt_info_destroy(pdev, inst_info);
		}
		crypto_request_complete(areq, status);
	}
}

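/*
 * Append the scatterlist segments covering @nbytes of input data to the
 * request's input fragment list.
 */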
static inline void update_input_data(struct otx2_cpt_req_info *req_info,
				     struct scatterlist *inp_sg,
				     u32 nbytes, u32 *argcnt)
{
	req_info->req.dlen += nbytes;

	while (nbytes) {
		u32 len = (nbytes < inp_sg->length) ? nbytes : inp_sg->length;
		u8 *ptr = sg_virt(inp_sg);

		req_info->in[*argcnt].vptr = (void *)ptr;
		req_info->in[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		inp_sg = sg_next(inp_sg);
	}
}

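/*
 * Append the scatterlist segments covering @nbytes of output data, starting
 * at @offset within the first segment, to the request's output fragment list.
 */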
static inline void update_output_data(struct otx2_cpt_req_info *req_info,
				      struct scatterlist *outp_sg,
				      u32 offset, u32 nbytes, u32 *argcnt)
{
	u32 len, sg_len;
	u8 *ptr;

	req_info->rlen += nbytes;

	while (nbytes) {
		sg_len = outp_sg->length - offset;
		len = (nbytes < sg_len) ? nbytes : sg_len;
		ptr = sg_virt(outp_sg);

		req_info->out[*argcnt].vptr = (void *) (ptr + offset);
		req_info->out[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		offset = 0;
		outp_sg = sg_next(outp_sg);
	}
}

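/* Build the control word and FC context header for an skcipher request. */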
static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
				 u32 *argcnt)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
	int ivsize = crypto_skcipher_ivsize(stfm);
	u32 start = req->cryptlen - ivsize;
	gfp_t flags;

	flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC;
	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
	req_info->ctrl.s.se_req = 1;

	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
				DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
	if (enc) {
		req_info->req.opcode.s.minor = 2;
	} else {
		req_info->req.opcode.s.minor = 3;
		if ((ctx->cipher_type == OTX2_CPT_AES_CBC ||
		    ctx->cipher_type == OTX2_CPT_DES3_CBC) &&
		    req->src == req->dst) {
			req_info->iv_out = kmalloc(ivsize, flags);
			if (!req_info->iv_out)
				return -ENOMEM;

			scatterwalk_map_and_copy(req_info->iv_out, req->src,
						 start, ivsize, 0);
		}
	}
	/* Encryption data length */
	req_info->req.param1 = req->cryptlen;
	/* Authentication data length */
	req_info->req.param2 = 0;

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;

	if (ctx->cipher_type == OTX2_CPT_AES_XTS)
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
	else
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);

	memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));

	cpu_to_be64s(&fctx->enc.enc_ctrl.u);

	/*
	 * Store the packet data information in the offset
	 * control word (first 8 bytes of the input).
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);

	++(*argcnt);

	return 0;
}

static inline int create_input_list(struct skcipher_request *req, u32 enc,
				    u32 enc_iv_len)
{
	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;
	int ret;

	ret = create_ctx_hdr(req, enc, &argcnt);
	if (ret)
		return ret;

	update_input_data(req_info, req->src, req->cryptlen, &argcnt);
	req_info->in_cnt = argcnt;

	return 0;
}

static inline void create_output_list(struct skcipher_request *req,
				      u32 enc_iv_len)
{
	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;

	/*
	 * OUTPUT buffer processing:
	 * AES encryption/decryption output is received in the
	 * following format:
	 *
	 * |------ IV ------|---- ENCRYPTED/DECRYPTED DATA ----|
	 * |    16 bytes    |    requested enc/dec data len    |
	 */
	update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
	req_info->out_cnt = argcnt;
}

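/*
 * Hand the request over to the software fallback tfm, used when the
 * request cannot be handled by the hardware.
 */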
static int skcipher_do_fallback(struct skcipher_request *req, bool is_enc)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
	int ret;

	if (ctx->fbk_cipher) {
		skcipher_request_set_tfm(&rctx->sk_fbk_req, ctx->fbk_cipher);
		skcipher_request_set_callback(&rctx->sk_fbk_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->sk_fbk_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		ret = is_enc ? crypto_skcipher_encrypt(&rctx->sk_fbk_req) :
			       crypto_skcipher_decrypt(&rctx->sk_fbk_req);
	} else {
		ret = -EINVAL;
	}
	return ret;
}

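/*
 * Common skcipher encrypt/decrypt path: build the input/output lists and
 * submit the request to a CPT LF.
 */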
static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
	struct pci_dev *pdev;
	int status, cpu_num;

	if (req->cryptlen == 0)
		return 0;

	if (!IS_ALIGNED(req->cryptlen, ctx->enc_align_len))
		return -EINVAL;

	if (req->cryptlen > OTX2_CPT_MAX_REQ_SIZE)
		return skcipher_do_fallback(req, enc);

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.u = 0;

	status = create_input_list(req, enc, enc_iv_len);
	if (status)
		return status;
	create_output_list(req, enc_iv_len);

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->callback = otx2_cpt_skcipher_callback;
	req_info->areq = &req->base;
	req_info->req_type = OTX2_CPT_ENC_DEC_REQ;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;
	req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);

	/*
	 * We perform an asynchronous send and once
	 * the request is completed the driver notifies us
	 * through the registered callback functions.
	 */
	status = otx2_cpt_do_request(pdev, req_info, cpu_num);

	return status;
}

static int otx2_cpt_skcipher_encrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, true);
}

static int otx2_cpt_skcipher_decrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, false);
}

static int otx2_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, u32 keylen)
{
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
	const u8 *key2 = key + (keylen / 2);
	const u8 *key1 = key;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;
	ctx->key_len = keylen;
	ctx->enc_align_len = 1;
	memcpy(ctx->enc_key, key1, keylen / 2);
	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
	ctx->cipher_type = OTX2_CPT_AES_XTS;
	switch (ctx->key_len) {
	case 2 * AES_KEYSIZE_128:
		ctx->key_type = OTX2_CPT_AES_128_BIT;
		break;
	case 2 * AES_KEYSIZE_192:
		ctx->key_type = OTX2_CPT_AES_192_BIT;
		break;
	case 2 * AES_KEYSIZE_256:
		ctx->key_type = OTX2_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}
	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
}

static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;
	ctx->enc_align_len = 8;

	memcpy(ctx->enc_key, key, keylen);

	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
}

static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX2_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX2_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX2_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}
	if (cipher_type == OTX2_CPT_AES_CBC || cipher_type == OTX2_CPT_AES_ECB)
		ctx->enc_align_len = 16;
	else
		ctx->enc_align_len = 1;

	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;

	memcpy(ctx->enc_key, key, keylen);

	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
}

static int otx2_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_CBC);
}

static int otx2_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_ECB);
}

static int otx2_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
					     const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_CBC);
}

static int otx2_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
					     const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_ECB);
}

static int cpt_skcipher_fallback_init(struct otx2_cpt_enc_ctx *ctx,
				      struct crypto_alg *alg)
{
	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		ctx->fbk_cipher =
				crypto_alloc_skcipher(alg->cra_name, 0,
						      CRYPTO_ALG_ASYNC |
						      CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->fbk_cipher)) {
			pr_err("%s() failed to allocate fallback for %s\n",
				__func__, alg->cra_name);
			return PTR_ERR(ctx->fbk_cipher);
		}
	}
	return 0;
}

static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
{
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct crypto_alg *alg = tfm->__crt_alg;

	memset(ctx, 0, sizeof(*ctx));
	/*
	 * Additional memory for skcipher_request is
	 * allocated since the cryptd daemon uses
	 * this memory for request_ctx information
	 */
	crypto_skcipher_set_reqsize_dma(
		stfm, sizeof(struct otx2_cpt_req_ctx) +
		      sizeof(struct skcipher_request));

	return cpt_skcipher_fallback_init(ctx, alg);
}

static void otx2_cpt_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->fbk_cipher) {
		crypto_free_skcipher(ctx->fbk_cipher);
		ctx->fbk_cipher = NULL;
	}
}

static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx,
				  struct crypto_alg *alg)
{
	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		ctx->fbk_cipher =
			    crypto_alloc_aead(alg->cra_name, 0,
					      CRYPTO_ALG_ASYNC |
					      CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->fbk_cipher)) {
			pr_err("%s() failed to allocate fallback for %s\n",
				__func__, alg->cra_name);
			return PTR_ERR(ctx->fbk_cipher);
		}
	}
	return 0;
}

static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(atfm);
	struct crypto_tfm *tfm = crypto_aead_tfm(atfm);
	struct crypto_alg *alg = tfm->__crt_alg;

	ctx->cipher_type = cipher_type;
	ctx->mac_type = mac_type;

	/*
	 * When the selected cipher is NULL we use the HMAC opcode instead of
	 * the FLEXICRYPTO opcode, therefore we don't need a hash algorithm
	 * for calculating ipad and opad.
	 */
	if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL) {
		switch (ctx->mac_type) {
		case OTX2_CPT_SHA1:
			ctx->hashalg = crypto_alloc_shash("sha1", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX2_CPT_SHA256:
			ctx->hashalg = crypto_alloc_shash("sha256", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX2_CPT_SHA384:
			ctx->hashalg = crypto_alloc_shash("sha384", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX2_CPT_SHA512:
			ctx->hashalg = crypto_alloc_shash("sha512", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;
		}
	}
	switch (ctx->cipher_type) {
	case OTX2_CPT_AES_CBC:
	case OTX2_CPT_AES_ECB:
		ctx->enc_align_len = 16;
		break;
	case OTX2_CPT_DES3_CBC:
	case OTX2_CPT_DES3_ECB:
		ctx->enc_align_len = 8;
		break;
	case OTX2_CPT_AES_GCM:
	case OTX2_CPT_CIPHER_NULL:
		ctx->enc_align_len = 1;
		break;
	}
	crypto_aead_set_reqsize_dma(atfm, sizeof(struct otx2_cpt_req_ctx));

	return cpt_aead_fallback_init(ctx, alg);
}

static int otx2_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA1);
}

static int otx2_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA256);
}

static int otx2_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA384);
}

static int otx2_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA512);
}

static int otx2_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA1);
}

static int otx2_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA256);
}

static int otx2_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA384);
}

static int otx2_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA512);
}

static int otx2_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_AES_GCM, OTX2_CPT_MAC_NULL);
}

static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

	kfree(ctx->ipad);
	kfree(ctx->opad);
	if (ctx->hashalg)
		crypto_free_shash(ctx->hashalg);
	kfree(ctx->sdesc);

	if (ctx->fbk_cipher) {
		crypto_free_aead(ctx->fbk_cipher);
		ctx->fbk_cipher = NULL;
	}
}

static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm,
					  unsigned int authsize)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

	if (crypto_rfc4106_check_authsize(authsize))
		return -EINVAL;

	tfm->authsize = authsize;
	/* Set authsize for fallback case */
	if (ctx->fbk_cipher)
		ctx->fbk_cipher->authsize = authsize;

	return 0;
}

static int otx2_cpt_aead_set_authsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	tfm->authsize = authsize;

	return 0;
}

static int otx2_cpt_aead_null_set_authsize(struct crypto_aead *tfm,
					   unsigned int authsize)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

	ctx->is_trunc_hmac = true;
	tfm->authsize = authsize;

	return 0;
}

static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
{
	struct otx2_cpt_sdesc *sdesc;
	int size;

	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
	sdesc = kmalloc(size, GFP_KERNEL);
	if (!sdesc)
		return NULL;

	sdesc->shash.tfm = alg;

	return sdesc;
}

static inline void swap_data32(void *buf, u32 len)
{
	cpu_to_be32_array(buf, buf, len / 4);
}

static inline void swap_data64(void *buf, u32 len)
{
	u64 *src = buf;
	int i = 0;

	for (i = 0 ; i < len / 8; i++, src++)
		cpu_to_be64s(src);
}

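/*
 * Copy the byte-swapped partial hash state into the pad buffer used by
 * the hardware.
 */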
static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
{
	struct sha512_state *sha512;
	struct sha256_state *sha256;
	struct sha1_state *sha1;

	switch (mac_type) {
	case OTX2_CPT_SHA1:
		sha1 = (struct sha1_state *) in_pad;
		swap_data32(sha1->state, SHA1_DIGEST_SIZE);
		memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
		break;

	case OTX2_CPT_SHA256:
		sha256 = (struct sha256_state *) in_pad;
		swap_data32(sha256->state, SHA256_DIGEST_SIZE);
		memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
		break;

	case OTX2_CPT_SHA384:
	case OTX2_CPT_SHA512:
		sha512 = (struct sha512_state *) in_pad;
		swap_data64(sha512->state, SHA512_DIGEST_SIZE);
		memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

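/* Precompute the HMAC ipad and opad partial hashes for the authenc key. */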
static int aead_hmac_init(struct crypto_aead *cipher)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
	int state_size = crypto_shash_statesize(ctx->hashalg);
	int ds = crypto_shash_digestsize(ctx->hashalg);
	int bs = crypto_shash_blocksize(ctx->hashalg);
	int authkeylen = ctx->auth_key_len;
	u8 *ipad = NULL, *opad = NULL;
	int ret = 0, icount = 0;

	ctx->sdesc = alloc_sdesc(ctx->hashalg);
	if (!ctx->sdesc)
		return -ENOMEM;

	ctx->ipad = kzalloc(bs, GFP_KERNEL);
	if (!ctx->ipad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	ctx->opad = kzalloc(bs, GFP_KERNEL);
	if (!ctx->opad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	ipad = kzalloc(state_size, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	opad = kzalloc(state_size, GFP_KERNEL);
	if (!opad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	if (authkeylen > bs) {
		ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
					  authkeylen, ipad);
		if (ret)
			goto calc_fail;

		authkeylen = ds;
	} else {
		memcpy(ipad, ctx->key, authkeylen);
	}

	memset(ipad + authkeylen, 0, bs - authkeylen);
	memcpy(opad, ipad, bs);

	for (icount = 0; icount < bs; icount++) {
		ipad[icount] ^= 0x36;
		opad[icount] ^= 0x5c;
	}

	/*
	 * The partial hash calculated by the software hash
	 * algorithm is exported for IPAD and OPAD.
	 */

	/* IPAD Calculation */
	crypto_shash_init(&ctx->sdesc->shash);
	crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
	crypto_shash_export(&ctx->sdesc->shash, ipad);
	ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
	if (ret)
		goto calc_fail;

	/* OPAD Calculation */
	crypto_shash_init(&ctx->sdesc->shash);
	crypto_shash_update(&ctx->sdesc->shash, opad, bs);
	crypto_shash_export(&ctx->sdesc->shash, opad);
	ret = copy_pad(ctx->mac_type, ctx->opad, opad);
	if (ret)
		goto calc_fail;

	kfree(ipad);
	kfree(opad);

	return 0;

calc_fail:
	kfree(ctx->ipad);
	ctx->ipad = NULL;
	kfree(ctx->opad);
	ctx->opad = NULL;
	kfree(ipad);
	kfree(opad);
	kfree(ctx->sdesc);
	ctx->sdesc = NULL;

	return ret;
}

static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
					    const unsigned char *key,
					    unsigned int keylen)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
	struct crypto_authenc_key_param *param;
	int enckeylen = 0, authkeylen = 0;
	struct rtattr *rta = (void *)key;

	if (!RTA_OK(rta, keylen))
		return -EINVAL;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		return -EINVAL;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		return -EINVAL;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);
	if (keylen < enckeylen)
		return -EINVAL;

	if (keylen > OTX2_CPT_MAX_KEY_SIZE)
		return -EINVAL;

	authkeylen = keylen - enckeylen;
	memcpy(ctx->key, key, keylen);

	switch (enckeylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX2_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX2_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX2_CPT_AES_256_BIT;
		break;
	default:
		/* Invalid key length */
		return -EINVAL;
	}

	ctx->enc_key_len = enckeylen;
	ctx->auth_key_len = authkeylen;

	return aead_hmac_init(cipher);
}

static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
					     const unsigned char *key,
					     unsigned int keylen)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
	struct crypto_authenc_key_param *param;
	struct rtattr *rta = (void *)key;
	int enckeylen = 0;

	if (!RTA_OK(rta, keylen))
		return -EINVAL;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		return -EINVAL;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		return -EINVAL;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);
	if (enckeylen != 0)
		return -EINVAL;

	if (keylen > OTX2_CPT_MAX_KEY_SIZE)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->enc_key_len = enckeylen;
	ctx->auth_key_len = keylen;

	return 0;
}

static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
					const unsigned char *key,
					unsigned int keylen)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);

	/*
	 * For aes gcm we expect to get encryption key (16, 24, 32 bytes)
	 * and salt (4 bytes)
	 */
	switch (keylen) {
	case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX2_CPT_AES_128_BIT;
		ctx->enc_key_len = AES_KEYSIZE_128;
		break;
	case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX2_CPT_AES_192_BIT;
		ctx->enc_key_len = AES_KEYSIZE_192;
		break;
	case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX2_CPT_AES_256_BIT;
		ctx->enc_key_len = AES_KEYSIZE_256;
		break;
	default:
		/* Invalid key and salt length */
		return -EINVAL;
	}

	/* Store encryption key and salt */
	memcpy(ctx->key, key, keylen);

	return crypto_aead_setkey(ctx->fbk_cipher, key, keylen);
}

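/*
 * Build the control word and FC context header for an AEAD (CBC+HMAC or
 * GCM) request.
 */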
static inline int create_aead_ctx_hdr(struct aead_request *req, u32 enc,
				      u32 *argcnt)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
	int mac_len = crypto_aead_authsize(tfm);
	int ds;

	rctx->ctrl_word.e.enc_data_offset = req->assoclen;

	switch (ctx->cipher_type) {
	case OTX2_CPT_AES_CBC:
		if (req->assoclen > 248 || !IS_ALIGNED(req->assoclen, 8))
			return -EINVAL;

		fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
		       ctx->enc_key_len);
		/* Copy IV to context */
		memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));

		ds = crypto_shash_digestsize(ctx->hashalg);
		if (ctx->mac_type == OTX2_CPT_SHA384)
			ds = SHA512_DIGEST_SIZE;
		if (ctx->ipad)
			memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
		if (ctx->opad)
			memcpy(fctx->hmac.e.opad, ctx->opad, ds);
		break;

	case OTX2_CPT_AES_GCM:
		if (crypto_ipsec_check_assoclen(req->assoclen))
			return -EINVAL;

		fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_DPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
		/* Copy salt to context */
		memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
		       AES_GCM_SALT_SIZE);

		rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
		break;

	default:
		/* Unknown cipher type */
		return -EINVAL;
	}
	cpu_to_be64s(&rctx->ctrl_word.flags);

	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
	req_info->ctrl.s.se_req = 1;
	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
				 DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
	if (enc) {
		req_info->req.opcode.s.minor = 2;
		req_info->req.param1 = req->cryptlen;
		req_info->req.param2 = req->cryptlen + req->assoclen;
	} else {
		req_info->req.opcode.s.minor = 3;
		req_info->req.param1 = req->cryptlen - mac_len;
		req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
	}

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
	fctx->enc.enc_ctrl.e.mac_len = mac_len;
	cpu_to_be64s(&fctx->enc.enc_ctrl.u);

	/*
	 * Store the packet data information in the offset
	 * control word (first 8 bytes of the input).
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
	++(*argcnt);

	return 0;
}

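/* Build the HMAC-only context header used when the selected cipher is NULL. */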
static inline void create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
				      u32 enc)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;

	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
	req_info->ctrl.s.se_req = 1;
	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_HMAC |
				 DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
	req_info->is_trunc_hmac = ctx->is_trunc_hmac;

	req_info->req.opcode.s.minor = 0;
	req_info->req.param1 = ctx->auth_key_len;
	req_info->req.param2 = ctx->mac_type << 8;

	/* Add authentication key */
	req_info->in[*argcnt].vptr = ctx->key;
	req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
	req_info->req.dlen += round_up(ctx->auth_key_len, 8);
	++(*argcnt);
}

static inline int create_aead_input_list(struct aead_request *req, u32 enc)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen = req->cryptlen + req->assoclen;
	u32 status, argcnt = 0;

	status = create_aead_ctx_hdr(req, enc, &argcnt);
	if (status)
		return status;
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->in_cnt = argcnt;

	return 0;
}

static inline void create_aead_output_list(struct aead_request *req, u32 enc,
					   u32 mac_len)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0, outputlen = 0;

	if (enc)
		outputlen = req->cryptlen + req->assoclen + mac_len;
	else
		outputlen = req->cryptlen + req->assoclen - mac_len;

	update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
	req_info->out_cnt = argcnt;
}

static inline void create_aead_null_input_list(struct aead_request *req,
					       u32 enc, u32 mac_len)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen, argcnt = 0;

	if (enc)
		inputlen = req->cryptlen + req->assoclen;
	else
		inputlen = req->cryptlen + req->assoclen - mac_len;

	create_hmac_ctx_hdr(req, &argcnt, enc);
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->in_cnt = argcnt;
}

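/*
 * Build the output fragment list for NULL-cipher (HMAC only) requests: on
 * encryption the HMAC is appended after the payload, on decryption the
 * received HMAC is saved so it can be compared with the calculated one.
 */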
static inline int create_aead_null_output_list(struct aead_request *req,
					       u32 enc, u32 mac_len)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	struct scatterlist *dst;
	u8 *ptr = NULL;
	int argcnt = 0, status, offset;
	u32 inputlen;

	if (enc)
		inputlen = req->cryptlen + req->assoclen;
	else
		inputlen = req->cryptlen + req->assoclen - mac_len;

	/*
	 * If source and destination are different
	 * then copy the payload to the destination.
	 */
	if (req->src != req->dst) {

		ptr = kmalloc(inputlen, (req_info->areq->flags &
					 CRYPTO_TFM_REQ_MAY_SLEEP) ?
					 GFP_KERNEL : GFP_ATOMIC);
		if (!ptr)
			return -ENOMEM;

		status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
					   inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
					     inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		kfree(ptr);
	}

	if (enc) {
		/*
		 * In an encryption scenario the HMAC needs
		 * to be appended after the payload.
		 */
		dst = req->dst;
		offset = inputlen;
		while (offset >= dst->length) {
			offset -= dst->length;
			dst = sg_next(dst);
			if (!dst)
				return -ENOENT;
		}

		update_output_data(req_info, dst, offset, mac_len, &argcnt);
	} else {
		/*
		 * In a decryption scenario the HMAC calculated for the
		 * received payload needs to be compared with the received
		 * HMAC.
		 */
		status = sg_copy_buffer(req->src, sg_nents(req->src),
					rctx->fctx.hmac.s.hmac_recv, mac_len,
					inputlen, true);
		if (status != mac_len)
			return -EINVAL;

		req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
		req_info->out[argcnt].size = mac_len;
		argcnt++;
	}

	req_info->out_cnt = argcnt;
	return 0;

error_free:
	kfree(ptr);
	return status;
}

static int aead_do_fallback(struct aead_request *req, bool is_enc)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(aead);
	int ret;

	if (ctx->fbk_cipher) {
		/* Store the cipher tfm and then use the fallback tfm */
		aead_request_set_tfm(&rctx->fbk_req, ctx->fbk_cipher);
		aead_request_set_callback(&rctx->fbk_req, req->base.flags,
					  req->base.complete, req->base.data);
		aead_request_set_crypt(&rctx->fbk_req, req->src,
				       req->dst, req->cryptlen, req->iv);
		aead_request_set_ad(&rctx->fbk_req, req->assoclen);
		ret = is_enc ? crypto_aead_encrypt(&rctx->fbk_req) :
			       crypto_aead_decrypt(&rctx->fbk_req);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

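/*
 * Common AEAD encrypt/decrypt path: build the input/output lists, fall back
 * to software when the request cannot be handled by the hardware, otherwise
 * submit it to a CPT LF.
 */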
static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
	struct pci_dev *pdev;
	int status, cpu_num;

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.u = 0;

	req_info->callback = otx2_cpt_aead_callback;
	req_info->areq = &req->base;
	req_info->req_type = reg_type;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;

	switch (reg_type) {
	case OTX2_CPT_AEAD_ENC_DEC_REQ:
		status = create_aead_input_list(req, enc);
		if (status)
			return status;
		create_aead_output_list(req, enc, crypto_aead_authsize(tfm));
		break;

	case OTX2_CPT_AEAD_ENC_DEC_NULL_REQ:
		create_aead_null_input_list(req, enc,
					    crypto_aead_authsize(tfm));
		status = create_aead_null_output_list(req, enc,
						crypto_aead_authsize(tfm));
		if (status)
			return status;
		break;

	default:
		return -EINVAL;
	}
	if (!IS_ALIGNED(req_info->req.param1, ctx->enc_align_len))
		return -EINVAL;

	if (!req_info->req.param2 ||
	    (req_info->req.param1 > OTX2_CPT_MAX_REQ_SIZE) ||
	    (req_info->req.param2 > OTX2_CPT_MAX_REQ_SIZE))
		return aead_do_fallback(req, enc);

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);

	/*
	 * We perform an asynchronous send and once
	 * the request is completed the driver notifies us
	 * through the registered callback functions.
	 */
	return otx2_cpt_do_request(pdev, req_info, cpu_num);
}

static int otx2_cpt_aead_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, true);
}

static int otx2_cpt_aead_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, false);
}

static int otx2_cpt_aead_null_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, true);
}

static int otx2_cpt_aead_null_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, false);
}

static struct skcipher_alg otx2_cpt_skciphers[] = { {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "cpt_xts_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx2_cpt_enc_dec_init,
	.exit = otx2_cpt_skcipher_exit,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.setkey = otx2_cpt_skcipher_xts_setkey,
	.encrypt = otx2_cpt_skcipher_encrypt,
	.decrypt = otx2_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "cpt_cbc_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx2_cpt_enc_dec_init,
	.exit = otx2_cpt_skcipher_exit,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx2_cpt_skcipher_cbc_aes_setkey,
	.encrypt = otx2_cpt_skcipher_encrypt,
	.decrypt = otx2_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(aes)",
	.base.cra_driver_name = "cpt_ecb_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx2_cpt_enc_dec_init,
	.exit = otx2_cpt_skcipher_exit,
	.ivsize = 0,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx2_cpt_skcipher_ecb_aes_setkey,
	.encrypt = otx2_cpt_skcipher_encrypt,
	.decrypt = otx2_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(des3_ede)",
	.base.cra_driver_name = "cpt_cbc_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx2_cpt_enc_dec_init,
	.exit = otx2_cpt_skcipher_exit,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = otx2_cpt_skcipher_cbc_des3_setkey,
	.encrypt = otx2_cpt_skcipher_encrypt,
	.decrypt = otx2_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(des3_ede)",
	.base.cra_driver_name = "cpt_ecb_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx2_cpt_enc_dec_init,
	.exit = otx2_cpt_skcipher_exit,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = 0,
	.setkey = otx2_cpt_skcipher_ecb_des3_setkey,
	.encrypt = otx2_cpt_skcipher_encrypt,
	.decrypt = otx2_cpt_skcipher_decrypt,
} };

static struct aead_alg otx2_cpt_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_cbc_aes_sha1_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx2_cpt_aead_set_authsize,
	.encrypt = otx2_cpt_aead_encrypt,
	.decrypt = otx2_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_cbc_aes_sha256_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx2_cpt_aead_set_authsize,
	.encrypt = otx2_cpt_aead_encrypt,
	.decrypt = otx2_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_cbc_aes_sha384_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx2_cpt_aead_set_authsize,
	.encrypt = otx2_cpt_aead_encrypt,
	.decrypt = otx2_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_cbc_aes_sha512_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx2_cpt_aead_set_authsize,
	.encrypt = otx2_cpt_aead_encrypt,
	.decrypt = otx2_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha1_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_ecb_null_sha1_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx2_cpt_aead_null_set_authsize,
	.encrypt = otx2_cpt_aead_null_encrypt,
	.decrypt = otx2_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha256_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_ecb_null_sha256_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx2_cpt_aead_null_set_authsize,
	.encrypt = otx2_cpt_aead_null_encrypt,
	.decrypt = otx2_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha384_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_ecb_null_sha384_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx2_cpt_aead_null_set_authsize,
	.encrypt = otx2_cpt_aead_null_encrypt,
	.decrypt = otx2_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha512_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_ecb_null_sha512_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx2_cpt_aead_null_set_authsize,
	.encrypt = otx2_cpt_aead_null_encrypt,
	.decrypt = otx2_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "rfc4106(gcm(aes))",
		.cra_driver_name = "cpt_rfc4106_gcm_aes",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_gcm_aes_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_gcm_aes_setkey,
	.setauthsize = otx2_cpt_aead_gcm_set_authsize,
	.encrypt = otx2_cpt_aead_encrypt,
	.decrypt = otx2_cpt_aead_decrypt,
	.ivsize = AES_GCM_IV_SIZE,
	.maxauthsize = AES_GCM_ICV_SIZE,
} };

static inline int cpt_register_algs(void)
{
	int i, err = 0;

	for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
		otx2_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

	err = crypto_register_skciphers(otx2_cpt_skciphers,
					ARRAY_SIZE(otx2_cpt_skciphers));
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
		otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

	err = crypto_register_aeads(otx2_cpt_aeads,
				    ARRAY_SIZE(otx2_cpt_aeads));
	if (err) {
		crypto_unregister_skciphers(otx2_cpt_skciphers,
					    ARRAY_SIZE(otx2_cpt_skciphers));
		return err;
	}

	return 0;
}

static inline void cpt_unregister_algs(void)
{
	crypto_unregister_skciphers(otx2_cpt_skciphers,
				    ARRAY_SIZE(otx2_cpt_skciphers));
	crypto_unregister_aeads(otx2_cpt_aeads, ARRAY_SIZE(otx2_cpt_aeads));
}

static int compare_func(const void *lptr, const void *rptr)
{
	const struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
	const struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

	if (ldesc->dev->devfn < rdesc->dev->devfn)
		return -1;
	if (ldesc->dev->devfn > rdesc->dev->devfn)
		return 1;
	return 0;
}

static void swap_func(void *lptr, void *rptr, int size)
{
	struct cpt_device_desc *ldesc = lptr;
	struct cpt_device_desc *rdesc = rptr;

	swap(*ldesc, *rdesc);
}

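/*
 * Add a CPT VF device to the SE device table; once the expected number of
 * devices is present, register the crypto algorithms.
 */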
int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
			 int num_queues, int num_devices)
{
	int ret = 0;
	int count;

	mutex_lock(&mutex);
	count = atomic_read(&se_devices.count);
	if (count >= OTX2_CPT_MAX_LFS_NUM) {
		dev_err(&pdev->dev, "No space to add a new device\n");
		ret = -ENOSPC;
		goto unlock;
	}
	se_devices.desc[count].num_queues = num_queues;
	se_devices.desc[count++].dev = pdev;
	atomic_inc(&se_devices.count);

	if (atomic_read(&se_devices.count) == num_devices &&
	    is_crypto_registered == false) {
		if (cpt_register_algs()) {
			dev_err(&pdev->dev,
				"Error in registering crypto algorithms\n");
			ret = -EINVAL;
			goto unlock;
		}
		try_module_get(mod);
		is_crypto_registered = true;
	}
	sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
	     compare_func, swap_func);

unlock:
	mutex_unlock(&mutex);
	return ret;
}

void otx2_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod)
{
	struct cpt_device_table *dev_tbl;
	bool dev_found = false;
	int i, j, count;

	mutex_lock(&mutex);

	dev_tbl = &se_devices;
	count = atomic_read(&dev_tbl->count);
	for (i = 0; i < count; i++) {
		if (pdev == dev_tbl->desc[i].dev) {
			for (j = i; j < count-1; j++)
				dev_tbl->desc[j] = dev_tbl->desc[j+1];
			dev_found = true;
			break;
		}
	}

	if (!dev_found) {
		dev_err(&pdev->dev, "%s device not found\n", __func__);
		goto unlock;
	}
	if (atomic_dec_and_test(&se_devices.count)) {
		cpt_unregister_algs();
		module_put(mod);
		is_crypto_registered = false;
	}

unlock:
	mutex_unlock(&mutex);
}