// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>
#include <crypto/scatterwalk.h>
#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"

#define CPT_MAX_VF_NUM	64
/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE	4
/* Size of IV in AES GCM mode */
#define AES_GCM_IV_SIZE		8
/* Size of ICV (Integrity Check Value) in AES GCM mode */
#define AES_GCM_ICV_SIZE	16
/* Offset of IV in AES GCM mode */
#define AES_GCM_IV_OFFSET	8
#define CONTROL_WORD_LEN	8
#define KEY2_OFFSET		48
#define DMA_MODE_FLAG(dma_mode) \
	(((dma_mode) == OTX_CPT_DMA_GATHER_SCATTER) ? (1 << 7) : 0)

/* Truncated SHA digest size */
#define SHA1_TRUNC_DIGEST_SIZE		12
#define SHA256_TRUNC_DIGEST_SIZE	16
#define SHA384_TRUNC_DIGEST_SIZE	24
#define SHA512_TRUNC_DIGEST_SIZE	32

static DEFINE_MUTEX(mutex);
static int is_crypto_registered;

struct cpt_device_desc {
	enum otx_cptpf_type pf_type;
	struct pci_dev *dev;
	int num_queues;
};

struct cpt_device_table {
	atomic_t count;
	struct cpt_device_desc desc[CPT_MAX_VF_NUM];
};

static struct cpt_device_table se_devices = {
	.count = ATOMIC_INIT(0)
};

static struct cpt_device_table ae_devices = {
	.count = ATOMIC_INIT(0)
};

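/*
 * Pick an SE (symmetric engine) device for the current CPU. Requests are
 * spread across the registered VF queues by CPU number so that, when enough
 * queues are available, each CPU effectively gets its own queue.
 */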
static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
	int count, ret = 0;

	count = atomic_read(&se_devices.count);
	if (count < 1)
		return -ENODEV;

	*cpu_num = get_cpu();

	if (se_devices.desc[0].pf_type == OTX_CPT_SE) {
		/*
		 * On the OcteonTX platform there is one CPT instruction queue
		 * bound to each VF. We get maximum performance if one CPT
		 * queue is available for each cpu, otherwise CPT queues need
		 * to be shared between cpus.
		 */
		if (*cpu_num >= count)
			*cpu_num %= count;
		*pdev = se_devices.desc[*cpu_num].dev;
	} else {
		pr_err("Unknown PF type %d\n", se_devices.desc[0].pf_type);
		ret = -EINVAL;
	}
	put_cpu();

	return ret;
}

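/*
 * For authenc() with a NULL cipher the engine only produces the HMAC, so on
 * decryption the calculated digest has to be compared against the digest
 * received in the request by software.
 */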
static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
{
	struct otx_cpt_req_ctx *rctx;
	struct aead_request *req;
	struct crypto_aead *tfm;

	req = container_of(cpt_req->areq, struct aead_request, base);
	tfm = crypto_aead_reqtfm(req);
	rctx = aead_request_ctx_dma(req);
	if (memcmp(rctx->fctx.hmac.s.hmac_calc,
		   rctx->fctx.hmac.s.hmac_recv,
		   crypto_aead_authsize(tfm)) != 0)
		return -EBADMSG;

	return 0;
}

static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
{
	struct otx_cpt_info_buffer *cpt_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct otx_cpt_req_info *cpt_req;
	struct pci_dev *pdev;

	if (!cpt_info)
		goto complete;

	cpt_req = cpt_info->req;
	if (!status) {
		/*
		 * When the selected cipher is NULL we need to manually
		 * verify whether the calculated hmac value matches the
		 * received hmac value.
		 */
		if (cpt_req->req_type == OTX_CPT_AEAD_ENC_DEC_NULL_REQ &&
		    !cpt_req->is_enc)
			status = validate_hmac_cipher_null(cpt_req);
	}
	pdev = cpt_info->pdev;
	do_request_cleanup(pdev, cpt_info);

complete:
	if (areq)
		crypto_request_complete(areq, status);
}

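/*
 * For CBC modes the last ciphertext block becomes the IV of a subsequent
 * request, so copy it back into req->iv once the operation has completed.
 * For in-place decryption the original last block was saved in iv_out
 * before it got overwritten.
 */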
static void output_iv_copyback(struct crypto_async_request *areq)
{
	struct otx_cpt_req_info *req_info;
	struct skcipher_request *sreq;
	struct crypto_skcipher *stfm;
	struct otx_cpt_req_ctx *rctx;
	struct otx_cpt_enc_ctx *ctx;
	u32 start, ivsize;

	sreq = container_of(areq, struct skcipher_request, base);
	stfm = crypto_skcipher_reqtfm(sreq);
	ctx = crypto_skcipher_ctx(stfm);
	if (ctx->cipher_type == OTX_CPT_AES_CBC ||
	    ctx->cipher_type == OTX_CPT_DES3_CBC) {
		rctx = skcipher_request_ctx_dma(sreq);
		req_info = &rctx->cpt_req;
		ivsize = crypto_skcipher_ivsize(stfm);
		start = sreq->cryptlen - ivsize;

		if (req_info->is_enc) {
			scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
						 ivsize, 0);
		} else {
			if (sreq->src != sreq->dst) {
				scatterwalk_map_and_copy(sreq->iv, sreq->src,
							 start, ivsize, 0);
			} else {
				memcpy(sreq->iv, req_info->iv_out, ivsize);
				kfree(req_info->iv_out);
			}
		}
	}
}

static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
{
	struct otx_cpt_info_buffer *cpt_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct pci_dev *pdev;

	if (areq) {
		if (!status)
			output_iv_copyback(areq);
		if (cpt_info) {
			pdev = cpt_info->pdev;
			do_request_cleanup(pdev, cpt_info);
		}
		crypto_request_complete(areq, status);
	}
}

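/*
 * Append the scatterlist segments of a request to the gather (input) or
 * scatter (output) pointer list passed to the CPT engine, updating the
 * total input/output length as segments are added.
 */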
static inline void update_input_data(struct otx_cpt_req_info *req_info,
				     struct scatterlist *inp_sg,
				     u32 nbytes, u32 *argcnt)
{
	req_info->req.dlen += nbytes;

	while (nbytes) {
		u32 len = min(nbytes, inp_sg->length);
		u8 *ptr = sg_virt(inp_sg);

		req_info->in[*argcnt].vptr = (void *)ptr;
		req_info->in[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		inp_sg = sg_next(inp_sg);
	}
}

static inline void update_output_data(struct otx_cpt_req_info *req_info,
				      struct scatterlist *outp_sg,
				      u32 offset, u32 nbytes, u32 *argcnt)
{
	req_info->rlen += nbytes;

	while (nbytes) {
		u32 len = min(nbytes, outp_sg->length - offset);
		u8 *ptr = sg_virt(outp_sg);

		req_info->out[*argcnt].vptr = (void *) (ptr + offset);
		req_info->out[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		offset = 0;
		outp_sg = sg_next(outp_sg);
	}
}

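/*
 * Build the offset control word and FLEXICRYPTO context (cipher type, key
 * and IV) for a plain skcipher request and add them as the first gather
 * entries. For in-place CBC decryption the last ciphertext block is saved
 * so that output_iv_copyback() can restore the chaining IV afterwards.
 */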
static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
				 u32 *argcnt)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
	struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
	int ivsize = crypto_skcipher_ivsize(stfm);
	u32 start = req->cryptlen - ivsize;
	gfp_t flags;

	flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC;
	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;

	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
				DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	if (enc) {
		req_info->req.opcode.s.minor = 2;
	} else {
		req_info->req.opcode.s.minor = 3;
		if ((ctx->cipher_type == OTX_CPT_AES_CBC ||
		    ctx->cipher_type == OTX_CPT_DES3_CBC) &&
		    req->src == req->dst) {
			req_info->iv_out = kmalloc(ivsize, flags);
			if (!req_info->iv_out)
				return -ENOMEM;

			scatterwalk_map_and_copy(req_info->iv_out, req->src,
						 start, ivsize, 0);
		}
	}
	/* Encryption data length */
	req_info->req.param1 = req->cryptlen;
	/* Authentication data length */
	req_info->req.param2 = 0;

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;

	if (ctx->cipher_type == OTX_CPT_AES_XTS)
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
	else
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);

	memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));

	fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);

	/*
	 * Store Packet Data Information in the offset control word,
	 * which occupies the first 8 bytes of the input.
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);

	++(*argcnt);

	return 0;
}

static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
				    u32 enc_iv_len)
{
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;
	int ret;

	ret = create_ctx_hdr(req, enc, &argcnt);
	if (ret)
		return ret;

	update_input_data(req_info, req->src, req->cryptlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}

static inline void create_output_list(struct skcipher_request *req,
				      u32 enc_iv_len)
{
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;

	/*
	 * Output buffer processing:
	 * AES encryption/decryption output is received in the
	 * following format
	 *
	 * |--------- IV ---------|---- ENCRYPTED/DECRYPTED DATA ----|
	 * |  16 bytes (AES CBC)  |     request enc/dec data len     |
	 */
	update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
	req_info->outcnt = argcnt;
}

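/*
 * Common encrypt/decrypt path for all skcipher algorithms: build the input
 * and output lists, pick an SE device for this CPU and submit the request
 * asynchronously.
 */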
static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
	struct pci_dev *pdev;
	int status, cpu_num;

	/* Validate that request doesn't exceed maximum CPT supported size */
	if (req->cryptlen > OTX_CPT_MAX_REQ_SIZE)
		return -E2BIG;

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.flags = 0;

	status = create_input_list(req, enc, enc_iv_len);
	if (status)
		return status;
	create_output_list(req, enc_iv_len);

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->callback = (void *)otx_cpt_skcipher_callback;
	req_info->areq = &req->base;
	req_info->req_type = OTX_CPT_ENC_DEC_REQ;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;
	req_info->ctrl.s.grp = 0;

	/*
	 * We perform an asynchronous send and once the request is
	 * completed the driver notifies the caller through the
	 * registered callback functions.
	 */
	status = otx_cpt_do_request(pdev, req_info, cpu_num);

	return status;
}

static int otx_cpt_skcipher_encrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, true);
}

static int otx_cpt_skcipher_decrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, false);
}

static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, u32 keylen)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
	const u8 *key2 = key + (keylen / 2);
	const u8 *key1 = key;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;
	ctx->key_len = keylen;
	memcpy(ctx->enc_key, key1, keylen / 2);
	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
	ctx->cipher_type = OTX_CPT_AES_XTS;
	switch (ctx->key_len) {
	case 2 * AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case 2 * AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;

	memcpy(ctx->enc_key, key, keylen);

	return 0;
}

static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}
	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;

	memcpy(ctx->enc_key, key, keylen);

	return 0;
}

static int otx_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
					   const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CBC);
}

static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
					   const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
}

static int otx_cpt_skcipher_cfb_aes_setkey(struct crypto_skcipher *tfm,
					   const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CFB);
}

static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_CBC);
}

static int otx_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_ECB);
}

static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	/*
	 * Additional memory for skcipher_request is
	 * allocated since the cryptd daemon uses
	 * this memory for request_ctx information
	 */
	crypto_skcipher_set_reqsize_dma(
		tfm, sizeof(struct otx_cpt_req_ctx) +
		     sizeof(struct skcipher_request));

	return 0;
}

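/*
 * Common AEAD init: record the cipher/MAC combination and, for non-NULL
 * ciphers, allocate a software shash of the selected hash algorithm which
 * is later used to precompute the HMAC ipad/opad partial hashes.
 */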
static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

	ctx->cipher_type = cipher_type;
	ctx->mac_type = mac_type;

	/*
	 * When selected cipher is NULL we use HMAC opcode instead of
	 * FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
	 * for calculating ipad and opad
	 */
	if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
		switch (ctx->mac_type) {
		case OTX_CPT_SHA1:
			ctx->hashalg = crypto_alloc_shash("sha1", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA256:
			ctx->hashalg = crypto_alloc_shash("sha256", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA384:
			ctx->hashalg = crypto_alloc_shash("sha384", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA512:
			ctx->hashalg = crypto_alloc_shash("sha512", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;
		}
	}

	crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx));

	return 0;
}

static int otx_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA1);
}

static int otx_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA256);
}

static int otx_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA384);
}

static int otx_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA512);
}

static int otx_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA1);
}

static int otx_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA256);
}

static int otx_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA384);
}

static int otx_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA512);
}

static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_GCM, OTX_CPT_MAC_NULL);
}

static void otx_cpt_aead_exit(struct crypto_aead *tfm)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

	kfree(ctx->ipad);
	kfree(ctx->opad);
	if (ctx->hashalg)
		crypto_free_shash(ctx->hashalg);
	kfree(ctx->sdesc);
}

/*
 * Validate and record the Integrity Check Value (ICV) length, i.e. the
 * authentication tag length.
 */
static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm,
				     unsigned int authsize)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

	switch (ctx->mac_type) {
	case OTX_CPT_SHA1:
		if (authsize != SHA1_DIGEST_SIZE &&
		    authsize != SHA1_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA1_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA256:
		if (authsize != SHA256_DIGEST_SIZE &&
		    authsize != SHA256_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA256_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA384:
		if (authsize != SHA384_DIGEST_SIZE &&
		    authsize != SHA384_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA384_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA512:
		if (authsize != SHA512_DIGEST_SIZE &&
		    authsize != SHA512_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA512_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_MAC_NULL:
		if (ctx->cipher_type == OTX_CPT_AES_GCM) {
			if (authsize != AES_GCM_ICV_SIZE)
				return -EINVAL;
		} else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	tfm->authsize = authsize;
	return 0;
}

static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
{
	struct otx_cpt_sdesc *sdesc;
	int size;

	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
	sdesc = kmalloc(size, GFP_KERNEL);
	if (!sdesc)
		return NULL;

	sdesc->shash.tfm = alg;

	return sdesc;
}

static inline void swap_data32(void *buf, u32 len)
{
	cpu_to_be32_array(buf, buf, len / 4);
}

static inline void swap_data64(void *buf, u32 len)
{
	__be64 *dst = buf;
	u64 *src = buf;
	int i = 0;

	for (i = 0 ; i < len / 8; i++, src++, dst++)
		*dst = cpu_to_be64p(src);
}

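/*
 * Copy an exported partial hash state into the ipad/opad buffer that is
 * later placed in the hardware context. The shash state words are
 * host-endian, so they are converted to big endian first.
 */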
static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
{
	struct sha512_state *sha512;
	struct sha256_state *sha256;
	struct sha1_state *sha1;

	switch (mac_type) {
	case OTX_CPT_SHA1:
		sha1 = (struct sha1_state *) in_pad;
		swap_data32(sha1->state, SHA1_DIGEST_SIZE);
		memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
		break;

	case OTX_CPT_SHA256:
		sha256 = (struct sha256_state *) in_pad;
		swap_data32(sha256->state, SHA256_DIGEST_SIZE);
		memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
		break;

	case OTX_CPT_SHA384:
	case OTX_CPT_SHA512:
		sha512 = (struct sha512_state *) in_pad;
		swap_data64(sha512->state, SHA512_DIGEST_SIZE);
		memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

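/*
 * Precompute the HMAC inner and outer partial hashes: hash the key (or its
 * digest if it is longer than the block size) XORed with the 0x36/0x5c
 * pads and export the resulting states, so that the engine can continue
 * from them instead of hashing the padded key for every request.
 */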
static int aead_hmac_init(struct crypto_aead *cipher)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
	int state_size = crypto_shash_statesize(ctx->hashalg);
	int ds = crypto_shash_digestsize(ctx->hashalg);
	int bs = crypto_shash_blocksize(ctx->hashalg);
	int authkeylen = ctx->auth_key_len;
	u8 *ipad = NULL, *opad = NULL;
	int ret = 0, icount = 0;

	ctx->sdesc = alloc_sdesc(ctx->hashalg);
	if (!ctx->sdesc)
		return -ENOMEM;

	ctx->ipad = kzalloc(bs, GFP_KERNEL);
	if (!ctx->ipad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	ctx->opad = kzalloc(bs, GFP_KERNEL);
	if (!ctx->opad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	ipad = kzalloc(state_size, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	opad = kzalloc(state_size, GFP_KERNEL);
	if (!opad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	if (authkeylen > bs) {
		ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
					  authkeylen, ipad);
		if (ret)
			goto calc_fail;

		authkeylen = ds;
	} else {
		memcpy(ipad, ctx->key, authkeylen);
	}

	memset(ipad + authkeylen, 0, bs - authkeylen);
	memcpy(opad, ipad, bs);

	for (icount = 0; icount < bs; icount++) {
		ipad[icount] ^= 0x36;
		opad[icount] ^= 0x5c;
	}

	/*
	 * The partial hashes calculated by the software algorithm are
	 * retrieved for IPAD and OPAD.
	 */

	/* IPAD Calculation */
	crypto_shash_init(&ctx->sdesc->shash);
	crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
	crypto_shash_export(&ctx->sdesc->shash, ipad);
	ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
	if (ret)
		goto calc_fail;

	/* OPAD Calculation */
	crypto_shash_init(&ctx->sdesc->shash);
	crypto_shash_update(&ctx->sdesc->shash, opad, bs);
	crypto_shash_export(&ctx->sdesc->shash, opad);
	ret = copy_pad(ctx->mac_type, ctx->opad, opad);
	if (ret)
		goto calc_fail;

	kfree(ipad);
	kfree(opad);

	return 0;

calc_fail:
	kfree(ctx->ipad);
	ctx->ipad = NULL;
	kfree(ctx->opad);
	ctx->opad = NULL;
	kfree(ipad);
	kfree(opad);
	kfree(ctx->sdesc);
	ctx->sdesc = NULL;

	return ret;
}

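/*
 * authenc() keys arrive as an rtattr-encoded crypto_authenc_key_param
 * header holding the encryption key length, followed by the authentication
 * key and then the encryption key (the same layout that
 * crypto_authenc_extractkeys() parses).
 */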
static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
					   const unsigned char *key,
					   unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
	struct crypto_authenc_key_param *param;
	int enckeylen = 0, authkeylen = 0;
	struct rtattr *rta = (void *)key;
	int status = -EINVAL;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);
	if (keylen < enckeylen)
		goto badkey;

	if (keylen > OTX_CPT_MAX_KEY_SIZE)
		goto badkey;

	authkeylen = keylen - enckeylen;
	memcpy(ctx->key, key, keylen);

	switch (enckeylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		/* Invalid key length */
		goto badkey;
	}

	ctx->enc_key_len = enckeylen;
	ctx->auth_key_len = authkeylen;

	status = aead_hmac_init(cipher);
	if (status)
		goto badkey;

	return 0;
badkey:
	return status;
}

static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
					    const unsigned char *key,
					    unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
	struct crypto_authenc_key_param *param;
	struct rtattr *rta = (void *)key;
	int enckeylen = 0;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);
	if (enckeylen != 0)
		goto badkey;

	if (keylen > OTX_CPT_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, key, keylen);
	ctx->enc_key_len = enckeylen;
	ctx->auth_key_len = keylen;
	return 0;
badkey:
	return -EINVAL;
}

static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
				       const unsigned char *key,
				       unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);

	/*
	 * For aes gcm we expect to get encryption key (16, 24, 32 bytes)
	 * and salt (4 bytes)
	 */
	switch (keylen) {
	case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		ctx->enc_key_len = AES_KEYSIZE_128;
		break;
	case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		ctx->enc_key_len = AES_KEYSIZE_192;
		break;
	case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		ctx->enc_key_len = AES_KEYSIZE_256;
		break;
	default:
		/* Invalid key and salt length */
		return -EINVAL;
	}

	/* Store encryption key and salt */
	memcpy(ctx->key, key, keylen);

	return 0;
}

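/*
 * Build the offset control word and FLEXICRYPTO context for an AEAD
 * request: key, IV or salt, ipad/opad and the opcode. As in the skcipher
 * path, param1 carries the encryption data length and param2 the
 * authentication data length.
 */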
static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
				      u32 *argcnt)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
	int mac_len = crypto_aead_authsize(tfm);
	int ds;

	rctx->ctrl_word.e.enc_data_offset = req->assoclen;

	switch (ctx->cipher_type) {
	case OTX_CPT_AES_CBC:
		fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
		       ctx->enc_key_len);
		/* Copy IV to context */
		memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));

		ds = crypto_shash_digestsize(ctx->hashalg);
		if (ctx->mac_type == OTX_CPT_SHA384)
			ds = SHA512_DIGEST_SIZE;
		if (ctx->ipad)
			memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
		if (ctx->opad)
			memcpy(fctx->hmac.e.opad, ctx->opad, ds);
		break;

	case OTX_CPT_AES_GCM:
		fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_DPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
		/* Copy salt to context */
		memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
		       AES_GCM_SALT_SIZE);

		rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
		break;

	default:
		/* Unknown cipher type */
		return -EINVAL;
	}
	rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.cflags);

	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
				 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	if (enc) {
		req_info->req.opcode.s.minor = 2;
		req_info->req.param1 = req->cryptlen;
		req_info->req.param2 = req->cryptlen + req->assoclen;
	} else {
		req_info->req.opcode.s.minor = 3;
		req_info->req.param1 = req->cryptlen - mac_len;
		req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
	}

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
	fctx->enc.enc_ctrl.e.mac_len = mac_len;
	fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);

	/*
	 * Store Packet Data Information in the offset control word,
	 * which occupies the first 8 bytes of the input.
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
	++(*argcnt);

	return 0;
}

static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
				      u32 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;

	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_HMAC |
				 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	req_info->is_trunc_hmac = ctx->is_trunc_hmac;

	req_info->req.opcode.s.minor = 0;
	req_info->req.param1 = ctx->auth_key_len;
	req_info->req.param2 = ctx->mac_type << 8;

	/* Add authentication key */
	req_info->in[*argcnt].vptr = ctx->key;
	req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
	req_info->req.dlen += round_up(ctx->auth_key_len, 8);
	++(*argcnt);

	return 0;
}

static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen = req->cryptlen + req->assoclen;
	u32 status, argcnt = 0;

	status = create_aead_ctx_hdr(req, enc, &argcnt);
	if (status)
		return status;
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}

static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
					  u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0, outputlen = 0;

	if (enc)
		outputlen = req->cryptlen + req->assoclen + mac_len;
	else
		outputlen = req->cryptlen + req->assoclen - mac_len;

	update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
	req_info->outcnt = argcnt;

	return 0;
}

static inline u32 create_aead_null_input_list(struct aead_request *req,
					      u32 enc, u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen, argcnt = 0;

	if (enc)
		inputlen = req->cryptlen + req->assoclen;
	else
		inputlen = req->cryptlen + req->assoclen - mac_len;

	create_hmac_ctx_hdr(req, &argcnt, enc);
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}

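/*
 * Output list for the NULL-cipher (HMAC only) case. For encryption the
 * payload is copied through unchanged and the digest is placed right after
 * it in the destination; for decryption the received digest is saved so
 * the completion handler can compare it with the calculated one.
 */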
static inline u32 create_aead_null_output_list(struct aead_request *req,
					       u32 enc, u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct scatterlist *dst;
	u8 *ptr = NULL;
	int argcnt = 0, status, offset;
	u32 inputlen;

	if (enc)
		inputlen = req->cryptlen + req->assoclen;
	else
		inputlen = req->cryptlen + req->assoclen - mac_len;

	/*
	 * If source and destination are different
	 * then copy payload to destination
	 */
	if (req->src != req->dst) {

		ptr = kmalloc(inputlen, (req_info->areq->flags &
					 CRYPTO_TFM_REQ_MAY_SLEEP) ?
					 GFP_KERNEL : GFP_ATOMIC);
		if (!ptr) {
			status = -ENOMEM;
			goto error;
		}

		status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
					   inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
					     inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		kfree(ptr);
	}

	if (enc) {
		/*
		 * In an encryption scenario hmac needs
		 * to be appended after payload
		 */
		dst = req->dst;
		offset = inputlen;
		while (offset >= dst->length) {
			offset -= dst->length;
			dst = sg_next(dst);
			if (!dst) {
				status = -ENOENT;
				goto error;
			}
		}

		update_output_data(req_info, dst, offset, mac_len, &argcnt);
	} else {
		/*
		 * In a decryption scenario the hmac calculated for the
		 * received payload needs to be compared with the received
		 * hmac.
		 */
		status = sg_copy_buffer(req->src, sg_nents(req->src),
					rctx->fctx.hmac.s.hmac_recv, mac_len,
					inputlen, true);
		if (status != mac_len) {
			status = -EINVAL;
			goto error;
		}

		req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
		req_info->out[argcnt].size = mac_len;
		argcnt++;
	}

	req_info->outcnt = argcnt;
	return 0;

error_free:
	kfree(ptr);
error:
	return status;
}

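/*
 * Common AEAD submit path: build the input/output lists for either the
 * FLEXICRYPTO or the HMAC-only request type, validate the request size and
 * queue the request on an SE device for asynchronous completion.
 */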
static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct pci_dev *pdev;
	u32 status, cpu_num;

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.flags = 0;

	req_info->callback = otx_cpt_aead_callback;
	req_info->areq = &req->base;
	req_info->req_type = reg_type;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;

	switch (reg_type) {
	case OTX_CPT_AEAD_ENC_DEC_REQ:
		status = create_aead_input_list(req, enc);
		if (status)
			return status;
		status = create_aead_output_list(req, enc,
						 crypto_aead_authsize(tfm));
		if (status)
			return status;
		break;

	case OTX_CPT_AEAD_ENC_DEC_NULL_REQ:
		status = create_aead_null_input_list(req, enc,
						     crypto_aead_authsize(tfm));
		if (status)
			return status;
		status = create_aead_null_output_list(req, enc,
						crypto_aead_authsize(tfm));
		if (status)
			return status;
		break;

	default:
		return -EINVAL;
	}

	/* Validate that request doesn't exceed maximum CPT supported size */
	if (req_info->req.param1 > OTX_CPT_MAX_REQ_SIZE ||
	    req_info->req.param2 > OTX_CPT_MAX_REQ_SIZE)
		return -E2BIG;

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->ctrl.s.grp = 0;

	status = otx_cpt_do_request(pdev, req_info, cpu_num);
	/*
	 * We perform an asynchronous send and once the request is
	 * completed the driver notifies the caller through the
	 * registered callback functions.
	 */
	return status;
}

static int otx_cpt_aead_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, true);
}

static int otx_cpt_aead_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, false);
}

static int otx_cpt_aead_null_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, true);
}

static int otx_cpt_aead_null_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, false);
}

static struct skcipher_alg otx_cpt_skciphers[] = { {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "cpt_xts_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_xts_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "cpt_cbc_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_cbc_aes_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(aes)",
	.base.cra_driver_name = "cpt_ecb_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = 0,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_ecb_aes_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cfb(aes)",
	.base.cra_driver_name = "cpt_cfb_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_cfb_aes_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(des3_ede)",
	.base.cra_driver_name = "cpt_cbc_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = otx_cpt_skcipher_cbc_des3_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(des3_ede)",
	.base.cra_driver_name = "cpt_ecb_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = 0,
	.setkey = otx_cpt_skcipher_ecb_des3_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
} };

static struct aead_alg otx_cpt_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha1_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha256_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha384_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha512_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha1_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha1_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha256_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha256_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha384_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha384_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha512_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha512_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "rfc4106(gcm(aes))",
		.cra_driver_name = "cpt_rfc4106_gcm_aes",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_gcm_aes_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_gcm_aes_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_GCM_IV_SIZE,
	.maxauthsize = AES_GCM_ICV_SIZE,
} };

static inline int is_any_alg_used(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
		if (refcount_read(&otx_cpt_skciphers[i].base.cra_refcnt) != 1)
			return true;
	for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
		if (refcount_read(&otx_cpt_aeads[i].base.cra_refcnt) != 1)
			return true;
	return false;
}

static inline int cpt_register_algs(void)
{
	int i, err = 0;

	if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
		for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
			otx_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

		err = crypto_register_skciphers(otx_cpt_skciphers,
						ARRAY_SIZE(otx_cpt_skciphers));
		if (err)
			return err;
	}

	for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
		otx_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

	err = crypto_register_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
	if (err) {
		crypto_unregister_skciphers(otx_cpt_skciphers,
					    ARRAY_SIZE(otx_cpt_skciphers));
		return err;
	}

	return 0;
}

static inline void cpt_unregister_algs(void)
{
	crypto_unregister_skciphers(otx_cpt_skciphers,
				    ARRAY_SIZE(otx_cpt_skciphers));
	crypto_unregister_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
}

static int compare_func(const void *lptr, const void *rptr)
{
	struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
	struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

	if (ldesc->dev->devfn < rdesc->dev->devfn)
		return -1;
	if (ldesc->dev->devfn > rdesc->dev->devfn)
		return 1;
	return 0;
}

static void swap_func(void *lptr, void *rptr, int size)
{
	struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
	struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

	swap(*ldesc, *rdesc);
}

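/*
 * Called by the VF driver for each probed device. SE devices are added to
 * the table used for request scheduling and, once all expected devices
 * have been probed, the crypto algorithms are registered; AE devices are
 * only tracked.
 */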
int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
			enum otx_cptpf_type pf_type,
			enum otx_cptvf_type engine_type,
			int num_queues, int num_devices)
{
	int ret = 0;
	int count;

	mutex_lock(&mutex);
	switch (engine_type) {
	case OTX_CPT_SE_TYPES:
		count = atomic_read(&se_devices.count);
		if (count >= CPT_MAX_VF_NUM) {
			dev_err(&pdev->dev, "No space to add a new device\n");
			ret = -ENOSPC;
			goto err;
		}
		se_devices.desc[count].pf_type = pf_type;
		se_devices.desc[count].num_queues = num_queues;
		se_devices.desc[count++].dev = pdev;
		atomic_inc(&se_devices.count);

		if (atomic_read(&se_devices.count) == num_devices &&
		    is_crypto_registered == false) {
			if (cpt_register_algs()) {
				dev_err(&pdev->dev,
				   "Error in registering crypto algorithms\n");
				ret = -EINVAL;
				goto err;
			}
			try_module_get(mod);
			is_crypto_registered = true;
		}
		sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
		     compare_func, swap_func);
		break;

	case OTX_CPT_AE_TYPES:
		count = atomic_read(&ae_devices.count);
		if (count >= CPT_MAX_VF_NUM) {
			dev_err(&pdev->dev, "No space to add a new device\n");
			ret = -ENOSPC;
			goto err;
		}
		ae_devices.desc[count].pf_type = pf_type;
		ae_devices.desc[count].num_queues = num_queues;
		ae_devices.desc[count++].dev = pdev;
		atomic_inc(&ae_devices.count);
		sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
		     compare_func, swap_func);
		break;

	default:
		dev_err(&pdev->dev, "Unknown VF type %d\n", engine_type);
		ret = BAD_OTX_CPTVF_TYPE;
	}
err:
	mutex_unlock(&mutex);
	return ret;
}

void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
			 enum otx_cptvf_type engine_type)
{
	struct cpt_device_table *dev_tbl;
	bool dev_found = false;
	int i, j, count;

	mutex_lock(&mutex);

	dev_tbl = (engine_type == OTX_CPT_AE_TYPES) ? &ae_devices : &se_devices;
	count = atomic_read(&dev_tbl->count);
	for (i = 0; i < count; i++)
		if (pdev == dev_tbl->desc[i].dev) {
			for (j = i; j < count-1; j++)
				dev_tbl->desc[j] = dev_tbl->desc[j+1];
			dev_found = true;
			break;
		}

	if (!dev_found) {
		dev_err(&pdev->dev, "%s device not found\n", __func__);
		goto exit;
	}

	if (engine_type != OTX_CPT_AE_TYPES) {
		if (atomic_dec_and_test(&se_devices.count) &&
		    !is_any_alg_used()) {
			cpt_unregister_algs();
			module_put(mod);
			is_crypto_registered = false;
		}
	} else
		atomic_dec(&ae_devices.count);
exit:
	mutex_unlock(&mutex);
}
