// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 */

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"
#include <crypto/xts.h>
#include <asm/unaligned.h>

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

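/* largest shared descriptor (AEAD givencrypt), plus space for the keys */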
#define DESC_MAX_USED_BYTES		(DESC_QI_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

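/*
 * caam_alg_entry - per-algorithm CAAM template parameters
 * @class1_alg_type: encryption (class 1) algorithm and mode selectors
 * @class2_alg_type: authentication (class 2) algorithm and mode selectors
 * @rfc3686: true for CTR algorithms wrapped in an RFC3686 template
 * @geniv: true for algorithms that generate their IV in HW (e.g. echainiv)
 * @nodkp: true for algorithms that never use a split (DKP-derived) key
 */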
struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
	bool xts_key_fallback;
	struct crypto_skcipher *fallback;
};

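/*
 * skcipher per-request context, used only to host the request handed to
 * the software fallback (XTS corner cases the HW cannot process)
 */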
struct caam_skcipher_req_ctx {
	struct skcipher_request fallback_req;
};

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true, ctrlpriv->era);

skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       ctrlpriv->era);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true, ctrlpriv->era);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

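	/* era < 6: no DKP, so derive the split key now via a job ring request */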
	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* append encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->adata.keylen_pad + keys.enckeylen,
				   ctx->dir);

	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	memzero_explicit(&keys, sizeof(keys));
	return ret;
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		return err;

	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
	      aead_setkey(aead, key, keylen);

	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen,
				   ctx->dir);
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->cdata.keylen, ctx->dir);

	ret = rfc4106_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->cdata.keylen, ctx->dir);

	ret = rfc4543_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
			     skcipher);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	const bool is_rfc3686 = alg->caam.rfc3686;
	int ret = 0;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return -EINVAL;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return -EINVAL;
		}
	}

	return ret;
}

static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	int ret = 0;
	int err;

	err = xts_verify_key(skcipher, key, keylen);
	if (err) {
		dev_dbg(jrdev, "key size mismatch\n");
		return err;
	}

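	/* only 32- and 64-byte XTS keys run on the engine; others fall back */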
	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
		ctx->xts_key_fallback = true;

	if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) {
		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
		if (err)
			return err;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return -EINVAL;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return -EINVAL;
		}
	}

	return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[];
};

/*
 * skcipher_edesc - s/w-extended skcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct skcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[];
};

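/*
 * Lazily create the per-operation (encrypt/decrypt) driver context on first
 * use; double-checked locking keeps concurrent requests from creating it
 * twice.
 */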
static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core initialized drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else /* (type == DECRYPT) */
				desc = ctx->sh_desc_dec;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (!IS_ERR_OR_NULL(drv_ctx))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}

static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status))
		ecode = caam_jr_strstatus(qidev, status);

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR_OR_NULL(drv_ctx))
		return (struct aead_edesc *)drv_ctx;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(qidev, req->dst,
						      dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(qidev, "unable to map destination\n");
				dma_unmap_sg(qidev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *      pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *      overlapping S/Gs; pad one of them
	 * else (input S/G) ...
	 *      pad input S/G, if needed
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
				 1 + !!ivsize + pad_sg_nents(mapped_src_nents));
	else
		qm_sg_ents = pad_sg_nents(qm_sg_ents);

	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_ents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

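	/* input S/G table layout: assoclen word, [IV,] then the source S/G */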
	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	fd_sgt = &edesc->drv_req.fd_sgt[0];
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents <= 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}

static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ret;

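	/* reject the request early if the QI backend is congested */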
	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR_OR_NULL(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

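/* IPsec ESP permits only specific assoclen values; check them up front */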
static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
					   true);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
					   false);
}

static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct skcipher_edesc *edesc;
	struct skcipher_request *req = drv_req->app_ctx;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ecode = 0;

	dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		ecode = caam_jr_strstatus(qidev, status);

	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(qidev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
						   bool encrypt)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR_OR_NULL(drv_ctx))
		return (struct skcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);

	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = skcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);

	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
				  ivsize + req->cryptlen, 0);

	if (req->src == req->dst)
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
				     sizeof(*sg_table), req->cryptlen + ivsize,
				     0);
	else
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), req->cryptlen + ivsize,
				     0);

	return edesc;
}

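/*
 * Return true if the upper half of the XTS sector-index IV is nonzero.
 * HW up to era 8 handles only 8-byte sector indexes, so such requests
 * are redirected to the software fallback in skcipher_crypt().
 */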
static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
}

static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0.
	 * Note that the case of input length < block size is caught during
	 * HW offloading, which returns an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

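	/*
	 * Fall back to software when the HW cannot process the request:
	 * era <= 8 with a nonzero upper sector index, or an unsupported
	 * XTS key length.
	 */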
	if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);

		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
				 crypto_skcipher_decrypt(&rctx->fallback_req);
	}

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		skcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int skcipher_encrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, true);
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, false);
}

static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aes_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = des_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = ctr_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc3686_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi",
				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};

static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		}
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
2002			.encrypt = aead_encrypt,
2003			.decrypt = aead_decrypt,
2004			.ivsize = DES3_EDE_BLOCK_SIZE,
2005			.maxauthsize = SHA224_DIGEST_SIZE,
2006		},
2007		.caam = {
2008			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2009			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2010					   OP_ALG_AAI_HMAC_PRECOMP,
2011		},
2012	},
2013	{
2014		.aead = {
2015			.base = {
2016				.cra_name = "echainiv(authenc(hmac(sha224),"
2017					    "cbc(des3_ede)))",
2018				.cra_driver_name = "echainiv-authenc-"
2019						   "hmac-sha224-"
2020						   "cbc-des3_ede-caam-qi",
2021				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2022			},
2023			.setkey = des3_aead_setkey,
2024			.setauthsize = aead_setauthsize,
2025			.encrypt = aead_encrypt,
2026			.decrypt = aead_decrypt,
2027			.ivsize = DES3_EDE_BLOCK_SIZE,
2028			.maxauthsize = SHA224_DIGEST_SIZE,
2029		},
2030		.caam = {
2031			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2032			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2033					   OP_ALG_AAI_HMAC_PRECOMP,
2034			.geniv = true,
2035		}
2036	},
2037	{
2038		.aead = {
2039			.base = {
2040				.cra_name = "authenc(hmac(sha256),"
2041					    "cbc(des3_ede))",
2042				.cra_driver_name = "authenc-hmac-sha256-"
2043						   "cbc-des3_ede-caam-qi",
2044				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2045			},
2046			.setkey = des3_aead_setkey,
2047			.setauthsize = aead_setauthsize,
2048			.encrypt = aead_encrypt,
2049			.decrypt = aead_decrypt,
2050			.ivsize = DES3_EDE_BLOCK_SIZE,
2051			.maxauthsize = SHA256_DIGEST_SIZE,
2052		},
2053		.caam = {
2054			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2055			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2056					   OP_ALG_AAI_HMAC_PRECOMP,
2057		},
2058	},
2059	{
2060		.aead = {
2061			.base = {
2062				.cra_name = "echainiv(authenc(hmac(sha256),"
2063					    "cbc(des3_ede)))",
2064				.cra_driver_name = "echainiv-authenc-"
2065						   "hmac-sha256-"
2066						   "cbc-des3_ede-caam-qi",
2067				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2068			},
2069			.setkey = des3_aead_setkey,
2070			.setauthsize = aead_setauthsize,
2071			.encrypt = aead_encrypt,
2072			.decrypt = aead_decrypt,
2073			.ivsize = DES3_EDE_BLOCK_SIZE,
2074			.maxauthsize = SHA256_DIGEST_SIZE,
2075		},
2076		.caam = {
2077			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2078			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2079					   OP_ALG_AAI_HMAC_PRECOMP,
2080			.geniv = true,
2081		}
2082	},
2083	{
2084		.aead = {
2085			.base = {
2086				.cra_name = "authenc(hmac(sha384),"
2087					    "cbc(des3_ede))",
2088				.cra_driver_name = "authenc-hmac-sha384-"
2089						   "cbc-des3_ede-caam-qi",
2090				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2091			},
2092			.setkey = des3_aead_setkey,
2093			.setauthsize = aead_setauthsize,
2094			.encrypt = aead_encrypt,
2095			.decrypt = aead_decrypt,
2096			.ivsize = DES3_EDE_BLOCK_SIZE,
2097			.maxauthsize = SHA384_DIGEST_SIZE,
2098		},
2099		.caam = {
2100			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2101			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2102					   OP_ALG_AAI_HMAC_PRECOMP,
2103		},
2104	},
2105	{
2106		.aead = {
2107			.base = {
2108				.cra_name = "echainiv(authenc(hmac(sha384),"
2109					    "cbc(des3_ede)))",
2110				.cra_driver_name = "echainiv-authenc-"
2111						   "hmac-sha384-"
2112						   "cbc-des3_ede-caam-qi",
2113				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2114			},
2115			.setkey = des3_aead_setkey,
2116			.setauthsize = aead_setauthsize,
2117			.encrypt = aead_encrypt,
2118			.decrypt = aead_decrypt,
2119			.ivsize = DES3_EDE_BLOCK_SIZE,
2120			.maxauthsize = SHA384_DIGEST_SIZE,
2121		},
2122		.caam = {
2123			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2124			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2125					   OP_ALG_AAI_HMAC_PRECOMP,
2126			.geniv = true,
2127		}
2128	},
2129	{
2130		.aead = {
2131			.base = {
2132				.cra_name = "authenc(hmac(sha512),"
2133					    "cbc(des3_ede))",
2134				.cra_driver_name = "authenc-hmac-sha512-"
2135						   "cbc-des3_ede-caam-qi",
2136				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2137			},
2138			.setkey = des3_aead_setkey,
2139			.setauthsize = aead_setauthsize,
2140			.encrypt = aead_encrypt,
2141			.decrypt = aead_decrypt,
2142			.ivsize = DES3_EDE_BLOCK_SIZE,
2143			.maxauthsize = SHA512_DIGEST_SIZE,
2144		},
2145		.caam = {
2146			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2147			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2148					   OP_ALG_AAI_HMAC_PRECOMP,
2149		},
2150	},
2151	{
2152		.aead = {
2153			.base = {
2154				.cra_name = "echainiv(authenc(hmac(sha512),"
2155					    "cbc(des3_ede)))",
2156				.cra_driver_name = "echainiv-authenc-"
2157						   "hmac-sha512-"
2158						   "cbc-des3_ede-caam-qi",
2159				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2160			},
2161			.setkey = des3_aead_setkey,
2162			.setauthsize = aead_setauthsize,
2163			.encrypt = aead_encrypt,
2164			.decrypt = aead_decrypt,
2165			.ivsize = DES3_EDE_BLOCK_SIZE,
2166			.maxauthsize = SHA512_DIGEST_SIZE,
2167		},
2168		.caam = {
2169			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2170			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2171					   OP_ALG_AAI_HMAC_PRECOMP,
2172			.geniv = true,
2173		}
2174	},
2175	{
2176		.aead = {
2177			.base = {
2178				.cra_name = "authenc(hmac(md5),cbc(des))",
2179				.cra_driver_name = "authenc-hmac-md5-"
2180						   "cbc-des-caam-qi",
2181				.cra_blocksize = DES_BLOCK_SIZE,
2182			},
2183			.setkey = aead_setkey,
2184			.setauthsize = aead_setauthsize,
2185			.encrypt = aead_encrypt,
2186			.decrypt = aead_decrypt,
2187			.ivsize = DES_BLOCK_SIZE,
2188			.maxauthsize = MD5_DIGEST_SIZE,
2189		},
2190		.caam = {
2191			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2192			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2193					   OP_ALG_AAI_HMAC_PRECOMP,
2194		},
2195	},
2196	{
2197		.aead = {
2198			.base = {
2199				.cra_name = "echainiv(authenc(hmac(md5),"
2200					    "cbc(des)))",
2201				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2202						   "cbc-des-caam-qi",
2203				.cra_blocksize = DES_BLOCK_SIZE,
2204			},
2205			.setkey = aead_setkey,
2206			.setauthsize = aead_setauthsize,
2207			.encrypt = aead_encrypt,
2208			.decrypt = aead_decrypt,
2209			.ivsize = DES_BLOCK_SIZE,
2210			.maxauthsize = MD5_DIGEST_SIZE,
2211		},
2212		.caam = {
2213			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2214			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2215					   OP_ALG_AAI_HMAC_PRECOMP,
2216			.geniv = true,
2217		}
2218	},
2219	{
2220		.aead = {
2221			.base = {
2222				.cra_name = "authenc(hmac(sha1),cbc(des))",
2223				.cra_driver_name = "authenc-hmac-sha1-"
2224						   "cbc-des-caam-qi",
2225				.cra_blocksize = DES_BLOCK_SIZE,
2226			},
2227			.setkey = aead_setkey,
2228			.setauthsize = aead_setauthsize,
2229			.encrypt = aead_encrypt,
2230			.decrypt = aead_decrypt,
2231			.ivsize = DES_BLOCK_SIZE,
2232			.maxauthsize = SHA1_DIGEST_SIZE,
2233		},
2234		.caam = {
2235			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2236			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2237					   OP_ALG_AAI_HMAC_PRECOMP,
2238		},
2239	},
2240	{
2241		.aead = {
2242			.base = {
2243				.cra_name = "echainiv(authenc(hmac(sha1),"
2244					    "cbc(des)))",
2245				.cra_driver_name = "echainiv-authenc-"
2246						   "hmac-sha1-cbc-des-caam-qi",
2247				.cra_blocksize = DES_BLOCK_SIZE,
2248			},
2249			.setkey = aead_setkey,
2250			.setauthsize = aead_setauthsize,
2251			.encrypt = aead_encrypt,
2252			.decrypt = aead_decrypt,
2253			.ivsize = DES_BLOCK_SIZE,
2254			.maxauthsize = SHA1_DIGEST_SIZE,
2255		},
2256		.caam = {
2257			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2258			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2259					   OP_ALG_AAI_HMAC_PRECOMP,
2260			.geniv = true,
2261		}
2262	},
2263	{
2264		.aead = {
2265			.base = {
2266				.cra_name = "authenc(hmac(sha224),cbc(des))",
2267				.cra_driver_name = "authenc-hmac-sha224-"
2268						   "cbc-des-caam-qi",
2269				.cra_blocksize = DES_BLOCK_SIZE,
2270			},
2271			.setkey = aead_setkey,
2272			.setauthsize = aead_setauthsize,
2273			.encrypt = aead_encrypt,
2274			.decrypt = aead_decrypt,
2275			.ivsize = DES_BLOCK_SIZE,
2276			.maxauthsize = SHA224_DIGEST_SIZE,
2277		},
2278		.caam = {
2279			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2280			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2281					   OP_ALG_AAI_HMAC_PRECOMP,
2282		},
2283	},
2284	{
2285		.aead = {
2286			.base = {
2287				.cra_name = "echainiv(authenc(hmac(sha224),"
2288					    "cbc(des)))",
2289				.cra_driver_name = "echainiv-authenc-"
2290						   "hmac-sha224-cbc-des-"
2291						   "caam-qi",
2292				.cra_blocksize = DES_BLOCK_SIZE,
2293			},
2294			.setkey = aead_setkey,
2295			.setauthsize = aead_setauthsize,
2296			.encrypt = aead_encrypt,
2297			.decrypt = aead_decrypt,
2298			.ivsize = DES_BLOCK_SIZE,
2299			.maxauthsize = SHA224_DIGEST_SIZE,
2300		},
2301		.caam = {
2302			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2303			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2304					   OP_ALG_AAI_HMAC_PRECOMP,
2305			.geniv = true,
2306		}
2307	},
2308	{
2309		.aead = {
2310			.base = {
2311				.cra_name = "authenc(hmac(sha256),cbc(des))",
2312				.cra_driver_name = "authenc-hmac-sha256-"
2313						   "cbc-des-caam-qi",
2314				.cra_blocksize = DES_BLOCK_SIZE,
2315			},
2316			.setkey = aead_setkey,
2317			.setauthsize = aead_setauthsize,
2318			.encrypt = aead_encrypt,
2319			.decrypt = aead_decrypt,
2320			.ivsize = DES_BLOCK_SIZE,
2321			.maxauthsize = SHA256_DIGEST_SIZE,
2322		},
2323		.caam = {
2324			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2325			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2326					   OP_ALG_AAI_HMAC_PRECOMP,
2327		},
2328	},
2329	{
2330		.aead = {
2331			.base = {
2332				.cra_name = "echainiv(authenc(hmac(sha256),"
2333					    "cbc(des)))",
2334				.cra_driver_name = "echainiv-authenc-"
2335						   "hmac-sha256-cbc-des-"
2336						   "caam-qi",
2337				.cra_blocksize = DES_BLOCK_SIZE,
2338			},
2339			.setkey = aead_setkey,
2340			.setauthsize = aead_setauthsize,
2341			.encrypt = aead_encrypt,
2342			.decrypt = aead_decrypt,
2343			.ivsize = DES_BLOCK_SIZE,
2344			.maxauthsize = SHA256_DIGEST_SIZE,
2345		},
2346		.caam = {
2347			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2348			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2349					   OP_ALG_AAI_HMAC_PRECOMP,
2350			.geniv = true,
2351		},
2352	},
2353	{
2354		.aead = {
2355			.base = {
2356				.cra_name = "authenc(hmac(sha384),cbc(des))",
2357				.cra_driver_name = "authenc-hmac-sha384-"
2358						   "cbc-des-caam-qi",
2359				.cra_blocksize = DES_BLOCK_SIZE,
2360			},
2361			.setkey = aead_setkey,
2362			.setauthsize = aead_setauthsize,
2363			.encrypt = aead_encrypt,
2364			.decrypt = aead_decrypt,
2365			.ivsize = DES_BLOCK_SIZE,
2366			.maxauthsize = SHA384_DIGEST_SIZE,
2367		},
2368		.caam = {
2369			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2370			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2371					   OP_ALG_AAI_HMAC_PRECOMP,
2372		},
2373	},
2374	{
2375		.aead = {
2376			.base = {
2377				.cra_name = "echainiv(authenc(hmac(sha384),"
2378					    "cbc(des)))",
2379				.cra_driver_name = "echainiv-authenc-"
2380						   "hmac-sha384-cbc-des-"
2381						   "caam-qi",
2382				.cra_blocksize = DES_BLOCK_SIZE,
2383			},
2384			.setkey = aead_setkey,
2385			.setauthsize = aead_setauthsize,
2386			.encrypt = aead_encrypt,
2387			.decrypt = aead_decrypt,
2388			.ivsize = DES_BLOCK_SIZE,
2389			.maxauthsize = SHA384_DIGEST_SIZE,
2390		},
2391		.caam = {
2392			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2393			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2394					   OP_ALG_AAI_HMAC_PRECOMP,
2395			.geniv = true,
2396		}
2397	},
2398	{
2399		.aead = {
2400			.base = {
2401				.cra_name = "authenc(hmac(sha512),cbc(des))",
2402				.cra_driver_name = "authenc-hmac-sha512-"
2403						   "cbc-des-caam-qi",
2404				.cra_blocksize = DES_BLOCK_SIZE,
2405			},
2406			.setkey = aead_setkey,
2407			.setauthsize = aead_setauthsize,
2408			.encrypt = aead_encrypt,
2409			.decrypt = aead_decrypt,
2410			.ivsize = DES_BLOCK_SIZE,
2411			.maxauthsize = SHA512_DIGEST_SIZE,
2412		},
2413		.caam = {
2414			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2415			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2416					   OP_ALG_AAI_HMAC_PRECOMP,
2417		}
2418	},
2419	{
2420		.aead = {
2421			.base = {
2422				.cra_name = "echainiv(authenc(hmac(sha512),"
2423					    "cbc(des)))",
2424				.cra_driver_name = "echainiv-authenc-"
2425						   "hmac-sha512-cbc-des-"
2426						   "caam-qi",
2427				.cra_blocksize = DES_BLOCK_SIZE,
2428			},
2429			.setkey = aead_setkey,
2430			.setauthsize = aead_setauthsize,
2431			.encrypt = aead_encrypt,
2432			.decrypt = aead_decrypt,
2433			.ivsize = DES_BLOCK_SIZE,
2434			.maxauthsize = SHA512_DIGEST_SIZE,
2435		},
2436		.caam = {
2437			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2438			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2439					   OP_ALG_AAI_HMAC_PRECOMP,
2440			.geniv = true,
2441		}
2442	},
2443};
2444
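/*
 * caam_init_common - per-tfm setup shared by skcipher and AEAD transforms:
 * grab a job ring, DMA-map the session key buffer and latch the descriptor
 * header templates for the class 1/2 algorithms.
 */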
2445static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2446			    bool uses_dkp)
2447{
2448	struct caam_drv_private *priv;
2449	struct device *dev;
2450
2451	/*
2452	 * distribute tfms across job rings to ensure in-order
2453	 * crypto request processing per tfm
2454	 */
2455	ctx->jrdev = caam_jr_alloc();
2456	if (IS_ERR(ctx->jrdev)) {
2457		pr_err("Job Ring Device allocation for transform failed\n");
2458		return PTR_ERR(ctx->jrdev);
2459	}
2460
2461	dev = ctx->jrdev->parent;
2462	priv = dev_get_drvdata(dev);
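	/*
	 * On Era 6+ parts, DKP derives the split key in place, i.e. the
	 * engine writes back into the key buffer, so it must be mapped
	 * bidirectionally; otherwise the key only flows CPU -> device.
	 */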
2463	if (priv->era >= 6 && uses_dkp)
2464		ctx->dir = DMA_BIDIRECTIONAL;
2465	else
2466		ctx->dir = DMA_TO_DEVICE;
2467
2468	ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
2469				      ctx->dir);
2470	if (dma_mapping_error(dev, ctx->key_dma)) {
2471		dev_err(dev, "unable to map key\n");
2472		caam_jr_free(ctx->jrdev);
2473		return -ENOMEM;
2474	}
2475
2476	/* copy descriptor header template value */
2477	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
2478	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
2479
2480	ctx->qidev = dev;
2481
2482	spin_lock_init(&ctx->lock);
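	/* QI driver contexts are created lazily, on first use, under lock */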
2483	ctx->drv_ctx[ENCRYPT] = NULL;
2484	ctx->drv_ctx[DECRYPT] = NULL;
2485
2486	return 0;
2487}
2488
2489static int caam_cra_init(struct crypto_skcipher *tfm)
2490{
2491	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
2492	struct caam_skcipher_alg *caam_alg =
2493		container_of(alg, typeof(*caam_alg), skcipher);
2494	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
2495	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
2496	int ret = 0;
2497
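	/*
	 * XTS requests the engine cannot handle (e.g. unsupported key sizes)
	 * are punted to a software fallback tfm; fold its request overhead
	 * into our reqsize so callers allocate enough space.
	 */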
2498	if (alg_aai == OP_ALG_AAI_XTS) {
2499		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
2500		struct crypto_skcipher *fallback;
2501
2502		fallback = crypto_alloc_skcipher(tfm_name, 0,
2503						 CRYPTO_ALG_NEED_FALLBACK);
2504		if (IS_ERR(fallback)) {
2505			pr_err("Failed to allocate %s fallback: %ld\n",
2506			       tfm_name, PTR_ERR(fallback));
2507			return PTR_ERR(fallback);
2508		}
2509
2510		ctx->fallback = fallback;
2511		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
2512					    crypto_skcipher_reqsize(fallback));
2513	}
2514
2515	ret = caam_init_common(ctx, &caam_alg->caam, false);
2516	if (ret && ctx->fallback)
2517		crypto_free_skcipher(ctx->fallback);
2518
2519	return ret;
2520}
2521
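/*
 * AEAD tfms use DKP to derive the HMAC split key unless their template is
 * marked nodkp (the non-authenc AEADs, which carry no auth key to split).
 */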
2522static int caam_aead_init(struct crypto_aead *tfm)
2523{
2524	struct aead_alg *alg = crypto_aead_alg(tfm);
2525	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
2526						      aead);
2527	struct caam_ctx *ctx = crypto_aead_ctx(tfm);
2528
2529	return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
2530}
2531
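/*
 * caam_exit_common - undo caam_init_common: release the per-direction QI
 * driver contexts, unmap the key buffer and hand the job ring back.
 */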
2532static void caam_exit_common(struct caam_ctx *ctx)
2533{
2534	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
2535	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
2536
2537	dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key),
2538			 ctx->dir);
2539
2540	caam_jr_free(ctx->jrdev);
2541}
2542
2543static void caam_cra_exit(struct crypto_skcipher *tfm)
2544{
2545	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
2546
2547	if (ctx->fallback)
2548		crypto_free_skcipher(ctx->fallback);
2549	caam_exit_common(ctx);
2550}
2551
2552static void caam_aead_exit(struct crypto_aead *tfm)
2553{
2554	caam_exit_common(crypto_aead_ctx(tfm));
2555}
2556
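/*
 * Tear-down counterpart of caam_qi_algapi_init(): unregister only those
 * algorithms whose registration actually succeeded.
 */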
2557void caam_qi_algapi_exit(void)
2558{
2559	int i;
2560
2561	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
2562		struct caam_aead_alg *t_alg = driver_aeads + i;
2563
2564		if (t_alg->registered)
2565			crypto_unregister_aead(&t_alg->aead);
2566	}
2567
2568	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2569		struct caam_skcipher_alg *t_alg = driver_algs + i;
2570
2571		if (t_alg->registered)
2572			crypto_unregister_skcipher(&t_alg->skcipher);
2573	}
2574}
2575
2576static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2577{
2578	struct skcipher_alg *alg = &t_alg->skcipher;
2579
2580	alg->base.cra_module = THIS_MODULE;
2581	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2582	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
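	/*
	 * OR into cra_flags rather than assigning: templates that need a
	 * software fallback (XTS) already carry CRYPTO_ALG_NEED_FALLBACK,
	 * which must be preserved here.
	 */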
2583	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
2584				CRYPTO_ALG_KERN_DRIVER_ONLY);
2585
2586	alg->init = caam_cra_init;
2587	alg->exit = caam_cra_exit;
2588}
2589
2590static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2591{
2592	struct aead_alg *alg = &t_alg->aead;
2593
2594	alg->base.cra_module = THIS_MODULE;
2595	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2596	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2597	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
2598			      CRYPTO_ALG_KERN_DRIVER_ONLY;
2599
2600	alg->init = caam_aead_init;
2601	alg->exit = caam_aead_exit;
2602}
2603
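/*
 * caam_qi_algapi_init - algorithm registration for the (DPAA 1.x) QI
 * backend: detect which crypto accelerators (CHAs) this part instantiates,
 * then register every skcipher and AEAD template the hardware can serve.
 */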
2604int caam_qi_algapi_init(struct device *ctrldev)
2605{
2606	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int i, err = 0;
2608	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
2609	unsigned int md_limit = SHA512_DIGEST_SIZE;
2610	bool registered = false;
2611
2612	/* Make sure this runs only on (DPAA 1.x) QI */
2613	if (!priv->qi_present || caam_dpaa2)
2614		return 0;
2615
2616	/*
2617	 * Register crypto algorithms the device supports.
2618	 * First, detect presence and attributes of DES, AES, and MD blocks.
2619	 */
2620	if (priv->era < 10) {
2621		u32 cha_vid, cha_inst;
2622
2623		cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2624		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
2625		md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2626
2627		cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2628		des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
2629			   CHA_ID_LS_DES_SHIFT;
2630		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
2631		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2632	} else {
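		/*
		 * Era 10+ parts expose per-CHA version and instantiation
		 * info in dedicated vreg registers rather than the shared
		 * perfmon CHA id/num registers read above.
		 */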
2633		u32 aesa, mdha;
2634
2635		aesa = rd_reg32(&priv->ctrl->vreg.aesa);
2636		mdha = rd_reg32(&priv->ctrl->vreg.mdha);
2637
2638		aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
2639		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
2640
2641		des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
2642		aes_inst = aesa & CHA_VER_NUM_MASK;
2643		md_inst = mdha & CHA_VER_NUM_MASK;
2644	}
2645
	/* LP256 MD blocks support digests only up to SHA-256 */
	if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;
2649
2650	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2651		struct caam_skcipher_alg *t_alg = driver_algs + i;
2652		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
2653
2654		/* Skip DES algorithms if not supported by device */
2655		if (!des_inst &&
2656		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
2657		     (alg_sel == OP_ALG_ALGSEL_DES)))
2658			continue;
2659
2660		/* Skip AES algorithms if not supported by device */
2661		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
2662			continue;
2663
2664		caam_skcipher_alg_init(t_alg);
2665
2666		err = crypto_register_skcipher(&t_alg->skcipher);
2667		if (err) {
2668			dev_warn(ctrldev, "%s alg registration failed\n",
2669				 t_alg->skcipher.base.cra_driver_name);
2670			continue;
2671		}
2672
2673		t_alg->registered = true;
2674		registered = true;
2675	}
2676
2677	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
2678		struct caam_aead_alg *t_alg = driver_aeads + i;
2679		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
2680				 OP_ALG_ALGSEL_MASK;
2681		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
2682				 OP_ALG_ALGSEL_MASK;
2683		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
2684
2685		/* Skip DES algorithms if not supported by device */
2686		if (!des_inst &&
2687		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
2688		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
2689			continue;
2690
2691		/* Skip AES algorithms if not supported by device */
2692		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
2693			continue;
2694
2695		/*
2696		 * Check support for AES algorithms not available
2697		 * on LP devices.
2698		 */
2699		if (aes_vid  == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
2700			continue;
2701
2702		/*
2703		 * Skip algorithms requiring message digests
2704		 * if MD or MD size is not supported by device.
2705		 */
2706		if (c2_alg_sel &&
2707		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
2708			continue;
2709
2710		caam_aead_alg_init(t_alg);
2711
2712		err = crypto_register_aead(&t_alg->aead);
2713		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->aead.base.cra_driver_name);
2716			continue;
2717		}
2718
2719		t_alg->registered = true;
2720		registered = true;
2721	}
2722
2723	if (registered)
2724		dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
2725
2726	return err;
2727}
2728