// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2019 NXP
 */

#include "compat.h"
#include "regs.h"
#include "caamalg_qi2.h"
#include "dpseci_cmd.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm2.h"
#include "key_gen.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include "dpseci-debugfs.h"
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <crypto/xts.h>
#include <asm/unaligned.h>

#define CAAM_CRA_PRIORITY	2000

/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)

/*
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This can be added by the dpaa2-eth driver. This would
 *       pose a problem for userspace application processing which cannot
 *       know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
 */
static struct kmem_cache *qi_cache;

struct caam_alg_entry {
	struct device *dev;
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/**
 * struct caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key: [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 * @xts_key_fallback: true if fallback tfm needs to be used due
 *		      to unsupported xts key lengths
 * @fallback: xts fallback tfm
 */
struct caam_ctx {
	struct caam_flc flc[NUM_OP];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t flc_dma[NUM_OP];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *dev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	bool xts_key_fallback;
	struct crypto_skcipher *fallback;
};

static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
				     dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
				   iova_addr;

	return phys_to_virt(phys_addr);
}
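
/*
 * Illustration only (hypothetical caller, not part of this translation
 * unit): given a frame descriptor received on a DPIO channel, the context
 * enqueued with it can be recovered like this, assuming the FD address
 * points at kernel memory:
 *
 *	struct caam_request *req_ctx;
 *
 *	req_ctx = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
 */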

/*
 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * @flags - flags that would be used for the equivalent kmalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{
	return kmem_cache_zalloc(qi_cache, flags);
}

/*
 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is being done, the call is a passthrough call to
 * kmem_cache_free(...)
 */
static inline void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}
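
/*
 * Typical alloc/free pairing on the hotpath (sketch of the pattern used by
 * the *_edesc_alloc() helpers below):
 *
 *	edesc = qi_cache_zalloc(GFP_DMA | flags);
 *	if (unlikely(!edesc))
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	qi_cache_free(edesc);	(on an error path or from the done callback)
 */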

static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
	switch (crypto_tfm_alg_type(areq->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return skcipher_request_ctx(skcipher_request_cast(areq));
	case CRYPTO_ALG_TYPE_AEAD:
		return aead_request_ctx(container_of(areq, struct aead_request,
						     base));
	case CRYPTO_ALG_TYPE_AHASH:
		return ahash_request_ctx(ahash_request_cast(areq));
	default:
		return ERR_PTR(-EINVAL);
	}
}

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);

	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}
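
/*
 * DMA direction convention used throughout this file: for out-of-place
 * requests, src is mapped DMA_TO_DEVICE and dst DMA_FROM_DEVICE; for
 * in-place requests (src == dst) a single DMA_BIDIRECTIONAL mapping is
 * used. caam_unmap() above mirrors exactly this at teardown.
 */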

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
						 DESC_QI_AEAD_ENC_LEN) +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;

	if (alg->caam.geniv)
		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
					  ivsize, ctx->authsize, is_rfc3686,
					  nonce, ctx1_iv_off, true,
					  priv->sec_attr.era);
	else
		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
				       ivsize, ctx->authsize, is_rfc3686, nonce,
				       ctx1_iv_off, true, priv->sec_attr.era);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
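
/*
 * Note on the desc_inline_query() calls above: with two data items, bit 0
 * of inl_mask covers data_len[0] (the split authentication key) and bit 1
 * covers data_len[1] (the encryption key). For example:
 *
 *	inl_mask == 0x3  =>  both keys fit inline in the shared descriptor
 *	inl_mask == 0x2  =>  only the encryption key is inlined; the
 *			     authentication key is referenced via key_dma
 */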

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->adata.keylen = keys.authkeylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);

	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

	ctx->cdata.keylen = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
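
/*
 * Resulting ctx->key layout after aead_setkey() (sketch):
 *
 *	| split authentication key (keylen_pad) | encryption key (enckeylen) |
 *	^ ctx->key                              ^ ctx->key + adata.keylen_pad
 *
 * aead_set_sh_desc() relies on this layout when deriving cdata.key_virt
 * and cdata.key_dma.
 */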

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto out;

	err = -EINVAL;
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto out;

	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
	      aead_setkey(aead, key, keylen);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_request *req_ctx = aead_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct dpaa2_sg_entry *sg_table;

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (unlikely(req->dst != req->src)) {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
						      DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(dev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(dev, "unable to map destination\n");
				dma_unmap_sg(dev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	} else {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *      pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *      overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *      pad input S/G, if needed
	 */
	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
				  1 + !!ivsize +
				  pad_sg_nents(mapped_src_nents));
	else
		qm_sg_nents = pad_sg_nents(qm_sg_nents);

	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_nents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_nents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, iv_dma)) {
			dev_err(dev, "unable to map IV\n");
			caam_unmap(dev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;

	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
	else
		edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
		dev_err(dev, "unable to map assoclen\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
	dpaa2_fl_set_len(in_fle, in_len);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1) {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
		} else {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
					  (1 + !!ivsize) * sizeof(*sg_table));
		}
	} else if (!mapped_dst_nents) {
		/*
		 * crypto engine requires the output entry to be present when
		 * "frame list" FD is used.
		 * Since engine does not support FMT=2'b11 (unused entry type),
		 * leaving out_fle zeroized is the best option.
		 */
		goto skip_out_fle;
	} else if (mapped_dst_nents == 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
				  sizeof(*sg_table));
	}

	dpaa2_fl_set_len(out_fle, out_len);

skip_out_fle:
	return edesc;
}
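
/*
 * Layout of the hardware S/G table built by aead_edesc_alloc() (sketch):
 *
 *	[0]	assoclen (4 bytes, mapped separately)
 *	[1]	IV, if ivsize != 0
 *	[...]	req->src entries (mapped_src_nents)
 *	[...]	req->dst entries, only when src != dst and the destination
 *		needs more than one entry
 */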

static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int chachapoly_setauthsize(struct crypto_aead *aead,
				  unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);

	if (authsize != POLY1305_DIGEST_SIZE)
		return -EINVAL;

	ctx->authsize = authsize;
	return chachapoly_set_sh_desc(aead);
}

static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	if (keylen != CHACHA_KEY_SIZE + saltlen)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.keylen = keylen - saltlen;

	return chachapoly_set_sh_desc(aead);
}
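
/*
 * Key material layout expected by chachapoly_setkey() (sketch):
 *
 *	| ChaCha20 key (CHACHA_KEY_SIZE) | salt (CHACHAPOLY_IV_SIZE - ivsize) |
 *
 * The salt stays in ctx->key right after the cipher key; cdata.keylen
 * counts only the cipher key.
 */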

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
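
/*
 * rem_bytes above is the descriptor space left for the key once the job
 * descriptor I/O section is accounted for. If the key does not fit, it is
 * referenced by DMA address instead of being inlined; the same pattern
 * repeats in the rfc4106/rfc4543 descriptor setup below.
 */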

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen);
	if (ret)
		return ret;
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4106_set_sh_desc(aead);
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4543_set_sh_desc(aead);
}

static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher),
			     struct caam_skcipher_alg, skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
				    const u8 *key, unsigned int keylen)
{
	if (keylen != CHACHA_KEY_SIZE)
		return -EINVAL;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	int err;

	err = xts_verify_key(skcipher, key, keylen);
	if (err) {
		dev_dbg(dev, "key size mismatch\n");
		return err;
	}

	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
		ctx->xts_key_fallback = true;

	if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
		if (err)
			return err;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
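
/*
 * Summary of the XTS fallback policy implemented above (for reference):
 *
 *	keylen == 2 x 128 or 2 x 256 bits -> HW path; the software fallback
 *					     is still prepared on era <= 8,
 *					     for IVs whose second half is
 *					     non-zero (see skcipher_encrypt)
 *	any other xts_verify_key()-valid  -> always handled by the fallback
 *	length				     tfm (xts_key_fallback == true)
 */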

static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_request *req_ctx = skcipher_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct dpaa2_sg_entry *sg_table;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->dst != req->src)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(dev, "unable to map destination\n");
			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);

	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, iv_dma)) {
		dev_err(dev, "unable to map IV\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);

	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);

	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);

	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);

	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);

	if (req->src == req->dst)
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
				  sizeof(*sg_table));
	else
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
				  sizeof(*sg_table));

	return edesc;
}
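
/*
 * Resulting HW S/G tables (sketch, matching the layout comment above):
 *
 *	input:  [IV] [src_0 .. src_n]
 *	output: [dst_0 .. dst_m] [IV]
 *
 * Both IV entries point at the same DMA buffer; when src == dst the input
 * table is reused and out_fle simply points one entry past the IV.
 */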

static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}

static void aead_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static void aead_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = aead_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
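
/*
 * Return-value convention for dpaa2_caam_enqueue() as handled above and in
 * the other request entry points: -EINPROGRESS means the request was
 * accepted and will be completed from the done callback; -EBUSY together
 * with CRYPTO_TFM_REQ_MAY_BACKLOG means it was backlogged; anything else
 * is a hard error, so the descriptor is unmapped and freed synchronously.
 */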

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = aead_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
}

static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
}
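
/*
 * Despite its name, xts_skcipher_ivsize() reports whether the second half
 * of the XTS IV (the upper 64 bits of the sector index) is non-zero, e.g.
 * for a 16-byte IV:
 *
 *	iv[8..15] == 0  -> false (HW can process the request on any era)
 *	iv[8..15] != 0  -> true  (era <= 8 must use the fallback tfm)
 */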

static int skcipher_encrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_encrypt(&caam_req->fallback_req);
	}

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = skcipher_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_decrypt(&caam_req->fallback_req);
	}

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = skcipher_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			 bool uses_dkp)
{
	dma_addr_t dma_addr;
	int i;

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->dev = caam->dev;
	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
					offsetof(struct caam_ctx, flc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
		return -ENOMEM;
	}

	for (i = 0; i < NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);

	return 0;
}
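
/*
 * The single mapping created in caam_cra_init() covers both the flow
 * contexts and the key, relying on their order in struct caam_ctx:
 *
 *	ctx->flc[0] .. ctx->flc[NUM_OP - 1] | ctx->key[]
 *	^ dma_addr                            ^ ctx->key_dma
 *
 * which is why the mapped size is offsetof(struct caam_ctx, flc_dma).
 */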
1598 
caam_cra_init_skcipher(struct crypto_skcipher *tfm)1599 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1600 {
1601 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1602 	struct caam_skcipher_alg *caam_alg =
1603 		container_of(alg, typeof(*caam_alg), skcipher);
1604 	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
1605 	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
1606 	int ret = 0;
1607 
1608 	if (alg_aai == OP_ALG_AAI_XTS) {
1609 		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
1610 		struct crypto_skcipher *fallback;
1611 
1612 		fallback = crypto_alloc_skcipher(tfm_name, 0,
1613 						 CRYPTO_ALG_NEED_FALLBACK);
1614 		if (IS_ERR(fallback)) {
1615 			dev_err(caam_alg->caam.dev,
1616 				"Failed to allocate %s fallback: %ld\n",
1617 				tfm_name, PTR_ERR(fallback));
1618 			return PTR_ERR(fallback);
1619 		}
1620 
1621 		ctx->fallback = fallback;
1622 		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) +
1623 					    crypto_skcipher_reqsize(fallback));
1624 	} else {
1625 		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
1626 	}
1627 
1628 	ret = caam_cra_init(ctx, &caam_alg->caam, false);
1629 	if (ret && ctx->fallback)
1630 		crypto_free_skcipher(ctx->fallback);
1631 
1632 	return ret;
1633 }
1634 
1635 static int caam_cra_init_aead(struct crypto_aead *tfm)
1636 {
1637 	struct aead_alg *alg = crypto_aead_alg(tfm);
1638 	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1639 						      aead);
1640 
1641 	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
1642 	return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
1643 			     !caam_alg->caam.nodkp);
1644 }
1645 
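/* Tear down the single flc[] + key DMA mapping created by caam_cra_init() */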
1646 static void caam_exit_common(struct caam_ctx *ctx)
1647 {
1648 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1649 			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
1650 			       DMA_ATTR_SKIP_CPU_SYNC);
1651 }
1652 
1653 static void caam_cra_exit(struct crypto_skcipher *tfm)
1654 {
1655 	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
1656 
1657 	if (ctx->fallback)
1658 		crypto_free_skcipher(ctx->fallback);
1659 	caam_exit_common(ctx);
1660 }
1661 
1662 static void caam_cra_exit_aead(struct crypto_aead *tfm)
1663 {
1664 	caam_exit_common(crypto_aead_ctx(tfm));
1665 }
1666 
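/*
 * skcipher algorithms backed by DPSECI; .caam.class1_alg_type selects
 * the CAAM class-1 (cipher) algorithm and mode used when constructing
 * the shared descriptors.
 */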
1667 static struct caam_skcipher_alg driver_algs[] = {
1668 	{
1669 		.skcipher = {
1670 			.base = {
1671 				.cra_name = "cbc(aes)",
1672 				.cra_driver_name = "cbc-aes-caam-qi2",
1673 				.cra_blocksize = AES_BLOCK_SIZE,
1674 			},
1675 			.setkey = aes_skcipher_setkey,
1676 			.encrypt = skcipher_encrypt,
1677 			.decrypt = skcipher_decrypt,
1678 			.min_keysize = AES_MIN_KEY_SIZE,
1679 			.max_keysize = AES_MAX_KEY_SIZE,
1680 			.ivsize = AES_BLOCK_SIZE,
1681 		},
1682 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1683 	},
1684 	{
1685 		.skcipher = {
1686 			.base = {
1687 				.cra_name = "cbc(des3_ede)",
1688 				.cra_driver_name = "cbc-3des-caam-qi2",
1689 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1690 			},
1691 			.setkey = des3_skcipher_setkey,
1692 			.encrypt = skcipher_encrypt,
1693 			.decrypt = skcipher_decrypt,
1694 			.min_keysize = DES3_EDE_KEY_SIZE,
1695 			.max_keysize = DES3_EDE_KEY_SIZE,
1696 			.ivsize = DES3_EDE_BLOCK_SIZE,
1697 		},
1698 		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1699 	},
1700 	{
1701 		.skcipher = {
1702 			.base = {
1703 				.cra_name = "cbc(des)",
1704 				.cra_driver_name = "cbc-des-caam-qi2",
1705 				.cra_blocksize = DES_BLOCK_SIZE,
1706 			},
1707 			.setkey = des_skcipher_setkey,
1708 			.encrypt = skcipher_encrypt,
1709 			.decrypt = skcipher_decrypt,
1710 			.min_keysize = DES_KEY_SIZE,
1711 			.max_keysize = DES_KEY_SIZE,
1712 			.ivsize = DES_BLOCK_SIZE,
1713 		},
1714 		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1715 	},
1716 	{
1717 		.skcipher = {
1718 			.base = {
1719 				.cra_name = "ctr(aes)",
1720 				.cra_driver_name = "ctr-aes-caam-qi2",
1721 				.cra_blocksize = 1,
1722 			},
1723 			.setkey = ctr_skcipher_setkey,
1724 			.encrypt = skcipher_encrypt,
1725 			.decrypt = skcipher_decrypt,
1726 			.min_keysize = AES_MIN_KEY_SIZE,
1727 			.max_keysize = AES_MAX_KEY_SIZE,
1728 			.ivsize = AES_BLOCK_SIZE,
1729 			.chunksize = AES_BLOCK_SIZE,
1730 		},
1731 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1732 					OP_ALG_AAI_CTR_MOD128,
1733 	},
1734 	{
1735 		.skcipher = {
1736 			.base = {
1737 				.cra_name = "rfc3686(ctr(aes))",
1738 				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1739 				.cra_blocksize = 1,
1740 			},
1741 			.setkey = rfc3686_skcipher_setkey,
1742 			.encrypt = skcipher_encrypt,
1743 			.decrypt = skcipher_decrypt,
1744 			.min_keysize = AES_MIN_KEY_SIZE +
1745 				       CTR_RFC3686_NONCE_SIZE,
1746 			.max_keysize = AES_MAX_KEY_SIZE +
1747 				       CTR_RFC3686_NONCE_SIZE,
1748 			.ivsize = CTR_RFC3686_IV_SIZE,
1749 			.chunksize = AES_BLOCK_SIZE,
1750 		},
1751 		.caam = {
1752 			.class1_alg_type = OP_ALG_ALGSEL_AES |
1753 					   OP_ALG_AAI_CTR_MOD128,
1754 			.rfc3686 = true,
1755 		},
1756 	},
1757 	{
1758 		.skcipher = {
1759 			.base = {
1760 				.cra_name = "xts(aes)",
1761 				.cra_driver_name = "xts-aes-caam-qi2",
1762 				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
1763 				.cra_blocksize = AES_BLOCK_SIZE,
1764 			},
1765 			.setkey = xts_skcipher_setkey,
1766 			.encrypt = skcipher_encrypt,
1767 			.decrypt = skcipher_decrypt,
1768 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
1769 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
1770 			.ivsize = AES_BLOCK_SIZE,
1771 		},
1772 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1773 	},
1774 	{
1775 		.skcipher = {
1776 			.base = {
1777 				.cra_name = "chacha20",
1778 				.cra_driver_name = "chacha20-caam-qi2",
1779 				.cra_blocksize = 1,
1780 			},
1781 			.setkey = chacha20_skcipher_setkey,
1782 			.encrypt = skcipher_encrypt,
1783 			.decrypt = skcipher_decrypt,
1784 			.min_keysize = CHACHA_KEY_SIZE,
1785 			.max_keysize = CHACHA_KEY_SIZE,
1786 			.ivsize = CHACHA_IV_SIZE,
1787 		},
1788 		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1789 	},
1790 };
1791 
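/*
 * AEAD algorithms backed by DPSECI. The .caam flags steer descriptor
 * construction: rfc3686 = CTR with an embedded nonce, geniv = the IV is
 * generated (echainiv/seqiv templates), nodkp = no Derived Key Protocol
 * split key is needed (single-key AEADs such as GCM and
 * ChaCha20-Poly1305).
 */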
1792 static struct caam_aead_alg driver_aeads[] = {
1793 	{
1794 		.aead = {
1795 			.base = {
1796 				.cra_name = "rfc4106(gcm(aes))",
1797 				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1798 				.cra_blocksize = 1,
1799 			},
1800 			.setkey = rfc4106_setkey,
1801 			.setauthsize = rfc4106_setauthsize,
1802 			.encrypt = ipsec_gcm_encrypt,
1803 			.decrypt = ipsec_gcm_decrypt,
1804 			.ivsize = 8,
1805 			.maxauthsize = AES_BLOCK_SIZE,
1806 		},
1807 		.caam = {
1808 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1809 			.nodkp = true,
1810 		},
1811 	},
1812 	{
1813 		.aead = {
1814 			.base = {
1815 				.cra_name = "rfc4543(gcm(aes))",
1816 				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1817 				.cra_blocksize = 1,
1818 			},
1819 			.setkey = rfc4543_setkey,
1820 			.setauthsize = rfc4543_setauthsize,
1821 			.encrypt = ipsec_gcm_encrypt,
1822 			.decrypt = ipsec_gcm_decrypt,
1823 			.ivsize = 8,
1824 			.maxauthsize = AES_BLOCK_SIZE,
1825 		},
1826 		.caam = {
1827 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1828 			.nodkp = true,
1829 		},
1830 	},
1831 	/* Galois Counter Mode */
1832 	{
1833 		.aead = {
1834 			.base = {
1835 				.cra_name = "gcm(aes)",
1836 				.cra_driver_name = "gcm-aes-caam-qi2",
1837 				.cra_blocksize = 1,
1838 			},
1839 			.setkey = gcm_setkey,
1840 			.setauthsize = gcm_setauthsize,
1841 			.encrypt = aead_encrypt,
1842 			.decrypt = aead_decrypt,
1843 			.ivsize = 12,
1844 			.maxauthsize = AES_BLOCK_SIZE,
1845 		},
1846 		.caam = {
1847 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1848 			.nodkp = true,
1849 		}
1850 	},
1851 	/* single-pass ipsec_esp descriptor */
1852 	{
1853 		.aead = {
1854 			.base = {
1855 				.cra_name = "authenc(hmac(md5),cbc(aes))",
1856 				.cra_driver_name = "authenc-hmac-md5-"
1857 						   "cbc-aes-caam-qi2",
1858 				.cra_blocksize = AES_BLOCK_SIZE,
1859 			},
1860 			.setkey = aead_setkey,
1861 			.setauthsize = aead_setauthsize,
1862 			.encrypt = aead_encrypt,
1863 			.decrypt = aead_decrypt,
1864 			.ivsize = AES_BLOCK_SIZE,
1865 			.maxauthsize = MD5_DIGEST_SIZE,
1866 		},
1867 		.caam = {
1868 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1869 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1870 					   OP_ALG_AAI_HMAC_PRECOMP,
1871 		}
1872 	},
1873 	{
1874 		.aead = {
1875 			.base = {
1876 				.cra_name = "echainiv(authenc(hmac(md5),"
1877 					    "cbc(aes)))",
1878 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
1879 						   "cbc-aes-caam-qi2",
1880 				.cra_blocksize = AES_BLOCK_SIZE,
1881 			},
1882 			.setkey = aead_setkey,
1883 			.setauthsize = aead_setauthsize,
1884 			.encrypt = aead_encrypt,
1885 			.decrypt = aead_decrypt,
1886 			.ivsize = AES_BLOCK_SIZE,
1887 			.maxauthsize = MD5_DIGEST_SIZE,
1888 		},
1889 		.caam = {
1890 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1891 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1892 					   OP_ALG_AAI_HMAC_PRECOMP,
1893 			.geniv = true,
1894 		}
1895 	},
1896 	{
1897 		.aead = {
1898 			.base = {
1899 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
1900 				.cra_driver_name = "authenc-hmac-sha1-"
1901 						   "cbc-aes-caam-qi2",
1902 				.cra_blocksize = AES_BLOCK_SIZE,
1903 			},
1904 			.setkey = aead_setkey,
1905 			.setauthsize = aead_setauthsize,
1906 			.encrypt = aead_encrypt,
1907 			.decrypt = aead_decrypt,
1908 			.ivsize = AES_BLOCK_SIZE,
1909 			.maxauthsize = SHA1_DIGEST_SIZE,
1910 		},
1911 		.caam = {
1912 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1913 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1914 					   OP_ALG_AAI_HMAC_PRECOMP,
1915 		}
1916 	},
1917 	{
1918 		.aead = {
1919 			.base = {
1920 				.cra_name = "echainiv(authenc(hmac(sha1),"
1921 					    "cbc(aes)))",
1922 				.cra_driver_name = "echainiv-authenc-"
1923 						   "hmac-sha1-cbc-aes-caam-qi2",
1924 				.cra_blocksize = AES_BLOCK_SIZE,
1925 			},
1926 			.setkey = aead_setkey,
1927 			.setauthsize = aead_setauthsize,
1928 			.encrypt = aead_encrypt,
1929 			.decrypt = aead_decrypt,
1930 			.ivsize = AES_BLOCK_SIZE,
1931 			.maxauthsize = SHA1_DIGEST_SIZE,
1932 		},
1933 		.caam = {
1934 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1935 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1936 					   OP_ALG_AAI_HMAC_PRECOMP,
1937 			.geniv = true,
1938 		},
1939 	},
1940 	{
1941 		.aead = {
1942 			.base = {
1943 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
1944 				.cra_driver_name = "authenc-hmac-sha224-"
1945 						   "cbc-aes-caam-qi2",
1946 				.cra_blocksize = AES_BLOCK_SIZE,
1947 			},
1948 			.setkey = aead_setkey,
1949 			.setauthsize = aead_setauthsize,
1950 			.encrypt = aead_encrypt,
1951 			.decrypt = aead_decrypt,
1952 			.ivsize = AES_BLOCK_SIZE,
1953 			.maxauthsize = SHA224_DIGEST_SIZE,
1954 		},
1955 		.caam = {
1956 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1957 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1958 					   OP_ALG_AAI_HMAC_PRECOMP,
1959 		}
1960 	},
1961 	{
1962 		.aead = {
1963 			.base = {
1964 				.cra_name = "echainiv(authenc(hmac(sha224),"
1965 					    "cbc(aes)))",
1966 				.cra_driver_name = "echainiv-authenc-"
1967 						   "hmac-sha224-cbc-aes-caam-qi2",
1968 				.cra_blocksize = AES_BLOCK_SIZE,
1969 			},
1970 			.setkey = aead_setkey,
1971 			.setauthsize = aead_setauthsize,
1972 			.encrypt = aead_encrypt,
1973 			.decrypt = aead_decrypt,
1974 			.ivsize = AES_BLOCK_SIZE,
1975 			.maxauthsize = SHA224_DIGEST_SIZE,
1976 		},
1977 		.caam = {
1978 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1979 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1980 					   OP_ALG_AAI_HMAC_PRECOMP,
1981 			.geniv = true,
1982 		}
1983 	},
1984 	{
1985 		.aead = {
1986 			.base = {
1987 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
1988 				.cra_driver_name = "authenc-hmac-sha256-"
1989 						   "cbc-aes-caam-qi2",
1990 				.cra_blocksize = AES_BLOCK_SIZE,
1991 			},
1992 			.setkey = aead_setkey,
1993 			.setauthsize = aead_setauthsize,
1994 			.encrypt = aead_encrypt,
1995 			.decrypt = aead_decrypt,
1996 			.ivsize = AES_BLOCK_SIZE,
1997 			.maxauthsize = SHA256_DIGEST_SIZE,
1998 		},
1999 		.caam = {
2000 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2001 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2002 					   OP_ALG_AAI_HMAC_PRECOMP,
2003 		}
2004 	},
2005 	{
2006 		.aead = {
2007 			.base = {
2008 				.cra_name = "echainiv(authenc(hmac(sha256),"
2009 					    "cbc(aes)))",
2010 				.cra_driver_name = "echainiv-authenc-"
2011 						   "hmac-sha256-cbc-aes-"
2012 						   "caam-qi2",
2013 				.cra_blocksize = AES_BLOCK_SIZE,
2014 			},
2015 			.setkey = aead_setkey,
2016 			.setauthsize = aead_setauthsize,
2017 			.encrypt = aead_encrypt,
2018 			.decrypt = aead_decrypt,
2019 			.ivsize = AES_BLOCK_SIZE,
2020 			.maxauthsize = SHA256_DIGEST_SIZE,
2021 		},
2022 		.caam = {
2023 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2024 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2025 					   OP_ALG_AAI_HMAC_PRECOMP,
2026 			.geniv = true,
2027 		}
2028 	},
2029 	{
2030 		.aead = {
2031 			.base = {
2032 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2033 				.cra_driver_name = "authenc-hmac-sha384-"
2034 						   "cbc-aes-caam-qi2",
2035 				.cra_blocksize = AES_BLOCK_SIZE,
2036 			},
2037 			.setkey = aead_setkey,
2038 			.setauthsize = aead_setauthsize,
2039 			.encrypt = aead_encrypt,
2040 			.decrypt = aead_decrypt,
2041 			.ivsize = AES_BLOCK_SIZE,
2042 			.maxauthsize = SHA384_DIGEST_SIZE,
2043 		},
2044 		.caam = {
2045 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2046 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2047 					   OP_ALG_AAI_HMAC_PRECOMP,
2048 		}
2049 	},
2050 	{
2051 		.aead = {
2052 			.base = {
2053 				.cra_name = "echainiv(authenc(hmac(sha384),"
2054 					    "cbc(aes)))",
2055 				.cra_driver_name = "echainiv-authenc-"
2056 						   "hmac-sha384-cbc-aes-"
2057 						   "caam-qi2",
2058 				.cra_blocksize = AES_BLOCK_SIZE,
2059 			},
2060 			.setkey = aead_setkey,
2061 			.setauthsize = aead_setauthsize,
2062 			.encrypt = aead_encrypt,
2063 			.decrypt = aead_decrypt,
2064 			.ivsize = AES_BLOCK_SIZE,
2065 			.maxauthsize = SHA384_DIGEST_SIZE,
2066 		},
2067 		.caam = {
2068 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2069 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2070 					   OP_ALG_AAI_HMAC_PRECOMP,
2071 			.geniv = true,
2072 		}
2073 	},
2074 	{
2075 		.aead = {
2076 			.base = {
2077 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2078 				.cra_driver_name = "authenc-hmac-sha512-"
2079 						   "cbc-aes-caam-qi2",
2080 				.cra_blocksize = AES_BLOCK_SIZE,
2081 			},
2082 			.setkey = aead_setkey,
2083 			.setauthsize = aead_setauthsize,
2084 			.encrypt = aead_encrypt,
2085 			.decrypt = aead_decrypt,
2086 			.ivsize = AES_BLOCK_SIZE,
2087 			.maxauthsize = SHA512_DIGEST_SIZE,
2088 		},
2089 		.caam = {
2090 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2091 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2092 					   OP_ALG_AAI_HMAC_PRECOMP,
2093 		}
2094 	},
2095 	{
2096 		.aead = {
2097 			.base = {
2098 				.cra_name = "echainiv(authenc(hmac(sha512),"
2099 					    "cbc(aes)))",
2100 				.cra_driver_name = "echainiv-authenc-"
2101 						   "hmac-sha512-cbc-aes-"
2102 						   "caam-qi2",
2103 				.cra_blocksize = AES_BLOCK_SIZE,
2104 			},
2105 			.setkey = aead_setkey,
2106 			.setauthsize = aead_setauthsize,
2107 			.encrypt = aead_encrypt,
2108 			.decrypt = aead_decrypt,
2109 			.ivsize = AES_BLOCK_SIZE,
2110 			.maxauthsize = SHA512_DIGEST_SIZE,
2111 		},
2112 		.caam = {
2113 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2114 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2115 					   OP_ALG_AAI_HMAC_PRECOMP,
2116 			.geniv = true,
2117 		}
2118 	},
2119 	{
2120 		.aead = {
2121 			.base = {
2122 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2123 				.cra_driver_name = "authenc-hmac-md5-"
2124 						   "cbc-des3_ede-caam-qi2",
2125 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2126 			},
2127 			.setkey = des3_aead_setkey,
2128 			.setauthsize = aead_setauthsize,
2129 			.encrypt = aead_encrypt,
2130 			.decrypt = aead_decrypt,
2131 			.ivsize = DES3_EDE_BLOCK_SIZE,
2132 			.maxauthsize = MD5_DIGEST_SIZE,
2133 		},
2134 		.caam = {
2135 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2136 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2137 					   OP_ALG_AAI_HMAC_PRECOMP,
2138 		}
2139 	},
2140 	{
2141 		.aead = {
2142 			.base = {
2143 				.cra_name = "echainiv(authenc(hmac(md5),"
2144 					    "cbc(des3_ede)))",
2145 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2146 						   "cbc-des3_ede-caam-qi2",
2147 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2148 			},
2149 			.setkey = des3_aead_setkey,
2150 			.setauthsize = aead_setauthsize,
2151 			.encrypt = aead_encrypt,
2152 			.decrypt = aead_decrypt,
2153 			.ivsize = DES3_EDE_BLOCK_SIZE,
2154 			.maxauthsize = MD5_DIGEST_SIZE,
2155 		},
2156 		.caam = {
2157 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2158 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2159 					   OP_ALG_AAI_HMAC_PRECOMP,
2160 			.geniv = true,
2161 		}
2162 	},
2163 	{
2164 		.aead = {
2165 			.base = {
2166 				.cra_name = "authenc(hmac(sha1),"
2167 					    "cbc(des3_ede))",
2168 				.cra_driver_name = "authenc-hmac-sha1-"
2169 						   "cbc-des3_ede-caam-qi2",
2170 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2171 			},
2172 			.setkey = des3_aead_setkey,
2173 			.setauthsize = aead_setauthsize,
2174 			.encrypt = aead_encrypt,
2175 			.decrypt = aead_decrypt,
2176 			.ivsize = DES3_EDE_BLOCK_SIZE,
2177 			.maxauthsize = SHA1_DIGEST_SIZE,
2178 		},
2179 		.caam = {
2180 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2181 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2182 					   OP_ALG_AAI_HMAC_PRECOMP,
2183 		},
2184 	},
2185 	{
2186 		.aead = {
2187 			.base = {
2188 				.cra_name = "echainiv(authenc(hmac(sha1),"
2189 					    "cbc(des3_ede)))",
2190 				.cra_driver_name = "echainiv-authenc-"
2191 						   "hmac-sha1-"
2192 						   "cbc-des3_ede-caam-qi2",
2193 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2194 			},
2195 			.setkey = des3_aead_setkey,
2196 			.setauthsize = aead_setauthsize,
2197 			.encrypt = aead_encrypt,
2198 			.decrypt = aead_decrypt,
2199 			.ivsize = DES3_EDE_BLOCK_SIZE,
2200 			.maxauthsize = SHA1_DIGEST_SIZE,
2201 		},
2202 		.caam = {
2203 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2204 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2205 					   OP_ALG_AAI_HMAC_PRECOMP,
2206 			.geniv = true,
2207 		}
2208 	},
2209 	{
2210 		.aead = {
2211 			.base = {
2212 				.cra_name = "authenc(hmac(sha224),"
2213 					    "cbc(des3_ede))",
2214 				.cra_driver_name = "authenc-hmac-sha224-"
2215 						   "cbc-des3_ede-caam-qi2",
2216 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2217 			},
2218 			.setkey = des3_aead_setkey,
2219 			.setauthsize = aead_setauthsize,
2220 			.encrypt = aead_encrypt,
2221 			.decrypt = aead_decrypt,
2222 			.ivsize = DES3_EDE_BLOCK_SIZE,
2223 			.maxauthsize = SHA224_DIGEST_SIZE,
2224 		},
2225 		.caam = {
2226 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2227 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2228 					   OP_ALG_AAI_HMAC_PRECOMP,
2229 		},
2230 	},
2231 	{
2232 		.aead = {
2233 			.base = {
2234 				.cra_name = "echainiv(authenc(hmac(sha224),"
2235 					    "cbc(des3_ede)))",
2236 				.cra_driver_name = "echainiv-authenc-"
2237 						   "hmac-sha224-"
2238 						   "cbc-des3_ede-caam-qi2",
2239 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2240 			},
2241 			.setkey = des3_aead_setkey,
2242 			.setauthsize = aead_setauthsize,
2243 			.encrypt = aead_encrypt,
2244 			.decrypt = aead_decrypt,
2245 			.ivsize = DES3_EDE_BLOCK_SIZE,
2246 			.maxauthsize = SHA224_DIGEST_SIZE,
2247 		},
2248 		.caam = {
2249 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2250 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2251 					   OP_ALG_AAI_HMAC_PRECOMP,
2252 			.geniv = true,
2253 		}
2254 	},
2255 	{
2256 		.aead = {
2257 			.base = {
2258 				.cra_name = "authenc(hmac(sha256),"
2259 					    "cbc(des3_ede))",
2260 				.cra_driver_name = "authenc-hmac-sha256-"
2261 						   "cbc-des3_ede-caam-qi2",
2262 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2263 			},
2264 			.setkey = des3_aead_setkey,
2265 			.setauthsize = aead_setauthsize,
2266 			.encrypt = aead_encrypt,
2267 			.decrypt = aead_decrypt,
2268 			.ivsize = DES3_EDE_BLOCK_SIZE,
2269 			.maxauthsize = SHA256_DIGEST_SIZE,
2270 		},
2271 		.caam = {
2272 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2273 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2274 					   OP_ALG_AAI_HMAC_PRECOMP,
2275 		},
2276 	},
2277 	{
2278 		.aead = {
2279 			.base = {
2280 				.cra_name = "echainiv(authenc(hmac(sha256),"
2281 					    "cbc(des3_ede)))",
2282 				.cra_driver_name = "echainiv-authenc-"
2283 						   "hmac-sha256-"
2284 						   "cbc-des3_ede-caam-qi2",
2285 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2286 			},
2287 			.setkey = des3_aead_setkey,
2288 			.setauthsize = aead_setauthsize,
2289 			.encrypt = aead_encrypt,
2290 			.decrypt = aead_decrypt,
2291 			.ivsize = DES3_EDE_BLOCK_SIZE,
2292 			.maxauthsize = SHA256_DIGEST_SIZE,
2293 		},
2294 		.caam = {
2295 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2296 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2297 					   OP_ALG_AAI_HMAC_PRECOMP,
2298 			.geniv = true,
2299 		}
2300 	},
2301 	{
2302 		.aead = {
2303 			.base = {
2304 				.cra_name = "authenc(hmac(sha384),"
2305 					    "cbc(des3_ede))",
2306 				.cra_driver_name = "authenc-hmac-sha384-"
2307 						   "cbc-des3_ede-caam-qi2",
2308 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2309 			},
2310 			.setkey = des3_aead_setkey,
2311 			.setauthsize = aead_setauthsize,
2312 			.encrypt = aead_encrypt,
2313 			.decrypt = aead_decrypt,
2314 			.ivsize = DES3_EDE_BLOCK_SIZE,
2315 			.maxauthsize = SHA384_DIGEST_SIZE,
2316 		},
2317 		.caam = {
2318 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2319 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2320 					   OP_ALG_AAI_HMAC_PRECOMP,
2321 		},
2322 	},
2323 	{
2324 		.aead = {
2325 			.base = {
2326 				.cra_name = "echainiv(authenc(hmac(sha384),"
2327 					    "cbc(des3_ede)))",
2328 				.cra_driver_name = "echainiv-authenc-"
2329 						   "hmac-sha384-"
2330 						   "cbc-des3_ede-caam-qi2",
2331 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2332 			},
2333 			.setkey = des3_aead_setkey,
2334 			.setauthsize = aead_setauthsize,
2335 			.encrypt = aead_encrypt,
2336 			.decrypt = aead_decrypt,
2337 			.ivsize = DES3_EDE_BLOCK_SIZE,
2338 			.maxauthsize = SHA384_DIGEST_SIZE,
2339 		},
2340 		.caam = {
2341 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2342 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2343 					   OP_ALG_AAI_HMAC_PRECOMP,
2344 			.geniv = true,
2345 		}
2346 	},
2347 	{
2348 		.aead = {
2349 			.base = {
2350 				.cra_name = "authenc(hmac(sha512),"
2351 					    "cbc(des3_ede))",
2352 				.cra_driver_name = "authenc-hmac-sha512-"
2353 						   "cbc-des3_ede-caam-qi2",
2354 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2355 			},
2356 			.setkey = des3_aead_setkey,
2357 			.setauthsize = aead_setauthsize,
2358 			.encrypt = aead_encrypt,
2359 			.decrypt = aead_decrypt,
2360 			.ivsize = DES3_EDE_BLOCK_SIZE,
2361 			.maxauthsize = SHA512_DIGEST_SIZE,
2362 		},
2363 		.caam = {
2364 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2365 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2366 					   OP_ALG_AAI_HMAC_PRECOMP,
2367 		},
2368 	},
2369 	{
2370 		.aead = {
2371 			.base = {
2372 				.cra_name = "echainiv(authenc(hmac(sha512),"
2373 					    "cbc(des3_ede)))",
2374 				.cra_driver_name = "echainiv-authenc-"
2375 						   "hmac-sha512-"
2376 						   "cbc-des3_ede-caam-qi2",
2377 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2378 			},
2379 			.setkey = des3_aead_setkey,
2380 			.setauthsize = aead_setauthsize,
2381 			.encrypt = aead_encrypt,
2382 			.decrypt = aead_decrypt,
2383 			.ivsize = DES3_EDE_BLOCK_SIZE,
2384 			.maxauthsize = SHA512_DIGEST_SIZE,
2385 		},
2386 		.caam = {
2387 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2388 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2389 					   OP_ALG_AAI_HMAC_PRECOMP,
2390 			.geniv = true,
2391 		}
2392 	},
2393 	{
2394 		.aead = {
2395 			.base = {
2396 				.cra_name = "authenc(hmac(md5),cbc(des))",
2397 				.cra_driver_name = "authenc-hmac-md5-"
2398 						   "cbc-des-caam-qi2",
2399 				.cra_blocksize = DES_BLOCK_SIZE,
2400 			},
2401 			.setkey = aead_setkey,
2402 			.setauthsize = aead_setauthsize,
2403 			.encrypt = aead_encrypt,
2404 			.decrypt = aead_decrypt,
2405 			.ivsize = DES_BLOCK_SIZE,
2406 			.maxauthsize = MD5_DIGEST_SIZE,
2407 		},
2408 		.caam = {
2409 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2410 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2411 					   OP_ALG_AAI_HMAC_PRECOMP,
2412 		},
2413 	},
2414 	{
2415 		.aead = {
2416 			.base = {
2417 				.cra_name = "echainiv(authenc(hmac(md5),"
2418 					    "cbc(des)))",
2419 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2420 						   "cbc-des-caam-qi2",
2421 				.cra_blocksize = DES_BLOCK_SIZE,
2422 			},
2423 			.setkey = aead_setkey,
2424 			.setauthsize = aead_setauthsize,
2425 			.encrypt = aead_encrypt,
2426 			.decrypt = aead_decrypt,
2427 			.ivsize = DES_BLOCK_SIZE,
2428 			.maxauthsize = MD5_DIGEST_SIZE,
2429 		},
2430 		.caam = {
2431 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2432 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2433 					   OP_ALG_AAI_HMAC_PRECOMP,
2434 			.geniv = true,
2435 		}
2436 	},
2437 	{
2438 		.aead = {
2439 			.base = {
2440 				.cra_name = "authenc(hmac(sha1),cbc(des))",
2441 				.cra_driver_name = "authenc-hmac-sha1-"
2442 						   "cbc-des-caam-qi2",
2443 				.cra_blocksize = DES_BLOCK_SIZE,
2444 			},
2445 			.setkey = aead_setkey,
2446 			.setauthsize = aead_setauthsize,
2447 			.encrypt = aead_encrypt,
2448 			.decrypt = aead_decrypt,
2449 			.ivsize = DES_BLOCK_SIZE,
2450 			.maxauthsize = SHA1_DIGEST_SIZE,
2451 		},
2452 		.caam = {
2453 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2454 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2455 					   OP_ALG_AAI_HMAC_PRECOMP,
2456 		},
2457 	},
2458 	{
2459 		.aead = {
2460 			.base = {
2461 				.cra_name = "echainiv(authenc(hmac(sha1),"
2462 					    "cbc(des)))",
2463 				.cra_driver_name = "echainiv-authenc-"
2464 						   "hmac-sha1-cbc-des-caam-qi2",
2465 				.cra_blocksize = DES_BLOCK_SIZE,
2466 			},
2467 			.setkey = aead_setkey,
2468 			.setauthsize = aead_setauthsize,
2469 			.encrypt = aead_encrypt,
2470 			.decrypt = aead_decrypt,
2471 			.ivsize = DES_BLOCK_SIZE,
2472 			.maxauthsize = SHA1_DIGEST_SIZE,
2473 		},
2474 		.caam = {
2475 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2476 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2477 					   OP_ALG_AAI_HMAC_PRECOMP,
2478 			.geniv = true,
2479 		}
2480 	},
2481 	{
2482 		.aead = {
2483 			.base = {
2484 				.cra_name = "authenc(hmac(sha224),cbc(des))",
2485 				.cra_driver_name = "authenc-hmac-sha224-"
2486 						   "cbc-des-caam-qi2",
2487 				.cra_blocksize = DES_BLOCK_SIZE,
2488 			},
2489 			.setkey = aead_setkey,
2490 			.setauthsize = aead_setauthsize,
2491 			.encrypt = aead_encrypt,
2492 			.decrypt = aead_decrypt,
2493 			.ivsize = DES_BLOCK_SIZE,
2494 			.maxauthsize = SHA224_DIGEST_SIZE,
2495 		},
2496 		.caam = {
2497 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2498 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2499 					   OP_ALG_AAI_HMAC_PRECOMP,
2500 		},
2501 	},
2502 	{
2503 		.aead = {
2504 			.base = {
2505 				.cra_name = "echainiv(authenc(hmac(sha224),"
2506 					    "cbc(des)))",
2507 				.cra_driver_name = "echainiv-authenc-"
2508 						   "hmac-sha224-cbc-des-"
2509 						   "caam-qi2",
2510 				.cra_blocksize = DES_BLOCK_SIZE,
2511 			},
2512 			.setkey = aead_setkey,
2513 			.setauthsize = aead_setauthsize,
2514 			.encrypt = aead_encrypt,
2515 			.decrypt = aead_decrypt,
2516 			.ivsize = DES_BLOCK_SIZE,
2517 			.maxauthsize = SHA224_DIGEST_SIZE,
2518 		},
2519 		.caam = {
2520 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2521 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2522 					   OP_ALG_AAI_HMAC_PRECOMP,
2523 			.geniv = true,
2524 		}
2525 	},
2526 	{
2527 		.aead = {
2528 			.base = {
2529 				.cra_name = "authenc(hmac(sha256),cbc(des))",
2530 				.cra_driver_name = "authenc-hmac-sha256-"
2531 						   "cbc-des-caam-qi2",
2532 				.cra_blocksize = DES_BLOCK_SIZE,
2533 			},
2534 			.setkey = aead_setkey,
2535 			.setauthsize = aead_setauthsize,
2536 			.encrypt = aead_encrypt,
2537 			.decrypt = aead_decrypt,
2538 			.ivsize = DES_BLOCK_SIZE,
2539 			.maxauthsize = SHA256_DIGEST_SIZE,
2540 		},
2541 		.caam = {
2542 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2543 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2544 					   OP_ALG_AAI_HMAC_PRECOMP,
2545 		},
2546 	},
2547 	{
2548 		.aead = {
2549 			.base = {
2550 				.cra_name = "echainiv(authenc(hmac(sha256),"
2551 					    "cbc(des)))",
2552 				.cra_driver_name = "echainiv-authenc-"
2553 						   "hmac-sha256-cbc-des-"
2554 						   "caam-qi2",
2555 				.cra_blocksize = DES_BLOCK_SIZE,
2556 			},
2557 			.setkey = aead_setkey,
2558 			.setauthsize = aead_setauthsize,
2559 			.encrypt = aead_encrypt,
2560 			.decrypt = aead_decrypt,
2561 			.ivsize = DES_BLOCK_SIZE,
2562 			.maxauthsize = SHA256_DIGEST_SIZE,
2563 		},
2564 		.caam = {
2565 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2566 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2567 					   OP_ALG_AAI_HMAC_PRECOMP,
2568 			.geniv = true,
2569 		},
2570 	},
2571 	{
2572 		.aead = {
2573 			.base = {
2574 				.cra_name = "authenc(hmac(sha384),cbc(des))",
2575 				.cra_driver_name = "authenc-hmac-sha384-"
2576 						   "cbc-des-caam-qi2",
2577 				.cra_blocksize = DES_BLOCK_SIZE,
2578 			},
2579 			.setkey = aead_setkey,
2580 			.setauthsize = aead_setauthsize,
2581 			.encrypt = aead_encrypt,
2582 			.decrypt = aead_decrypt,
2583 			.ivsize = DES_BLOCK_SIZE,
2584 			.maxauthsize = SHA384_DIGEST_SIZE,
2585 		},
2586 		.caam = {
2587 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2588 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2589 					   OP_ALG_AAI_HMAC_PRECOMP,
2590 		},
2591 	},
2592 	{
2593 		.aead = {
2594 			.base = {
2595 				.cra_name = "echainiv(authenc(hmac(sha384),"
2596 					    "cbc(des)))",
2597 				.cra_driver_name = "echainiv-authenc-"
2598 						   "hmac-sha384-cbc-des-"
2599 						   "caam-qi2",
2600 				.cra_blocksize = DES_BLOCK_SIZE,
2601 			},
2602 			.setkey = aead_setkey,
2603 			.setauthsize = aead_setauthsize,
2604 			.encrypt = aead_encrypt,
2605 			.decrypt = aead_decrypt,
2606 			.ivsize = DES_BLOCK_SIZE,
2607 			.maxauthsize = SHA384_DIGEST_SIZE,
2608 		},
2609 		.caam = {
2610 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2611 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2612 					   OP_ALG_AAI_HMAC_PRECOMP,
2613 			.geniv = true,
2614 		}
2615 	},
2616 	{
2617 		.aead = {
2618 			.base = {
2619 				.cra_name = "authenc(hmac(sha512),cbc(des))",
2620 				.cra_driver_name = "authenc-hmac-sha512-"
2621 						   "cbc-des-caam-qi2",
2622 				.cra_blocksize = DES_BLOCK_SIZE,
2623 			},
2624 			.setkey = aead_setkey,
2625 			.setauthsize = aead_setauthsize,
2626 			.encrypt = aead_encrypt,
2627 			.decrypt = aead_decrypt,
2628 			.ivsize = DES_BLOCK_SIZE,
2629 			.maxauthsize = SHA512_DIGEST_SIZE,
2630 		},
2631 		.caam = {
2632 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2633 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2634 					   OP_ALG_AAI_HMAC_PRECOMP,
2635 		}
2636 	},
2637 	{
2638 		.aead = {
2639 			.base = {
2640 				.cra_name = "echainiv(authenc(hmac(sha512),"
2641 					    "cbc(des)))",
2642 				.cra_driver_name = "echainiv-authenc-"
2643 						   "hmac-sha512-cbc-des-"
2644 						   "caam-qi2",
2645 				.cra_blocksize = DES_BLOCK_SIZE,
2646 			},
2647 			.setkey = aead_setkey,
2648 			.setauthsize = aead_setauthsize,
2649 			.encrypt = aead_encrypt,
2650 			.decrypt = aead_decrypt,
2651 			.ivsize = DES_BLOCK_SIZE,
2652 			.maxauthsize = SHA512_DIGEST_SIZE,
2653 		},
2654 		.caam = {
2655 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2656 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2657 					   OP_ALG_AAI_HMAC_PRECOMP,
2658 			.geniv = true,
2659 		}
2660 	},
2661 	{
2662 		.aead = {
2663 			.base = {
2664 				.cra_name = "authenc(hmac(md5),"
2665 					    "rfc3686(ctr(aes)))",
2666 				.cra_driver_name = "authenc-hmac-md5-"
2667 						   "rfc3686-ctr-aes-caam-qi2",
2668 				.cra_blocksize = 1,
2669 			},
2670 			.setkey = aead_setkey,
2671 			.setauthsize = aead_setauthsize,
2672 			.encrypt = aead_encrypt,
2673 			.decrypt = aead_decrypt,
2674 			.ivsize = CTR_RFC3686_IV_SIZE,
2675 			.maxauthsize = MD5_DIGEST_SIZE,
2676 		},
2677 		.caam = {
2678 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2679 					   OP_ALG_AAI_CTR_MOD128,
2680 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2681 					   OP_ALG_AAI_HMAC_PRECOMP,
2682 			.rfc3686 = true,
2683 		},
2684 	},
2685 	{
2686 		.aead = {
2687 			.base = {
2688 				.cra_name = "seqiv(authenc("
2689 					    "hmac(md5),rfc3686(ctr(aes))))",
2690 				.cra_driver_name = "seqiv-authenc-hmac-md5-"
2691 						   "rfc3686-ctr-aes-caam-qi2",
2692 				.cra_blocksize = 1,
2693 			},
2694 			.setkey = aead_setkey,
2695 			.setauthsize = aead_setauthsize,
2696 			.encrypt = aead_encrypt,
2697 			.decrypt = aead_decrypt,
2698 			.ivsize = CTR_RFC3686_IV_SIZE,
2699 			.maxauthsize = MD5_DIGEST_SIZE,
2700 		},
2701 		.caam = {
2702 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2703 					   OP_ALG_AAI_CTR_MOD128,
2704 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2705 					   OP_ALG_AAI_HMAC_PRECOMP,
2706 			.rfc3686 = true,
2707 			.geniv = true,
2708 		},
2709 	},
2710 	{
2711 		.aead = {
2712 			.base = {
2713 				.cra_name = "authenc(hmac(sha1),"
2714 					    "rfc3686(ctr(aes)))",
2715 				.cra_driver_name = "authenc-hmac-sha1-"
2716 						   "rfc3686-ctr-aes-caam-qi2",
2717 				.cra_blocksize = 1,
2718 			},
2719 			.setkey = aead_setkey,
2720 			.setauthsize = aead_setauthsize,
2721 			.encrypt = aead_encrypt,
2722 			.decrypt = aead_decrypt,
2723 			.ivsize = CTR_RFC3686_IV_SIZE,
2724 			.maxauthsize = SHA1_DIGEST_SIZE,
2725 		},
2726 		.caam = {
2727 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2728 					   OP_ALG_AAI_CTR_MOD128,
2729 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2730 					   OP_ALG_AAI_HMAC_PRECOMP,
2731 			.rfc3686 = true,
2732 		},
2733 	},
2734 	{
2735 		.aead = {
2736 			.base = {
2737 				.cra_name = "seqiv(authenc("
2738 					    "hmac(sha1),rfc3686(ctr(aes))))",
2739 				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
2740 						   "rfc3686-ctr-aes-caam-qi2",
2741 				.cra_blocksize = 1,
2742 			},
2743 			.setkey = aead_setkey,
2744 			.setauthsize = aead_setauthsize,
2745 			.encrypt = aead_encrypt,
2746 			.decrypt = aead_decrypt,
2747 			.ivsize = CTR_RFC3686_IV_SIZE,
2748 			.maxauthsize = SHA1_DIGEST_SIZE,
2749 		},
2750 		.caam = {
2751 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2752 					   OP_ALG_AAI_CTR_MOD128,
2753 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2754 					   OP_ALG_AAI_HMAC_PRECOMP,
2755 			.rfc3686 = true,
2756 			.geniv = true,
2757 		},
2758 	},
2759 	{
2760 		.aead = {
2761 			.base = {
2762 				.cra_name = "authenc(hmac(sha224),"
2763 					    "rfc3686(ctr(aes)))",
2764 				.cra_driver_name = "authenc-hmac-sha224-"
2765 						   "rfc3686-ctr-aes-caam-qi2",
2766 				.cra_blocksize = 1,
2767 			},
2768 			.setkey = aead_setkey,
2769 			.setauthsize = aead_setauthsize,
2770 			.encrypt = aead_encrypt,
2771 			.decrypt = aead_decrypt,
2772 			.ivsize = CTR_RFC3686_IV_SIZE,
2773 			.maxauthsize = SHA224_DIGEST_SIZE,
2774 		},
2775 		.caam = {
2776 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2777 					   OP_ALG_AAI_CTR_MOD128,
2778 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2779 					   OP_ALG_AAI_HMAC_PRECOMP,
2780 			.rfc3686 = true,
2781 		},
2782 	},
2783 	{
2784 		.aead = {
2785 			.base = {
2786 				.cra_name = "seqiv(authenc("
2787 					    "hmac(sha224),rfc3686(ctr(aes))))",
2788 				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
2789 						   "rfc3686-ctr-aes-caam-qi2",
2790 				.cra_blocksize = 1,
2791 			},
2792 			.setkey = aead_setkey,
2793 			.setauthsize = aead_setauthsize,
2794 			.encrypt = aead_encrypt,
2795 			.decrypt = aead_decrypt,
2796 			.ivsize = CTR_RFC3686_IV_SIZE,
2797 			.maxauthsize = SHA224_DIGEST_SIZE,
2798 		},
2799 		.caam = {
2800 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2801 					   OP_ALG_AAI_CTR_MOD128,
2802 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2803 					   OP_ALG_AAI_HMAC_PRECOMP,
2804 			.rfc3686 = true,
2805 			.geniv = true,
2806 		},
2807 	},
2808 	{
2809 		.aead = {
2810 			.base = {
2811 				.cra_name = "authenc(hmac(sha256),"
2812 					    "rfc3686(ctr(aes)))",
2813 				.cra_driver_name = "authenc-hmac-sha256-"
2814 						   "rfc3686-ctr-aes-caam-qi2",
2815 				.cra_blocksize = 1,
2816 			},
2817 			.setkey = aead_setkey,
2818 			.setauthsize = aead_setauthsize,
2819 			.encrypt = aead_encrypt,
2820 			.decrypt = aead_decrypt,
2821 			.ivsize = CTR_RFC3686_IV_SIZE,
2822 			.maxauthsize = SHA256_DIGEST_SIZE,
2823 		},
2824 		.caam = {
2825 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2826 					   OP_ALG_AAI_CTR_MOD128,
2827 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2828 					   OP_ALG_AAI_HMAC_PRECOMP,
2829 			.rfc3686 = true,
2830 		},
2831 	},
2832 	{
2833 		.aead = {
2834 			.base = {
2835 				.cra_name = "seqiv(authenc(hmac(sha256),"
2836 					    "rfc3686(ctr(aes))))",
2837 				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
2838 						   "rfc3686-ctr-aes-caam-qi2",
2839 				.cra_blocksize = 1,
2840 			},
2841 			.setkey = aead_setkey,
2842 			.setauthsize = aead_setauthsize,
2843 			.encrypt = aead_encrypt,
2844 			.decrypt = aead_decrypt,
2845 			.ivsize = CTR_RFC3686_IV_SIZE,
2846 			.maxauthsize = SHA256_DIGEST_SIZE,
2847 		},
2848 		.caam = {
2849 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2850 					   OP_ALG_AAI_CTR_MOD128,
2851 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2852 					   OP_ALG_AAI_HMAC_PRECOMP,
2853 			.rfc3686 = true,
2854 			.geniv = true,
2855 		},
2856 	},
2857 	{
2858 		.aead = {
2859 			.base = {
2860 				.cra_name = "authenc(hmac(sha384),"
2861 					    "rfc3686(ctr(aes)))",
2862 				.cra_driver_name = "authenc-hmac-sha384-"
2863 						   "rfc3686-ctr-aes-caam-qi2",
2864 				.cra_blocksize = 1,
2865 			},
2866 			.setkey = aead_setkey,
2867 			.setauthsize = aead_setauthsize,
2868 			.encrypt = aead_encrypt,
2869 			.decrypt = aead_decrypt,
2870 			.ivsize = CTR_RFC3686_IV_SIZE,
2871 			.maxauthsize = SHA384_DIGEST_SIZE,
2872 		},
2873 		.caam = {
2874 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2875 					   OP_ALG_AAI_CTR_MOD128,
2876 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2877 					   OP_ALG_AAI_HMAC_PRECOMP,
2878 			.rfc3686 = true,
2879 		},
2880 	},
2881 	{
2882 		.aead = {
2883 			.base = {
2884 				.cra_name = "seqiv(authenc(hmac(sha384),"
2885 					    "rfc3686(ctr(aes))))",
2886 				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
2887 						   "rfc3686-ctr-aes-caam-qi2",
2888 				.cra_blocksize = 1,
2889 			},
2890 			.setkey = aead_setkey,
2891 			.setauthsize = aead_setauthsize,
2892 			.encrypt = aead_encrypt,
2893 			.decrypt = aead_decrypt,
2894 			.ivsize = CTR_RFC3686_IV_SIZE,
2895 			.maxauthsize = SHA384_DIGEST_SIZE,
2896 		},
2897 		.caam = {
2898 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2899 					   OP_ALG_AAI_CTR_MOD128,
2900 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2901 					   OP_ALG_AAI_HMAC_PRECOMP,
2902 			.rfc3686 = true,
2903 			.geniv = true,
2904 		},
2905 	},
2906 	{
2907 		.aead = {
2908 			.base = {
2909 				.cra_name = "rfc7539(chacha20,poly1305)",
2910 				.cra_driver_name = "rfc7539-chacha20-poly1305-"
2911 						   "caam-qi2",
2912 				.cra_blocksize = 1,
2913 			},
2914 			.setkey = chachapoly_setkey,
2915 			.setauthsize = chachapoly_setauthsize,
2916 			.encrypt = aead_encrypt,
2917 			.decrypt = aead_decrypt,
2918 			.ivsize = CHACHAPOLY_IV_SIZE,
2919 			.maxauthsize = POLY1305_DIGEST_SIZE,
2920 		},
2921 		.caam = {
2922 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2923 					   OP_ALG_AAI_AEAD,
2924 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2925 					   OP_ALG_AAI_AEAD,
2926 			.nodkp = true,
2927 		},
2928 	},
2929 	{
2930 		.aead = {
2931 			.base = {
2932 				.cra_name = "rfc7539esp(chacha20,poly1305)",
2933 				.cra_driver_name = "rfc7539esp-chacha20-"
2934 						   "poly1305-caam-qi2",
2935 				.cra_blocksize = 1,
2936 			},
2937 			.setkey = chachapoly_setkey,
2938 			.setauthsize = chachapoly_setauthsize,
2939 			.encrypt = aead_encrypt,
2940 			.decrypt = aead_decrypt,
2941 			.ivsize = 8,
2942 			.maxauthsize = POLY1305_DIGEST_SIZE,
2943 		},
2944 		.caam = {
2945 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2946 					   OP_ALG_AAI_AEAD,
2947 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2948 					   OP_ALG_AAI_AEAD,
2949 			.nodkp = true,
2950 		},
2951 	},
2952 	{
2953 		.aead = {
2954 			.base = {
2955 				.cra_name = "authenc(hmac(sha512),"
2956 					    "rfc3686(ctr(aes)))",
2957 				.cra_driver_name = "authenc-hmac-sha512-"
2958 						   "rfc3686-ctr-aes-caam-qi2",
2959 				.cra_blocksize = 1,
2960 			},
2961 			.setkey = aead_setkey,
2962 			.setauthsize = aead_setauthsize,
2963 			.encrypt = aead_encrypt,
2964 			.decrypt = aead_decrypt,
2965 			.ivsize = CTR_RFC3686_IV_SIZE,
2966 			.maxauthsize = SHA512_DIGEST_SIZE,
2967 		},
2968 		.caam = {
2969 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2970 					   OP_ALG_AAI_CTR_MOD128,
2971 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2972 					   OP_ALG_AAI_HMAC_PRECOMP,
2973 			.rfc3686 = true,
2974 		},
2975 	},
2976 	{
2977 		.aead = {
2978 			.base = {
2979 				.cra_name = "seqiv(authenc(hmac(sha512),"
2980 					    "rfc3686(ctr(aes))))",
2981 				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
2982 						   "rfc3686-ctr-aes-caam-qi2",
2983 				.cra_blocksize = 1,
2984 			},
2985 			.setkey = aead_setkey,
2986 			.setauthsize = aead_setauthsize,
2987 			.encrypt = aead_encrypt,
2988 			.decrypt = aead_decrypt,
2989 			.ivsize = CTR_RFC3686_IV_SIZE,
2990 			.maxauthsize = SHA512_DIGEST_SIZE,
2991 		},
2992 		.caam = {
2993 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2994 					   OP_ALG_AAI_CTR_MOD128,
2995 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2996 					   OP_ALG_AAI_HMAC_PRECOMP,
2997 			.rfc3686 = true,
2998 			.geniv = true,
2999 		},
3000 	},
3001 };
3002 
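/*
 * Fill in the Crypto API boilerplate shared by every entry in the
 * tables above: module owner, priority, context size, flags and the
 * init/exit hooks. cra_flags is OR-ed for skciphers so the XTS entry
 * keeps its CRYPTO_ALG_NEED_FALLBACK bit.
 */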
3003 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
3004 {
3005 	struct skcipher_alg *alg = &t_alg->skcipher;
3006 
3007 	alg->base.cra_module = THIS_MODULE;
3008 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
3009 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3010 	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3011 			      CRYPTO_ALG_KERN_DRIVER_ONLY);
3012 
3013 	alg->init = caam_cra_init_skcipher;
3014 	alg->exit = caam_cra_exit;
3015 }
3016 
3017 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3018 {
3019 	struct aead_alg *alg = &t_alg->aead;
3020 
3021 	alg->base.cra_module = THIS_MODULE;
3022 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
3023 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3024 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3025 			      CRYPTO_ALG_KERN_DRIVER_ONLY;
3026 
3027 	alg->init = caam_cra_init_aead;
3028 	alg->exit = caam_cra_exit_aead;
3029 }
3030 
3031 /* max hash key is max split key size */
3032 #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
3033 
3034 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
3035 
3036 /* caam context sizes for hashes: running digest + 8 */
3037 #define HASH_MSG_LEN			8
3038 #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
3039 
3040 enum hash_optype {
3041 	UPDATE = 0,
3042 	UPDATE_FIRST,
3043 	FINALIZE,
3044 	DIGEST,
3045 	HASH_NUM_OP
3046 };
3047 
3048 /**
3049  * struct caam_hash_ctx - ahash per-session context
3050  * @flc: Flow Contexts array
3051  * @key: authentication key
3052  * @flc_dma: I/O virtual addresses of the Flow Contexts
3053  * @dev: dpseci device
3054  * @ctx_len: size of Context Register
3055  * @adata: hashing algorithm details
3056  */
3057 struct caam_hash_ctx {
3058 	struct caam_flc flc[HASH_NUM_OP];
3059 	u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3060 	dma_addr_t flc_dma[HASH_NUM_OP];
3061 	struct device *dev;
3062 	int ctx_len;
3063 	struct alginfo adata;
3064 };
3065 
3066 /* ahash state */
3067 struct caam_hash_state {
3068 	struct caam_request caam_req;
3069 	dma_addr_t buf_dma;
3070 	dma_addr_t ctx_dma;
3071 	int ctx_dma_len;
3072 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3073 	int buflen;
3074 	int next_buflen;
3075 	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
3076 	int (*update)(struct ahash_request *req);
3077 	int (*final)(struct ahash_request *req);
3078 	int (*finup)(struct ahash_request *req);
3079 };
3080 
3081 struct caam_export_state {
3082 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
3083 	u8 caam_ctx[MAX_CTX_LEN];
3084 	int buflen;
3085 	int (*update)(struct ahash_request *req);
3086 	int (*final)(struct ahash_request *req);
3087 	int (*finup)(struct ahash_request *req);
3088 };
3089 
3090 /* Map current buffer in state (if length > 0) and put it in link table */
3091 static inline int buf_map_to_qm_sg(struct device *dev,
3092 				   struct dpaa2_sg_entry *qm_sg,
3093 				   struct caam_hash_state *state)
3094 {
3095 	int buflen = state->buflen;
3096 
3097 	if (!buflen)
3098 		return 0;
3099 
3100 	state->buf_dma = dma_map_single(dev, state->buf, buflen,
3101 					DMA_TO_DEVICE);
3102 	if (dma_mapping_error(dev, state->buf_dma)) {
3103 		dev_err(dev, "unable to map buf\n");
3104 		state->buf_dma = 0;
3105 		return -ENOMEM;
3106 	}
3107 
3108 	dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3109 
3110 	return 0;
3111 }
3112 
3113 /* Map state->caam_ctx, and add it to link table */
3114 static inline int ctx_map_to_qm_sg(struct device *dev,
3115 				   struct caam_hash_state *state, int ctx_len,
3116 				   struct dpaa2_sg_entry *qm_sg, u32 flag)
3117 {
3118 	state->ctx_dma_len = ctx_len;
3119 	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3120 	if (dma_mapping_error(dev, state->ctx_dma)) {
3121 		dev_err(dev, "unable to map ctx\n");
3122 		state->ctx_dma = 0;
3123 		return -ENOMEM;
3124 	}
3125 
3126 	dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3127 
3128 	return 0;
3129 }
3130 
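/*
 * Build the four ahash shared descriptors (update, update_first,
 * finalize, digest) into their Flow Contexts and sync each one to the
 * device. The boolean passed to cnstr_shdsc_ahash() tells it whether
 * the running context is imported (update/finalize) or the operation
 * starts from scratch (update_first/digest).
 */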
3131 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
3132 {
3133 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3134 	int digestsize = crypto_ahash_digestsize(ahash);
3135 	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
3136 	struct caam_flc *flc;
3137 	u32 *desc;
3138 
3139 	/* ahash_update shared descriptor */
3140 	flc = &ctx->flc[UPDATE];
3141 	desc = flc->sh_desc;
3142 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3143 			  ctx->ctx_len, true, priv->sec_attr.era);
3144 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3145 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3146 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3147 	print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3148 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3149 			     1);
3150 
3151 	/* ahash_update_first shared descriptor */
3152 	flc = &ctx->flc[UPDATE_FIRST];
3153 	desc = flc->sh_desc;
3154 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3155 			  ctx->ctx_len, false, priv->sec_attr.era);
3156 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3157 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3158 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3159 	print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3160 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3161 			     1);
3162 
3163 	/* ahash_final shared descriptor */
3164 	flc = &ctx->flc[FINALIZE];
3165 	desc = flc->sh_desc;
3166 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3167 			  ctx->ctx_len, true, priv->sec_attr.era);
3168 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3169 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3170 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3171 	print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3172 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3173 			     1);
3174 
3175 	/* ahash_digest shared descriptor */
3176 	flc = &ctx->flc[DIGEST];
3177 	desc = flc->sh_desc;
3178 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3179 			  ctx->ctx_len, false, priv->sec_attr.era);
3180 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3181 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3182 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3183 	print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3184 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3185 			     1);
3186 
3187 	return 0;
3188 }
3189 
3190 struct split_key_sh_result {
3191 	struct completion completion;
3192 	int err;
3193 	struct device *dev;
3194 };
3195 
3196 static void split_key_sh_done(void *cbk_ctx, u32 err)
3197 {
3198 	struct split_key_sh_result *res = cbk_ctx;
3199 
3200 	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3201 
3202 	res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3203 	complete(&res->completion);
3204 }
3205 
3206 /* Digest the key in place if it is longer than the hash block size */
3207 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3208 			   u32 digestsize)
3209 {
3210 	struct caam_request *req_ctx;
3211 	u32 *desc;
3212 	struct split_key_sh_result result;
3213 	dma_addr_t key_dma;
3214 	struct caam_flc *flc;
3215 	dma_addr_t flc_dma;
3216 	int ret = -ENOMEM;
3217 	struct dpaa2_fl_entry *in_fle, *out_fle;
3218 
3219 	req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3220 	if (!req_ctx)
3221 		return -ENOMEM;
3222 
3223 	in_fle = &req_ctx->fd_flt[1];
3224 	out_fle = &req_ctx->fd_flt[0];
3225 
3226 	flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3227 	if (!flc)
3228 		goto err_flc;
3229 
3230 	key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3231 	if (dma_mapping_error(ctx->dev, key_dma)) {
3232 		dev_err(ctx->dev, "unable to map key memory\n");
3233 		goto err_key_dma;
3234 	}
3235 
3236 	desc = flc->sh_desc;
3237 
3238 	init_sh_desc(desc, 0);
3239 
3240 	/* descriptor to perform unkeyed hash on key_in */
3241 	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3242 			 OP_ALG_AS_INITFINAL);
3243 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3244 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3245 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3246 			 LDST_SRCDST_BYTE_CONTEXT);
3247 
3248 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3249 	flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3250 				 desc_bytes(desc), DMA_TO_DEVICE);
3251 	if (dma_mapping_error(ctx->dev, flc_dma)) {
3252 		dev_err(ctx->dev, "unable to map shared descriptor\n");
3253 		goto err_flc_dma;
3254 	}
3255 
3256 	dpaa2_fl_set_final(in_fle, true);
3257 	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3258 	dpaa2_fl_set_addr(in_fle, key_dma);
3259 	dpaa2_fl_set_len(in_fle, *keylen);
3260 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3261 	dpaa2_fl_set_addr(out_fle, key_dma);
3262 	dpaa2_fl_set_len(out_fle, digestsize);
3263 
3264 	print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3265 			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3266 	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3267 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3268 			     1);
3269 
3270 	result.err = 0;
3271 	init_completion(&result.completion);
3272 	result.dev = ctx->dev;
3273 
3274 	req_ctx->flc = flc;
3275 	req_ctx->flc_dma = flc_dma;
3276 	req_ctx->cbk = split_key_sh_done;
3277 	req_ctx->ctx = &result;
3278 
3279 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3280 	if (ret == -EINPROGRESS) {
3281 		/* in progress */
3282 		wait_for_completion(&result.completion);
3283 		ret = result.err;
3284 		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3285 				     DUMP_PREFIX_ADDRESS, 16, 4, key,
3286 				     digestsize, 1);
3287 	}
3288 
3289 	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3290 			 DMA_TO_DEVICE);
3291 err_flc_dma:
3292 	dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3293 err_key_dma:
3294 	kfree(flc);
3295 err_flc:
3296 	kfree(req_ctx);
3297 
3298 	*keylen = digestsize;
3299 
3300 	return ret;
3301 }
3302 
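/*
 * ahash_setkey() prepares an HMAC key: keys longer than the block size
 * are first digested in place, the padded split-key length is computed,
 * and the shared descriptors are rebuilt around the new key material.
 */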
3303 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3304 			unsigned int keylen)
3305 {
3306 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3307 	unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3308 	unsigned int digestsize = crypto_ahash_digestsize(ahash);
3309 	int ret;
3310 	u8 *hashed_key = NULL;
3311 
3312 	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3313 
3314 	if (keylen > blocksize) {
3315 		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3316 		if (!hashed_key)
3317 			return -ENOMEM;
3318 		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3319 		if (ret)
3320 			goto bad_free_key;
3321 		key = hashed_key;
3322 	}
3323 
3324 	ctx->adata.keylen = keylen;
3325 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3326 					      OP_ALG_ALGSEL_MASK);
3327 	if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3328 		goto bad_free_key;
3329 
3330 	ctx->adata.key_virt = key;
3331 	ctx->adata.key_inline = true;
3332 
3333 	/*
3334 	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
3335 	 * in invalid opcodes (last bytes of user key) in the resulting
3336 	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
3337 	 * addresses are needed.
3338 	 */
3339 	if (keylen > ctx->adata.keylen_pad) {
3340 		memcpy(ctx->key, key, keylen);
3341 		dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
3342 					   ctx->adata.keylen_pad,
3343 					   DMA_TO_DEVICE);
3344 	}
3345 
3346 	ret = ahash_set_sh_desc(ahash);
3347 	kfree(hashed_key);
3348 	return ret;
3349 bad_free_key:
3350 	kfree(hashed_key);
3351 	return -EINVAL;
3352 }
3353 
3354 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3355 			       struct ahash_request *req)
3356 {
3357 	struct caam_hash_state *state = ahash_request_ctx(req);
3358 
3359 	if (edesc->src_nents)
3360 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3361 
3362 	if (edesc->qm_sg_bytes)
3363 		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3364 				 DMA_TO_DEVICE);
3365 
3366 	if (state->buf_dma) {
3367 		dma_unmap_single(dev, state->buf_dma, state->buflen,
3368 				 DMA_TO_DEVICE);
3369 		state->buf_dma = 0;
3370 	}
3371 }
3372 
3373 static inline void ahash_unmap_ctx(struct device *dev,
3374 				   struct ahash_edesc *edesc,
3375 				   struct ahash_request *req, u32 flag)
3376 {
3377 	struct caam_hash_state *state = ahash_request_ctx(req);
3378 
3379 	if (state->ctx_dma) {
3380 		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3381 		state->ctx_dma = 0;
3382 	}
3383 	ahash_unmap(dev, edesc, req);
3384 }
3385 
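/*
 * Completion callback for digest/final requests: copy the digest out of
 * the CAAM context buffer into req->result before completing.
 */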
3386 static void ahash_done(void *cbk_ctx, u32 status)
3387 {
3388 	struct crypto_async_request *areq = cbk_ctx;
3389 	struct ahash_request *req = ahash_request_cast(areq);
3390 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3391 	struct caam_hash_state *state = ahash_request_ctx(req);
3392 	struct ahash_edesc *edesc = state->caam_req.edesc;
3393 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3394 	int digestsize = crypto_ahash_digestsize(ahash);
3395 	int ecode = 0;
3396 
3397 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3398 
3399 	if (unlikely(status))
3400 		ecode = caam_qi2_strstatus(ctx->dev, status);
3401 
3402 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3403 	memcpy(req->result, state->caam_ctx, digestsize);
3404 	qi_cache_free(edesc);
3405 
3406 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3407 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3408 			     ctx->ctx_len, 1);
3409 
3410 	req->base.complete(&req->base, ecode);
3411 }
3412 
static void ahash_done_bi(void *cbk_ctx, u32 status)
3414 {
3415 	struct crypto_async_request *areq = cbk_ctx;
3416 	struct ahash_request *req = ahash_request_cast(areq);
3417 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3418 	struct caam_hash_state *state = ahash_request_ctx(req);
3419 	struct ahash_edesc *edesc = state->caam_req.edesc;
3420 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3421 	int ecode = 0;
3422 
3423 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3424 
3425 	if (unlikely(status))
3426 		ecode = caam_qi2_strstatus(ctx->dev, status);
3427 
3428 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3429 	qi_cache_free(edesc);
3430 
3431 	scatterwalk_map_and_copy(state->buf, req->src,
3432 				 req->nbytes - state->next_buflen,
3433 				 state->next_buflen, 0);
3434 	state->buflen = state->next_buflen;
3435 
3436 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3437 			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3438 			     state->buflen, 1);
3439 
3440 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3441 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3442 			     ctx->ctx_len, 1);
3443 	if (req->result)
3444 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3445 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3446 				     crypto_ahash_digestsize(ahash), 1);
3447 
3448 	req->base.complete(&req->base, ecode);
3449 }
3450 
static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3452 {
3453 	struct crypto_async_request *areq = cbk_ctx;
3454 	struct ahash_request *req = ahash_request_cast(areq);
3455 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3456 	struct caam_hash_state *state = ahash_request_ctx(req);
3457 	struct ahash_edesc *edesc = state->caam_req.edesc;
3458 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3459 	int digestsize = crypto_ahash_digestsize(ahash);
3460 	int ecode = 0;
3461 
3462 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3463 
3464 	if (unlikely(status))
3465 		ecode = caam_qi2_strstatus(ctx->dev, status);
3466 
3467 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3468 	memcpy(req->result, state->caam_ctx, digestsize);
3469 	qi_cache_free(edesc);
3470 
3471 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3472 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3473 			     ctx->ctx_len, 1);
3474 
3475 	req->base.complete(&req->base, ecode);
3476 }
3477 
static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3479 {
3480 	struct crypto_async_request *areq = cbk_ctx;
3481 	struct ahash_request *req = ahash_request_cast(areq);
3482 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3483 	struct caam_hash_state *state = ahash_request_ctx(req);
3484 	struct ahash_edesc *edesc = state->caam_req.edesc;
3485 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3486 	int ecode = 0;
3487 
3488 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3489 
3490 	if (unlikely(status))
3491 		ecode = caam_qi2_strstatus(ctx->dev, status);
3492 
3493 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3494 	qi_cache_free(edesc);
3495 
3496 	scatterwalk_map_and_copy(state->buf, req->src,
3497 				 req->nbytes - state->next_buflen,
3498 				 state->next_buflen, 0);
3499 	state->buflen = state->next_buflen;
3500 
3501 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3502 			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3503 			     state->buflen, 1);
3504 
3505 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3506 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3507 			     ctx->ctx_len, 1);
3508 	if (req->result)
3509 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3510 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3511 				     crypto_ahash_digestsize(ahash), 1);
3512 
3513 	req->base.complete(&req->base, ecode);
3514 }
3515 
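/*
 * ahash_update_ctx - hash another chunk of data, with the running
 * context travelling through the engine.
 *
 * Only a whole number of blocks is sent to hardware; the remainder
 * (in_len % blocksize) is kept in state->buf for the next call. The
 * input frame list entry points to a QM S/G table laid out as:
 *
 *	[0]   running context (ctx->ctx_len bytes)
 *	[1]   previously buffered bytes, if any
 *	[...] mapped source scatterlist entries
 */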
static int ahash_update_ctx(struct ahash_request *req)
3517 {
3518 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3519 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3520 	struct caam_hash_state *state = ahash_request_ctx(req);
3521 	struct caam_request *req_ctx = &state->caam_req;
3522 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3523 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3524 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3525 		      GFP_KERNEL : GFP_ATOMIC;
3526 	u8 *buf = state->buf;
3527 	int *buflen = &state->buflen;
3528 	int *next_buflen = &state->next_buflen;
3529 	int in_len = *buflen + req->nbytes, to_hash;
3530 	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3531 	struct ahash_edesc *edesc;
3532 	int ret = 0;
3533 
3534 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3535 	to_hash = in_len - *next_buflen;
3536 
3537 	if (to_hash) {
3538 		struct dpaa2_sg_entry *sg_table;
3539 		int src_len = req->nbytes - *next_buflen;
3540 
3541 		src_nents = sg_nents_for_len(req->src, src_len);
3542 		if (src_nents < 0) {
3543 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3544 			return src_nents;
3545 		}
3546 
3547 		if (src_nents) {
3548 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3549 						  DMA_TO_DEVICE);
3550 			if (!mapped_nents) {
3551 				dev_err(ctx->dev, "unable to DMA map source\n");
3552 				return -ENOMEM;
3553 			}
3554 		} else {
3555 			mapped_nents = 0;
3556 		}
3557 
3558 		/* allocate space for base edesc and link tables */
3559 		edesc = qi_cache_zalloc(GFP_DMA | flags);
3560 		if (!edesc) {
3561 			dma_unmap_sg(ctx->dev, req->src, src_nents,
3562 				     DMA_TO_DEVICE);
3563 			return -ENOMEM;
3564 		}
3565 
3566 		edesc->src_nents = src_nents;
3567 		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3568 		qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3569 			      sizeof(*sg_table);
3570 		sg_table = &edesc->sgt[0];
3571 
3572 		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3573 				       DMA_BIDIRECTIONAL);
3574 		if (ret)
3575 			goto unmap_ctx;
3576 
3577 		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3578 		if (ret)
3579 			goto unmap_ctx;
3580 
3581 		if (mapped_nents) {
3582 			sg_to_qm_sg_last(req->src, src_len,
3583 					 sg_table + qm_sg_src_index, 0);
3584 		} else {
3585 			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3586 					   true);
3587 		}
3588 
3589 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3590 						  qm_sg_bytes, DMA_TO_DEVICE);
3591 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3592 			dev_err(ctx->dev, "unable to map S/G table\n");
3593 			ret = -ENOMEM;
3594 			goto unmap_ctx;
3595 		}
3596 		edesc->qm_sg_bytes = qm_sg_bytes;
3597 
3598 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3599 		dpaa2_fl_set_final(in_fle, true);
3600 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3601 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3602 		dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3603 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3604 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3605 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3606 
3607 		req_ctx->flc = &ctx->flc[UPDATE];
3608 		req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3609 		req_ctx->cbk = ahash_done_bi;
3610 		req_ctx->ctx = &req->base;
3611 		req_ctx->edesc = edesc;
3612 
3613 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3614 		if (ret != -EINPROGRESS &&
3615 		    !(ret == -EBUSY &&
3616 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3617 			goto unmap_ctx;
3618 	} else if (*next_buflen) {
3619 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3620 					 req->nbytes, 0);
3621 		*buflen = *next_buflen;
3622 
3623 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3624 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
3625 				     *buflen, 1);
3626 	}
3627 
3628 	return ret;
3629 unmap_ctx:
3630 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3631 	qi_cache_free(edesc);
3632 	return ret;
3633 }
3634 
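/*
 * ahash_final_ctx - feed the running context plus any buffered bytes
 * through the FINALIZE flow context; the digest lands in the context
 * buffer and is copied to req->result by ahash_done_ctx_src().
 */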
static int ahash_final_ctx(struct ahash_request *req)
3636 {
3637 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3638 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3639 	struct caam_hash_state *state = ahash_request_ctx(req);
3640 	struct caam_request *req_ctx = &state->caam_req;
3641 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3642 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3643 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3644 		      GFP_KERNEL : GFP_ATOMIC;
3645 	int buflen = state->buflen;
3646 	int qm_sg_bytes;
3647 	int digestsize = crypto_ahash_digestsize(ahash);
3648 	struct ahash_edesc *edesc;
3649 	struct dpaa2_sg_entry *sg_table;
3650 	int ret;
3651 
3652 	/* allocate space for base edesc and link tables */
3653 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3654 	if (!edesc)
3655 		return -ENOMEM;
3656 
3657 	qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3658 	sg_table = &edesc->sgt[0];
3659 
3660 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3661 			       DMA_BIDIRECTIONAL);
3662 	if (ret)
3663 		goto unmap_ctx;
3664 
3665 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3666 	if (ret)
3667 		goto unmap_ctx;
3668 
3669 	dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
3670 
3671 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3672 					  DMA_TO_DEVICE);
3673 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3674 		dev_err(ctx->dev, "unable to map S/G table\n");
3675 		ret = -ENOMEM;
3676 		goto unmap_ctx;
3677 	}
3678 	edesc->qm_sg_bytes = qm_sg_bytes;
3679 
3680 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3681 	dpaa2_fl_set_final(in_fle, true);
3682 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3683 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3684 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3685 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3686 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3687 	dpaa2_fl_set_len(out_fle, digestsize);
3688 
3689 	req_ctx->flc = &ctx->flc[FINALIZE];
3690 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3691 	req_ctx->cbk = ahash_done_ctx_src;
3692 	req_ctx->ctx = &req->base;
3693 	req_ctx->edesc = edesc;
3694 
3695 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3696 	if (ret == -EINPROGRESS ||
3697 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3698 		return ret;
3699 
3700 unmap_ctx:
3701 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3702 	qi_cache_free(edesc);
3703 	return ret;
3704 }
3705 
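/*
 * ahash_finup_ctx - like ahash_final_ctx(), but with a last piece of
 * fresh data from req->src chained into the S/G table after the
 * running context and the buffered bytes.
 */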
static int ahash_finup_ctx(struct ahash_request *req)
3707 {
3708 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3709 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3710 	struct caam_hash_state *state = ahash_request_ctx(req);
3711 	struct caam_request *req_ctx = &state->caam_req;
3712 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3713 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3714 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3715 		      GFP_KERNEL : GFP_ATOMIC;
3716 	int buflen = state->buflen;
3717 	int qm_sg_bytes, qm_sg_src_index;
3718 	int src_nents, mapped_nents;
3719 	int digestsize = crypto_ahash_digestsize(ahash);
3720 	struct ahash_edesc *edesc;
3721 	struct dpaa2_sg_entry *sg_table;
3722 	int ret;
3723 
3724 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3725 	if (src_nents < 0) {
3726 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3727 		return src_nents;
3728 	}
3729 
3730 	if (src_nents) {
3731 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3732 					  DMA_TO_DEVICE);
3733 		if (!mapped_nents) {
3734 			dev_err(ctx->dev, "unable to DMA map source\n");
3735 			return -ENOMEM;
3736 		}
3737 	} else {
3738 		mapped_nents = 0;
3739 	}
3740 
3741 	/* allocate space for base edesc and link tables */
3742 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3743 	if (!edesc) {
3744 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3745 		return -ENOMEM;
3746 	}
3747 
3748 	edesc->src_nents = src_nents;
3749 	qm_sg_src_index = 1 + (buflen ? 1 : 0);
3750 	qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3751 		      sizeof(*sg_table);
3752 	sg_table = &edesc->sgt[0];
3753 
3754 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3755 			       DMA_BIDIRECTIONAL);
3756 	if (ret)
3757 		goto unmap_ctx;
3758 
3759 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3760 	if (ret)
3761 		goto unmap_ctx;
3762 
3763 	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
3764 
3765 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3766 					  DMA_TO_DEVICE);
3767 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3768 		dev_err(ctx->dev, "unable to map S/G table\n");
3769 		ret = -ENOMEM;
3770 		goto unmap_ctx;
3771 	}
3772 	edesc->qm_sg_bytes = qm_sg_bytes;
3773 
3774 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3775 	dpaa2_fl_set_final(in_fle, true);
3776 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3777 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3778 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3779 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3780 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3781 	dpaa2_fl_set_len(out_fle, digestsize);
3782 
3783 	req_ctx->flc = &ctx->flc[FINALIZE];
3784 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3785 	req_ctx->cbk = ahash_done_ctx_src;
3786 	req_ctx->ctx = &req->base;
3787 	req_ctx->edesc = edesc;
3788 
3789 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3790 	if (ret == -EINPROGRESS ||
3791 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3792 		return ret;
3793 
3794 unmap_ctx:
3795 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3796 	qi_cache_free(edesc);
3797 	return ret;
3798 }
3799 
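/*
 * ahash_digest - one-shot hash of req->src: a single mapped segment is
 * passed directly as the input frame list entry, while multiple
 * segments go through a QM S/G table.
 */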
static int ahash_digest(struct ahash_request *req)
3801 {
3802 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3803 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3804 	struct caam_hash_state *state = ahash_request_ctx(req);
3805 	struct caam_request *req_ctx = &state->caam_req;
3806 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3807 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3808 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3809 		      GFP_KERNEL : GFP_ATOMIC;
3810 	int digestsize = crypto_ahash_digestsize(ahash);
3811 	int src_nents, mapped_nents;
3812 	struct ahash_edesc *edesc;
3813 	int ret = -ENOMEM;
3814 
3815 	state->buf_dma = 0;
3816 
3817 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3818 	if (src_nents < 0) {
3819 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3820 		return src_nents;
3821 	}
3822 
3823 	if (src_nents) {
3824 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3825 					  DMA_TO_DEVICE);
3826 		if (!mapped_nents) {
3827 			dev_err(ctx->dev, "unable to map source for DMA\n");
3828 			return ret;
3829 		}
3830 	} else {
3831 		mapped_nents = 0;
3832 	}
3833 
3834 	/* allocate space for base edesc and link tables */
3835 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3836 	if (!edesc) {
3837 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3838 		return ret;
3839 	}
3840 
3841 	edesc->src_nents = src_nents;
3842 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3843 
3844 	if (mapped_nents > 1) {
3845 		int qm_sg_bytes;
3846 		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3847 
3848 		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3849 		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3850 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3851 						  qm_sg_bytes, DMA_TO_DEVICE);
3852 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3853 			dev_err(ctx->dev, "unable to map S/G table\n");
3854 			goto unmap;
3855 		}
3856 		edesc->qm_sg_bytes = qm_sg_bytes;
3857 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3858 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3859 	} else {
3860 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3861 		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3862 	}
3863 
3864 	state->ctx_dma_len = digestsize;
3865 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3866 					DMA_FROM_DEVICE);
3867 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3868 		dev_err(ctx->dev, "unable to map ctx\n");
3869 		state->ctx_dma = 0;
3870 		goto unmap;
3871 	}
3872 
3873 	dpaa2_fl_set_final(in_fle, true);
3874 	dpaa2_fl_set_len(in_fle, req->nbytes);
3875 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3876 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3877 	dpaa2_fl_set_len(out_fle, digestsize);
3878 
3879 	req_ctx->flc = &ctx->flc[DIGEST];
3880 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3881 	req_ctx->cbk = ahash_done;
3882 	req_ctx->ctx = &req->base;
3883 	req_ctx->edesc = edesc;
3884 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3885 	if (ret == -EINPROGRESS ||
3886 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3887 		return ret;
3888 
3889 unmap:
3890 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3891 	qi_cache_free(edesc);
3892 	return ret;
3893 }
3894 
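/*
 * ahash_final_no_ctx - finalize before anything was sent to the engine:
 * run a plain DIGEST job over whatever sits in state->buf (possibly
 * nothing at all, in which case in_fle is left zeroized).
 */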
static int ahash_final_no_ctx(struct ahash_request *req)
3896 {
3897 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3898 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3899 	struct caam_hash_state *state = ahash_request_ctx(req);
3900 	struct caam_request *req_ctx = &state->caam_req;
3901 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3902 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3903 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3904 		      GFP_KERNEL : GFP_ATOMIC;
3905 	u8 *buf = state->buf;
3906 	int buflen = state->buflen;
3907 	int digestsize = crypto_ahash_digestsize(ahash);
3908 	struct ahash_edesc *edesc;
3909 	int ret = -ENOMEM;
3910 
3911 	/* allocate space for base edesc and link tables */
3912 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3913 	if (!edesc)
3914 		return ret;
3915 
3916 	if (buflen) {
3917 		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3918 						DMA_TO_DEVICE);
3919 		if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3920 			dev_err(ctx->dev, "unable to map src\n");
3921 			goto unmap;
3922 		}
3923 	}
3924 
3925 	state->ctx_dma_len = digestsize;
3926 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3927 					DMA_FROM_DEVICE);
3928 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3929 		dev_err(ctx->dev, "unable to map ctx\n");
3930 		state->ctx_dma = 0;
3931 		goto unmap;
3932 	}
3933 
3934 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3935 	dpaa2_fl_set_final(in_fle, true);
	/*
	 * The crypto engine requires the input entry to be present when
	 * the "frame list" FD format is used. Since the engine does not
	 * support FMT=2'b11 (unused entry type), leaving in_fle zeroized
	 * (except for the "Final" flag) is the best option.
	 */
3942 	if (buflen) {
3943 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3944 		dpaa2_fl_set_addr(in_fle, state->buf_dma);
3945 		dpaa2_fl_set_len(in_fle, buflen);
3946 	}
3947 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3948 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3949 	dpaa2_fl_set_len(out_fle, digestsize);
3950 
3951 	req_ctx->flc = &ctx->flc[DIGEST];
3952 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3953 	req_ctx->cbk = ahash_done;
3954 	req_ctx->ctx = &req->base;
3955 	req_ctx->edesc = edesc;
3956 
3957 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3958 	if (ret == -EINPROGRESS ||
3959 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3960 		return ret;
3961 
3962 unmap:
3963 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3964 	qi_cache_free(edesc);
3965 	return ret;
3966 }
3967 
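/*
 * ahash_update_no_ctx - update with only buffered data so far. Once a
 * full block is available, an UPDATE_FIRST job creates the running
 * context and state->update/finup/final switch to the _ctx variants.
 */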
static int ahash_update_no_ctx(struct ahash_request *req)
3969 {
3970 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3971 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3972 	struct caam_hash_state *state = ahash_request_ctx(req);
3973 	struct caam_request *req_ctx = &state->caam_req;
3974 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3975 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3976 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3977 		      GFP_KERNEL : GFP_ATOMIC;
3978 	u8 *buf = state->buf;
3979 	int *buflen = &state->buflen;
3980 	int *next_buflen = &state->next_buflen;
3981 	int in_len = *buflen + req->nbytes, to_hash;
3982 	int qm_sg_bytes, src_nents, mapped_nents;
3983 	struct ahash_edesc *edesc;
3984 	int ret = 0;
3985 
3986 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3987 	to_hash = in_len - *next_buflen;
3988 
3989 	if (to_hash) {
3990 		struct dpaa2_sg_entry *sg_table;
3991 		int src_len = req->nbytes - *next_buflen;
3992 
3993 		src_nents = sg_nents_for_len(req->src, src_len);
3994 		if (src_nents < 0) {
3995 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3996 			return src_nents;
3997 		}
3998 
3999 		if (src_nents) {
4000 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4001 						  DMA_TO_DEVICE);
4002 			if (!mapped_nents) {
4003 				dev_err(ctx->dev, "unable to DMA map source\n");
4004 				return -ENOMEM;
4005 			}
4006 		} else {
4007 			mapped_nents = 0;
4008 		}
4009 
4010 		/* allocate space for base edesc and link tables */
4011 		edesc = qi_cache_zalloc(GFP_DMA | flags);
4012 		if (!edesc) {
4013 			dma_unmap_sg(ctx->dev, req->src, src_nents,
4014 				     DMA_TO_DEVICE);
4015 			return -ENOMEM;
4016 		}
4017 
4018 		edesc->src_nents = src_nents;
4019 		qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
4020 			      sizeof(*sg_table);
4021 		sg_table = &edesc->sgt[0];
4022 
4023 		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4024 		if (ret)
4025 			goto unmap_ctx;
4026 
4027 		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
4028 
4029 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4030 						  qm_sg_bytes, DMA_TO_DEVICE);
4031 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4032 			dev_err(ctx->dev, "unable to map S/G table\n");
4033 			ret = -ENOMEM;
4034 			goto unmap_ctx;
4035 		}
4036 		edesc->qm_sg_bytes = qm_sg_bytes;
4037 
4038 		state->ctx_dma_len = ctx->ctx_len;
4039 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4040 						ctx->ctx_len, DMA_FROM_DEVICE);
4041 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4042 			dev_err(ctx->dev, "unable to map ctx\n");
4043 			state->ctx_dma = 0;
4044 			ret = -ENOMEM;
4045 			goto unmap_ctx;
4046 		}
4047 
4048 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4049 		dpaa2_fl_set_final(in_fle, true);
4050 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4051 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4052 		dpaa2_fl_set_len(in_fle, to_hash);
4053 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4054 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4055 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4056 
4057 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4058 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4059 		req_ctx->cbk = ahash_done_ctx_dst;
4060 		req_ctx->ctx = &req->base;
4061 		req_ctx->edesc = edesc;
4062 
4063 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4064 		if (ret != -EINPROGRESS &&
4065 		    !(ret == -EBUSY &&
4066 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4067 			goto unmap_ctx;
4068 
4069 		state->update = ahash_update_ctx;
4070 		state->finup = ahash_finup_ctx;
4071 		state->final = ahash_final_ctx;
4072 	} else if (*next_buflen) {
4073 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
4074 					 req->nbytes, 0);
4075 		*buflen = *next_buflen;
4076 
4077 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4078 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
4079 				     *buflen, 1);
4080 	}
4081 
4082 	return ret;
4083 unmap_ctx:
4084 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4085 	qi_cache_free(edesc);
4086 	return ret;
4087 }
4088 
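/*
 * ahash_finup_no_ctx - finup before any data reached the engine: hash
 * the buffered bytes and req->src in a single DIGEST job.
 */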
static int ahash_finup_no_ctx(struct ahash_request *req)
4090 {
4091 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4092 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4093 	struct caam_hash_state *state = ahash_request_ctx(req);
4094 	struct caam_request *req_ctx = &state->caam_req;
4095 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4096 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4097 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4098 		      GFP_KERNEL : GFP_ATOMIC;
4099 	int buflen = state->buflen;
4100 	int qm_sg_bytes, src_nents, mapped_nents;
4101 	int digestsize = crypto_ahash_digestsize(ahash);
4102 	struct ahash_edesc *edesc;
4103 	struct dpaa2_sg_entry *sg_table;
4104 	int ret = -ENOMEM;
4105 
4106 	src_nents = sg_nents_for_len(req->src, req->nbytes);
4107 	if (src_nents < 0) {
4108 		dev_err(ctx->dev, "Invalid number of src SG.\n");
4109 		return src_nents;
4110 	}
4111 
4112 	if (src_nents) {
4113 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4114 					  DMA_TO_DEVICE);
4115 		if (!mapped_nents) {
4116 			dev_err(ctx->dev, "unable to DMA map source\n");
4117 			return ret;
4118 		}
4119 	} else {
4120 		mapped_nents = 0;
4121 	}
4122 
4123 	/* allocate space for base edesc and link tables */
4124 	edesc = qi_cache_zalloc(GFP_DMA | flags);
4125 	if (!edesc) {
4126 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
4127 		return ret;
4128 	}
4129 
4130 	edesc->src_nents = src_nents;
4131 	qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
4132 	sg_table = &edesc->sgt[0];
4133 
4134 	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4135 	if (ret)
4136 		goto unmap;
4137 
4138 	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
4139 
4140 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4141 					  DMA_TO_DEVICE);
4142 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4143 		dev_err(ctx->dev, "unable to map S/G table\n");
4144 		ret = -ENOMEM;
4145 		goto unmap;
4146 	}
4147 	edesc->qm_sg_bytes = qm_sg_bytes;
4148 
4149 	state->ctx_dma_len = digestsize;
4150 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
4151 					DMA_FROM_DEVICE);
4152 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4153 		dev_err(ctx->dev, "unable to map ctx\n");
4154 		state->ctx_dma = 0;
4155 		ret = -ENOMEM;
4156 		goto unmap;
4157 	}
4158 
4159 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4160 	dpaa2_fl_set_final(in_fle, true);
4161 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4162 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4163 	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4164 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4165 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4166 	dpaa2_fl_set_len(out_fle, digestsize);
4167 
4168 	req_ctx->flc = &ctx->flc[DIGEST];
4169 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4170 	req_ctx->cbk = ahash_done;
4171 	req_ctx->ctx = &req->base;
4172 	req_ctx->edesc = edesc;
4173 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4174 	if (ret != -EINPROGRESS &&
4175 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4176 		goto unmap;
4177 
4178 	return ret;
4179 unmap:
4180 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4181 	qi_cache_free(edesc);
4182 	return ret;
4183 }
4184 
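/*
 * ahash_update_first - very first update after init. Same UPDATE_FIRST
 * job as ahash_update_no_ctx(), except no bytes are buffered yet, so a
 * single mapped segment can bypass the S/G table.
 */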
static int ahash_update_first(struct ahash_request *req)
4186 {
4187 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4188 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4189 	struct caam_hash_state *state = ahash_request_ctx(req);
4190 	struct caam_request *req_ctx = &state->caam_req;
4191 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4192 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4193 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4194 		      GFP_KERNEL : GFP_ATOMIC;
4195 	u8 *buf = state->buf;
4196 	int *buflen = &state->buflen;
4197 	int *next_buflen = &state->next_buflen;
4198 	int to_hash;
4199 	int src_nents, mapped_nents;
4200 	struct ahash_edesc *edesc;
4201 	int ret = 0;
4202 
4203 	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4204 				      1);
4205 	to_hash = req->nbytes - *next_buflen;
4206 
4207 	if (to_hash) {
4208 		struct dpaa2_sg_entry *sg_table;
4209 		int src_len = req->nbytes - *next_buflen;
4210 
4211 		src_nents = sg_nents_for_len(req->src, src_len);
4212 		if (src_nents < 0) {
4213 			dev_err(ctx->dev, "Invalid number of src SG.\n");
4214 			return src_nents;
4215 		}
4216 
4217 		if (src_nents) {
4218 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4219 						  DMA_TO_DEVICE);
4220 			if (!mapped_nents) {
4221 				dev_err(ctx->dev, "unable to map source for DMA\n");
4222 				return -ENOMEM;
4223 			}
4224 		} else {
4225 			mapped_nents = 0;
4226 		}
4227 
4228 		/* allocate space for base edesc and link tables */
4229 		edesc = qi_cache_zalloc(GFP_DMA | flags);
4230 		if (!edesc) {
4231 			dma_unmap_sg(ctx->dev, req->src, src_nents,
4232 				     DMA_TO_DEVICE);
4233 			return -ENOMEM;
4234 		}
4235 
4236 		edesc->src_nents = src_nents;
4237 		sg_table = &edesc->sgt[0];
4238 
4239 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4240 		dpaa2_fl_set_final(in_fle, true);
4241 		dpaa2_fl_set_len(in_fle, to_hash);
4242 
4243 		if (mapped_nents > 1) {
4244 			int qm_sg_bytes;
4245 
4246 			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
4247 			qm_sg_bytes = pad_sg_nents(mapped_nents) *
4248 				      sizeof(*sg_table);
4249 			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4250 							  qm_sg_bytes,
4251 							  DMA_TO_DEVICE);
4252 			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4253 				dev_err(ctx->dev, "unable to map S/G table\n");
4254 				ret = -ENOMEM;
4255 				goto unmap_ctx;
4256 			}
4257 			edesc->qm_sg_bytes = qm_sg_bytes;
4258 			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4259 			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4260 		} else {
4261 			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4262 			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4263 		}
4264 
4265 		state->ctx_dma_len = ctx->ctx_len;
4266 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4267 						ctx->ctx_len, DMA_FROM_DEVICE);
4268 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4269 			dev_err(ctx->dev, "unable to map ctx\n");
4270 			state->ctx_dma = 0;
4271 			ret = -ENOMEM;
4272 			goto unmap_ctx;
4273 		}
4274 
4275 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4276 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4277 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4278 
4279 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4280 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4281 		req_ctx->cbk = ahash_done_ctx_dst;
4282 		req_ctx->ctx = &req->base;
4283 		req_ctx->edesc = edesc;
4284 
4285 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4286 		if (ret != -EINPROGRESS &&
4287 		    !(ret == -EBUSY && req->base.flags &
4288 		      CRYPTO_TFM_REQ_MAY_BACKLOG))
4289 			goto unmap_ctx;
4290 
4291 		state->update = ahash_update_ctx;
4292 		state->finup = ahash_finup_ctx;
4293 		state->final = ahash_final_ctx;
4294 	} else if (*next_buflen) {
4295 		state->update = ahash_update_no_ctx;
4296 		state->finup = ahash_finup_no_ctx;
4297 		state->final = ahash_final_no_ctx;
4298 		scatterwalk_map_and_copy(buf, req->src, 0,
4299 					 req->nbytes, 0);
4300 		*buflen = *next_buflen;
4301 
4302 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4303 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
4304 				     *buflen, 1);
4305 	}
4306 
4307 	return ret;
4308 unmap_ctx:
4309 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4310 	qi_cache_free(edesc);
4311 	return ret;
4312 }
4313 
static int ahash_finup_first(struct ahash_request *req)
4315 {
4316 	return ahash_digest(req);
4317 }
4318 
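/*
 * The entry points below dispatch through function pointers kept in
 * caam_hash_state, forming a small state machine:
 *
 *	after init:          update_first / finup_first / final_no_ctx
 *	buffered data only:  update_no_ctx / finup_no_ctx / final_no_ctx
 *	context in flight:   update_ctx / finup_ctx / final_ctx
 *
 * A typical flow (sketch): init() -> update() x N -> final(), each
 * update() landing in whichever handler the previous call installed.
 */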
static int ahash_init(struct ahash_request *req)
4320 {
4321 	struct caam_hash_state *state = ahash_request_ctx(req);
4322 
4323 	state->update = ahash_update_first;
4324 	state->finup = ahash_finup_first;
4325 	state->final = ahash_final_no_ctx;
4326 
4327 	state->ctx_dma = 0;
4328 	state->ctx_dma_len = 0;
4329 	state->buf_dma = 0;
4330 	state->buflen = 0;
4331 	state->next_buflen = 0;
4332 
4333 	return 0;
4334 }
4335 
static int ahash_update(struct ahash_request *req)
4337 {
4338 	struct caam_hash_state *state = ahash_request_ctx(req);
4339 
4340 	return state->update(req);
4341 }
4342 
static int ahash_finup(struct ahash_request *req)
4344 {
4345 	struct caam_hash_state *state = ahash_request_ctx(req);
4346 
4347 	return state->finup(req);
4348 }
4349 
static int ahash_final(struct ahash_request *req)
4351 {
4352 	struct caam_hash_state *state = ahash_request_ctx(req);
4353 
4354 	return state->final(req);
4355 }
4356 
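/*
 * ahash_export/ahash_import - save/restore a partial hash state: the
 * buffered bytes, the running context and the state-machine function
 * pointers, packed into the caam_export_state blob advertised through
 * statesize.
 */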
static int ahash_export(struct ahash_request *req, void *out)
4358 {
4359 	struct caam_hash_state *state = ahash_request_ctx(req);
4360 	struct caam_export_state *export = out;
4361 	u8 *buf = state->buf;
4362 	int len = state->buflen;
4363 
4364 	memcpy(export->buf, buf, len);
4365 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4366 	export->buflen = len;
4367 	export->update = state->update;
4368 	export->final = state->final;
4369 	export->finup = state->finup;
4370 
4371 	return 0;
4372 }
4373 
static int ahash_import(struct ahash_request *req, const void *in)
4375 {
4376 	struct caam_hash_state *state = ahash_request_ctx(req);
4377 	const struct caam_export_state *export = in;
4378 
4379 	memset(state, 0, sizeof(*state));
4380 	memcpy(state->buf, export->buf, export->buflen);
4381 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4382 	state->buflen = export->buflen;
4383 	state->update = export->update;
4384 	state->final = export->final;
4385 	state->finup = export->finup;
4386 
4387 	return 0;
4388 }
4389 
4390 struct caam_hash_template {
4391 	char name[CRYPTO_MAX_ALG_NAME];
4392 	char driver_name[CRYPTO_MAX_ALG_NAME];
4393 	char hmac_name[CRYPTO_MAX_ALG_NAME];
4394 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4395 	unsigned int blocksize;
4396 	struct ahash_alg template_ahash;
4397 	u32 alg_type;
4398 };
4399 
/* ahash descriptor templates, registered both unkeyed and as hmac(...) */
4401 static struct caam_hash_template driver_hash[] = {
4402 	{
4403 		.name = "sha1",
4404 		.driver_name = "sha1-caam-qi2",
4405 		.hmac_name = "hmac(sha1)",
4406 		.hmac_driver_name = "hmac-sha1-caam-qi2",
4407 		.blocksize = SHA1_BLOCK_SIZE,
4408 		.template_ahash = {
4409 			.init = ahash_init,
4410 			.update = ahash_update,
4411 			.final = ahash_final,
4412 			.finup = ahash_finup,
4413 			.digest = ahash_digest,
4414 			.export = ahash_export,
4415 			.import = ahash_import,
4416 			.setkey = ahash_setkey,
4417 			.halg = {
4418 				.digestsize = SHA1_DIGEST_SIZE,
4419 				.statesize = sizeof(struct caam_export_state),
4420 			},
4421 		},
4422 		.alg_type = OP_ALG_ALGSEL_SHA1,
4423 	}, {
4424 		.name = "sha224",
4425 		.driver_name = "sha224-caam-qi2",
4426 		.hmac_name = "hmac(sha224)",
4427 		.hmac_driver_name = "hmac-sha224-caam-qi2",
4428 		.blocksize = SHA224_BLOCK_SIZE,
4429 		.template_ahash = {
4430 			.init = ahash_init,
4431 			.update = ahash_update,
4432 			.final = ahash_final,
4433 			.finup = ahash_finup,
4434 			.digest = ahash_digest,
4435 			.export = ahash_export,
4436 			.import = ahash_import,
4437 			.setkey = ahash_setkey,
4438 			.halg = {
4439 				.digestsize = SHA224_DIGEST_SIZE,
4440 				.statesize = sizeof(struct caam_export_state),
4441 			},
4442 		},
4443 		.alg_type = OP_ALG_ALGSEL_SHA224,
4444 	}, {
4445 		.name = "sha256",
4446 		.driver_name = "sha256-caam-qi2",
4447 		.hmac_name = "hmac(sha256)",
4448 		.hmac_driver_name = "hmac-sha256-caam-qi2",
4449 		.blocksize = SHA256_BLOCK_SIZE,
4450 		.template_ahash = {
4451 			.init = ahash_init,
4452 			.update = ahash_update,
4453 			.final = ahash_final,
4454 			.finup = ahash_finup,
4455 			.digest = ahash_digest,
4456 			.export = ahash_export,
4457 			.import = ahash_import,
4458 			.setkey = ahash_setkey,
4459 			.halg = {
4460 				.digestsize = SHA256_DIGEST_SIZE,
4461 				.statesize = sizeof(struct caam_export_state),
4462 			},
4463 		},
4464 		.alg_type = OP_ALG_ALGSEL_SHA256,
4465 	}, {
4466 		.name = "sha384",
4467 		.driver_name = "sha384-caam-qi2",
4468 		.hmac_name = "hmac(sha384)",
4469 		.hmac_driver_name = "hmac-sha384-caam-qi2",
4470 		.blocksize = SHA384_BLOCK_SIZE,
4471 		.template_ahash = {
4472 			.init = ahash_init,
4473 			.update = ahash_update,
4474 			.final = ahash_final,
4475 			.finup = ahash_finup,
4476 			.digest = ahash_digest,
4477 			.export = ahash_export,
4478 			.import = ahash_import,
4479 			.setkey = ahash_setkey,
4480 			.halg = {
4481 				.digestsize = SHA384_DIGEST_SIZE,
4482 				.statesize = sizeof(struct caam_export_state),
4483 			},
4484 		},
4485 		.alg_type = OP_ALG_ALGSEL_SHA384,
4486 	}, {
4487 		.name = "sha512",
4488 		.driver_name = "sha512-caam-qi2",
4489 		.hmac_name = "hmac(sha512)",
4490 		.hmac_driver_name = "hmac-sha512-caam-qi2",
4491 		.blocksize = SHA512_BLOCK_SIZE,
4492 		.template_ahash = {
4493 			.init = ahash_init,
4494 			.update = ahash_update,
4495 			.final = ahash_final,
4496 			.finup = ahash_finup,
4497 			.digest = ahash_digest,
4498 			.export = ahash_export,
4499 			.import = ahash_import,
4500 			.setkey = ahash_setkey,
4501 			.halg = {
4502 				.digestsize = SHA512_DIGEST_SIZE,
4503 				.statesize = sizeof(struct caam_export_state),
4504 			},
4505 		},
4506 		.alg_type = OP_ALG_ALGSEL_SHA512,
4507 	}, {
4508 		.name = "md5",
4509 		.driver_name = "md5-caam-qi2",
4510 		.hmac_name = "hmac(md5)",
4511 		.hmac_driver_name = "hmac-md5-caam-qi2",
4512 		.blocksize = MD5_BLOCK_WORDS * 4,
4513 		.template_ahash = {
4514 			.init = ahash_init,
4515 			.update = ahash_update,
4516 			.final = ahash_final,
4517 			.finup = ahash_finup,
4518 			.digest = ahash_digest,
4519 			.export = ahash_export,
4520 			.import = ahash_import,
4521 			.setkey = ahash_setkey,
4522 			.halg = {
4523 				.digestsize = MD5_DIGEST_SIZE,
4524 				.statesize = sizeof(struct caam_export_state),
4525 			},
4526 		},
4527 		.alg_type = OP_ALG_ALGSEL_MD5,
4528 	}
4529 };
4530 
4531 struct caam_hash_alg {
4532 	struct list_head entry;
4533 	struct device *dev;
4534 	int alg_type;
4535 	struct ahash_alg ahash_alg;
4536 };
4537 
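/*
 * caam_hash_cra_init - per-transform setup: map the key backing store
 * (keyed algorithms only) and the flow contexts, then derive the MDHA
 * running-digest length from the algorithm selector.
 */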
static int caam_hash_cra_init(struct crypto_tfm *tfm)
4539 {
4540 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4541 	struct crypto_alg *base = tfm->__crt_alg;
4542 	struct hash_alg_common *halg =
4543 		 container_of(base, struct hash_alg_common, base);
4544 	struct ahash_alg *alg =
4545 		 container_of(halg, struct ahash_alg, halg);
4546 	struct caam_hash_alg *caam_hash =
4547 		 container_of(alg, struct caam_hash_alg, ahash_alg);
4548 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4549 	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4550 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4551 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4552 					 HASH_MSG_LEN + 32,
4553 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4554 					 HASH_MSG_LEN + 64,
4555 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4556 	dma_addr_t dma_addr;
4557 	int i;
4558 
4559 	ctx->dev = caam_hash->dev;
4560 
4561 	if (alg->setkey) {
4562 		ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
4563 							  ARRAY_SIZE(ctx->key),
4564 							  DMA_TO_DEVICE,
4565 							  DMA_ATTR_SKIP_CPU_SYNC);
4566 		if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
4567 			dev_err(ctx->dev, "unable to map key\n");
4568 			return -ENOMEM;
4569 		}
4570 	}
4571 
4572 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4573 					DMA_BIDIRECTIONAL,
4574 					DMA_ATTR_SKIP_CPU_SYNC);
4575 	if (dma_mapping_error(ctx->dev, dma_addr)) {
4576 		dev_err(ctx->dev, "unable to map shared descriptors\n");
4577 		if (ctx->adata.key_dma)
4578 			dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4579 					       ARRAY_SIZE(ctx->key),
4580 					       DMA_TO_DEVICE,
4581 					       DMA_ATTR_SKIP_CPU_SYNC);
4582 		return -ENOMEM;
4583 	}
4584 
4585 	for (i = 0; i < HASH_NUM_OP; i++)
4586 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4587 
4588 	/* copy descriptor header template value */
4589 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4590 
4591 	ctx->ctx_len = runninglen[(ctx->adata.algtype &
4592 				   OP_ALG_ALGSEL_SUBMASK) >>
4593 				  OP_ALG_ALGSEL_SHIFT];
4594 
4595 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4596 				 sizeof(struct caam_hash_state));
4597 
	/*
	 * For keyed hash algorithms, shared descriptors will be created
	 * later, in the setkey() callback.
	 */
4602 	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
4603 }
4604 
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4606 {
4607 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4608 
4609 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4610 			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4611 	if (ctx->adata.key_dma)
4612 		dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4613 				       ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
4614 				       DMA_ATTR_SKIP_CPU_SYNC);
4615 }
4616 
static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
	struct caam_hash_template *template, bool keyed)
4619 {
4620 	struct caam_hash_alg *t_alg;
4621 	struct ahash_alg *halg;
4622 	struct crypto_alg *alg;
4623 
4624 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4625 	if (!t_alg)
4626 		return ERR_PTR(-ENOMEM);
4627 
4628 	t_alg->ahash_alg = template->template_ahash;
4629 	halg = &t_alg->ahash_alg;
4630 	alg = &halg->halg.base;
4631 
4632 	if (keyed) {
4633 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4634 			 template->hmac_name);
4635 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4636 			 template->hmac_driver_name);
4637 	} else {
4638 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4639 			 template->name);
4640 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4641 			 template->driver_name);
4642 		t_alg->ahash_alg.setkey = NULL;
4643 	}
4644 	alg->cra_module = THIS_MODULE;
4645 	alg->cra_init = caam_hash_cra_init;
4646 	alg->cra_exit = caam_hash_cra_exit;
4647 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4648 	alg->cra_priority = CAAM_CRA_PRIORITY;
4649 	alg->cra_blocksize = template->blocksize;
4650 	alg->cra_alignmask = 0;
4651 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4652 
4653 	t_alg->alg_type = template->alg_type;
4654 	t_alg->dev = dev;
4655 
4656 	return t_alg;
4657 }
4658 
static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4660 {
4661 	struct dpaa2_caam_priv_per_cpu *ppriv;
4662 
4663 	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4664 	napi_schedule_irqoff(&ppriv->napi);
4665 }
4666 
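/*
 * dpaa2_dpseci_dpio_setup - wire each (online CPU, queue pair) to an
 * affine DPIO service: register the FQDAN notification callback that
 * schedules NAPI and create a per-core dequeue store. Returns
 * -EPROBE_DEFER when no affine DPIO exists yet, so that probing is
 * retried after the DPIO objects show up.
 */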
static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4668 {
4669 	struct device *dev = priv->dev;
4670 	struct dpaa2_io_notification_ctx *nctx;
4671 	struct dpaa2_caam_priv_per_cpu *ppriv;
4672 	int err, i = 0, cpu;
4673 
4674 	for_each_online_cpu(cpu) {
4675 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4676 		ppriv->priv = priv;
4677 		nctx = &ppriv->nctx;
4678 		nctx->is_cdan = 0;
4679 		nctx->id = ppriv->rsp_fqid;
4680 		nctx->desired_cpu = cpu;
4681 		nctx->cb = dpaa2_caam_fqdan_cb;
4682 
4683 		/* Register notification callbacks */
4684 		ppriv->dpio = dpaa2_io_service_select(cpu);
4685 		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
4686 		if (unlikely(err)) {
4687 			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4688 			nctx->cb = NULL;
4689 			/*
4690 			 * If no affine DPIO for this core, there's probably
4691 			 * none available for next cores either. Signal we want
4692 			 * to retry later, in case the DPIO devices weren't
4693 			 * probed yet.
4694 			 */
4695 			err = -EPROBE_DEFER;
4696 			goto err;
4697 		}
4698 
4699 		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4700 						     dev);
4701 		if (unlikely(!ppriv->store)) {
4702 			dev_err(dev, "dpaa2_io_store_create() failed\n");
4703 			err = -ENOMEM;
4704 			goto err;
4705 		}
4706 
4707 		if (++i == priv->num_pairs)
4708 			break;
4709 	}
4710 
4711 	return 0;
4712 
4713 err:
4714 	for_each_online_cpu(cpu) {
4715 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4716 		if (!ppriv->nctx.cb)
4717 			break;
4718 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
4719 	}
4720 
4721 	for_each_online_cpu(cpu) {
4722 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4723 		if (!ppriv->store)
4724 			break;
4725 		dpaa2_io_store_destroy(ppriv->store);
4726 	}
4727 
4728 	return err;
4729 }
4730 
static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4732 {
4733 	struct dpaa2_caam_priv_per_cpu *ppriv;
4734 	int i = 0, cpu;
4735 
4736 	for_each_online_cpu(cpu) {
4737 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4738 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4739 					    priv->dev);
4740 		dpaa2_io_store_destroy(ppriv->store);
4741 
4742 		if (++i == priv->num_pairs)
4743 			return;
4744 	}
4745 }
4746 
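/*
 * dpaa2_dpseci_bind - point each Rx frame queue at the DPIO affine to
 * its CPU, passing the notification context token (nctx.qman64) as the
 * queue's user context so dequeue notifications reach the right core.
 */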
static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4748 {
4749 	struct dpseci_rx_queue_cfg rx_queue_cfg;
4750 	struct device *dev = priv->dev;
4751 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4752 	struct dpaa2_caam_priv_per_cpu *ppriv;
4753 	int err = 0, i = 0, cpu;
4754 
4755 	/* Configure Rx queues */
4756 	for_each_online_cpu(cpu) {
4757 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4758 
4759 		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4760 				       DPSECI_QUEUE_OPT_USER_CTX;
4761 		rx_queue_cfg.order_preservation_en = 0;
4762 		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4763 		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4764 		/*
4765 		 * Rx priority (WQ) doesn't really matter, since we use
4766 		 * pull mode, i.e. volatile dequeues from specific FQs
4767 		 */
4768 		rx_queue_cfg.dest_cfg.priority = 0;
4769 		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4770 
4771 		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4772 					  &rx_queue_cfg);
4773 		if (err) {
4774 			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4775 				err);
4776 			return err;
4777 		}
4778 
4779 		if (++i == priv->num_pairs)
4780 			break;
4781 	}
4782 
4783 	return err;
4784 }
4785 
static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4787 {
4788 	struct device *dev = priv->dev;
4789 
4790 	if (!priv->cscn_mem)
4791 		return;
4792 
4793 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4794 	kfree(priv->cscn_mem);
4795 }
4796 
static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4798 {
4799 	struct device *dev = priv->dev;
4800 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4801 	int err;
4802 
4803 	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
4804 		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
4805 		if (err)
4806 			dev_err(dev, "dpseci_reset() failed\n");
4807 	}
4808 
4809 	dpaa2_dpseci_congestion_free(priv);
4810 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4811 }
4812 
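/*
 * dpaa2_caam_process_fd - handle one response frame: recover the
 * originating caam_request from FD[ADDR], unmap its frame list and
 * invoke the request's completion callback with FD[FRC] as status.
 */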
static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
				  const struct dpaa2_fd *fd)
4815 {
4816 	struct caam_request *req;
4817 	u32 fd_err;
4818 
4819 	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4820 		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4821 		return;
4822 	}
4823 
4824 	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4825 	if (unlikely(fd_err))
4826 		dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
4827 
4828 	/*
4829 	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4830 	 * in FD[ERR] or FD[FRC].
4831 	 */
4832 	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4833 	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4834 			 DMA_BIDIRECTIONAL);
4835 	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4836 }
4837 
static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4839 {
4840 	int err;
4841 
4842 	/* Retry while portal is busy */
4843 	do {
4844 		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4845 					       ppriv->store);
4846 	} while (err == -EBUSY);
4847 
4848 	if (unlikely(err))
4849 		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
4850 
4851 	return err;
4852 }
4853 
static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4855 {
4856 	struct dpaa2_dq *dq;
4857 	int cleaned = 0, is_last;
4858 
4859 	do {
4860 		dq = dpaa2_io_store_next(ppriv->store, &is_last);
4861 		if (unlikely(!dq)) {
4862 			if (unlikely(!is_last)) {
4863 				dev_dbg(ppriv->priv->dev,
4864 					"FQ %d returned no valid frames\n",
4865 					ppriv->rsp_fqid);
4866 				/*
4867 				 * MUST retry until we get some sort of
4868 				 * valid response token (be it "empty dequeue"
4869 				 * or a valid frame).
4870 				 */
4871 				continue;
4872 			}
4873 			break;
4874 		}
4875 
4876 		/* Process FD */
4877 		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4878 		cleaned++;
4879 	} while (!is_last);
4880 
4881 	return cleaned;
4882 }
4883 
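/*
 * dpaa2_dpseci_poll - NAPI handler: volatile-dequeue response frames
 * into the per-CPU store and consume them until the store comes back
 * empty or one more pull could overshoot the budget (a pull may return
 * up to DPAA2_CAAM_STORE_SIZE frames). Notifications are rearmed once
 * the queue is drained under budget.
 */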
static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4885 {
4886 	struct dpaa2_caam_priv_per_cpu *ppriv;
4887 	struct dpaa2_caam_priv *priv;
4888 	int err, cleaned = 0, store_cleaned;
4889 
4890 	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4891 	priv = ppriv->priv;
4892 
4893 	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4894 		return 0;
4895 
4896 	do {
4897 		store_cleaned = dpaa2_caam_store_consume(ppriv);
4898 		cleaned += store_cleaned;
4899 
4900 		if (store_cleaned == 0 ||
4901 		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4902 			break;
4903 
4904 		/* Try to dequeue some more */
4905 		err = dpaa2_caam_pull_fq(ppriv);
4906 		if (unlikely(err))
4907 			break;
4908 	} while (1);
4909 
4910 	if (cleaned < budget) {
4911 		napi_complete_done(napi, cleaned);
4912 		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
4913 		if (unlikely(err))
4914 			dev_err(priv->dev, "Notification rearm failed: %d\n",
4915 				err);
4916 	}
4917 
4918 	return cleaned;
4919 }
4920 
static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
					 u16 token)
4923 {
4924 	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4925 	struct device *dev = priv->dev;
4926 	int err;
4927 
	/*
	 * The congestion group feature is supported starting with DPSECI
	 * API v5.1, and only when the object has been created with this
	 * capability.
	 */
4932 	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4933 	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4934 		return 0;
4935 
4936 	priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4937 				 GFP_KERNEL | GFP_DMA);
4938 	if (!priv->cscn_mem)
4939 		return -ENOMEM;
4940 
4941 	priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4942 	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4943 					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4944 	if (dma_mapping_error(dev, priv->cscn_dma)) {
4945 		dev_err(dev, "Error mapping CSCN memory area\n");
4946 		err = -ENOMEM;
4947 		goto err_dma_map;
4948 	}
4949 
4950 	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4951 	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4952 	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4953 	cong_notif_cfg.message_ctx = (uintptr_t)priv;
4954 	cong_notif_cfg.message_iova = priv->cscn_dma;
4955 	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4956 					DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4957 					DPSECI_CGN_MODE_COHERENT_WRITE;
4958 
4959 	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4960 						 &cong_notif_cfg);
4961 	if (err) {
4962 		dev_err(dev, "dpseci_set_congestion_notification failed\n");
4963 		goto err_set_cong;
4964 	}
4965 
4966 	return 0;
4967 
4968 err_set_cong:
4969 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4970 err_dma_map:
4971 	kfree(priv->cscn_mem);
4972 
4973 	return err;
4974 }
4975 
static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4977 {
4978 	struct device *dev = &ls_dev->dev;
4979 	struct dpaa2_caam_priv *priv;
4980 	struct dpaa2_caam_priv_per_cpu *ppriv;
4981 	int err, cpu;
4982 	u8 i;
4983 
4984 	priv = dev_get_drvdata(dev);
4985 
4986 	priv->dev = dev;
4987 	priv->dpsec_id = ls_dev->obj_desc.id;
4988 
	/* Get a handle for the DPSECI this interface is associated with */
4990 	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4991 	if (err) {
4992 		dev_err(dev, "dpseci_open() failed: %d\n", err);
4993 		goto err_open;
4994 	}
4995 
4996 	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4997 				     &priv->minor_ver);
4998 	if (err) {
4999 		dev_err(dev, "dpseci_get_api_version() failed\n");
5000 		goto err_get_vers;
5001 	}
5002 
5003 	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
5004 
5005 	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
5006 		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
5007 		if (err) {
5008 			dev_err(dev, "dpseci_reset() failed\n");
5009 			goto err_get_vers;
5010 		}
5011 	}
5012 
5013 	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
5014 				    &priv->dpseci_attr);
5015 	if (err) {
5016 		dev_err(dev, "dpseci_get_attributes() failed\n");
5017 		goto err_get_vers;
5018 	}
5019 
5020 	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
5021 				  &priv->sec_attr);
5022 	if (err) {
5023 		dev_err(dev, "dpseci_get_sec_attr() failed\n");
5024 		goto err_get_vers;
5025 	}
5026 
5027 	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
5028 	if (err) {
5029 		dev_err(dev, "setup_congestion() failed\n");
5030 		goto err_get_vers;
5031 	}
5032 
5033 	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
5034 			      priv->dpseci_attr.num_tx_queues);
5035 	if (priv->num_pairs > num_online_cpus()) {
5036 		dev_warn(dev, "%d queues won't be used\n",
5037 			 priv->num_pairs - num_online_cpus());
5038 		priv->num_pairs = num_online_cpus();
5039 	}
5040 
	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_rx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->tx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_tx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

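	/*
	 * Distribute the queue pairs round-robin across the online CPUs:
	 * every core is given a request (Tx) fqid so it can enqueue, but
	 * only the first num_pairs cores also get a response (Rx) fqid and
	 * a NAPI context for dequeueing.
	 */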
	i = 0;
	for_each_online_cpu(cpu) {
		u8 j;

		j = i % priv->num_pairs;

		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;

		/*
		 * Allow all cores to enqueue, while only some of them
		 * will take part in dequeuing.
		 */
		if (++i > priv->num_pairs)
			continue;

		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
		ppriv->prio = j;

		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
			priv->rx_queue_attr[j].fqid,
			priv->tx_queue_attr[j].fqid);

		ppriv->net_dev.dev = *dev;
		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
		netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
			       DPAA2_CAAM_NAPI_WEIGHT);
	}

	return 0;

err_get_rx_queue:
	dpaa2_dpseci_congestion_free(priv);
err_get_vers:
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
	return err;
}

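/* Enable the per-CPU NAPI instances, then enable the DPSECI object itself */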
static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_enable(&ppriv->napi);
	}

	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
}

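/*
 * Quiesce the DPSECI object, then tear down the NAPI contexts. The order
 * matters: the hardware is stopped from producing responses before the
 * NAPI instances that would consume them are disabled.
 */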
static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	int i, err = 0, enabled;

	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_disable() failed\n");
		return err;
	}

	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
	if (err) {
		dev_err(dev, "dpseci_is_enabled() failed\n");
		return err;
	}

	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_disable(&ppriv->napi);
		netif_napi_del(&ppriv->napi);
	}

	return 0;
}

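/* ahash algorithms successfully registered at probe time; emptied on remove */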
static struct list_head hash_list;

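/*
 * dpaa2_caam_probe() - bring up a DPSECI instance: allocate the per-CPU
 * state and an MC portal, configure the object, bind it to DPIO services,
 * enable it, then register with the crypto API every skcipher, AEAD and
 * ahash algorithm the SEC accelerators actually support.
 */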
static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i, err = 0;
	bool registered = false;

	/*
	 * There is no way to get CAAM endianness - there is no direct register
	 * space access and MC f/w does not provide this attribute.
	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
	 * property.
	 */
	caam_little_end = true;

	caam_imx = false;

	dev = &dpseci_dev->dev;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->domain = iommu_get_domain_for_dev(dev);

	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
				     0, SLAB_CACHE_DMA, NULL);
	if (!qi_cache) {
		dev_err(dev, "Can't allocate SEC cache\n");
		return -ENOMEM;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
	if (err) {
		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
		goto err_dma_mask;
	}

	/* Obtain an MC portal */
	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");

		goto err_dma_mask;
	}

	priv->ppriv = alloc_percpu(*priv->ppriv);
	if (!priv->ppriv) {
		dev_err(dev, "alloc_percpu() failed\n");
		err = -ENOMEM;
		goto err_alloc_ppriv;
	}

	/* DPSECI initialization */
	err = dpaa2_dpseci_setup(dpseci_dev);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
		goto err_dpseci_setup;
	}

	/* DPIO */
	err = dpaa2_dpseci_dpio_setup(priv);
	if (err) {
		dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
		goto err_dpio_setup;
	}

	/* DPSECI binding to DPIO */
	err = dpaa2_dpseci_bind(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
		goto err_bind;
	}

	/* DPSECI enable */
	err = dpaa2_dpseci_enable(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
		goto err_bind;
	}

	dpaa2_dpseci_debugfs_init(priv);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (alg_sel == OP_ALG_ALGSEL_3DES ||
		     alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->skcipher.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
		     c1_alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    c1_alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		/* Skip POLY1305 algorithms if not supported by device */
		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
		    !priv->sec_attr.ptha_acc_num)
			continue;

		/*
		 * Skip algorithms requiring message digests if MD not
		 * supported by device. Note that c2_alg_sel keeps the
		 * shifted ALGSEL bits, so it must be compared against the
		 * shifted MDHA selector (OP_ALG_CHA_MDHA), not a raw 0x40.
		 */
		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == OP_ALG_CHA_MDHA &&
		    !priv->sec_attr.md_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->aead.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}
	if (registered)
		dev_info(dev, "algorithms registered in /proc/crypto\n");

	/* register hash algorithms the device supports */
	INIT_LIST_HEAD(&hash_list);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!priv->sec_attr.md_acc_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* register hmac version */
		t_alg = caam_hash_alloc(dev, alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s hash alg allocation failed: %d\n",
				 alg->hmac_driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}

		/* register unkeyed version */
		t_alg = caam_hash_alloc(dev, alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s alg allocation failed: %d\n",
				 alg->driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}
	if (!list_empty(&hash_list))
		dev_info(dev, "hash algorithms registered in /proc/crypto\n");

	return err;

err_bind:
	dpaa2_dpseci_dpio_free(priv);
err_dpio_setup:
	dpaa2_dpseci_free(priv);
err_dpseci_setup:
	free_percpu(priv->ppriv);
err_alloc_ppriv:
	fsl_mc_portal_free(priv->mc_io);
err_dma_mask:
	kmem_cache_destroy(qi_cache);

	return err;
}

static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);

	dpaa2_dpseci_debugfs_exit(priv);

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}

	if (hash_list.next) {
		struct caam_hash_alg *t_hash_alg, *p;

		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
			list_del(&t_hash_alg->entry);
			kfree(t_hash_alg);
		}
	}

	dpaa2_dpseci_disable(priv);
	dpaa2_dpseci_dpio_free(priv);
	dpaa2_dpseci_free(priv);
	free_percpu(priv->ppriv);
	fsl_mc_portal_free(priv->mc_io);
	kmem_cache_destroy(qi_cache);

	return 0;
}

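/**
 * dpaa2_caam_enqueue() - enqueue a crypto request towards the DPSECI object
 * @dev: dpseci device the request is submitted to
 * @req: CAAM request; the caller prepares the two frame list entries
 *       (req->fd_flt[0] output, req->fd_flt[1] input) and the flow context,
 *       while the frame list table itself is DMA-mapped here
 *
 * Return: -EINPROGRESS if the frame was enqueued (completion is signalled
 * asynchronously through the caller-supplied req->cbk), -EBUSY while the
 * SEC reports congestion, or -EIO/PTR_ERR() on failure.
 *
 * A minimal caller sketch (illustrative only, error unwinding elided):
 *
 *	err = dpaa2_caam_enqueue(ctx->dev, req);
 *	if (err != -EINPROGRESS)
 *		goto unmap;	// unmap buffers and propagate err
 */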
int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
{
	struct dpaa2_fd fd;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err = 0, i;

	if (IS_ERR(req))
		return PTR_ERR(req);

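	/*
	 * If congestion notification was set up at probe time, read back the
	 * congestion state change notification (CSCN) area the SEC writes to
	 * and drop the request early while congestion is being reported.
	 */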
	if (priv->cscn_mem) {
		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
					DPAA2_CSCN_SIZE,
					DMA_FROM_DEVICE);
		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
			dev_dbg_ratelimited(dev, "Dropping request\n");
			return -EBUSY;
		}
	}

	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);

	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, req->fd_flt_dma)) {
		dev_err(dev, "DMA mapping error for QI enqueue request\n");
		/* nothing was mapped, so don't take the unmap error path */
		return -EIO;
	}

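	/*
	 * Build a frame descriptor in frame-list format pointing at the
	 * two-entry frame list prepared by the caller.
	 */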
	memset(&fd, 0, sizeof(fd));
	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
	dpaa2_fd_set_flc(&fd, req->flc_dma);

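	/*
	 * Retry the enqueue a bounded number of times (twice the number of
	 * Tx queues) while the portal reports -EBUSY, rather than spinning
	 * indefinitely.
	 */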
	ppriv = raw_cpu_ptr(priv->ppriv);
	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
						  &fd);
		if (err != -EBUSY)
			break;

		cpu_relax();
	}

	if (unlikely(err)) {
		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
		goto err_out;
	}

	return -EINPROGRESS;

err_out:
	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	return -EIO;
}
EXPORT_SYMBOL(dpaa2_caam_enqueue);

static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpseci",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);

static struct fsl_mc_driver dpaa2_caam_driver = {
	.driver = {
		.name		= KBUILD_MODNAME,
		.owner		= THIS_MODULE,
	},
	.probe		= dpaa2_caam_probe,
	.remove		= dpaa2_caam_remove,
	.match_id_table = dpaa2_caam_match_id_table
};

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");

module_fsl_mc_driver(dpaa2_caam_driver);