// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

struct mv_cesa_skcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};

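/*
 * Set up the DMA iterators for a skcipher request: an overall iterator
 * bounding the job at req->cryptlen, plus one scatterlist iterator per
 * direction for the source and destination buffers.
 */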
static inline void
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
			       struct skcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

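/*
 * Undo the DMA mappings created at request-init time. In-place requests
 * (req->src == req->dst) were mapped once, bidirectionally, and are
 * unmapped the same way.
 */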
static inline void
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_cleanup(req);
}

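/*
 * Standard (CPU-driven) mode: copy the operation descriptor and up to
 * CESA_SA_SRAM_PAYLOAD_SIZE bytes of payload into the engine SRAM, then
 * start accelerator 0. The full operation context is only written for the
 * first chunk; later chunks only rewrite the descriptor.
 */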
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len = min_t(size_t, req->cryptlen - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

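/*
 * Copy the chunk processed by the engine back from SRAM into the
 * destination scatterlist and advance the request offset. Returns
 * -EINPROGRESS as long as data remains, so that the core steps the
 * request again for the next chunk.
 */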
static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
					u32 status)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->cryptlen)
		return -EINPROGRESS;

	return 0;
}

static int mv_cesa_skcipher_process(struct crypto_async_request *req,
				    u32 status)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_req *basereq = &creq->base;

	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
		return mv_cesa_skcipher_std_process(skreq, status);

	return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_skcipher_std_step(skreq);
}

static inline void
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;

	sreq->size = 0;
	sreq->offset = 0;
}

static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
					    struct mv_cesa_engine *engine)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_prepare(skreq);
	else
		mv_cesa_skcipher_std_prepare(skreq);
}

static inline void
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);

	mv_cesa_skcipher_cleanup(skreq);
}

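/*
 * Request completion: drop this request's weight from the engine load and
 * copy the output IV back to the caller, either from the last TDMA
 * operation context (DMA mode) or straight from the engine SRAM (standard
 * mode).
 */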
static void
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int ivsize;

	atomic_sub(skreq->cryptlen, &engine->load);
	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
		struct mv_cesa_req *basereq;

		basereq = &creq->base;
		memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
		       ivsize);
	} else {
		memcpy_fromio(skreq->iv,
			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
			      ivsize);
	}
}

static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
	.step = mv_cesa_skcipher_step,
	.process = mv_cesa_skcipher_process,
	.cleanup = mv_cesa_skcipher_req_cleanup,
	.complete = mv_cesa_skcipher_complete,
};

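/*
 * Wipe the whole transform context on exit so no key material is left
 * behind.
 */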
static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	void *ctx = crypto_tfm_ctx(tfm);

	memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}

static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->ops = &mv_cesa_skcipher_req_ops;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct mv_cesa_skcipher_req));

	return 0;
}

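/*
 * The engine presumably regenerates the AES key schedule itself for
 * decryption and only wants the raw final Nk words of the expanded
 * encryption key, from which it can walk the schedule backwards.
 * aes_expandkey() stores the last round key in key_dec[0..3] but
 * InvMixColumns-transforms the remaining words, so for 192/256-bit keys
 * the missing raw words are copied back from key_enc (e.g. AES-256:
 * remaining = 4, offset = 52, so key_enc[52..55] end up in key_dec[4..7]).
 * AES-128 needs no fixup.
 */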
static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = aes_expandkey(&ctx->aes, key, len);
	if (ret)
		return ret;

	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] = ctx->aes.key_enc[offset + i];

	return 0;
}

static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des_key(cipher, key);
	if (err)
		return err;

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des3_key(cipher, key);
	if (err)
		return err;

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}

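/*
 * Build the TDMA descriptor chain for a request: map the scatterlists,
 * then, for each SRAM-sized chunk, add an operation descriptor (the
 * context is skipped after the first chunk), the input transfers, a dummy
 * descriptor launching the crypto operation, and the output transfers.
 * A final result descriptor brings the output IV back from SRAM.
 */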
static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
					 const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_skcipher_dma_iter iter;
	bool skip_ctx = false;
	int ret;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_skcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
					flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;

	} while (mv_cesa_skcipher_req_iter_next_op(&iter));

	/* Add output data for IV */
	ret = mv_cesa_dma_add_result_op(&basereq->chain,
					CESA_SA_CFG_SRAM_OFFSET,
					CESA_SA_DATA_SRAM_OFFSET,
					CESA_TDMA_SRC_IN_SRAM, flags);

	if (ret)
		goto err_free_tdma;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

static inline int
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
			      const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_req *basereq = &creq->base;

	sreq->op = *op_templ;
	sreq->skip_ctx = false;
	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	return 0;
}

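/*
 * Common request initialization: reject lengths that are not a multiple
 * of the cipher block size (the driver has no partial-block handling),
 * count the scatterlist entries, force a crypt-only operation, and pick
 * the DMA path when the engine has a TDMA unit, the CPU-driven path
 * otherwise.
 */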
static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
				     struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int blksize = crypto_skcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->cryptlen, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}
	creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_skcipher_std_req_init(req, tmpl);

	return ret;
}

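/*
 * Queue a request for execution: initialize it, ask the core for an
 * engine (selection is weighted by the request length), prepare the
 * request for that engine, and queue it. The resources acquired in
 * mv_cesa_skcipher_req_init() are released here if the request could not
 * be queued or backlogged.
 */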
static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
				      struct mv_cesa_op_ctx *tmpl)
{
	int ret;
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_engine *engine;

	ret = mv_cesa_skcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	engine = mv_cesa_select_engine(req->cryptlen);
	mv_cesa_skcipher_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_skcipher_cleanup(req);

	return ret;
}

static int mv_cesa_des_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.skcipher.key, ctx->key, DES_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}

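/*
 * The skcipher_alg instances below are deliberately non-static: the core
 * driver is expected to register them with the crypto API. Consumers then
 * go through the generic skcipher interface; a rough sketch:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (!IS_ERR(tfm))
 *		err = crypto_skcipher_setkey(tfm, key, keylen);
 *
 * followed by skcipher_request_alloc()/skcipher_request_set_crypt().
 */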
struct skcipher_alg mv_cesa_ecb_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_ecb_des_encrypt,
	.decrypt = mv_cesa_ecb_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des)",
		.cra_driver_name = "mv-ecb-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.skcipher.iv, req->iv, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_cbc_des_encrypt,
	.decrypt = mv_cesa_cbc_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des)",
		.cra_driver_name = "mv-cbc-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_des3_op(struct skcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.skcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_ecb_des3_ede_encrypt,
	.decrypt = mv_cesa_ecb_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "mv-ecb-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.skcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_cbc_des3_ede_encrypt,
	.decrypt = mv_cesa_cbc_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES3_EDE_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "mv-cbc-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

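/*
 * Program an AES operation: pick the encryption or decryption key schedule
 * depending on the requested direction, copy it into the operation context
 * in little-endian form, and encode the key length (128/192/256 bits) in
 * the descriptor config.
 */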
static int mv_cesa_aes_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.skcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_ecb_aes_encrypt,
	.decrypt = mv_cesa_ecb_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.base = {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "mv-ecb-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.skcipher.iv, req->iv, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_cbc_aes_encrypt,
	.decrypt = mv_cesa_cbc_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "mv-cbc-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};