// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
 */
#include <linux/device.h>
#include <crypto/scatterwalk.h>
#include "rk3288_crypto.h"

#define RK_CRYPTO_DEC			BIT(0)

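/*
 * Check whether a request can be handled by the engine or must go to the
 * software fallback: the hardware needs 32-bit aligned scatterlist entries,
 * source/destination chunks of equal, block-multiple length, and a non-zero
 * payload.
 */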
static int rk_cipher_need_fallback(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int bs = crypto_skcipher_blocksize(tfm);
	struct scatterlist *sgs, *sgd;
	unsigned int stodo, dtodo, len;

	if (!req->cryptlen)
		return true;

	len = req->cryptlen;
	sgs = req->src;
	sgd = req->dst;
	while (sgs && sgd) {
		if (!IS_ALIGNED(sgs->offset, sizeof(u32)))
			return true;
		if (!IS_ALIGNED(sgd->offset, sizeof(u32)))
			return true;
		stodo = min(len, sgs->length);
		if (stodo % bs)
			return true;
		dtodo = min(len, sgd->length);
		if (dtodo % bs)
			return true;
		if (stodo != dtodo)
			return true;
		len -= stodo;
		sgs = sg_next(sgs);
		sgd = sg_next(sgd);
	}
	return false;
}

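/*
 * Forward the request unmodified to the software fallback transform
 * allocated at init time, preserving the caller's completion callback.
 */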
static int rk_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
	int err;

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->mode & RK_CRYPTO_DEC)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

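/*
 * Either queue the request on the crypto engine or, when the hardware
 * constraints are not met, process it synchronously via the fallback.
 */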
static int rk_handle_req(struct rk_crypto_info *dev,
			 struct skcipher_request *req)
{
	struct crypto_engine *engine = dev->engine;

	if (rk_cipher_need_fallback(req))
		return rk_cipher_fallback(req);

	return crypto_transfer_skcipher_request_to_engine(engine, req);
}

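/*
 * The setkey helpers keep a copy of the key for later programming into the
 * engine registers and also install it on the fallback transform, so both
 * paths always see the same key.
 */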
static int rk_aes_setkey(struct crypto_skcipher *cipher,
			 const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;
	ctx->keylen = keylen;
	memcpy(ctx->key, key, keylen);

	return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}

static int rk_des_setkey(struct crypto_skcipher *cipher,
			 const u8 *key, unsigned int keylen)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des_key(cipher, key);
	if (err)
		return err;

	ctx->keylen = keylen;
	memcpy(ctx->key, key, keylen);

	return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}

static int rk_tdes_setkey(struct crypto_skcipher *cipher,
			  const u8 *key, unsigned int keylen)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des3_key(cipher, key);
	if (err)
		return err;

	ctx->keylen = keylen;
	memcpy(ctx->key, key, keylen);

	return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}

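/*
 * Per-algorithm entry points: each one only records the control-register
 * mode bits for the request in the request context and hands the request
 * to rk_handle_req().
 */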
static int rk_aes_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_AES_ECB_MODE;
	return rk_handle_req(dev, req);
}

static int rk_aes_ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_aes_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_AES_CBC_MODE;
	return rk_handle_req(dev, req);
}

static int rk_aes_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = 0;
	return rk_handle_req(dev, req);
}

static int rk_des_ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_handle_req(dev, req);
}

static int rk_des_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_TDES_SELECT;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
		     RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

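/*
 * Program the engine for the current request: select the TDES or AES unit
 * based on the algorithm block size, load the key and the mode bits set up
 * by the entry points above, and enable the block-cipher DMA interrupts.
 */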
static void rk_ablk_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	u32 block, conf_reg = 0;

	block = crypto_tfm_alg_blocksize(tfm);

	if (block == DES_BLOCK_SIZE) {
		rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
			      RK_CRYPTO_TDES_BYTESWAP_KEY |
			      RK_CRYPTO_TDES_BYTESWAP_IV;
		CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode);
		memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen);
		conf_reg = RK_CRYPTO_DESSEL;
	} else {
		rctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
			      RK_CRYPTO_AES_KEY_CHANGE |
			      RK_CRYPTO_AES_BYTESWAP_KEY |
			      RK_CRYPTO_AES_BYTESWAP_IV;
		if (ctx->keylen == AES_KEYSIZE_192)
			rctx->mode |= RK_CRYPTO_AES_192BIT_key;
		else if (ctx->keylen == AES_KEYSIZE_256)
			rctx->mode |= RK_CRYPTO_AES_256BIT_key;
		CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode);
		memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen);
	}
	conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
		    RK_CRYPTO_BYTESWAP_BRFIFO;
	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
		     RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
}

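/*
 * Kick off one DMA transfer: program the source and destination bus
 * addresses and the length (in 32-bit words), then set the block start bit,
 * mirrored into the upper half of the control register as its write-enable
 * mask.
 */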
static void crypto_dma_start(struct rk_crypto_info *dev,
			     struct scatterlist *sgs,
			     struct scatterlist *sgd, unsigned int todo)
{
	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs));
	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo);
	CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, sg_dma_address(sgd));
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
		     _SBF(RK_CRYPTO_BLOCK_START, 16));
}

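/*
 * Main engine worker, called by crypto_engine for each queued request.
 * Walks the source/destination scatterlists one entry at a time, maps them
 * for DMA, programs the IV and key, starts the transfer and waits for the
 * completion raised by the interrupt handler. CBC chaining across entries
 * is handled in software: for decryption the last ciphertext block of the
 * current source entry is saved before it can be overwritten in place.
 */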
static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
	struct scatterlist *sgs, *sgd;
	int err = 0;
	int ivsize = crypto_skcipher_ivsize(tfm);
	int offset;
	u8 iv[AES_BLOCK_SIZE];
	u8 biv[AES_BLOCK_SIZE];
	u8 *ivtouse = areq->iv;
	unsigned int len = areq->cryptlen;
	unsigned int todo;

	if (areq->iv && ivsize > 0) {
		if (rctx->mode & RK_CRYPTO_DEC) {
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
						 offset, ivsize, 0);
		}
	}

	sgs = areq->src;
	sgd = areq->dst;

	while (sgs && sgd && len) {
		if (!sgs->length) {
			sgs = sg_next(sgs);
			sgd = sg_next(sgd);
			continue;
		}
		if (rctx->mode & RK_CRYPTO_DEC) {
			/* we backup last block of source to be used as IV at next step */
			offset = sgs->length - ivsize;
			scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0);
		}
		if (sgs == sgd) {
			err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
			if (err <= 0) {
				err = -EINVAL;
				goto theend_iv;
			}
		} else {
			err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
			if (err <= 0) {
				err = -EINVAL;
				goto theend_iv;
			}
			err = dma_map_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
			if (err <= 0) {
				err = -EINVAL;
				goto theend_sgs;
			}
		}
		err = 0;
		rk_ablk_hw_init(ctx->dev, areq);
		if (ivsize) {
			if (ivsize == DES_BLOCK_SIZE)
				memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize);
			else
				memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize);
		}
		reinit_completion(&ctx->dev->complete);
		ctx->dev->status = 0;

		todo = min(sg_dma_len(sgs), len);
		len -= todo;
		crypto_dma_start(ctx->dev, sgs, sgd, todo / 4);
		wait_for_completion_interruptible_timeout(&ctx->dev->complete,
							  msecs_to_jiffies(2000));
		if (!ctx->dev->status) {
			dev_err(ctx->dev->dev, "DMA timeout\n");
			err = -EFAULT;
			goto theend;
		}
		if (sgs == sgd) {
			dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
		} else {
			dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
			dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
		}
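		/*
		 * Prepare the IV for the next scatterlist entry: when
		 * decrypting, use the ciphertext block saved before the
		 * transfer; when encrypting, use the last block just
		 * produced in the destination entry.
		 */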
		if (rctx->mode & RK_CRYPTO_DEC) {
			memcpy(iv, biv, ivsize);
			ivtouse = iv;
		} else {
			offset = sgd->length - ivsize;
			scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0);
			ivtouse = iv;
		}
		sgs = sg_next(sgs);
		sgd = sg_next(sgd);
	}

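	/*
	 * Honour the skcipher API convention: on completion req->iv must
	 * hold the output IV (the last ciphertext block), so a caller can
	 * chain a follow-up request.
	 */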
	if (areq->iv && ivsize > 0) {
		offset = areq->cryptlen - ivsize;
		if (rctx->mode & RK_CRYPTO_DEC) {
			memcpy(areq->iv, rctx->backup_iv, ivsize);
			memzero_explicit(rctx->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
	}

theend:
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, areq, err);
	local_bh_enable();
	return 0;

theend_sgs:
	if (sgs == sgd) {
		dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
		dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
	}
theend_iv:
	return err;
}

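/*
 * One-time transform setup: resolve the owning device, allocate the
 * software fallback and size the request context so it can embed the
 * fallback's own request.
 */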
static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct rk_crypto_tmp *algt;

	algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);

	ctx->dev = algt->dev;

	ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm)) {
		dev_err(ctx->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(ctx->fallback_tfm));
		return PTR_ERR(ctx->fallback_tfm);
	}

	tfm->reqsize = sizeof(struct rk_cipher_rctx) +
		       crypto_skcipher_reqsize(ctx->fallback_tfm);

	ctx->enginectx.op.do_one_request = rk_cipher_run;

	return 0;
}

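/*
 * Transform teardown: wipe the cached key material and release the
 * fallback transform.
 */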
static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	memzero_explicit(ctx->key, ctx->keylen);
	crypto_free_skcipher(ctx->fallback_tfm);
}

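/*
 * Algorithm templates registered by the core driver. Users reach them
 * through the generic API (e.g. crypto_alloc_skcipher("cbc(aes)", 0, 0));
 * priority 300 makes them preferred over the generic C implementations.
 * All of them advertise CRYPTO_ALG_NEED_FALLBACK since requests the engine
 * cannot handle are punted to a software implementation.
 */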
struct rk_crypto_tmp rk_ecb_aes_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x0f,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= rk_aes_setkey,
		.encrypt		= rk_aes_ecb_encrypt,
		.decrypt		= rk_aes_ecb_decrypt,
	}
};

struct rk_crypto_tmp rk_cbc_aes_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x0f,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
		.setkey			= rk_aes_setkey,
		.encrypt		= rk_aes_cbc_encrypt,
		.decrypt		= rk_aes_cbc_decrypt,
	}
};

struct rk_crypto_tmp rk_ecb_des_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "ecb(des)",
		.base.cra_driver_name	= "ecb-des-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= DES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x07,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= DES_KEY_SIZE,
		.max_keysize		= DES_KEY_SIZE,
		.setkey			= rk_des_setkey,
		.encrypt		= rk_des_ecb_encrypt,
		.decrypt		= rk_des_ecb_decrypt,
	}
};

struct rk_crypto_tmp rk_cbc_des_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "cbc(des)",
		.base.cra_driver_name	= "cbc-des-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= DES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x07,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= DES_KEY_SIZE,
		.max_keysize		= DES_KEY_SIZE,
		.ivsize			= DES_BLOCK_SIZE,
		.setkey			= rk_des_setkey,
		.encrypt		= rk_des_cbc_encrypt,
		.decrypt		= rk_des_cbc_decrypt,
	}
};

struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "ecb(des3_ede)",
		.base.cra_driver_name	= "ecb-des3-ede-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= DES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x07,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= DES3_EDE_KEY_SIZE,
		.max_keysize		= DES3_EDE_KEY_SIZE,
		.setkey			= rk_tdes_setkey,
		.encrypt		= rk_des3_ede_ecb_encrypt,
		.decrypt		= rk_des3_ede_ecb_decrypt,
	}
};

struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "cbc(des3_ede)",
		.base.cra_driver_name	= "cbc-des3-ede-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= DES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x07,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= DES3_EDE_KEY_SIZE,
		.max_keysize		= DES3_EDE_KEY_SIZE,
		.ivsize			= DES_BLOCK_SIZE,
		.setkey			= rk_tdes_setkey,
		.encrypt		= rk_des3_ede_cbc_encrypt,
		.decrypt		= rk_des3_ede_cbc_decrypt,
	}
};