// SPDX-License-Identifier: GPL-2.0
/*
 * Cryptographic API.
 *
 * Support for ATMEL DES/TDES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * Some ideas are from omap-aes.c drivers.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include "atmel-tdes-regs.h"

#define ATMEL_TDES_PRIORITY	300

/* TDES flags  */
/* Reserve bits [17:16], [13:12], [2:0] for the TDES Mode Register */
#define TDES_FLAGS_ENCRYPT	TDES_MR_CYPHER_ENC
#define TDES_FLAGS_OPMODE_MASK	(TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
#define TDES_FLAGS_ECB		TDES_MR_OPMOD_ECB
#define TDES_FLAGS_CBC		TDES_MR_OPMOD_CBC
#define TDES_FLAGS_OFB		TDES_MR_OPMOD_OFB
#define TDES_FLAGS_CFB64	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b)
#define TDES_FLAGS_CFB32	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b)
#define TDES_FLAGS_CFB16	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b)
#define TDES_FLAGS_CFB8		(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b)

#define TDES_FLAGS_MODE_MASK	(TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)

#define TDES_FLAGS_INIT		BIT(3)
#define TDES_FLAGS_FAST		BIT(4)
#define TDES_FLAGS_BUSY		BIT(5)
#define TDES_FLAGS_DMA		BIT(6)

#define ATMEL_TDES_QUEUE_LENGTH	50

#define CFB8_BLOCK_SIZE		1
#define CFB16_BLOCK_SIZE	2
#define CFB32_BLOCK_SIZE	4

struct atmel_tdes_caps {
	bool	has_dma;
	u32	has_cfb_3keys;
};

struct atmel_tdes_dev;

struct atmel_tdes_ctx {
	struct atmel_tdes_dev *dd;

	int		keylen;
	u32		key[DES3_EDE_KEY_SIZE / sizeof(u32)];
	unsigned long	flags;

	u16		block_size;
};

struct atmel_tdes_reqctx {
	unsigned long mode;
	u8 lastc[DES_BLOCK_SIZE];
};

struct atmel_tdes_dma {
	struct dma_chan			*chan;
	struct dma_slave_config		dma_conf;
};

struct atmel_tdes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;

	struct atmel_tdes_ctx	*ctx;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;

	unsigned long		flags;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct skcipher_request	*req;
	size_t			total;

	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	size_t			in_offset;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;
	size_t			out_offset;

	size_t	buflen;
	size_t	dma_size;

	void	*buf_in;
	int	dma_in;
	dma_addr_t	dma_addr_in;
	struct atmel_tdes_dma	dma_lch_in;

	void	*buf_out;
	int	dma_out;
	dma_addr_t	dma_addr_out;
	struct atmel_tdes_dma	dma_lch_out;

	struct atmel_tdes_caps	caps;

	u32	hw_version;
};

struct atmel_tdes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_tdes_drv atmel_tdes = {
	.dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
};

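/*
 * Copy up to @buflen bytes (bounded by @total) between the scatterlist and
 * the linear buffer @buf, advancing *@sg and *@offset as data is consumed.
 * @out selects the direction: 0 reads from the scatterlist into @buf,
 * non-zero writes @buf back into the scatterlist.
 * Returns the number of bytes actually copied.
 */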
static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
			void *buf, size_t buflen, size_t total, int out)
{
	size_t count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}

static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
			       const u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_tdes_write(dd, offset, *value);
}

static struct atmel_tdes_dev *atmel_tdes_dev_alloc(void)
{
	struct atmel_tdes_dev *tdes_dd;

	spin_lock_bh(&atmel_tdes.lock);
	/* One TDES IP per SoC. */
	tdes_dd = list_first_entry_or_null(&atmel_tdes.dev_list,
					   struct atmel_tdes_dev, list);
	spin_unlock_bh(&atmel_tdes.lock);
	return tdes_dd;
}

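/*
 * Enable the peripheral clock and, on first use, software-reset the TDES
 * core so it starts from a known state.
 */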
static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
{
	int err;

	err = clk_prepare_enable(dd->iclk);
	if (err)
		return err;

	if (!(dd->flags & TDES_FLAGS_INIT)) {
		atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
		dd->flags |= TDES_FLAGS_INIT;
	}

	return 0;
}

static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
{
	return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
}

static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
{
	int err;

	err = atmel_tdes_hw_init(dd);
	if (err)
		return err;

	dd->hw_version = atmel_tdes_get_version(dd);

	dev_info(dd->dev,
			"version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);

	return 0;
}

static void atmel_tdes_dma_callback(void *data)
{
	struct atmel_tdes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}

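/*
 * Program the hardware for the current request: select DES or TDES and the
 * key mode from the key length, merge the operation mode and direction from
 * dd->flags into the Mode Register, then load the key words and, for
 * non-ECB modes, the IV. The Mode Register must be written before the IV
 * registers.
 */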
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
	int err;
	u32 valmr = TDES_MR_SMOD_PDC;

	err = atmel_tdes_hw_init(dd);
	if (err)
		return err;

	if (!dd->caps.has_dma)
		atmel_tdes_write(dd, TDES_PTCR,
			TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
		valmr |= TDES_MR_KEYMOD_3KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else if (dd->ctx->keylen > DES_KEY_SIZE) {
		valmr |= TDES_MR_KEYMOD_2KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else {
		valmr |= TDES_MR_TDESMOD_DES;
	}

	valmr |= dd->flags & TDES_FLAGS_MODE_MASK;

	atmel_tdes_write(dd, TDES_MR, valmr);

	atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
						dd->ctx->keylen >> 2);

	if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
		atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);

	return 0;
}

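/*
 * Complete a PDC transfer: stop the PDC channels, then either unmap the
 * scatterlists (fast path) or copy the result out of the bounce buffer into
 * the destination scatterlist.
 */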
static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
{
	int err = 0;
	size_t count;

	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);

	if (dd->flags & TDES_FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);

		/* copy data */
		count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
				dd->buf_out, dd->buflen, dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			dev_dbg(dd->dev, "not all data converted: %zu\n", count);
		}
	}

	return err;
}

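/*
 * Allocate one page for each bounce buffer (input and output) and map them
 * for DMA. These buffers are used whenever the request scatterlists are not
 * suitable for direct mapping.
 */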
static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
{
	int err = -ENOMEM;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buflen = PAGE_SIZE;
	dd->buflen &= ~(DES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_dbg(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
					dd->buflen, DMA_TO_DEVICE);
	err = dma_mapping_error(dd->dev, dd->dma_addr_in);
	if (err) {
		dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
					dd->buflen, DMA_FROM_DEVICE);
	err = dma_mapping_error(dd->dev, dd->dma_addr_out);
	if (err) {
		dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
		goto err_map_out;
	}

	return 0;

err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
err_map_in:
err_alloc:
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
	return err;
}

static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
{
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
}

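/*
 * Start a transfer through the PDC: program the transmit/receive pointer and
 * counter registers and enable both channels. The counter unit follows the
 * CFB data size (bytes, half-words or words), and the end-of-receive
 * interrupt signals completion.
 */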
static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
	int len32;

	dd->dma_size = length;

	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		len32 = DIV_ROUND_UP(length, sizeof(u8));
		break;

	case TDES_FLAGS_CFB16:
		len32 = DIV_ROUND_UP(length, sizeof(u16));
		break;

	default:
		len32 = DIV_ROUND_UP(length, sizeof(u32));
		break;
	}

	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
	atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
	atmel_tdes_write(dd, TDES_TCR, len32);
	atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
	atmel_tdes_write(dd, TDES_RCR, len32);

	/* Enable Interrupt */
	atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);

	/* Start DMA transfer */
	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);

	return 0;
}

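/*
 * Start a transfer through the DMA controller: adjust the slave bus width to
 * the CFB data size, wrap the already-mapped addresses in single-entry
 * scatterlists and submit one descriptor per direction. Completion is
 * reported by the RX descriptor callback.
 */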
static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor	*in_desc, *out_desc;
	enum dma_slave_buswidth addr_width;

	dd->dma_size = length;

	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;

	case TDES_FLAGS_CFB16:
		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;

	default:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
	dd->dma_lch_out.dma_conf.src_addr_width = addr_width;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= TDES_FLAGS_DMA;

	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
				1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
				1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	out_desc->callback = atmel_tdes_dma_callback;
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}

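/*
 * Process the next chunk of the request. If both scatterlist entries are
 * suitably aligned and of equal length, map them directly ("fast" path);
 * otherwise copy the input into the bounce buffer. Then start either a PDC
 * or a dmaengine transfer depending on the hardware capabilities.
 */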
static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
{
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;

	if ((!dd->in_offset) && (!dd->out_offset)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
		fast = in && out;

		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
			fast = 0;
	}

	if (fast) {
		count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
		count = min_t(size_t, count, sg_dma_len(dd->out_sg));

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_dbg(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1,
				DMA_FROM_DEVICE);
		if (!err) {
			dev_dbg(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1,
				DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		dd->flags |= TDES_FLAGS_FAST;

	} else {
		/* use cache buffers */
		count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
				dd->buf_in, dd->buflen, dd->total, 0);

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~TDES_FLAGS_FAST;
	}

	dd->total -= count;

	if (dd->caps.has_dma)
		err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
	else
		err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);

	if (err && (dd->flags & TDES_FLAGS_FAST)) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
	}

	return err;
}

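/*
 * The skcipher API expects req->iv to contain the last ciphertext block when
 * the request completes. For encryption it is read back from the destination;
 * for decryption it was saved in rctx->lastc before the source could be
 * overwritten (req->src may alias req->dst).
 */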
static void
atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
{
	struct skcipher_request *req = dd->req;
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	if (req->cryptlen < ivsize)
		return;

	if (rctx->mode & TDES_FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - ivsize, ivsize, 0);
	else
		memcpy(req->iv, rctx->lastc, ivsize);
}

static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
	struct skcipher_request *req = dd->req;
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);

	clk_disable_unprepare(dd->iclk);

	dd->flags &= ~TDES_FLAGS_BUSY;

	if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
		atmel_tdes_set_iv_as_last_ciphertext_block(dd);

	skcipher_request_complete(req, err);
}

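/*
 * Enqueue the new request (if any) and, unless a transfer is already in
 * flight, dequeue the next one, mark the device busy, program the hardware
 * and start the transfer. Backlogged requests are notified with -EINPROGRESS.
 */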
static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
			       struct skcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_tdes_ctx *ctx;
	struct atmel_tdes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = crypto_enqueue_request(&dd->queue, &req->base);
	if (dd->flags & TDES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= TDES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	req = skcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->cryptlen;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = skcipher_request_ctx(req);
	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	rctx->mode &= TDES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;

	err = atmel_tdes_write_ctrl(dd);
	if (!err)
		err = atmel_tdes_crypt_start(dd);
	if (err) {
		/* des_task will not finish it, so do it here */
		atmel_tdes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}

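/*
 * Counterpart of atmel_tdes_crypt_pdc_stop() for dmaengine transfers: unmap
 * the scatterlists on the fast path or copy the result out of the bounce
 * buffer otherwise.
 */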
static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
{
	int err = -EINVAL;
	size_t count;

	if (dd->flags & TDES_FLAGS_DMA) {
		err = 0;
		if (dd->flags & TDES_FLAGS_FAST) {
			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		} else {
			dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
				dd->dma_size, DMA_FROM_DEVICE);

			/* copy data */
			count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
				dd->buf_out, dd->buflen, dd->dma_size, 1);
			if (count != dd->dma_size) {
				err = -EINVAL;
				dev_dbg(dd->dev, "not all data converted: %zu\n", count);
			}
		}
	}
	return err;
}

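/*
 * Common entry point for all modes: check that the request length is a
 * multiple of the block size of the selected mode, record the mode in the
 * request context, save the last ciphertext block when decrypting in a
 * chaining mode, and hand the request to the queue.
 */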
static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
	struct device *dev = ctx->dd->dev;

	if (!req->cryptlen)
		return 0;

	switch (mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
			dev_dbg(dev, "request size is not exact amount of CFB8 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB8_BLOCK_SIZE;
		break;

	case TDES_FLAGS_CFB16:
		if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
			dev_dbg(dev, "request size is not exact amount of CFB16 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB16_BLOCK_SIZE;
		break;

	case TDES_FLAGS_CFB32:
		if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
			dev_dbg(dev, "request size is not exact amount of CFB32 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB32_BLOCK_SIZE;
		break;

	default:
		if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
			dev_dbg(dev, "request size is not exact amount of DES blocks\n");
			return -EINVAL;
		}
		ctx->block_size = DES_BLOCK_SIZE;
		break;
	}

	rctx->mode = mode;

	if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
	    !(mode & TDES_FLAGS_ENCRYPT)) {
		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

		if (req->cryptlen >= ivsize)
			scatterwalk_map_and_copy(rctx->lastc, req->src,
						 req->cryptlen - ivsize,
						 ivsize, 0);
	}

	return atmel_tdes_handle_queue(ctx->dd, req);
}

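/*
 * Request the "tx" and "rx" dmaengine channels and pre-configure them to
 * target the input and output data registers of the TDES core.
 */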
static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
{
	int ret;

	/* Try to grab 2 DMA channels */
	dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->dma_lch_in.chan)) {
		ret = PTR_ERR(dd->dma_lch_in.chan);
		goto err_dma_in;
	}

	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		TDES_IDATA1R;
	dd->dma_lch_in.dma_conf.src_maxburst = 1;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
	if (IS_ERR(dd->dma_lch_out.chan)) {
		ret = PTR_ERR(dd->dma_lch_out.chan);
		goto err_dma_out;
	}

	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
		TDES_ODATA1R;
	dd->dma_lch_out.dma_conf.src_maxburst = 1;
	dd->dma_lch_out.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.dst_maxburst = 1;
	dd->dma_lch_out.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.device_fc = false;

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
	dev_err(dd->dev, "no DMA channel available\n");
	return ret;
}

static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
	dma_release_channel(dd->dma_lch_out.chan);
}

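/*
 * Key handling: reject weak or malformed keys through the generic DES
 * helpers, then keep a copy of the key in the transform context so it can be
 * written to the hardware for each request.
 */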
static int atmel_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
}

static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
}

static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB64);
}

static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB8);
}

static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB16);
}

static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB32);
}

static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
}

static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
{
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->dd = atmel_tdes_dev_alloc();
	if (!ctx->dd)
		return -ENODEV;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));

	return 0;
}

static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
{
	alg->base.cra_priority = ATMEL_TDES_PRIORITY;
	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
	alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
	alg->base.cra_module = THIS_MODULE;

	alg->init = atmel_tdes_init_tfm;
}

static struct skcipher_alg tdes_algs[] = {
{
	.base.cra_name		= "ecb(des)",
	.base.cra_driver_name	= "atmel-ecb-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des)",
	.base.cra_driver_name	= "atmel-cbc-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
},
{
	.base.cra_name		= "cfb(des)",
	.base.cra_driver_name	= "atmel-cfb-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb_encrypt,
	.decrypt		= atmel_tdes_cfb_decrypt,
},
{
	.base.cra_name		= "cfb8(des)",
	.base.cra_driver_name	= "atmel-cfb8-des",
	.base.cra_blocksize	= CFB8_BLOCK_SIZE,
	.base.cra_alignmask	= 0,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb8_encrypt,
	.decrypt		= atmel_tdes_cfb8_decrypt,
},
{
	.base.cra_name		= "cfb16(des)",
	.base.cra_driver_name	= "atmel-cfb16-des",
	.base.cra_blocksize	= CFB16_BLOCK_SIZE,
	.base.cra_alignmask	= 0x1,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb16_encrypt,
	.decrypt		= atmel_tdes_cfb16_decrypt,
},
{
	.base.cra_name		= "cfb32(des)",
	.base.cra_driver_name	= "atmel-cfb32-des",
	.base.cra_blocksize	= CFB32_BLOCK_SIZE,
	.base.cra_alignmask	= 0x3,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb32_encrypt,
	.decrypt		= atmel_tdes_cfb32_decrypt,
},
{
	.base.cra_name		= "ofb(des)",
	.base.cra_driver_name	= "atmel-ofb-des",
	.base.cra_blocksize	= 1,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_ofb_encrypt,
	.decrypt		= atmel_tdes_ofb_decrypt,
},
{
	.base.cra_name		= "ecb(des3_ede)",
	.base.cra_driver_name	= "atmel-ecb-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des3_ede)",
	.base.cra_driver_name	= "atmel-cbc-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
	.ivsize			= DES_BLOCK_SIZE,
},
{
	.base.cra_name		= "ofb(des3_ede)",
	.base.cra_driver_name	= "atmel-ofb-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_ofb_encrypt,
	.decrypt		= atmel_tdes_ofb_decrypt,
	.ivsize			= DES_BLOCK_SIZE,
},
};

static void atmel_tdes_queue_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;

	atmel_tdes_handle_queue(dd, NULL);
}

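/*
 * Tasklet run when a transfer completes: perform the stop/copy-back step,
 * then either start the next chunk of the same request or finish it and
 * poke the queue for the next one.
 */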
static void atmel_tdes_done_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
	int err;

	if (!(dd->flags & TDES_FLAGS_DMA))
		err = atmel_tdes_crypt_pdc_stop(dd);
	else
		err = atmel_tdes_crypt_dma_stop(dd);

	if (dd->total && !err) {
		if (dd->flags & TDES_FLAGS_FAST) {
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_tdes_crypt_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

	atmel_tdes_finish_req(dd, err);
	atmel_tdes_handle_queue(dd, NULL);
}

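/*
 * Interrupt handler: mask the reported interrupt sources and defer the
 * actual completion work to the done tasklet.
 */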
static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
{
	struct atmel_tdes_dev *tdes_dd = dev_id;
	u32 reg;

	reg = atmel_tdes_read(tdes_dd, TDES_ISR);
	if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
		atmel_tdes_write(tdes_dd, TDES_IDR, reg);
		if (TDES_FLAGS_BUSY & tdes_dd->flags)
			tasklet_schedule(&tdes_dd->done_task);
		else
			dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
		crypto_unregister_skcipher(&tdes_algs[i]);
}

static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
		atmel_tdes_skcipher_alg_init(&tdes_algs[i]);

		err = crypto_register_skcipher(&tdes_algs[i]);
		if (err)
			goto err_tdes_algs;
	}

	return 0;

err_tdes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(&tdes_algs[j]);

	return err;
}

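/*
 * Derive the capabilities from the IP major version: revisions 0x700 and
 * later have a dmaengine interface and support CFB with three keys,
 * earlier revisions fall back to the PDC.
 */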
static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
{
	dd->caps.has_dma = 0;
	dd->caps.has_cfb_3keys = 0;

	/* keep only major version number */
	switch (dd->hw_version & 0xf00) {
	case 0x800:
	case 0x700:
		dd->caps.has_dma = 1;
		dd->caps.has_cfb_3keys = 1;
		break;
	case 0x600:
		break;
	default:
		dev_warn(dd->dev,
				"Unmanaged tdes version, set minimum capabilities\n");
		break;
	}
}

static const struct of_device_id atmel_tdes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-tdes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);

static int atmel_tdes_probe(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd;
	struct device *dev = &pdev->dev;
	struct resource *tdes_res;
	int err;

	tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
	if (!tdes_dd)
		return -ENOMEM;

	tdes_dd->dev = dev;

	platform_set_drvdata(pdev, tdes_dd);

	INIT_LIST_HEAD(&tdes_dd->list);
	spin_lock_init(&tdes_dd->lock);

	tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
					(unsigned long)tdes_dd);
	tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
					(unsigned long)tdes_dd);

	crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);

	tdes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &tdes_res);
	if (IS_ERR(tdes_dd->io_base)) {
		err = PTR_ERR(tdes_dd->io_base);
		goto err_tasklet_kill;
	}
	tdes_dd->phys_base = tdes_res->start;

	/* Get the IRQ */
	tdes_dd->irq = platform_get_irq(pdev, 0);
	if (tdes_dd->irq < 0) {
		err = tdes_dd->irq;
		goto err_tasklet_kill;
	}

	err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
			       IRQF_SHARED, "atmel-tdes", tdes_dd);
	if (err) {
		dev_err(dev, "unable to request tdes irq.\n");
		goto err_tasklet_kill;
	}

	/* Initializing the clock */
	tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
	if (IS_ERR(tdes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(tdes_dd->iclk);
		goto err_tasklet_kill;
	}

	err = atmel_tdes_hw_version_init(tdes_dd);
	if (err)
		goto err_tasklet_kill;

	atmel_tdes_get_cap(tdes_dd);

	err = atmel_tdes_buff_init(tdes_dd);
	if (err)
		goto err_tasklet_kill;

	if (tdes_dd->caps.has_dma) {
		err = atmel_tdes_dma_init(tdes_dd);
		if (err)
			goto err_buff_cleanup;

		dev_info(dev, "using %s, %s for DMA transfers\n",
				dma_chan_name(tdes_dd->dma_lch_in.chan),
				dma_chan_name(tdes_dd->dma_lch_out.chan));
	}

	spin_lock(&atmel_tdes.lock);
	list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
	spin_unlock(&atmel_tdes.lock);

	err = atmel_tdes_register_algs(tdes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel DES/TDES\n");

	return 0;

err_algs:
	spin_lock(&atmel_tdes.lock);
	list_del(&tdes_dd->list);
	spin_unlock(&atmel_tdes.lock);
	if (tdes_dd->caps.has_dma)
		atmel_tdes_dma_cleanup(tdes_dd);
err_buff_cleanup:
	atmel_tdes_buff_cleanup(tdes_dd);
err_tasklet_kill:
	tasklet_kill(&tdes_dd->done_task);
	tasklet_kill(&tdes_dd->queue_task);

	return err;
}

static int atmel_tdes_remove(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd = platform_get_drvdata(pdev);

	spin_lock(&atmel_tdes.lock);
	list_del(&tdes_dd->list);
	spin_unlock(&atmel_tdes.lock);

	atmel_tdes_unregister_algs(tdes_dd);

	tasklet_kill(&tdes_dd->done_task);
	tasklet_kill(&tdes_dd->queue_task);

	if (tdes_dd->caps.has_dma)
		atmel_tdes_dma_cleanup(tdes_dd);

	atmel_tdes_buff_cleanup(tdes_dd);

	return 0;
}

static struct platform_driver atmel_tdes_driver = {
	.probe		= atmel_tdes_probe,
	.remove		= atmel_tdes_remove,
	.driver		= {
		.name	= "atmel_tdes",
		.of_match_table = atmel_tdes_dt_ids,
	},
};

module_platform_driver(atmel_tdes_driver);

MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");