// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/engine.h>

#define MD5_DIGEST_SIZE			16

#define SHA_REG_IDIGEST(dd, x)		((dd)->pdata->idigest_ofs + ((x) * 0x04))
#define SHA_REG_DIN(dd, x)		((dd)->pdata->din_ofs + ((x) * 0x04))
#define SHA_REG_DIGCNT(dd)		((dd)->pdata->digcnt_ofs)

#define SHA_REG_ODIGEST(dd, x)		((dd)->pdata->odigest_ofs + ((x) * 0x04))

#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
#define SHA_REG_CTRL_ALGO		(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV(dd)			((dd)->pdata->rev_ofs)

#define SHA_REG_MASK(dd)		((dd)->pdata->mask_ofs)
#define SHA_REG_MASK_DMA_EN		(1 << 3)
#define SHA_REG_MASK_IT_EN		(1 << 2)
#define SHA_REG_MASK_SOFTRESET		(1 << 1)
#define SHA_REG_AUTOIDLE		(1 << 0)

#define SHA_REG_SYSSTATUS(dd)		((dd)->pdata->sysstatus_ofs)
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define SHA_REG_MODE(dd)		((dd)->pdata->mode_ofs)
#define SHA_REG_MODE_HMAC_OUTER_HASH	(1 << 7)
#define SHA_REG_MODE_HMAC_KEY_PROC	(1 << 5)
#define SHA_REG_MODE_CLOSE_HASH		(1 << 4)
#define SHA_REG_MODE_ALGO_CONSTANT	(1 << 3)

#define SHA_REG_MODE_ALGO_MASK		(7 << 0)
#define SHA_REG_MODE_ALGO_MD5_128	(0 << 1)
#define SHA_REG_MODE_ALGO_SHA1_160	(1 << 1)
#define SHA_REG_MODE_ALGO_SHA2_224	(2 << 1)
#define SHA_REG_MODE_ALGO_SHA2_256	(3 << 1)
#define SHA_REG_MODE_ALGO_SHA2_384	(1 << 0)
#define SHA_REG_MODE_ALGO_SHA2_512	(3 << 0)

#define SHA_REG_LENGTH(dd)		((dd)->pdata->length_ofs)

#define SHA_REG_IRQSTATUS		0x118
#define SHA_REG_IRQSTATUS_CTX_RDY	(1 << 3)
#define SHA_REG_IRQSTATUS_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQSTATUS_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQSTATUS_OUTPUT_RDY	(1 << 0)

#define SHA_REG_IRQENA			0x11C
#define SHA_REG_IRQENA_CTX_RDY		(1 << 3)
#define SHA_REG_IRQENA_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQENA_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQENA_OUTPUT_RDY	(1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL	HZ

#define DEFAULT_AUTOSUSPEND_DELAY	1000

/* mostly device flags (bit numbers for BIT()/test_bit() on dd->flags) */
#define FLAGS_FINAL		1
#define FLAGS_DMA_ACTIVE	2
#define FLAGS_OUTPUT_READY	3
#define FLAGS_INIT		4
#define FLAGS_CPU		5
#define FLAGS_DMA_READY		6
#define FLAGS_AUTO_XOR		7
#define FLAGS_BE32_SHA1		8
#define FLAGS_SGS_COPIED	9
#define FLAGS_SGS_ALLOCED	10
#define FLAGS_HUGE		11

/* context flags */
#define FLAGS_FINUP		16

#define FLAGS_MODE_SHIFT	18
#define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_MD5		(SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA1		(SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA224	(SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA256	(SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA384	(SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA512	(SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)

#define FLAGS_HMAC		21
#define FLAGS_ERROR		22

#define OP_UPDATE		1
#define OP_FINAL		2

#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))

#define BUFLEN			SHA512_BLOCK_SIZE
#define OMAP_SHA_DMA_THRESHOLD	256

#define OMAP_SHA_MAX_DMA_LEN	(1024 * 2048)

struct omap_sham_dev;

struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;
	u8			op;

	u8			digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;

	/* walk state */
	struct scatterlist	*sg;
	struct scatterlist	sgl[2];
	int			offset;	/* offset in current sg */
	int			sg_len;
	unsigned int		total;	/* total request */

	u8			buffer[] OMAP_ALIGNED;
};

struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
	u8			opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
};

struct omap_sham_ctx {
	struct crypto_engine_ctx	enginectx;
	unsigned long		flags;

	/* fallback stuff */
	struct crypto_shash	*fallback;

	struct omap_sham_hmac_ctx base[];
};

#define OMAP_SHAM_QUEUE_LENGTH	10

struct omap_sham_algs_info {
	struct ahash_alg	*algs_list;
	unsigned int		size;
	unsigned int		registered;
};

struct omap_sham_pdata {
	struct omap_sham_algs_info	*algs_info;
	unsigned int	algs_info_size;
	unsigned long	flags;
	int		digest_size;

	void		(*copy_hash)(struct ahash_request *req, int out);
	void		(*write_ctrl)(struct omap_sham_dev *dd, size_t length,
				      int final, int dma);
	void		(*trigger)(struct omap_sham_dev *dd, size_t length);
	int		(*poll_irq)(struct omap_sham_dev *dd);
	irqreturn_t	(*intr_hdlr)(int irq, void *dev_id);

	u32		odigest_ofs;
	u32		idigest_ofs;
	u32		din_ofs;
	u32		digcnt_ofs;
	u32		rev_ofs;
	u32		mask_ofs;
	u32		sysstatus_ofs;
	u32		mode_ofs;
	u32		length_ofs;

	u32		major_mask;
	u32		major_shift;
	u32		minor_mask;
	u32		minor_shift;
};

struct omap_sham_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	void __iomem		*io_base;
	int			irq;
	int			err;
	struct dma_chan		*dma_lch;
	struct tasklet_struct	done_task;
	u8			polling_mode;
	u8			xmit_buf[BUFLEN] OMAP_ALIGNED;

	unsigned long		flags;
	int			fallback_sz;
	struct crypto_queue	queue;
	struct ahash_request	*req;
	struct crypto_engine	*engine;

	const struct omap_sham_pdata	*pdata;
};

struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
	unsigned long		flags;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};

static int omap_sham_enqueue(struct ahash_request *req, unsigned int op);
static void omap_sham_finish_req(struct ahash_request *req, int err);

static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_sham_write(struct omap_sham_dev *dd,
					u32 offset, u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_sham_read(dd, address);
	val &= ~mask;
	val |= value;
	omap_sham_write(dd, address, val);
}

static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

	while (!(omap_sham_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	}

	return 0;
}

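/*
 * Copy the intermediate digest between the driver context and the
 * IDIGEST registers: out != 0 reads the hash out of the hardware,
 * out == 0 restores a previously saved digest into it.
 */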
static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
		if (out)
			hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
		else
			omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
	}
}

static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int i;

	if (ctx->flags & BIT(FLAGS_HMAC)) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		u32 *opad = (u32 *)bctx->opad;

		for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
			if (out)
				opad[i] = omap_sham_read(dd,
						SHA_REG_ODIGEST(dd, i));
			else
				omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
						opad[i]);
		}
	}

	omap_sham_copy_hash_omap2(req, out);
}

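/*
 * Copy the finished digest into req->result, converting from the word
 * order the IP produced it in. Only the OMAP2-era SHA1 core stores
 * big-endian words (FLAGS_BE32_SHA1); everything else is little endian.
 */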
static void omap_sham_copy_ready_hash(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *in = (u32 *)ctx->digest;
	u32 *hash = (u32 *)req->result;
	int i, d, big_endian = 0;

	if (!hash)
		return;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
		d = MD5_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA1:
		/* OMAP2 SHA1 is big endian */
		if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
			big_endian = 1;
		d = SHA1_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA224:
		d = SHA224_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA256:
		d = SHA256_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA384:
		d = SHA384_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA512:
		d = SHA512_DIGEST_SIZE / sizeof(u32);
		break;
	default:
		d = 0;
	}

	if (big_endian)
		for (i = 0; i < d; i++)
			hash[i] = be32_to_cpup((__be32 *)in + i);
	else
		for (i = 0; i < d; i++)
			hash[i] = le32_to_cpup((__le32 *)in + i);
}

static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
	int err;

	err = pm_runtime_resume_and_get(dd->dev);
	if (err < 0) {
		dev_err(dd->dev, "failed to get sync: %d\n", err);
		return err;
	}

	if (!test_bit(FLAGS_INIT, &dd->flags)) {
		set_bit(FLAGS_INIT, &dd->flags);
		dd->err = 0;
	}

	return 0;
}

static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
				 int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);

	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}

static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
{
}

static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
}

static int get_block_size(struct omap_sham_reqctx *ctx)
{
	int d;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
	case FLAGS_MODE_SHA1:
		d = SHA1_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA224:
	case FLAGS_MODE_SHA256:
		d = SHA256_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA384:
	case FLAGS_MODE_SHA512:
		d = SHA512_BLOCK_SIZE;
		break;
	default:
		d = 0;
	}

	return d;
}

static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
				    u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_sham_write(dd, offset, *value);
}

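/*
 * Program the OMAP4-style MODE register for one hash iteration. On the
 * first iteration of an HMAC the two halves of the precomputed ipad
 * block are loaded into the ODIGEST/IDIGEST registers and
 * HMAC_KEY_PROC lets the IP fold the key in itself; digcnt is bumped
 * by one block to account for it.
 */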
static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
				 int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val, mask;

	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);

	/*
	 * Setting ALGO_CONST only for the first iteration and
	 * CLOSE_HASH only for the last one. Note that flags mode bits
	 * correspond to algorithm encoding in mode register.
	 */
	val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
	if (!ctx->digcnt) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		int bs, nr_dr;

		val |= SHA_REG_MODE_ALGO_CONSTANT;

		if (ctx->flags & BIT(FLAGS_HMAC)) {
			bs = get_block_size(ctx);
			nr_dr = bs / (2 * sizeof(u32));
			val |= SHA_REG_MODE_HMAC_KEY_PROC;
			omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
					  (u32 *)bctx->ipad, nr_dr);
			omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
					  (u32 *)bctx->ipad + nr_dr, nr_dr);
			ctx->digcnt += bs;
		}
	}

	if (final) {
		val |= SHA_REG_MODE_CLOSE_HASH;

		if (ctx->flags & BIT(FLAGS_HMAC))
			val |= SHA_REG_MODE_HMAC_OUTER_HASH;
	}

	mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
	       SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
	       SHA_REG_MODE_HMAC_KEY_PROC;

	dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
	omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
	omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
			     SHA_REG_MASK_IT_EN |
				     (dma ? SHA_REG_MASK_DMA_EN : 0),
			     SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
}

static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
{
	omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
}

static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
			      SHA_REG_IRQSTATUS_INPUT_RDY);
}

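/*
 * PIO transmit path: program the control registers, then feed the data
 * one block at a time through the DIN registers, polling the
 * INPUT_READY flag before each block. The scatterlist is walked with
 * an atomic sg_miter, so no sleeping is allowed here.
 */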
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
			      int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32, bs32, offset = 0;
	const u32 *buffer;
	int mlen;
	struct sg_mapping_iter mi;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %zd, length: %zd, final: %d\n",
						ctx->digcnt, length, final);

	dd->pdata->write_ctrl(dd, length, final, 0);
	dd->pdata->trigger(dd, length);

	/* should be non-zero before the next lines so clocks can be disabled later */
	ctx->digcnt += length;
	ctx->total -= length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_CPU, &dd->flags);

	len32 = DIV_ROUND_UP(length, sizeof(u32));
	bs32 = get_block_size(ctx) / sizeof(u32);

	sg_miter_start(&mi, ctx->sg, ctx->sg_len,
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);

	mlen = 0;

	while (len32) {
		if (dd->pdata->poll_irq(dd))
			return -ETIMEDOUT;

		for (count = 0; count < min(len32, bs32); count++, offset++) {
			if (!mlen) {
				sg_miter_next(&mi);
				mlen = mi.length;
				if (!mlen) {
					pr_err("sg miter failure.\n");
					return -EINVAL;
				}
				offset = 0;
				buffer = mi.addr;
			}
			omap_sham_write(dd, SHA_REG_DIN(dd, count),
					buffer[offset]);
			mlen -= 4;
		}
		len32 -= min(len32, bs32);
	}

	sg_miter_stop(&mi);

	return -EINPROGRESS;
}

static void omap_sham_dma_callback(void *param)
{
	struct omap_sham_dev *dd = param;

	set_bit(FLAGS_DMA_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);
}

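/*
 * DMA transmit path: map the scatterlist and queue a mem-to-dev slave
 * transfer into the DIN FIFO, with the burst size set to one hash
 * block. Completion is signalled through omap_sham_dma_callback and
 * finished from the done tasklet.
 */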
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
			      int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	int ret;

	dev_dbg(dd->dev, "xmit_dma: digcnt: %zd, length: %zd, final: %d\n",
						ctx->digcnt, length, final);

	if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	memset(&cfg, 0, sizeof(cfg));

	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES;

	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
	if (ret) {
		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
		return ret;
	}

	tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
				     DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!tx) {
		dev_err(dd->dev, "prep_slave_sg failed\n");
		return -EINVAL;
	}

	tx->callback = omap_sham_dma_callback;
	tx->callback_param = dd;

	dd->pdata->write_ctrl(dd, length, final, 1);

	ctx->digcnt += length;
	ctx->total -= length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	dmaengine_submit(tx);
	dma_async_issue_pending(dd->dma_lch);

	dd->pdata->trigger(dd, length);

	return -EINPROGRESS;
}

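/*
 * Build a fresh scatterlist of exactly new_len bytes, prepending any
 * bytes already buffered in xmit_buf and clipping the final entry, so
 * the hardware only ever sees a whole-block transfer.
 */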
static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
				   struct scatterlist *sg, int bs, int new_len)
{
	int n = sg_nents(sg);
	struct scatterlist *tmp;
	int offset = ctx->offset;

	ctx->total = new_len;

	if (ctx->bufcnt)
		n++;

	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
	if (!ctx->sg)
		return -ENOMEM;

	sg_init_table(ctx->sg, n);

	tmp = ctx->sg;

	ctx->sg_len = 0;

	if (ctx->bufcnt) {
		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
		tmp = sg_next(tmp);
		ctx->sg_len++;
		new_len -= ctx->bufcnt;
	}

	while (sg && new_len) {
		int len = sg->length - offset;

		if (len <= 0) {
			offset -= sg->length;
			sg = sg_next(sg);
			continue;
		}

		if (new_len < len)
			len = new_len;

		if (len > 0) {
			new_len -= len;
			sg_set_page(tmp, sg_page(sg), len, sg->offset + offset);
			offset = 0;
			ctx->offset = 0;
			ctx->sg_len++;
			if (new_len <= 0)
				break;
			tmp = sg_next(tmp);
		}

		sg = sg_next(sg);
	}

	if (tmp)
		sg_mark_end(tmp);

	set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);

	ctx->offset += new_len - ctx->bufcnt;
	ctx->bufcnt = 0;

	return 0;
}

static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
			      struct scatterlist *sg, int bs,
			      unsigned int new_len)
{
	int pages;
	void *buf;

	pages = get_order(new_len);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		pr_err("Couldn't allocate pages for unaligned cases.\n");
		return -ENOMEM;
	}

	if (ctx->bufcnt)
		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);

	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
				 min(new_len, ctx->total) - ctx->bufcnt, 0);
	sg_init_table(ctx->sgl, 1);
	sg_set_buf(ctx->sgl, buf, new_len);
	ctx->sg = ctx->sgl;
	set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
	ctx->sg_len = 1;
	ctx->offset += new_len - ctx->bufcnt;
	ctx->bufcnt = 0;
	ctx->total = new_len;

	return 0;
}

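/*
 * Check whether the request's scatterlist can be handed to the
 * hardware as-is. Every entry must be 32-bit aligned and a multiple of
 * the block size; if not, either rebuild the list
 * (omap_sham_copy_sg_lists) or bounce the data through freshly
 * allocated pages (omap_sham_copy_sgs). Transfers are also capped at
 * OMAP_SHA_MAX_DMA_LEN per round.
 */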
static int omap_sham_align_sgs(struct scatterlist *sg,
			       int nbytes, int bs, bool final,
			       struct omap_sham_reqctx *rctx)
{
	int n = 0;
	bool aligned = true;
	bool list_ok = true;
	struct scatterlist *sg_tmp = sg;
	int new_len;
	int offset = rctx->offset;
	int bufcnt = rctx->bufcnt;

	if (!sg || !sg->length || !nbytes) {
		if (bufcnt) {
			bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
			sg_init_table(rctx->sgl, 1);
			sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt);
			rctx->sg = rctx->sgl;
			rctx->sg_len = 1;
		}

		return 0;
	}

	new_len = nbytes;

	if (offset)
		list_ok = false;

	if (final)
		new_len = DIV_ROUND_UP(new_len, bs) * bs;
	else
		new_len = (new_len - 1) / bs * bs;

	if (!new_len)
		return 0;

	if (nbytes != new_len)
		list_ok = false;

	while (nbytes > 0 && sg_tmp) {
		n++;

		if (bufcnt) {
			if (!IS_ALIGNED(bufcnt, bs)) {
				aligned = false;
				break;
			}
			nbytes -= bufcnt;
			bufcnt = 0;
			if (!nbytes)
				list_ok = false;

			continue;
		}

#ifdef CONFIG_ZONE_DMA
		if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
			aligned = false;
			break;
		}
#endif

		if (offset < sg_tmp->length) {
			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
				aligned = false;
				break;
			}

			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
				aligned = false;
				break;
			}
		}

		if (offset) {
			offset -= sg_tmp->length;
			if (offset < 0) {
				nbytes += offset;
				offset = 0;
			}
		} else {
			nbytes -= sg_tmp->length;
		}

		sg_tmp = sg_next(sg_tmp);

		if (nbytes < 0) {
			list_ok = false;
			break;
		}
	}

	if (new_len > OMAP_SHA_MAX_DMA_LEN) {
		new_len = OMAP_SHA_MAX_DMA_LEN;
		aligned = false;
	}

	if (!aligned)
		return omap_sham_copy_sgs(rctx, sg, bs, new_len);
	else if (!list_ok)
		return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);

	rctx->total = new_len;
	rctx->offset += new_len;
	rctx->sg_len = n;
	if (rctx->bufcnt) {
		sg_init_table(rctx->sgl, 2);
		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
		sg_chain(rctx->sgl, 2, sg);
		rctx->sg = rctx->sgl;
	} else {
		rctx->sg = sg;
	}

	return 0;
}

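/*
 * crypto_engine prepare_request hook: merge previously buffered bytes
 * with the new request data, carve off a block-aligned chunk for the
 * hardware and stash the remainder in ctx->buffer for the next update.
 * Requests whose tail exceeds the buffer are flagged FLAGS_HUGE and
 * re-enqueued from omap_sham_finish_req.
 */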
static int omap_sham_prepare_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
	int bs;
	int ret;
	unsigned int nbytes;
	bool final = rctx->flags & BIT(FLAGS_FINUP);
	bool update = rctx->op == OP_UPDATE;
	int hash_later;

	bs = get_block_size(rctx);

	nbytes = rctx->bufcnt;

	if (update)
		nbytes += req->nbytes - rctx->offset;

	dev_dbg(rctx->dd->dev,
		"%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%zd\n",
		__func__, nbytes, bs, rctx->total, rctx->offset,
		rctx->bufcnt);

	if (!nbytes)
		return 0;

	rctx->total = nbytes;

	if (update && req->nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
		int len = bs - rctx->bufcnt % bs;

		if (len > req->nbytes)
			len = req->nbytes;
		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
					 0, len, 0);
		rctx->bufcnt += len;
		rctx->offset = len;
	}

	if (rctx->bufcnt)
		memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);

	ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
	if (ret)
		return ret;

	hash_later = nbytes - rctx->total;
	if (hash_later < 0)
		hash_later = 0;

	if (hash_later && hash_later <= rctx->buflen) {
		scatterwalk_map_and_copy(rctx->buffer,
					 req->src,
					 req->nbytes - hash_later,
					 hash_later, 0);

		rctx->bufcnt = hash_later;
	} else {
		rctx->bufcnt = 0;
	}

	if (hash_later > rctx->buflen)
		set_bit(FLAGS_HUGE, &rctx->dd->flags);

	rctx->total = min(nbytes, rctx->total);

	return 0;
}

static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);

	clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	return 0;
}

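/*
 * Pick a device for this request: reuse the one already bound to the
 * context, otherwise take the first entry on the list and rotate it to
 * the tail for simple round-robin load balancing.
 */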
static struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
{
	struct omap_sham_dev *dd;

	if (ctx->dd)
		return ctx->dd;

	spin_lock_bh(&sham.lock);
	dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
	list_move_tail(&dd->list, &sham.dev_list);
	ctx->dd = dd;
	spin_unlock_bh(&sham.lock);

	return dd;
}

static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd;
	int bs = 0;

	ctx->dd = NULL;

	dd = omap_sham_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_MD5;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA1;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA224;
		bs = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA256;
		bs = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA384;
		bs = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA512;
		bs = SHA512_BLOCK_SIZE;
		break;
	}

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->total = 0;
	ctx->offset = 0;
	ctx->buflen = BUFLEN;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
			struct omap_sham_hmac_ctx *bctx = tctx->base;

			memcpy(ctx->buffer, bctx->ipad, bs);
			ctx->bufcnt = bs;
		}

		ctx->flags |= BIT(FLAGS_HMAC);
	}

	return 0;
}

static int omap_sham_update_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err;
	bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
		!(dd->flags & BIT(FLAGS_HUGE));

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zd, final: %d",
		ctx->total, ctx->digcnt, final);

	if (ctx->total < get_block_size(ctx) ||
	    ctx->total < dd->fallback_sz)
		ctx->flags |= BIT(FLAGS_CPU);

	if (ctx->flags & BIT(FLAGS_CPU))
		err = omap_sham_xmit_cpu(dd, ctx->total, final);
	else
		err = omap_sham_xmit_dma(dd, ctx->total, final);

	/* wait for DMA completion before we can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %zd\n", err, ctx->digcnt);

	return err;
}

static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if (dd->flags & BIT(FLAGS_HUGE))
		return 0;

	if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
		/*
		 * faster to handle last block with cpu or
		 * use cpu when dma is not present.
		 */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma(dd, ctx->total, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->total, 1);

	ctx->bufcnt = 0;

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}

static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int err;
	bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
			!(dd->flags & BIT(FLAGS_HUGE));

	dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d",
		ctx->op, ctx->total, ctx->digcnt, final);

	dd->req = req;

	err = omap_sham_hw_init(dd);
	if (err)
		return err;

	if (ctx->digcnt)
		dd->pdata->copy_hash(req, 0);

	if (ctx->op == OP_UPDATE)
		err = omap_sham_update_req(dd);
	else if (ctx->op == OP_FINAL)
		err = omap_sham_final_req(dd);

	if (err != -EINPROGRESS)
		omap_sham_finish_req(req, err);

	return 0;
}

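/*
 * Software tail of an HMAC on hardware without AUTO_XOR support: run
 * the outer hash (opad || inner digest) through the fallback shash.
 */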
static int omap_sham_finish_hmac(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	SHASH_DESC_ON_STACK(shash, bctx->shash);

	shash->tfm = bctx->shash;

	return crypto_shash_init(shash) ?:
	       crypto_shash_update(shash, bctx->opad, bs) ?:
	       crypto_shash_finup(shash, req->result, ds, req->result);
}

static int omap_sham_finish(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt) {
		omap_sham_copy_ready_hash(req);
		if ((ctx->flags & BIT(FLAGS_HMAC)) &&
				!test_bit(FLAGS_AUTO_XOR, &dd->flags))
			err = omap_sham_finish_hmac(req);
	}

	dev_dbg(dd->dev, "digcnt: %zd, bufcnt: %zd\n", ctx->digcnt, ctx->bufcnt);

	return err;
}

static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
		free_pages((unsigned long)sg_virt(ctx->sg),
			   get_order(ctx->sg->length));

	if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
		kfree(ctx->sg);

	ctx->sg = NULL;

	dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED) |
		       BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
		       BIT(FLAGS_OUTPUT_READY));

	if (!err)
		dd->pdata->copy_hash(req, 1);

	if (dd->flags & BIT(FLAGS_HUGE)) {
		/* Re-enqueue the request */
		omap_sham_enqueue(req, ctx->op);
		return;
	}

	if (!err) {
		if (test_bit(FLAGS_FINAL, &dd->flags))
			err = omap_sham_finish(req);
	} else {
		ctx->flags |= BIT(FLAGS_ERROR);
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);

	ctx->offset = 0;

	crypto_finalize_hash_request(dd->engine, req, err);
}

static int omap_sham_handle_queue(struct omap_sham_dev *dd,
				  struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(dd->engine, req);
}

static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	ctx->op = op;

	return omap_sham_handle_queue(dd, req);
}

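/*
 * update() buffers small chunks internally and only enqueues work on
 * the engine once more than a buffer's worth (BUFLEN) of data has
 * accumulated.
 */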
static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = omap_sham_find_dev(ctx);

	if (!req->nbytes)
		return 0;

	if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
					 0, req->nbytes, 0);
		ctx->bufcnt += req->nbytes;
		return 0;
	}

	if (dd->polling_mode)
		ctx->flags |= BIT(FLAGS_CPU);

	return omap_sham_enqueue(req, OP_UPDATE);
}

static int omap_sham_final_shash(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int offset = 0;

	/*
	 * If we are running HMAC on limited hardware support, skip
	 * the ipad in the beginning of the buffer if we are going for
	 * software fallback algorithm.
	 */
	if (test_bit(FLAGS_HMAC, &ctx->flags) &&
	    !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
		offset = get_block_size(ctx);

	return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer + offset,
				       ctx->bufcnt - offset, req->result);
}

static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= BIT(FLAGS_FINUP);

	if (ctx->flags & BIT(FLAGS_ERROR))
		return 0; /* uncompleted hash is not needed */

	/*
	 * OMAP HW accel works only with buffers >= 9.
	 * HMAC is always >= 9 because ipad == block size.
	 * If the buffer size is less than fallback_sz, we use fallback
	 * SW hashing, as using DMA + HW in this case doesn't provide
	 * any benefit.
	 */
	if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
		return omap_sham_final_shash(req);
	else if (ctx->bufcnt)
		return omap_sham_enqueue(req, OP_FINAL);

	/* copy ready hash (+ finalize hmac) */
	return omap_sham_finish(req);
}

static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= BIT(FLAGS_FINUP);

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;
	/*
	 * final() always has to be called to clean up resources even
	 * if update() failed, except when the result is EINPROGRESS.
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}

static int omap_sham_digest(struct ahash_request *req)
{
	return omap_sham_init(req) ?: omap_sham_finup(req);
}

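/*
 * HMAC key setup: keys longer than a block are first digested down to
 * the digest size, then zero padded to a full block. Hardware with
 * AUTO_XOR consumes the padded key directly; otherwise the ipad/opad
 * XOR is done here in software.
 */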
static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
		      unsigned int keylen)
{
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	int err, i;

	err = crypto_shash_setkey(tctx->fallback, key, keylen);
	if (err)
		return err;

	if (keylen > bs) {
		err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
					      bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);

	if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) {
		memcpy(bctx->opad, bctx->ipad, bs);

		for (i = 0; i < bs; i++) {
			bctx->ipad[i] ^= HMAC_IPAD_VALUE;
			bctx->opad[i] ^= HMAC_OPAD_VALUE;
		}
	}

	return err;
}

static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("omap-sham: fallback driver '%s' could not be loaded.\n",
		       alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct omap_sham_reqctx) + BUFLEN);

	if (alg_base) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		tctx->flags |= BIT(FLAGS_HMAC);
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("omap-sham: base driver '%s' could not be loaded.\n",
			       alg_base);
			crypto_free_shash(tctx->fallback);
			return PTR_ERR(bctx->shash);
		}
	}

	tctx->enginectx.op.do_one_request = omap_sham_hash_one_req;
	tctx->enginectx.op.prepare_request = omap_sham_prepare_request;
	tctx->enginectx.op.unprepare_request = NULL;

	return 0;
}

static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, NULL);
}

static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha1");
}

static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha224");
}

static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha256");
}

static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "md5");
}

static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha384");
}

static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha512");
}

static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		crypto_free_shash(bctx->shash);
	}
}

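/*
 * export/import serialize the whole request context plus any buffered
 * partial block, which is why the algorithms are registered with
 * halg.statesize = sizeof(struct omap_sham_reqctx) + BUFLEN.
 */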
static int omap_sham_export(struct ahash_request *req, void *out)
{
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);

	return 0;
}

static int omap_sham_import(struct ahash_request *req, const void *in)
{
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
	const struct omap_sham_reqctx *ctx_in = in;

	memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);

	return 0;
}

static struct ahash_alg algs_sha1_md5[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	}
}
};

/* OMAP4 has some algs in addition to what OMAP2 has */
static struct ahash_alg algs_sha224_sha256[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA224_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha224",
		.cra_driver_name	= "omap-sha224",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA224_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "omap-sha256",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA224_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha224)",
		.cra_driver_name	= "omap-hmac-sha224",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA224_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha224_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha256)",
		.cra_driver_name	= "omap-hmac-sha256",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha256_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
};

static struct ahash_alg algs_sha384_sha512[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA384_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha384",
		.cra_driver_name	= "omap-sha384",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA512_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha512",
		.cra_driver_name	= "omap-sha512",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA384_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha384)",
		.cra_driver_name	= "omap-hmac-sha384",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha384_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA512_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha512)",
		.cra_driver_name	= "omap-hmac-sha512",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha512_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
};

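/*
 * Bottom half shared by the PIO and DMA paths: runs from the done
 * tasklet once the IRQ handler has flagged OUTPUT_READY (and, for DMA,
 * once the transfer callback has flagged DMA_READY) and finishes the
 * current request.
 */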
static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	int err = 0;

	dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);

	if (test_bit(FLAGS_CPU, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
			goto finish;
	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
		if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
			omap_sham_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
			/* hash or semi-hash ready */
			clear_bit(FLAGS_DMA_READY, &dd->flags);
			goto finish;
		}
	}

	return;

finish:
	dev_dbg(dd->dev, "update done: err: %d\n", err);
	/* finish current request */
	omap_sham_finish_req(dd->req, err);
}

static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
{
	set_bit(FLAGS_OUTPUT_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);

	return IRQ_HANDLED;
}

static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
				 SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	return omap_sham_irq_common(dd);
}

static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);

	return omap_sham_irq_common(dd);
}

static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
};

static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
	.algs_info	= omap_sham_algs_info_omap2,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap2),
	.flags		= BIT(FLAGS_BE32_SHA1),
	.digest_size	= SHA1_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap2,
	.write_ctrl	= omap_sham_write_ctrl_omap2,
	.trigger	= omap_sham_trigger_omap2,
	.poll_irq	= omap_sham_poll_irq_omap2,
	.intr_hdlr	= omap_sham_irq_omap2,
	.idigest_ofs	= 0x00,
	.din_ofs	= 0x1c,
	.digcnt_ofs	= 0x14,
	.rev_ofs	= 0x5c,
	.mask_ofs	= 0x60,
	.sysstatus_ofs	= 0x64,
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};

#ifdef CONFIG_OF
static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};

static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
	.algs_info	= omap_sham_algs_info_omap4,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap4),
	.flags		= BIT(FLAGS_AUTO_XOR),
	.digest_size	= SHA256_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap4,
	.write_ctrl	= omap_sham_write_ctrl_omap4,
	.trigger	= omap_sham_trigger_omap4,
	.poll_irq	= omap_sham_poll_irq_omap4,
	.intr_hdlr	= omap_sham_irq_omap4,
	.idigest_ofs	= 0x020,
	.odigest_ofs	= 0x0,
	.din_ofs	= 0x080,
	.digcnt_ofs	= 0x040,
	.rev_ofs	= 0x100,
	.mask_ofs	= 0x110,
	.sysstatus_ofs	= 0x114,
	.mode_ofs	= 0x44,
	.length_ofs	= 0x48,
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};

static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
	{
		.algs_list	= algs_sha384_sha512,
		.size		= ARRAY_SIZE(algs_sha384_sha512),
	},
};

static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
	.algs_info	= omap_sham_algs_info_omap5,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap5),
	.flags		= BIT(FLAGS_AUTO_XOR),
	.digest_size	= SHA512_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap4,
	.write_ctrl	= omap_sham_write_ctrl_omap4,
	.trigger	= omap_sham_trigger_omap4,
	.poll_irq	= omap_sham_poll_irq_omap4,
	.intr_hdlr	= omap_sham_irq_omap4,
	.idigest_ofs	= 0x240,
	.odigest_ofs	= 0x200,
	.din_ofs	= 0x080,
	.digcnt_ofs	= 0x280,
	.rev_ofs	= 0x100,
	.mask_ofs	= 0x110,
	.sysstatus_ofs	= 0x114,
	.mode_ofs	= 0x284,
	.length_ofs	= 0x288,
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};

static const struct of_device_id omap_sham_of_match[] = {
	{
		.compatible	= "ti,omap2-sham",
		.data		= &omap_sham_pdata_omap2,
	},
	{
		.compatible	= "ti,omap3-sham",
		.data		= &omap_sham_pdata_omap2,
	},
	{
		.compatible	= "ti,omap4-sham",
		.data		= &omap_sham_pdata_omap4,
	},
	{
		.compatible	= "ti,omap5-sham",
		.data		= &omap_sham_pdata_omap5,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_sham_of_match);

static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	int err = 0;

	dd->pdata = of_device_get_match_data(dev);
	if (!dd->pdata) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->irq = irq_of_parse_and_map(node, 0);
	if (!dd->irq) {
		dev_err(dev, "can't translate OF irq value\n");
		err = -EINVAL;
		goto err;
	}

err:
	return err;
}
#else
static const struct of_device_id omap_sham_of_match[] = {
	{},
};

static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif

static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		err = dd->irq;
		goto err;
	}

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_sham_pdata_omap2;

err:
	return err;
}

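/*
 * sysfs knobs: "fallback" sets the size below which requests are
 * handed to the software fallback instead of the accelerator, and
 * "queue_len" resizes the internal crypto queue.
 */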
static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dd->fallback_sz);
}

static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t size)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);
	ssize_t status;
	long value;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	/* HW accelerator only works with buffers > 9 */
	if (value < 9) {
		dev_err(dev, "minimum fallback size 9\n");
		return -EINVAL;
	}

	dd->fallback_sz = value;

	return size;
}

static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dd->queue.max_qlen);
}

static ssize_t queue_len_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t size)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);
	ssize_t status;
	long value;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	if (value < 1)
		return -EINVAL;

	/*
	 * Changing the queue size on the fly is safe; if the new size is
	 * smaller than the current size, the queue simply stops accepting
	 * new entries until it has shrunk enough.
	 */
	dd->queue.max_qlen = value;

	return size;
}

static DEVICE_ATTR_RW(queue_len);
static DEVICE_ATTR_RW(fallback);

static struct attribute *omap_sham_attrs[] = {
	&dev_attr_queue_len.attr,
	&dev_attr_fallback.attr,
	NULL,
};

static struct attribute_group omap_sham_attr_group = {
	.attrs = omap_sham_attrs,
};

static int omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource res;
	dma_cap_mask_t mask;
	int err, i, j;
	u32 rev;

	dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
			       omap_sham_get_res_pdev(dd, pdev, &res);
	if (err)
		goto data_err;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto data_err;
	}
	dd->phys_base = res.start;

	err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
			       IRQF_TRIGGER_NONE, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq %d, err = %d\n",
			dd->irq, err);
		goto data_err;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch = dma_request_chan(dev, "rx");
	if (IS_ERR(dd->dma_lch)) {
		err = PTR_ERR(dd->dma_lch);
		if (err == -EPROBE_DEFER)
			goto data_err;

		dd->polling_mode = 1;
		dev_dbg(dev, "using polling mode instead of dma\n");
	}

	dd->flags |= dd->pdata->flags;
	sham.flags |= dd->pdata->flags;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);

	dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;

	pm_runtime_enable(dev);
	pm_runtime_irq_safe(dev);

	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		dev_err(dev, "failed to get sync: %d\n", err);
		goto err_pm;
	}

	rev = omap_sham_read(dd, SHA_REG_REV(dd));
	pm_runtime_put_sync(&pdev->dev);

	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		(rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
		(rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	spin_lock_bh(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock_bh(&sham.lock);

	dd->engine = crypto_engine_alloc_init(dev, 1);
	if (!dd->engine) {
		err = -ENOMEM;
		goto err_engine;
	}

	err = crypto_engine_start(dd->engine);
	if (err)
		goto err_engine_start;

	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		if (dd->pdata->algs_info[i].registered)
			break;

		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
			struct ahash_alg *alg;

			alg = &dd->pdata->algs_info[i].algs_list[j];
			alg->export = omap_sham_export;
			alg->import = omap_sham_import;
			alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
					      BUFLEN;
			err = crypto_register_ahash(alg);
			if (err)
				goto err_algs;

			dd->pdata->algs_info[i].registered++;
		}
	}

	err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
	if (err) {
		dev_err(dev, "could not create sysfs device attrs\n");
		goto err_algs;
	}

	return 0;

err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_ahash(
					&dd->pdata->algs_info[i].algs_list[j]);
err_engine_start:
	crypto_engine_exit(dd->engine);
err_engine:
	spin_lock_bh(&sham.lock);
	list_del(&dd->list);
	spin_unlock_bh(&sham.lock);
err_pm:
	pm_runtime_disable(dev);
	if (!dd->polling_mode)
		dma_release_channel(dd->dma_lch);
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int omap_sham_remove(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	int i, j;

	dd = platform_get_drvdata(pdev);
	if (!dd)
		return -ENODEV;
	spin_lock_bh(&sham.lock);
	list_del(&dd->list);
	spin_unlock_bh(&sham.lock);
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
			crypto_unregister_ahash(
					&dd->pdata->algs_info[i].algs_list[j]);
			dd->pdata->algs_info[i].registered--;
		}
	tasklet_kill(&dd->done_task);
	pm_runtime_disable(&pdev->dev);

	if (!dd->polling_mode)
		dma_release_channel(dd->dma_lch);

	sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_sham_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_sham_resume(struct device *dev)
{
	int err = pm_runtime_resume_and_get(dev);

	if (err < 0) {
		dev_err(dev, "failed to get sync: %d\n", err);
		return err;
	}
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);

static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.pm	= &omap_sham_pm_ops,
		.of_match_table	= omap_sham_of_match,
	},
};

module_platform_driver(omap_sham_driver);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");
MODULE_ALIAS("platform:omap-sham");