// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platforms: Orion, Kirkwood, Armada. This
 * driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mbus.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "cesa.h"

/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128

struct mv_cesa_dev *cesa_dev;

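/*
 * Pop the next pending request off the engine queue and report the current
 * backlog head, if any. Must be called with engine->lock held.
 */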
struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
			   struct crypto_async_request **backlog)
{
	*backlog = crypto_get_backlog(&engine->queue);

	return crypto_dequeue_request(&engine->queue);
}

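/*
 * If the engine is idle, grab the next pending request, mark the backlogged
 * submitter's request as in progress, and kick off processing.
 */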
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
{
	struct crypto_async_request *req = NULL, *backlog = NULL;
	struct mv_cesa_ctx *ctx;

	spin_lock_bh(&engine->lock);
	if (!engine->req) {
		req = mv_cesa_dequeue_req_locked(engine, &backlog);
		engine->req = req;
	}
	spin_unlock_bh(&engine->lock);

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(req->tfm);
	ctx->ops->step(req);
}

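/*
 * Standard (non-TDMA) completion path: let the transform-specific handler
 * process the interrupt status, then either queue the request for final
 * completion or step it again if more work remains.
 */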
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	int res;

	req = engine->req;
	ctx = crypto_tfm_ctx(req->tfm);
	res = ctx->ops->process(req, status);

	if (res == 0) {
		ctx->ops->complete(req);
		mv_cesa_engine_enqueue_complete_request(engine, req);
	} else if (res == -EINPROGRESS) {
		ctx->ops->step(req);
	}

	return res;
}

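/* Use the TDMA path whenever a descriptor chain is active on the engine. */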
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
	if (engine->chain.first && engine->chain.last)
		return mv_cesa_tdma_process(engine, status);

	return mv_cesa_std_process(engine, status);
}

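/*
 * Completion callbacks may assume they run with BHs disabled (as they would
 * from a softirq), while our threaded IRQ handler runs in process context,
 * hence the explicit local_bh_disable()/local_bh_enable() pair.
 */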
static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
		     int res)
{
	ctx->ops->cleanup(req);
	local_bh_disable();
	req->complete(req, res);
	local_bh_enable();
}

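/*
 * Threaded interrupt handler: acknowledge and process engine interrupts in a
 * loop, rearm the engine with the next pending request, then run every
 * request sitting on the completion queue to completion.
 */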
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		int res;

		mask = mv_cesa_get_int_mask(engine);
		status = readl(engine->regs + CESA_SA_INT_STATUS);

		if (!(status & mask))
			break;

		/*
		 * TODO: avoid clearing the FPGA_INT_STATUS on platforms
		 * where it is not relevant.
		 */
		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
		writel(~status, engine->regs + CESA_SA_INT_STATUS);

		/* Process fetched requests */
		res = mv_cesa_int_process(engine, status & mask);
		ret = IRQ_HANDLED;

		spin_lock_bh(&engine->lock);
		req = engine->req;
		if (res != -EINPROGRESS)
			engine->req = NULL;
		spin_unlock_bh(&engine->lock);

		ctx = crypto_tfm_ctx(req->tfm);

		if (res && res != -EINPROGRESS)
			mv_cesa_complete_req(ctx, req, res);

		/* Launch the next pending request */
		mv_cesa_rearm_engine(engine);

		/* Iterate over the complete queue */
		while (true) {
			req = mv_cesa_engine_dequeue_complete_request(engine);
			if (!req)
				break;

			ctx = crypto_tfm_ctx(req->tfm);
			mv_cesa_complete_req(ctx, req, 0);
		}
	}

	return ret;
}

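/*
 * Enqueue a request on the engine queue. DMA-backed requests that were
 * accepted (or backlogged) are chained onto the TDMA descriptor list while
 * the lock is still held; the engine is then rearmed in case it was idle.
 */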
int mv_cesa_queue_req(struct crypto_async_request *req,
		      struct mv_cesa_req *creq)
{
	int ret;
	struct mv_cesa_engine *engine = creq->engine;

	spin_lock_bh(&engine->lock);
	ret = crypto_enqueue_request(&engine->queue, req);
	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
	    (ret == -EINPROGRESS || ret == -EBUSY))
		mv_cesa_tdma_chain(engine, creq);
	spin_unlock_bh(&engine->lock);

	if (ret != -EINPROGRESS)
		return ret;

	mv_cesa_rearm_engine(engine);

	return -EINPROGRESS;
}

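/*
 * Register every algorithm supported by this variant. On failure, unregister
 * whatever was registered so far; note that the ahash error path deliberately
 * falls through to the cipher one after resetting 'i' to the cipher count.
 */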
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
	int ret;
	int i, j;

	for (i = 0; i < cesa->caps->ncipher_algs; i++) {
		ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
		if (ret)
			goto err_unregister_crypto;
	}

	for (i = 0; i < cesa->caps->nahash_algs; i++) {
		ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
		if (ret)
			goto err_unregister_ahash;
	}

	return 0;

err_unregister_ahash:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
	i = cesa->caps->ncipher_algs;

err_unregister_crypto:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);

	return ret;
}

static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
	int i;

	for (i = 0; i < cesa->caps->nahash_algs; i++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

	for (i = 0; i < cesa->caps->ncipher_algs; i++)
		crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
}

static struct skcipher_alg *orion_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *orion_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
};

static struct skcipher_alg *armada_370_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *armada_370_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_sha256_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
	&mv_ahmac_sha256_alg,
};

static const struct mv_cesa_caps orion_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = false,
};

static const struct mv_cesa_caps kirkwood_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_370_caps = {
	.nengines = 1,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_xp_caps = {
	.nengines = 2,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
	{}
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);

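/*
 * Program the TDMA address decoding windows so the engine can reach DRAM:
 * clear all four windows, then map one window per chip-select described by
 * the mbus DRAM info.
 */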
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
			  const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = engine->regs;
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
	}
}

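/*
 * On TDMA-capable platforms, create the managed DMA pools backing the TDMA
 * descriptors, operation contexts, hash cache and padding buffers.
 */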
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
	struct device *dev = cesa->dev;
	struct mv_cesa_dev_dma *dma;

	if (!cesa->caps->has_tdma)
		return 0;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
					sizeof(struct mv_cesa_tdma_desc),
					16, 0);
	if (!dma->tdma_desc_pool)
		return -ENOMEM;

	dma->op_pool = dmam_pool_create("cesa_op", dev,
					sizeof(struct mv_cesa_op_ctx), 16, 0);
	if (!dma->op_pool)
		return -ENOMEM;

	dma->cache_pool = dmam_pool_create("cesa_cache", dev,
					   CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
	if (!dma->cache_pool)
		return -ENOMEM;

	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
	if (!dma->padding_pool)
		return -ENOMEM;

	cesa->dma = dma;

	return 0;
}

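/*
 * Acquire the SRAM used to stage data for engine 'idx': prefer a region
 * exposed through the "marvell,crypto-srams" phandle, and fall back to a
 * named memory resource that is ioremapped and DMA-mapped directly.
 */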
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];
	const char *res_name = "sram";
	struct resource *res;

	engine->pool = of_gen_pool_get(cesa->dev->of_node,
				       "marvell,crypto-srams", idx);
	if (engine->pool) {
		engine->sram = gen_pool_dma_alloc(engine->pool,
						  cesa->sram_size,
						  &engine->sram_dma);
		if (engine->sram)
			return 0;

		engine->pool = NULL;
		return -ENOMEM;
	}

	if (cesa->caps->nengines > 1) {
		if (!idx)
			res_name = "sram0";
		else
			res_name = "sram1";
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   res_name);
	if (!res || resource_size(res) < cesa->sram_size)
		return -EINVAL;

	engine->sram = devm_ioremap_resource(cesa->dev, res);
	if (IS_ERR(engine->sram))
		return PTR_ERR(engine->sram);

	engine->sram_dma = dma_map_resource(cesa->dev, res->start,
					    cesa->sram_size,
					    DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(cesa->dev, engine->sram_dma))
		return -ENOMEM;

	return 0;
}

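/* Release the SRAM obtained by mv_cesa_get_sram(). */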
static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];

	if (engine->pool)
		gen_pool_free(engine->pool, (unsigned long)engine->sram,
			      cesa->sram_size);
	else
		dma_unmap_resource(cesa->dev, engine->sram_dma,
				   cesa->sram_size, DMA_BIDIRECTIONAL, 0);
}

static int mv_cesa_probe(struct platform_device *pdev)
{
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct mbus_dram_target_info *dram;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct mv_cesa_dev *cesa;
	struct mv_cesa_engine *engines;
	int irq, ret, i, cpu;
	u32 sram_size;

	if (cesa_dev) {
		dev_err(&pdev->dev, "Only one CESA device authorized\n");
		return -EEXIST;
	}

	if (dev->of_node) {
		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
		if (!match || !match->data)
			return -ENOTSUPP;

		caps = match->data;
	}

	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
	if (!cesa)
		return -ENOMEM;

	cesa->caps = caps;
	cesa->dev = dev;

	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
			     &sram_size);
	if (sram_size < CESA_SA_MIN_SRAM_SIZE)
		sram_size = CESA_SA_MIN_SRAM_SIZE;

	cesa->sram_size = sram_size;
	cesa->engines = devm_kcalloc(dev, caps->nengines, sizeof(*engines),
				     GFP_KERNEL);
	if (!cesa->engines)
		return -ENOMEM;

	spin_lock_init(&cesa->lock);

	cesa->regs = devm_platform_ioremap_resource_byname(pdev, "regs");
	if (IS_ERR(cesa->regs))
		return PTR_ERR(cesa->regs);

	ret = mv_cesa_dev_dma_init(cesa);
	if (ret)
		return ret;

	dram = mv_mbus_dram_info_nooverlap();

	platform_set_drvdata(pdev, cesa);

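	/* Bring up each engine: SRAM, IRQ, clocks, MBus windows, registers. */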
	for (i = 0; i < caps->nengines; i++) {
		struct mv_cesa_engine *engine = &cesa->engines[i];
		char res_name[7];

		engine->id = i;
		spin_lock_init(&engine->lock);

		ret = mv_cesa_get_sram(pdev, i);
		if (ret)
			goto err_cleanup;

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_cleanup;
		}

		engine->irq = irq;

		/*
		 * Not all platforms can gate the CESA clocks: do not complain
		 * if the clock does not exist.
		 */
		snprintf(res_name, sizeof(res_name), "cesa%d", i);
		engine->clk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->clk)) {
			engine->clk = devm_clk_get(dev, NULL);
			if (IS_ERR(engine->clk))
				engine->clk = NULL;
		}

		snprintf(res_name, sizeof(res_name), "cesaz%d", i);
		engine->zclk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->zclk))
			engine->zclk = NULL;

		ret = clk_prepare_enable(engine->clk);
		if (ret)
			goto err_cleanup;

		ret = clk_prepare_enable(engine->zclk);
		if (ret)
			goto err_cleanup;

		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

		if (dram && cesa->caps->has_tdma)
			mv_cesa_conf_mbus_windows(engine, dram);

		writel(0, engine->regs + CESA_SA_INT_STATUS);
		writel(CESA_SA_CFG_STOP_DIG_ERR,
		       engine->regs + CESA_SA_CFG);
		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
		       engine->regs + CESA_SA_DESC_P0);

		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
						IRQF_ONESHOT,
						dev_name(&pdev->dev),
						engine);
		if (ret)
			goto err_cleanup;

		/* Set affinity */
		cpu = cpumask_local_spread(engine->id, NUMA_NO_NODE);
		irq_set_affinity_hint(irq, get_cpu_mask(cpu));

		crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
		atomic_set(&engine->load, 0);
		INIT_LIST_HEAD(&engine->complete_queue);
	}

	cesa_dev = cesa;

	ret = mv_cesa_add_algs(cesa);
	if (ret) {
		cesa_dev = NULL;
		goto err_cleanup;
	}

	dev_info(dev, "CESA device successfully registered\n");

	return 0;

err_cleanup:
	for (i = 0; i < caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
		if (cesa->engines[i].irq > 0)
			irq_set_affinity_hint(cesa->engines[i].irq, NULL);
	}

	return ret;
}

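/*
 * Undo probe: unregister the algorithms, then tear down each engine's clocks,
 * SRAM mapping and IRQ affinity hint.
 */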
static int mv_cesa_remove(struct platform_device *pdev)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	int i;

	mv_cesa_remove_algs(cesa);

	for (i = 0; i < cesa->caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
		irq_set_affinity_hint(cesa->engines[i].irq, NULL);
	}

	return 0;
}

static const struct platform_device_id mv_cesa_plat_id_table[] = {
	{ .name = "mv_crypto" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);

static struct platform_driver marvell_cesa = {
	.probe		= mv_cesa_probe,
	.remove		= mv_cesa_remove,
	.id_table	= mv_cesa_plat_id_table,
	.driver		= {
		.name	= "marvell-cesa",
		.of_match_table = mv_cesa_of_match_table,
	},
};
module_platform_driver(marvell_cesa);

MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");