// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-cipher.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 *
 * You can find a link to the datasheet in Documentation/arm/sunxi.rst
 */

#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ce.h"

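/*
 * Check whether the request can be handled by the CE hardware or must go
 * through the software fallback: the engine supports at most MAX_SG
 * scatterlist entries and requires non-empty, block-aligned lengths and
 * word-aligned buffers.
 */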
static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct scatterlist *sg;

	if (sg_nents(areq->src) > MAX_SG || sg_nents(areq->dst) > MAX_SG)
		return true;

	if (areq->cryptlen < crypto_skcipher_ivsize(tfm))
		return true;

	if (areq->cryptlen == 0 || areq->cryptlen % 16)
		return true;

	/* The CE DMA engine needs word-sized and word-aligned buffers */
	sg = areq->src;
	while (sg) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		sg = sg_next(sg);
	}
	sg = areq->dst;
	while (sg) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		sg = sg_next(sg);
	}
	return false;
}

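/*
 * Handle a request with the software fallback tfm allocated at init time,
 * reusing the original request's callback, buffers and IV.
 */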
static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir & CE_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

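/*
 * Prepare the CE task descriptor for a request: fill in the control words,
 * then DMA-map the key, an IV bounce buffer and the source/destination
 * scatterlists. Called by the crypto engine before sun8i_ce_cipher_run().
 */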
static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	unsigned int todo, len, offset, ivsize;
	u32 common, sym;
	int flow, i;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);

	dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt->stat_req++;
#endif

	flow = rctx->flow;

	chan = &ce->chanlist[flow];

	cet = chan->tl;
	memset(cet, 0, sizeof(struct ce_task));

	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_cipher[algt->ce_algo_id];
	common |= rctx->op_dir | CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);
	/* CTS and recent CE (H6) need the length in bytes, in words otherwise */
	if (ce->variant->cipher_t_dlen_in_bytes)
		cet->t_dlen = cpu_to_le32(areq->cryptlen);
	else
		cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);

	sym = ce->variant->op_mode[algt->ce_blockmode];
	len = op->keylen;
	switch (len) {
	case 128 / 8:
		sym |= CE_AES_128BITS;
		break;
	case 192 / 8:
		sym |= CE_AES_192BITS;
		break;
	case 256 / 8:
		sym |= CE_AES_256BITS;
		break;
	}

	cet->t_sym_ctl = cpu_to_le32(sym);
	cet->t_asym_ctl = 0;

	rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(ce->dev, rctx->addr_key)) {
		dev_err(ce->dev, "Cannot DMA MAP KEY\n");
		err = -EFAULT;
		goto theend;
	}
	cet->t_key = cpu_to_le32(rctx->addr_key);

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		rctx->ivlen = ivsize;
		rctx->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
		if (!rctx->bounce_iv) {
			err = -ENOMEM;
			goto theend_key;
		}
		if (rctx->op_dir & CE_DECRYPTION) {
			rctx->backup_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!rctx->backup_iv) {
				kfree(rctx->bounce_iv);
				err = -ENOMEM;
				goto theend_key;
			}
			/* Save the last ciphertext block: it is the next IV */
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
						 offset, ivsize, 0);
		}
		memcpy(rctx->bounce_iv, areq->iv, ivsize);
		rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
			rctx->addr_iv = 0;
			dev_err(ce->dev, "Cannot DMA MAP IV\n");
			err = -ENOMEM;
			goto theend_iv;
		}
		cet->t_iv = cpu_to_le32(rctx->addr_iv);
	}

	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}

	len = areq->cryptlen;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_src[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	for_each_sg(areq->dst, sg, nr_sgd, i) {
		cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_dst[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_dst[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	chan->timeout = areq->cryptlen;
	rctx->nr_sgs = nr_sgs;
	rctx->nr_sgd = nr_sgd;
	return 0;

theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
			     DMA_BIDIRECTIONAL);
	} else {
		/* dma_unmap_sg() must be given the nents passed to dma_map_sg() */
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
				     DMA_TO_DEVICE);
		if (nr_sgd > 0)
			dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
				     DMA_FROM_DEVICE);
	}

theend_iv:
	if (areq->iv && ivsize > 0) {
		if (rctx->addr_iv)
			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, rctx->backup_iv, ivsize);
			kfree_sensitive(rctx->backup_iv);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		kfree(rctx->bounce_iv);
	}

theend_key:
	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);

theend:
	return err;
}

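/*
 * Run the prepared task on the selected flow and complete the request.
 * crypto_finalize_skcipher_request() is called with bottom halves disabled
 * since completion callbacks expect to run in softirq context.
 */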
static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
	int flow, err;

	flow = rctx->flow;
	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();
	return 0;
}

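/*
 * Undo the DMA mappings set up by sun8i_ce_cipher_prepare() and copy the
 * output IV back into the request. Called by the crypto engine after
 * sun8i_ce_cipher_run().
 */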
static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	unsigned int ivsize, offset;
	int nr_sgs = rctx->nr_sgs;
	int nr_sgd = rctx->nr_sgd;
	int flow;

	flow = rctx->flow;
	chan = &ce->chanlist[flow];
	cet = chan->tl;
	ivsize = crypto_skcipher_ivsize(tfm);

	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
			     DMA_BIDIRECTIONAL);
	} else {
		/* dma_unmap_sg() must be given the nents passed to dma_map_sg() */
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
				     DMA_TO_DEVICE);
		if (nr_sgd > 0)
			dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
				     DMA_FROM_DEVICE);
	}

	if (areq->iv && ivsize > 0) {
		if (cet->t_iv)
			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, rctx->backup_iv, ivsize);
			kfree_sensitive(rctx->backup_iv);
		} else {
			/* The last ciphertext block is the output IV */
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		kfree(rctx->bounce_iv);
	}

	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);

	return 0;
}

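/*
 * Decryption entry point: use the fallback if the hardware cannot handle
 * the request, otherwise queue it on one of the CE flow engines.
 */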
int sun8i_ce_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_DECRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

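/* Encryption entry point, mirroring sun8i_ce_skdecrypt() above. */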
int sun8i_ce_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_ENCRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

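/*
 * One-time tfm setup: allocate the software fallback, size the request
 * context to also hold the fallback request, register the crypto engine
 * callbacks and take a runtime PM reference on the device.
 */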
int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun8i_ce_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
	op->ce = algt->ce;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
			 crypto_skcipher_reqsize(op->fallback_tfm);

	dev_info(op->ce->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(&sktfm->base),
		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));

	op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
	op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
	op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare;

	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

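/* Release the key, the fallback tfm and the runtime PM reference. */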
void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync_suspend(op->ce->dev);
}

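/*
 * Accept AES keys of 128, 192 or 256 bits, keep a DMA-able copy and
 * propagate the key to the fallback tfm.
 */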
int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;

	switch (keylen) {
	case 128 / 8:
		break;
	case 192 / 8:
		break;
	case 256 / 8:
		break;
	default:
		dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

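/*
 * Validate a DES3 key, keep a DMA-able copy and propagate it to the
 * fallback tfm, as for AES.
 */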
int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
