// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ss-cipher.c - hardware cryptographic offloader for
 * Allwinner A80/A83T SoC
 *
 * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 *
 * You can find a link to the datasheet in Documentation/arm/sunxi.rst
 */

#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ss.h"

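/*
 * sun8i_ss_need_fallback() - decide whether a request must be handled by
 * the software fallback instead of the hardware.
 *
 * Fall back when the request length is zero or not a multiple of 16, when
 * either side has more than 8 SG entries, when any SG entry is not 16-byte
 * aligned or not a multiple of 16 bytes long, or when source and
 * destination do not have the same SG layout.
 */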
static bool sun8i_ss_need_fallback(struct skcipher_request *areq)
{
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	struct scatterlist *sg;

	if (areq->cryptlen == 0 || areq->cryptlen % 16)
		return true;

	if (sg_nents(areq->src) > 8 || sg_nents(areq->dst) > 8)
		return true;

	sg = areq->src;
	while (sg) {
		if ((sg->length % 16) != 0)
			return true;
		if ((sg_dma_len(sg) % 16) != 0)
			return true;
		if (!IS_ALIGNED(sg->offset, 16))
			return true;
		sg = sg_next(sg);
	}
	sg = areq->dst;
	while (sg) {
		if ((sg->length % 16) != 0)
			return true;
		if ((sg_dma_len(sg) % 16) != 0)
			return true;
		if (!IS_ALIGNED(sg->offset, 16))
			return true;
		sg = sg_next(sg);
	}

	/* The SS needs the same number of SGs (with the same lengths) for source and destination */
	in_sg = areq->src;
	out_sg = areq->dst;
	while (in_sg && out_sg) {
		if (in_sg->length != out_sg->length)
			return true;
		in_sg = sg_next(in_sg);
		out_sg = sg_next(out_sg);
	}
	if (in_sg || out_sg)
		return true;
	return false;
}

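/*
 * sun8i_ss_cipher_fallback() - run the request on the software fallback
 * skcipher, preserving the caller's callback and request parameters.
 */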
static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ss_alg_template *algt;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif
	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir & SS_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

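/*
 * sun8i_ss_setup_ivs() - prepare and DMA-map one IV per source SG entry.
 *
 * For decryption, the last ciphertext block is saved in sf->biv first so
 * that it can still be returned as the next IV when the operation runs
 * in-place. All chained IVs are copied out of the source before mapping,
 * since a bidirectional DMA may overwrite the source buffers.
 */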
static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ss_dev *ss = op->ss;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct scatterlist *sg = areq->src;
	unsigned int todo, offset;
	unsigned int len = areq->cryptlen;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
	int i = 0;
	dma_addr_t a;
	int err;

	rctx->ivlen = ivsize;
	if (rctx->op_dir & SS_DECRYPTION) {
		offset = areq->cryptlen - ivsize;
		scatterwalk_map_and_copy(sf->biv, areq->src, offset,
					 ivsize, 0);
	}

	/* we need to copy all IVs from the source in case DMA is bidirectional */
	while (sg && len) {
		if (sg_dma_len(sg) == 0) {
			sg = sg_next(sg);
			continue;
		}
		if (i == 0)
			memcpy(sf->iv[0], areq->iv, ivsize);
		a = dma_map_single(ss->dev, sf->iv[i], ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ss->dev, a)) {
			memzero_explicit(sf->iv[i], ivsize);
			dev_err(ss->dev, "Cannot DMA MAP IV\n");
			err = -EFAULT;
			goto dma_iv_error;
		}
		rctx->p_iv[i] = a;
		/* we need to set up all the other IVs only for decryption */
		if (rctx->op_dir == SS_ENCRYPTION) {
			/* record the mapped IV so the cleanup path unmaps it */
			rctx->niv = 1;
			return 0;
		}
		todo = min(len, sg_dma_len(sg));
		len -= todo;
		i++;
		if (i < MAX_SG) {
			offset = sg->length - ivsize;
			scatterwalk_map_and_copy(sf->iv[i], sg, offset, ivsize, 0);
		}
		rctx->niv = i;
		sg = sg_next(sg);
	}

	return 0;
dma_iv_error:
	i--;
	while (i >= 0) {
		dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
		memzero_explicit(sf->iv[i], ivsize);
		i--;
	}
	return err;
}

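/*
 * sun8i_ss_cipher() - map the key, IVs, source and destination buffers,
 * build the task descriptors and run the request on the hardware.
 */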
static int sun8i_ss_cipher(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ss_dev *ss = op->ss;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
	struct scatterlist *sg;
	unsigned int todo, len, offset, ivsize;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;
	int i;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);

	dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt->stat_req++;
#endif

	rctx->op_mode = ss->variant->op_mode[algt->ss_blockmode];
	rctx->method = ss->variant->alg_cipher[algt->ss_algo_id];
	rctx->keylen = op->keylen;

	rctx->p_key = dma_map_single(ss->dev, op->key, op->keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(ss->dev, rctx->p_key)) {
		dev_err(ss->dev, "Cannot DMA MAP KEY\n");
		err = -EFAULT;
		goto theend;
	}

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		err = sun8i_ss_setup_ivs(areq);
		if (err)
			goto theend_key;
	}
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > 8) {
			dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > 8) {
			dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = dma_map_sg(ss->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > 8) {
			dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}

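	/*
	 * Build the source and destination task descriptors from the mapped
	 * SG lists; the descriptor length field is expressed in 32-bit
	 * words, hence the "todo / 4".
	 */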
	len = areq->cryptlen;
	i = 0;
	sg = areq->src;
	while (i < nr_sgs && sg && len) {
		if (sg_dma_len(sg) == 0)
			goto sgs_next;
		rctx->t_src[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		rctx->t_src[i].len = todo / 4;
		dev_dbg(ss->dev, "%s total=%u SGS(%d %u off=%u) todo=%u\n", __func__,
			areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo);
		len -= todo;
		i++;
sgs_next:
		sg = sg_next(sg);
	}
	if (len > 0) {
		dev_err(ss->dev, "remaining len %u\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	i = 0;
	sg = areq->dst;
	while (i < nr_sgd && sg && len) {
		if (sg_dma_len(sg) == 0)
			goto sgd_next;
		rctx->t_dst[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		rctx->t_dst[i].len = todo / 4;
		dev_dbg(ss->dev, "%s total=%u SGD(%d %u off=%u) todo=%u\n", __func__,
			areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo);
		len -= todo;
		i++;
sgd_next:
		sg = sg_next(sg);
	}
	if (len > 0) {
		dev_err(ss->dev, "remaining len %u\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	err = sun8i_ss_run_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));

theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
			     DMA_BIDIRECTIONAL);
	} else {
		/* unmap with the nents originally passed to dma_map_sg() */
		dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(ss->dev, areq->dst, sg_nents(areq->dst),
			     DMA_FROM_DEVICE);
	}

theend_iv:
	if (areq->iv && ivsize > 0) {
		for (i = 0; i < rctx->niv; i++) {
			dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
			memzero_explicit(sf->iv[i], ivsize);
		}

		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & SS_DECRYPTION) {
			memcpy(areq->iv, sf->biv, ivsize);
			memzero_explicit(sf->biv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
	}

theend_key:
	dma_unmap_single(ss->dev, rctx->p_key, op->keylen, DMA_TO_DEVICE);

theend:
	return err;
}

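/*
 * sun8i_ss_handle_cipher_request() - crypto_engine entry point; run the
 * request, then finalize it with bottom halves disabled, since completion
 * callbacks may assume softirq context.
 */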
static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
	int err;
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

	err = sun8i_ss_cipher(breq);
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();

	return 0;
}

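/*
 * sun8i_ss_skdecrypt() - queue a decryption request on an engine flow, or
 * run it directly on the fallback when the hardware cannot handle it.
 */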
int sun8i_ss_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx));
	rctx->op_dir = SS_DECRYPTION;

	if (sun8i_ss_need_fallback(areq))
		return sun8i_ss_cipher_fallback(areq);

	e = sun8i_ss_get_engine_number(op->ss);
	engine = op->ss->flows[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

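/*
 * sun8i_ss_skencrypt() - queue an encryption request on an engine flow, or
 * run it directly on the fallback when the hardware cannot handle it.
 */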
int sun8i_ss_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx));
	rctx->op_dir = SS_ENCRYPTION;

	if (sun8i_ss_need_fallback(areq))
		return sun8i_ss_cipher_fallback(areq);

	e = sun8i_ss_get_engine_number(op->ss);
	engine = op->ss->flows[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

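/*
 * sun8i_ss_cipher_init() - allocate the fallback skcipher, size the request
 * context for it, register the engine callbacks and resume the device.
 */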
int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun8i_ss_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
	op->ss = algt->ss;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
			 crypto_skcipher_reqsize(op->fallback_tfm);

	dev_info(op->ss->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(&sktfm->base),
		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));

	op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	err = pm_runtime_resume_and_get(op->ss->dev);
	if (err < 0) {
		dev_err(op->ss->dev, "pm error %d\n", err);
		goto error_pm;
	}

	return 0;
error_pm:
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

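/*
 * sun8i_ss_cipher_exit() - free the key and the fallback tfm, then release
 * the runtime PM reference taken in sun8i_ss_cipher_init().
 */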
void sun8i_ss_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync(op->ss->dev);
}

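/*
 * sun8i_ss_aes_setkey() - validate the AES key length (128/192/256 bits),
 * keep a DMA-able copy of the key and mirror it into the fallback tfm.
 */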
int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ss_dev *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
	case 192 / 8:
	case 256 / 8:
		break;
	default:
		dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

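/*
 * sun8i_ss_des3_setkey() - validate the 3DES key length (3 * DES_KEY_SIZE),
 * keep a DMA-able copy of the key and mirror it into the fallback tfm.
 */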
int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ss_dev *ss = op->ss;

	if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
		dev_dbg(ss->dev, "Invalid keylen %u\n", keylen);
		return -EINVAL;
	}

	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}