// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */

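/*
 * Each RSA job descriptor is built as a header command plus one protocol
 * OPERATION command wrapped around the Protocol Data Block, which is
 * presumably why the length macros above reserve 2 * CAAM_CMD_SZ on top
 * of the PDB size.
 */
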
/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

/*
 * variable used to avoid double free of resources in case
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_alg akcipher;
	bool registered;
};

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If the backlog flag is not set, the completion of the request is
	 * done by CAAM, not by the crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
			    void *context)
{
	struct akcipher_request *req = context;
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	switch (key->priv_form) {
	case FORM1:
		rsa_priv_f1_unmap(dev, edesc, req);
		break;
	case FORM2:
		rsa_priv_f2_unmap(dev, edesc, req);
		break;
	case FORM3:
		rsa_priv_f3_unmap(dev, edesc, req);
		break;
	}

	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If the backlog flag is not set, the completion of the request is
	 * done by CAAM, not by the crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

/**
 * caam_rsa_count_leading_zeros - Count the leading zero bytes that need to
 * be stripped from a given scatterlist
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: maximum number of leading zero bytes to strip
 * @flags : operation flags
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
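	/*
	 * Walk the scatterlist with a mapping iterator; "len" starts at 0,
	 * so the first pass through the loop maps the first segment before
	 * any zero bytes are counted.
	 */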
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		if (!sg_miter_next(&miter))
			break;

		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}

static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is shorter than the key modulus n, so there
		 * will be zero padding
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		goto dst_fail;

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
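
	/*
	 * Fill the hw s/g table: an optional zero-padding entry first,
	 * followed by the source entries; the destination entries, if the
	 * destination is scattered, start at sec4_sg_index.
	 */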
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	req_ctx->edesc = edesc;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}

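/*
 * Invoked by the crypto engine to run a previously backlogged request on
 * the Job Ring; the bklog flag routes its completion back to the engine.
 */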
static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = container_of(areq,
						    struct akcipher_request,
						    base);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	u32 *desc = req_ctx->edesc->hw_desc;
	int ret;

	req_ctx->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

	if (ret != -EINPROGRESS) {
		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
		rsa_io_unmap(jrdev, req_ctx->edesc, req);
		kfree(req_ctx->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

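	/* sgf also packs the byte lengths of e and n next to the s/g flags */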
	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int akcipher_enqueue_req(struct device *jrdev,
				void (*cbk)(struct device *jrdev, u32 *desc,
					    u32 err, void *context),
				struct akcipher_request *req)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc = req_ctx->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	req_ctx->akcipher_op_done = cbk;
	/*
	 * Only backlogged requests are sent to the crypto engine, since the
	 * others can be handled directly by CAAM, if free, especially since
	 * the JR has up to 1024 entries (more than the 10 entries of the
	 * crypto engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
								 req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		switch (key->priv_form) {
		case FORM1:
			rsa_priv_f1_unmap(jrdev, edesc, req);
			break;
		case FORM2:
			rsa_priv_f2_unmap(jrdev, edesc, req);
			break;
		case FORM3:
			rsa_priv_f3_unmap(jrdev, edesc, req);
			break;
		default:
			rsa_pub_unmap(jrdev, edesc, req);
		}
		rsa_io_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

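	/* Report the required output length back to the caller on overflow */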
	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kfree_sensitive(key->d);
	kfree_sensitive(key->p);
	kfree_sensitive(key->q);
	kfree_sensitive(key->dp);
	kfree_sensitive(key->dq);
	kfree_sensitive(key->qinv);
	kfree_sensitive(key->tmp1);
	kfree_sensitive(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv may decode to lengths shorter than the corresponding p, q
 * prime lengths, since BER encoding requires that the minimum number of
 * bytes be used to encode an integer. The decoded dP, dQ, qInv values
 * therefore have to be zero-padded to the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read; updated with the stripped length
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

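/*
 * CAAM private key forms: FORM1 uses (n, d), FORM2 adds the prime factors
 * p and q, and FORM3 uses the full CRT quintuple (p, q, dp, dq, qinv).
 * Upgrade the key to the richest form that can be allocated, falling back
 * to a simpler form on any failure.
 */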
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kfree_sensitive(rsa_key->dq);
free_dp:
	kfree_sensitive(rsa_key->dp);
free_tmp2:
	kfree_sensitive(rsa_key->tmp2);
free_tmp1:
	kfree_sensitive(rsa_key->tmp1);
free_q:
	kfree_sensitive(rsa_key->q);
free_p:
	kfree_sensitive(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per-session pkc driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

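	/*
	 * Map the shared zero buffer once per tfm; its size presumably
	 * covers the worst case of n_sz - 1 bytes of leading zero padding.
	 */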
	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	ctx->enginectx.op.do_one_request = akcipher_do_one_req;

	return 0;
}

/* Per-session pkc driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct caam_akcipher_alg caam_rsa = {
	.akcipher = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.reqsize = sizeof(struct caam_rsa_req_ctx),
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx),
		},
	}
};

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst, pkha;
	int err;

	init_done = false;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10) {
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	} else {
		pkha = rd_reg32(&priv->ctrl->vreg.pkha);
		pk_inst = pkha & CHA_VER_NUM_MASK;

		/*
		 * Newer CAAMs support partially disabled functionality. If
		 * this is the case, the number is non-zero, but this bit is
		 * set to indicate that no encryption or decryption is
		 * supported. Only signing and verifying is supported.
		 */
		if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
			pk_inst = 0;
	}

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
			      GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_register_akcipher(&caam_rsa.akcipher);

	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}