// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
#include <crypto/curve25519.h>
#include <crypto/dh.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
#include <crypto/rng.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
#include <crypto/kpp.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/time.h>
#include "hpre.h"

struct hpre_ctx;

#define HPRE_CRYPTO_ALG_PRI	1000
#define HPRE_ALIGN_SZ		64
#define HPRE_BITS_2_BYTES_SHIFT	3
#define HPRE_RSA_512BITS_KSZ	64
#define HPRE_RSA_1536BITS_KSZ	192
#define HPRE_CRT_PRMS		5
#define HPRE_CRT_Q		2
#define HPRE_CRT_P		3
#define HPRE_CRT_INV		4
#define HPRE_DH_G_FLAG		0x02
#define HPRE_TRY_SEND_TIMES	100
#define HPRE_INVLD_REQ_ID		(-1)

#define HPRE_SQE_ALG_BITS	5
#define HPRE_SQE_DONE_SHIFT	30
#define HPRE_DH_MAX_P_SZ	512

#define HPRE_DFX_SEC_TO_US	1000000
#define HPRE_DFX_US_TO_NS	1000

/* due to NIST P-521 */
#define HPRE_ECC_MAX_KSZ	66

/* size in bytes of the n prime */
#define HPRE_ECC_NIST_P192_N_SIZE	24
#define HPRE_ECC_NIST_P256_N_SIZE	32
#define HPRE_ECC_NIST_P384_N_SIZE	48

/* size in bytes */
#define HPRE_ECC_HW256_KSZ_B	32
#define HPRE_ECC_HW384_KSZ_B	48

/* capability register mask of driver */
#define HPRE_DRV_RSA_MASK_CAP		BIT(0)
#define HPRE_DRV_DH_MASK_CAP		BIT(1)
#define HPRE_DRV_ECDH_MASK_CAP		BIT(2)
#define HPRE_DRV_X25519_MASK_CAP	BIT(5)

typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);

struct hpre_rsa_ctx {
	/* low address: e--->n */
	char *pubkey;
	dma_addr_t dma_pubkey;

	/* low address: d--->n */
	char *prikey;
	dma_addr_t dma_prikey;

	/* low address: dq->dp->q->p->qinv */
	char *crt_prikey;
	dma_addr_t dma_crt_prikey;

	struct crypto_akcipher *soft_tfm;
};

struct hpre_dh_ctx {
	/*
	 * If base is g we compute the public key
	 *	ya = g^xa mod p; [RFC2631 sec 2.1.1]
	 * else if base is the counterpart public key we
	 * compute the shared secret
	 *	ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
	 * low address: d--->n, please refer to the HiSilicon HPRE UM
	 */
	char *xa_p;
	dma_addr_t dma_xa_p;

	char *g; /* m */
	dma_addr_t dma_g;
};

struct hpre_ecdh_ctx {
	/* low address: p->a->k->b */
	unsigned char *p;
	dma_addr_t dma_p;

	/* low address: x->y */
	unsigned char *g;
	dma_addr_t dma_g;
};

struct hpre_curve25519_ctx {
	/* low address: p->a->k */
	unsigned char *p;
	dma_addr_t dma_p;

	/* gx coordinate */
	unsigned char *g;
	dma_addr_t dma_g;
};

struct hpre_ctx {
	struct hisi_qp *qp;
	struct device *dev;
	struct hpre_asym_request **req_list;
	struct hpre *hpre;
	spinlock_t req_lock;
	unsigned int key_sz;
	bool crt_g2_mode;
	struct idr req_idr;
	union {
		struct hpre_rsa_ctx rsa;
		struct hpre_dh_ctx dh;
		struct hpre_ecdh_ctx ecdh;
		struct hpre_curve25519_ctx curve25519;
	};
	/* for ecc algorithms */
	unsigned int curve_id;
};

struct hpre_asym_request {
	char *src;
	char *dst;
	struct hpre_sqe req;
	struct hpre_ctx *ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
		struct kpp_request *ecdh;
		struct kpp_request *curve25519;
	} areq;
	int err;
	int req_id;
	hpre_cb cb;
	struct timespec64 req_time;
};

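/*
 * Each crypto request context embeds a struct hpre_asym_request, which
 * must satisfy both the DMA alignment and HPRE_ALIGN_SZ; these helpers
 * compute that alignment and the padding reserved for it.
 */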
static inline unsigned int hpre_align_sz(void)
{
	return ((crypto_dma_align() - 1) | (HPRE_ALIGN_SZ - 1)) + 1;
}

static inline unsigned int hpre_align_pd(void)
{
	return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
}

static int hpre_alloc_req_id(struct hpre_ctx *ctx)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&ctx->req_lock, flags);
	id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
	spin_unlock_irqrestore(&ctx->req_lock, flags);

	return id;
}

static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->req_lock, flags);
	idr_remove(&ctx->req_idr, req_id);
	spin_unlock_irqrestore(&ctx->req_lock, flags);
}

static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx;
	struct hpre_dfx *dfx;
	int id;

	ctx = hpre_req->ctx;
	id = hpre_alloc_req_id(ctx);
	if (unlikely(id < 0))
		return -EINVAL;

	ctx->req_list[id] = hpre_req;
	hpre_req->req_id = id;

	dfx = ctx->hpre->debug.dfx;
	if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
		ktime_get_ts64(&hpre_req->req_time);

	return id;
}

static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	int id = hpre_req->req_id;

	if (hpre_req->req_id >= 0) {
		hpre_req->req_id = HPRE_INVLD_REQ_ID;
		ctx->req_list[id] = NULL;
		hpre_free_req_id(ctx, id);
	}
}

static struct hisi_qp *hpre_get_qp_and_start(u8 type)
{
	struct hisi_qp *qp;
	int ret;

	qp = hpre_create_qp(type);
	if (!qp) {
		pr_err("Can not create hpre qp!\n");
		return ERR_PTR(-ENODEV);
	}

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0) {
		hisi_qm_free_qps(&qp, 1);
		pci_err(qp->qm->pdev, "Can not start qp!\n");
		return ERR_PTR(-EINVAL);
	}

	return qp;
}

static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
				  struct scatterlist *data, unsigned int len,
				  int is_src, dma_addr_t *tmp)
{
	struct device *dev = hpre_req->ctx->dev;
	enum dma_data_direction dma_dir;

	if (is_src) {
		hpre_req->src = NULL;
		dma_dir = DMA_TO_DEVICE;
	} else {
		hpre_req->dst = NULL;
		dma_dir = DMA_FROM_DEVICE;
	}
	*tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
	if (unlikely(dma_mapping_error(dev, *tmp))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	return 0;
}

static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
				struct scatterlist *data, unsigned int len,
				int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	void *ptr;
	int shift;

	shift = ctx->key_sz - len;
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC);
	if (unlikely(!ptr))
		return -ENOMEM;

	if (is_src) {
		scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
		hpre_req->src = ptr;
	} else {
		hpre_req->dst = ptr;
	}

	return 0;
}

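/*
 * Single-entry scatterlists of exactly key_sz bytes are DMA-mapped in
 * place; anything else is copied into a zero-padded DMA-coherent bounce
 * buffer. DH source data always takes the bounce-buffer path so it can
 * be left-padded to key_sz.
 */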
static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
			     struct scatterlist *data, unsigned int len,
			     int is_src, int is_dh)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	dma_addr_t tmp = 0;
	int ret;

	/* DH source data must be formatted, so it always takes the copy path */
	if ((sg_is_last(data) && len == ctx->key_sz) &&
	    ((is_dh && !is_src) || !is_dh))
		ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
	else
		ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);

	if (unlikely(ret))
		return ret;

	if (is_src)
		msg->in = cpu_to_le64(tmp);
	else
		msg->out = cpu_to_le64(tmp);

	return 0;
}

static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
				 struct hpre_asym_request *req,
				 struct scatterlist *dst,
				 struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t tmp;

	tmp = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, tmp)))
		return;

	if (src) {
		if (req->src)
			dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
		else
			dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
	}

	tmp = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, tmp)))
		return;

	if (req->dst) {
		if (dst)
			scatterwalk_map_and_copy(req->dst, dst, 0,
						 ctx->key_sz, 1);
		dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
	} else {
		dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
	}
}

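/*
 * Parse a completed SQE: look up the request by its tag, detach it from
 * the context, and translate the hardware done/error fields into an errno.
 */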
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
				void **kreq)
{
	struct hpre_asym_request *req;
	unsigned int err, done, alg;
	int id;

#define HPRE_NO_HW_ERR		0
#define HPRE_HW_TASK_DONE	3
#define HPRE_HW_ERR_MASK	GENMASK(10, 0)
#define HPRE_SQE_DONE_MASK	GENMASK(1, 0)
#define HPRE_ALG_TYPE_MASK	GENMASK(4, 0)
	id = (int)le16_to_cpu(sqe->tag);
	req = ctx->req_list[id];
	hpre_rm_req_from_ctx(req);
	*kreq = req;

	err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
		HPRE_HW_ERR_MASK;

	done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
		HPRE_SQE_DONE_MASK;

	if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
		return 0;

	alg = le32_to_cpu(sqe->dw0) & HPRE_ALG_TYPE_MASK;
	dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
		alg, done, err);

	return -EINVAL;
}

static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
	struct hpre *hpre;

	if (!ctx || !qp || qlen < 0)
		return -EINVAL;

	spin_lock_init(&ctx->req_lock);
	ctx->qp = qp;
	ctx->dev = &qp->qm->pdev->dev;

	hpre = container_of(ctx->qp->qm, struct hpre, qm);
	ctx->hpre = hpre;
	ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
	if (!ctx->req_list)
		return -ENOMEM;
	ctx->key_sz = 0;
	ctx->crt_g2_mode = false;
	idr_init(&ctx->req_idr);

	return 0;
}

static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
{
	if (is_clear_all) {
		idr_destroy(&ctx->req_idr);
		kfree(ctx->req_list);
		hisi_qm_free_qps(&ctx->qp, 1);
	}

	ctx->crt_g2_mode = false;
	ctx->key_sz = 0;
}

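/*
 * DFX accounting: check whether a request's round trip exceeded the
 * configured overtime threshold (in microseconds).
 */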
static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
			       u64 overtime_thrhld)
{
	struct timespec64 reply_time;
	u64 time_use_us;

	ktime_get_ts64(&reply_time);
	time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
		HPRE_DFX_SEC_TO_US +
		(reply_time.tv_nsec - req->req_time.tv_nsec) /
		HPRE_DFX_US_TO_NS;

	if (time_use_us <= overtime_thrhld)
		return false;

	return true;
}

static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.dh;
	areq->dst_len = ctx->key_sz;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct akcipher_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	areq = req->areq.rsa;
	areq->dst_len = ctx->key_sz;
	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	akcipher_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
	struct hpre_ctx *ctx = qp->qp_ctx;
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_sqe *sqe = resp;
	struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];

	if (unlikely(!req)) {
		atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
		return;
	}

	req->cb(ctx, resp);
}

static void hpre_stop_qp_and_put(struct hisi_qp *qp)
{
	hisi_qm_stop_qp(qp);
	hisi_qm_free_qps(&qp, 1);
}

static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
{
	struct hisi_qp *qp;
	int ret;

	qp = hpre_get_qp_and_start(type);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp->qp_ctx = ctx;
	qp->req_cb = hpre_alg_cb;

	ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
	if (ret)
		hpre_stop_qp_and_put(qp);

	return ret;
}

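/*
 * Common request setup for RSA and DH: validate the destination length,
 * initialize the embedded SQE, and allocate a request id used as its tag.
 */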
static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (is_rsa) {
		struct akcipher_request *akreq = req;

		if (akreq->dst_len < ctx->key_sz) {
			akreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = akcipher_request_ctx(akreq);
		h_req = PTR_ALIGN(tmp, hpre_align_sz());
		h_req->cb = hpre_rsa_cb;
		h_req->areq.rsa = akreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
	} else {
		struct kpp_request *kreq = req;

		if (kreq->dst_len < ctx->key_sz) {
			kreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = kpp_request_ctx(kreq);
		h_req = PTR_ALIGN(tmp, hpre_align_sz());
		h_req->cb = hpre_dh_cb;
		h_req->areq.dh = kreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
		msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
	}

	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);

	return 0;
}

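/*
 * Queue the SQE on the QP, retrying up to HPRE_TRY_SEND_TIMES while the
 * queue reports -EBUSY; send/busy/fail events update the DFX counters.
 */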
static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	int ctr = 0;
	int ret;

	do {
		atomic64_inc(&dfx[HPRE_SEND_CNT].value);
		ret = hisi_qp_send(ctx->qp, msg);
		if (ret != -EBUSY)
			break;
		atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
	} while (ctr++ < HPRE_TRY_SEND_TIMES);

	if (likely(!ret))
		return ret;

	if (ret != -EBUSY)
		atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);

	return ret;
}

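/*
 * Generate the public key (req->src is NULL, so the base is g) or compute
 * the shared secret (req->src holds the peer's public key). When g == 2
 * and no source is given, the dedicated DH_G2 algorithm is used.
 */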
static int hpre_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_msg_request_set(ctx, req, false);
	if (unlikely(ret))
		return ret;

	if (req->src) {
		ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
		if (unlikely(ret))
			goto clear_all;
	} else {
		msg->in = cpu_to_le64(ctx->dh.dma_g);
	}

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
	if (unlikely(ret))
		goto clear_all;

	if (ctx->crt_g2_mode && !req->src)
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
	else
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_is_dh_params_length_valid(unsigned int key_sz)
{
#define _HPRE_DH_GRP1		768
#define _HPRE_DH_GRP2		1024
#define _HPRE_DH_GRP5		1536
#define _HPRE_DH_GRP14		2048
#define _HPRE_DH_GRP15		3072
#define _HPRE_DH_GRP16		4096
	switch (key_sz) {
	case _HPRE_DH_GRP1:
	case _HPRE_DH_GRP2:
	case _HPRE_DH_GRP5:
	case _HPRE_DH_GRP14:
	case _HPRE_DH_GRP15:
	case _HPRE_DH_GRP16:
		return 0;
	default:
		return -EINVAL;
	}
}

static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
{
	struct device *dev = ctx->dev;
	unsigned int sz;

	if (params->p_size > HPRE_DH_MAX_P_SZ)
		return -EINVAL;

	if (hpre_is_dh_params_length_valid(params->p_size <<
					   HPRE_BITS_2_BYTES_SHIFT))
		return -EINVAL;

	sz = ctx->key_sz = params->p_size;
	ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
					  &ctx->dh.dma_xa_p, GFP_KERNEL);
	if (!ctx->dh.xa_p)
		return -ENOMEM;

	memcpy(ctx->dh.xa_p + sz, params->p, sz);

	/* If g equals 2, don't copy it */
	if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
		ctx->crt_g2_mode = true;
		return 0;
	}

	ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
	if (!ctx->dh.g) {
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
		return -ENOMEM;
	}

	memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);

	return 0;
}

static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->dh.g) {
		dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
		ctx->dh.g = NULL;
	}

	if (ctx->dh.xa_p) {
		memzero_explicit(ctx->dh.xa_p, sz);
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	hpre_dh_clear_ctx(ctx, false);

	ret = hpre_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	hpre_dh_clear_ctx(ctx, false);
	return ret;
}

static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
}

static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_dh_clear_ctx(ctx, true);
}

static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}

static bool hpre_rsa_key_size_is_support(unsigned int len)
{
	unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;

#define _RSA_1024BITS_KEY_WDTH		1024
#define _RSA_2048BITS_KEY_WDTH		2048
#define _RSA_3072BITS_KEY_WDTH		3072
#define _RSA_4096BITS_KEY_WDTH		4096

	switch (bits) {
	case _RSA_1024BITS_KEY_WDTH:
	case _RSA_2048BITS_KEY_WDTH:
	case _RSA_3072BITS_KEY_WDTH:
	case _RSA_4096BITS_KEY_WDTH:
		return true;
	default:
		return false;
	}
}

static int hpre_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For 512- and 1536-bit key sizes, use the soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_encrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.pubkey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
	msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For 512- and 1536-bit key sizes, use the soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_decrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.prikey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	if (ctx->crt_g2_mode) {
		msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_CRT);
	} else {
		msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_NCRT);
	}

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
			  size_t vlen, bool private)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	ctx->key_sz = vlen;

	/* if an unsupported key size is provided, fall back to the software tfm */
	if (!hpre_rsa_key_size_is_support(ctx->key_sz))
		return 0;

	ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
					     &ctx->rsa.dma_pubkey,
					     GFP_KERNEL);
	if (!ctx->rsa.pubkey)
		return -ENOMEM;

	if (private) {
		ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1,
						     &ctx->rsa.dma_prikey,
						     GFP_KERNEL);
		if (!ctx->rsa.prikey) {
			dma_free_coherent(ctx->dev, vlen << 1,
					  ctx->rsa.pubkey,
					  ctx->rsa.dma_pubkey);
			ctx->rsa.pubkey = NULL;
			return -ENOMEM;
		}
		memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
	}
	memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);

	/* Using hardware HPRE to do RSA */
	return 1;
}

static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_crt_para_get(char *para, size_t para_sz,
			     const char *raw, size_t raw_sz)
{
	const char *ptr = raw;
	size_t len = raw_sz;

	hpre_rsa_drop_leading_zeros(&ptr, &len);
	if (!len || len > para_sz)
		return -EINVAL;

	memcpy(para + para_sz - len, ptr, len);

	return 0;
}

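/*
 * Load the CRT private key into a single DMA buffer laid out as
 * dq->dp->q->p->qinv, each parameter occupying half the key size.
 */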
static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
{
	unsigned int hlf_ksz = ctx->key_sz >> 1;
	struct device *dev = ctx->dev;
	u64 offset;
	int ret;

	ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
					&ctx->rsa.dma_crt_prikey,
					GFP_KERNEL);
	if (!ctx->rsa.crt_prikey)
		return -ENOMEM;

	ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
				rsa_key->dq, rsa_key->dq_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->dp, rsa_key->dp_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_Q;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->q, rsa_key->q_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_P;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->p, rsa_key->p_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_INV;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->qinv, rsa_key->qinv_sz);
	if (ret)
		goto free_key;

	ctx->crt_g2_mode = true;

	return 0;

free_key:
	offset = hlf_ksz * HPRE_CRT_PRMS;
	memzero_explicit(ctx->rsa.crt_prikey, offset);
	dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
			  ctx->rsa.dma_crt_prikey);
	ctx->rsa.crt_prikey = NULL;
	ctx->crt_g2_mode = false;

	return ret;
}

/* If is_clear_all is set, all the resources of the QP will be released. */
static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	unsigned int half_key_sz = ctx->key_sz >> 1;
	struct device *dev = ctx->dev;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->rsa.pubkey) {
		dma_free_coherent(dev, ctx->key_sz << 1,
				  ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
		ctx->rsa.pubkey = NULL;
	}

	if (ctx->rsa.crt_prikey) {
		memzero_explicit(ctx->rsa.crt_prikey,
				 half_key_sz * HPRE_CRT_PRMS);
		dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
				  ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
		ctx->rsa.crt_prikey = NULL;
	}

	if (ctx->rsa.prikey) {
		memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
				  ctx->rsa.dma_prikey);
		ctx->rsa.prikey = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

/*
 * Judge whether the key is a CRT key:
 * CRT: return true, N-CRT: return false.
 */
static bool hpre_is_crt_key(struct rsa_key *key)
{
	u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
		  key->qinv_sz;

#define LEN_OF_NCRT_PARA	5

	/* An N-CRT key provides at most LEN_OF_NCRT_PARA bytes of CRT parameters */
	return len > LEN_OF_NCRT_PARA;
}

static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
			   unsigned int keylen, bool private)
{
	struct rsa_key rsa_key;
	int ret;

	hpre_rsa_clear_ctx(ctx, false);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		return ret;

	ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
	if (ret <= 0)
		return ret;

	if (private) {
		ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;

		if (hpre_is_crt_key(&rsa_key)) {
			ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
			if (ret < 0)
				goto free;
		}
	}

	ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;

	if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
		ret = -EINVAL;
		goto free;
	}

	return 0;

free:
	hpre_rsa_clear_ctx(ctx, false);
	return ret;
}

static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, false);
}

static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			       unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, true);
}

static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	/* For 512- and 1536-bit key sizes, use the soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
		return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);

	return ctx->key_sz;
}

static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
	if (IS_ERR(ctx->rsa.soft_tfm)) {
		pr_err("Can not alloc_akcipher!\n");
		return PTR_ERR(ctx->rsa.soft_tfm);
	}

	akcipher_set_reqsize(tfm, sizeof(struct hpre_asym_request) +
				  hpre_align_pd());

	ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
	if (ret)
		crypto_free_akcipher(ctx->rsa.soft_tfm);

	return ret;
}

static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	hpre_rsa_clear_ctx(ctx, true);
	crypto_free_akcipher(ctx->rsa.soft_tfm);
}

static void hpre_key_to_big_end(u8 *data, int len)
{
	int i, j;

	for (i = 0; i < len / 2; i++) {
		j = len - i - 1;
		swap(data[j], data[i]);
	}
}

static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
			       bool is_ecdh)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;
	unsigned int shift = sz << 1;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (is_ecdh && ctx->ecdh.p) {
		/* ecdh: p->a->k->b */
		memzero_explicit(ctx->ecdh.p + shift, sz);
		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
		ctx->ecdh.p = NULL;
	} else if (!is_ecdh && ctx->curve25519.p) {
		/* curve25519: p->a->k */
		memzero_explicit(ctx->curve25519.p + shift, sz);
		dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
				  ctx->curve25519.dma_p);
		ctx->curve25519.p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

/*
 * HPRE supports key sizes of 192/224/256/384/521 bits and rounds each up
 * to a hardware size: bits <= 256 use 256; 256 < bits <= 384 use 384;
 * 384 < bits <= 576 use 576. When the parameter bit width is smaller, the
 * high-order bytes are zero-filled in software, so TASK_LENGTH1 is
 * 0x3/0x5/0x8.
 */
static unsigned int hpre_ecdh_supported_curve(unsigned short id)
{
	switch (id) {
	case ECC_CURVE_NIST_P192:
	case ECC_CURVE_NIST_P256:
		return HPRE_ECC_HW256_KSZ_B;
	case ECC_CURVE_NIST_P384:
		return HPRE_ECC_HW384_KSZ_B;
	default:
		break;
	}

	return 0;
}

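/*
 * Copy a curve parameter stored as an array of u64 digits into a cur_sz
 * byte buffer, then convert it to the big-endian form the hardware expects.
 */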
static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
{
	unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
	u8 i = 0;

	while (i < ndigits - 1) {
		memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
		i++;
	}

	memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
	hpre_key_to_big_end((u8 *)addr, cur_sz);
}

static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
				unsigned int cur_sz)
{
	unsigned int shifta = ctx->key_sz << 1;
	unsigned int shiftb = ctx->key_sz << 2;
	void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
	void *a = ctx->ecdh.p + shifta - cur_sz;
	void *b = ctx->ecdh.p + shiftb - cur_sz;
	void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
	void *y = ctx->ecdh.g + shifta - cur_sz;
	const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
	char *n;

	if (unlikely(!curve))
		return -EINVAL;

	n = kzalloc(ctx->key_sz, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
	fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
	fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
	fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
	fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
	fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);

	if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
		kfree(n);
		return -EINVAL;
	}

	kfree(n);
	return 0;
}

static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
{
	switch (id) {
	case ECC_CURVE_NIST_P192:
		return HPRE_ECC_NIST_P192_N_SIZE;
	case ECC_CURVE_NIST_P256:
		return HPRE_ECC_NIST_P256_N_SIZE;
	case ECC_CURVE_NIST_P384:
		return HPRE_ECC_NIST_P384_N_SIZE;
	default:
		break;
	}

	return 0;
}

static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
{
	struct device *dev = ctx->dev;
	unsigned int sz, shift, curve_sz;
	int ret;

	ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
	if (!ctx->key_sz)
		return -EINVAL;

	curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
	if (!curve_sz || params->key_size > curve_sz)
		return -EINVAL;

	sz = ctx->key_sz;

	if (!ctx->ecdh.p) {
		ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
						 GFP_KERNEL);
		if (!ctx->ecdh.p)
			return -ENOMEM;
	}

	shift = sz << 2;
	ctx->ecdh.g = ctx->ecdh.p + shift;
	ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;

	ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
	if (ret) {
		dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
		ctx->ecdh.p = NULL;
		return ret;
	}

	return 0;
}

static bool hpre_key_is_zero(char *key, unsigned short key_sz)
{
	int i;

	for (i = 0; i < key_sz; i++)
		if (key[i])
			return false;

	return true;
}

static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
{
	struct device *dev = ctx->dev;
	int ret;

	ret = crypto_get_default_rng();
	if (ret) {
		dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
		return ret;
	}

	ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
				   params->key_size);
	crypto_put_default_rng();
	if (ret)
		dev_err(dev, "failed to get rng, ret = %d!\n", ret);

	return ret;
}

static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
				unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	unsigned int sz, sz_shift, curve_sz;
	struct device *dev = ctx->dev;
	char key[HPRE_ECC_MAX_KSZ];
	struct ecdh params;
	int ret;

	if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
		dev_err(dev, "failed to decode ecdh key!\n");
		return -EINVAL;
	}

	/* Use stdrng to generate private key */
	if (!params.key || !params.key_size) {
		params.key = key;
		curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
		if (!curve_sz) {
			dev_err(dev, "Invalid curve size!\n");
			return -EINVAL;
		}

		params.key_size = curve_sz - 1;
		ret = ecdh_gen_privkey(ctx, &params);
		if (ret)
			return ret;
	}

	if (hpre_key_is_zero(params.key, params.key_size)) {
		dev_err(dev, "Invalid hpre key!\n");
		return -EINVAL;
	}

	hpre_ecc_clear_ctx(ctx, false, true);

	ret = hpre_ecdh_set_param(ctx, &params);
	if (ret < 0) {
		dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
		return ret;
	}

	sz = ctx->key_sz;
	sz_shift = (sz << 1) + sz - params.key_size;
	memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);

	return 0;
}

static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
				      struct hpre_asym_request *req,
				      struct scatterlist *dst,
				      struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t dma;

	dma = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (src && req->src)
		dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);

	dma = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (req->dst)
		dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
	if (dst)
		dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
}

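/*
 * ECDH completion: the hardware returns x and y each right-aligned in a
 * key_sz slot; trim both coordinates to the curve size and pack them
 * contiguously before completing the request.
 */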
static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
{
	unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req = NULL;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	char *p;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.ecdh;
	areq->dst_len = ctx->key_sz << 1;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	p = sg_virt(areq->dst);
	memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
	memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);

	hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);

	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
				     struct kpp_request *req)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (req->dst_len < ctx->key_sz << 1) {
		req->dst_len = ctx->key_sz << 1;
		return -EINVAL;
	}

	tmp = kpp_request_ctx(req);
	h_req = PTR_ALIGN(tmp, hpre_align_sz());
	h_req->cb = hpre_ecdh_cb;
	h_req->areq.ecdh = req;
	msg = &h_req->req;
	memset(msg, 0, sizeof(*msg));
	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->key = cpu_to_le64(ctx->ecdh.dma_p);

	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);
	return 0;
}

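/*
 * Bounce the source point into a DMA buffer: gx and gy each occupy half
 * of the input and are left-padded with zeros to key_sz in the buffer.
 */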
static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
				   struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	unsigned int tmpshift;
	dma_addr_t dma = 0;
	void *ptr;
	int shift;

	/* Src_data includes gx and gy. */
	shift = ctx->key_sz - (len >> 1);
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	tmpshift = ctx->key_sz << 1;
	scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
	memcpy(ptr + shift, ptr + tmpshift, len >> 1);
	memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);

	hpre_req->src = ptr;
	msg->in = cpu_to_le64(dma);
	return 0;
}

static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
				   struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
		dev_err(dev, "data or data length is illegal!\n");
		return -EINVAL;
	}

	hpre_req->dst = NULL;
	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	msg->out = cpu_to_le64(dma);
	return 0;
}

static int hpre_ecdh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_ecdh_msg_request_set(ctx, req);
	if (unlikely(ret)) {
		dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
		return ret;
	}

	if (req->src) {
		ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
		if (unlikely(ret)) {
			dev_err(dev, "failed to init src data, ret = %d!\n", ret);
			goto clear_all;
		}
	} else {
		msg->in = cpu_to_le64(ctx->ecdh.dma_g);
	}

	ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
	if (unlikely(ret)) {
		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
		goto clear_all;
	}

	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
	return ret;
}

static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	/* max size is the pub_key_size, including x and y */
	return ctx->key_sz << 1;
}

static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P192;

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P256;

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P384;

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_ecc_clear_ctx(ctx, true, true);
}

static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
				       unsigned int len)
{
	u8 secret[CURVE25519_KEY_SIZE] = { 0 };
	unsigned int sz = ctx->key_sz;
	const struct ecc_curve *curve;
	unsigned int shift = sz << 1;
	void *p;

	/*
	 * The key from 'buf' is in little-endian form. Preprocess it as
	 * described in RFC 7748 ("k[0] &= 248, k[31] &= 127, k[31] |= 64"),
	 * then convert it to big-endian. Only then does the result match the
	 * software curve25519 implementation in crypto.
	 */
	memcpy(secret, buf, len);
	curve25519_clamp_secret(secret);
	hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);

	p = ctx->curve25519.p + sz - len;

	curve = ecc_get_curve25519();

	/* fill curve parameters */
	fill_curve_param(p, curve->p, len, curve->g.ndigits);
	fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
	memcpy(p + shift, secret, len);
	fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
	memzero_explicit(secret, CURVE25519_KEY_SIZE);
}

static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
				     unsigned int len)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;
	unsigned int shift = sz << 1;

	/* p->a->k->gx */
	if (!ctx->curve25519.p) {
		ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
						       &ctx->curve25519.dma_p,
						       GFP_KERNEL);
		if (!ctx->curve25519.p)
			return -ENOMEM;
	}

	ctx->curve25519.g = ctx->curve25519.p + shift + sz;
	ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;

	hpre_curve25519_fill_curve(ctx, buf, len);

	return 0;
}

static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
				      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	int ret = -EINVAL;

	if (len != CURVE25519_KEY_SIZE ||
	    !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
		dev_err(dev, "key is null or key length is not 32 bytes!\n");
		return ret;
	}

	/* Free old secret if any */
	hpre_ecc_clear_ctx(ctx, false, false);

	ctx->key_sz = CURVE25519_KEY_SIZE;
	ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
	if (ret) {
		dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
		hpre_ecc_clear_ctx(ctx, false, false);
		return ret;
	}

	return 0;
}

static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
					    struct hpre_asym_request *req,
					    struct scatterlist *dst,
					    struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t dma;

	dma = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (src && req->src)
		dma_free_coherent(dev, ctx->key_sz, req->src, dma);

	dma = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (req->dst)
		dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
	if (dst)
		dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
}

static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req = NULL;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.curve25519;
	areq->dst_len = ctx->key_sz;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);

	hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);

	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
					   struct kpp_request *req)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (unlikely(req->dst_len < ctx->key_sz)) {
		req->dst_len = ctx->key_sz;
		return -EINVAL;
	}

	tmp = kpp_request_ctx(req);
	h_req = PTR_ALIGN(tmp, hpre_align_sz());
	h_req->cb = hpre_curve25519_cb;
	h_req->areq.curve25519 = req;
	msg = &h_req->req;
	memset(msg, 0, sizeof(*msg));
	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->key = cpu_to_le64(ctx->curve25519.dma_p);

	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);
	return 0;
}

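/*
 * Reduce a big-endian value in (p, 2^255) modulo p = 2^255 - 19: the
 * difference is smaller than 19, so only the final byte can be nonzero.
 */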
static void hpre_curve25519_src_modulo_p(u8 *ptr)
{
	int i;

	for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
		ptr[i] = 0;

	/* The result is ptr's last byte minus 0xed (the last byte of p) */
	ptr[i] -= 0xed;
}

static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
				    struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	u8 p[CURVE25519_KEY_SIZE] = { 0 };
	const struct ecc_curve *curve;
	dma_addr_t dma = 0;
	u8 *ptr;

	if (len != CURVE25519_KEY_SIZE) {
		dev_err(dev, "src_data len is not 32 bytes, len = %u!\n", len);
		return -EINVAL;
	}

	ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	scatterwalk_map_and_copy(ptr, data, 0, len, 0);

	if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
		dev_err(dev, "gx is null!\n");
		goto err;
	}

	/*
	 * Src_data (gx) is in little-endian order; the MSB of its final byte
	 * is masked as described in RFC 7748, and the value is then converted
	 * to big-endian form so the hardware can use it.
	 */
	ptr[31] &= 0x7f;
	hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);

	curve = ecc_get_curve25519();

	fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);

	/*
	 * When src_data falls in the range (2^255 - 19) to (2^255 - 1), it is
	 * not less than p: reject it if it equals p, otherwise reduce it
	 * modulo p before use.
	 */
	if (memcmp(ptr, p, ctx->key_sz) == 0) {
		dev_err(dev, "gx is p!\n");
		goto err;
	} else if (memcmp(ptr, p, ctx->key_sz) > 0) {
		hpre_curve25519_src_modulo_p(ptr);
	}

	hpre_req->src = ptr;
	msg->in = cpu_to_le64(dma);
	return 0;

err:
	dma_free_coherent(dev, ctx->key_sz, ptr, dma);
	return -EINVAL;
}

static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
				    struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	if (!data || !sg_is_last(data) || len != ctx->key_sz) {
		dev_err(dev, "data or data length is illegal!\n");
		return -EINVAL;
	}

	hpre_req->dst = NULL;
	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	msg->out = cpu_to_le64(dma);
	return 0;
}

static int hpre_curve25519_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_curve25519_msg_request_set(ctx, req);
	if (unlikely(ret)) {
		dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
		return ret;
	}

	if (req->src) {
		ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
		if (unlikely(ret)) {
			dev_err(dev, "failed to init src data, ret = %d!\n",
				ret);
			goto clear_all;
		}
	} else {
		msg->in = cpu_to_le64(ctx->curve25519.dma_g);
	}

	ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
	if (unlikely(ret)) {
		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
		goto clear_all;
	}

	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
	return ret;
}

static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_ecc_clear_ctx(ctx, true, false);
}

static struct akcipher_alg rsa = {
	.sign = hpre_rsa_dec,
	.verify = hpre_rsa_enc,
	.encrypt = hpre_rsa_enc,
	.decrypt = hpre_rsa_dec,
	.set_pub_key = hpre_rsa_setpubkey,
	.set_priv_key = hpre_rsa_setprivkey,
	.max_size = hpre_rsa_max_size,
	.init = hpre_rsa_init_tfm,
	.exit = hpre_rsa_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "rsa",
		.cra_driver_name = "hpre-rsa",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg dh = {
	.set_secret = hpre_dh_set_secret,
	.generate_public_key = hpre_dh_compute_value,
	.compute_shared_secret = hpre_dh_compute_value,
	.max_size = hpre_dh_max_size,
	.init = hpre_dh_init_tfm,
	.exit = hpre_dh_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "dh",
		.cra_driver_name = "hpre-dh",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg ecdh_curves[] = {
	{
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p192_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p192",
			.cra_driver_name = "hpre-ecdh-nist-p192",
			.cra_module = THIS_MODULE,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p256_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p256",
			.cra_driver_name = "hpre-ecdh-nist-p256",
			.cra_module = THIS_MODULE,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p384_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p384",
			.cra_driver_name = "hpre-ecdh-nist-p384",
			.cra_module = THIS_MODULE,
		},
	}
};

static struct kpp_alg curve25519_alg = {
	.set_secret = hpre_curve25519_set_secret,
	.generate_public_key = hpre_curve25519_compute_value,
	.compute_shared_secret = hpre_curve25519_compute_value,
	.max_size = hpre_curve25519_max_size,
	.init = hpre_curve25519_init_tfm,
	.exit = hpre_curve25519_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "curve25519",
		.cra_driver_name = "hpre-curve25519",
		.cra_module = THIS_MODULE,
	},
};

static int hpre_register_rsa(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return 0;

	rsa.base.cra_flags = 0;
	ret = crypto_register_akcipher(&rsa);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_rsa(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return;

	crypto_unregister_akcipher(&rsa);
}

static int hpre_register_dh(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return 0;

	ret = crypto_register_kpp(&dh);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_dh(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return;

	crypto_unregister_kpp(&dh);
}

static int hpre_register_ecdh(struct hisi_qm *qm)
{
	int ret, i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return 0;

	for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {
		ret = crypto_register_kpp(&ecdh_curves[i]);
		if (ret) {
			dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",
				ecdh_curves[i].base.cra_name, ret);
			goto unreg_kpp;
		}
	}

	return 0;

unreg_kpp:
	for (--i; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);

	return ret;
}

static void hpre_unregister_ecdh(struct hisi_qm *qm)
{
	int i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return;

	for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);
}

static int hpre_register_x25519(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
		return 0;

	ret = crypto_register_kpp(&curve25519_alg);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_x25519(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
		return;

	crypto_unregister_kpp(&curve25519_alg);
}

int hpre_algs_register(struct hisi_qm *qm)
{
	int ret;

	ret = hpre_register_rsa(qm);
	if (ret)
		return ret;

	ret = hpre_register_dh(qm);
	if (ret)
		goto unreg_rsa;

	ret = hpre_register_ecdh(qm);
	if (ret)
		goto unreg_dh;

	ret = hpre_register_x25519(qm);
	if (ret)
		goto unreg_ecdh;

	return ret;

unreg_ecdh:
	hpre_unregister_ecdh(qm);
unreg_dh:
	hpre_unregister_dh(qm);
unreg_rsa:
	hpre_unregister_rsa(qm);
	return ret;
}

void hpre_algs_unregister(struct hisi_qm *qm)
{
	hpre_unregister_x25519(qm);
	hpre_unregister_ecdh(qm);
	hpre_unregister_dh(qm);
	hpre_unregister_rsa(qm);
}