// SPDX-License-Identifier: GPL-2.0-or-later
/* Asymmetric algorithms supported by virtio crypto device
 *
 * Authors: zhenwei pi <pizhenwei@bytedance.com>
 *          lei he <helei.sig11@bytedance.com>
 *
 * Copyright 2022 Bytedance CO., LTD.
 */

#include <linux/mpi.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

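/*
 * Per-tfm state. The RSA context only caches the public modulus n, which
 * is needed locally to answer ->max_size() queries; all actual RSA
 * computation is offloaded to the device through a per-key session.
 */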
struct virtio_crypto_rsa_ctx {
        MPI n;
};

struct virtio_crypto_akcipher_ctx {
        struct crypto_engine_ctx enginectx;
        struct virtio_crypto *vcrypto;
        struct crypto_akcipher *tfm;
        bool session_valid;
        __u64 session_id;
        union {
                struct virtio_crypto_rsa_ctx rsa_ctx;
        };
};

struct virtio_crypto_akcipher_request {
        struct virtio_crypto_request base;
        struct virtio_crypto_akcipher_ctx *akcipher_ctx;
        struct akcipher_request *akcipher_req;
        void *src_buf;
        void *dst_buf;
        uint32_t opcode;
};

struct virtio_crypto_akcipher_algo {
        uint32_t algonum;
        uint32_t service;
        unsigned int active_devs;
        struct akcipher_alg algo;
};

static DEFINE_MUTEX(algs_lock);

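/*
 * Free the bounce buffers and per-request resources, then hand the
 * completed request back to the crypto engine with its final error code.
 */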
static void virtio_crypto_akcipher_finalize_req(
        struct virtio_crypto_akcipher_request *vc_akcipher_req,
        struct akcipher_request *req, int err)
{
        kfree(vc_akcipher_req->src_buf);
        kfree(vc_akcipher_req->dst_buf);
        vc_akcipher_req->src_buf = NULL;
        vc_akcipher_req->dst_buf = NULL;
        virtcrypto_clear_request(&vc_akcipher_req->base);

        crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err);
}

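/*
 * Data virtqueue completion callback: map the device status to an errno,
 * copy the device-written output back into the caller's dst scatterlist
 * (verify produces no output data), and finalize the request.
 */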
static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *vc_req, int len)
{
        struct virtio_crypto_akcipher_request *vc_akcipher_req =
                container_of(vc_req, struct virtio_crypto_akcipher_request, base);
        struct akcipher_request *akcipher_req;
        int error;

        switch (vc_req->status) {
        case VIRTIO_CRYPTO_OK:
                error = 0;
                break;
        case VIRTIO_CRYPTO_INVSESS:
        case VIRTIO_CRYPTO_ERR:
                error = -EINVAL;
                break;
        case VIRTIO_CRYPTO_BADMSG:
                error = -EBADMSG;
                break;
        case VIRTIO_CRYPTO_KEY_REJECTED:
                error = -EKEYREJECTED;
                break;
        default:
                error = -EIO;
                break;
        }

        akcipher_req = vc_akcipher_req->akcipher_req;
        if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY)
                sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
                                    vc_akcipher_req->dst_buf, akcipher_req->dst_len);
        virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
}

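/*
 * Create a device session over the control virtqueue. Descriptor layout:
 * OUT: control header plus algorithm parameters, OUT: raw key bytes,
 * IN: session_input carrying the device status and the new session id.
 * The key bytes are duplicated with kmemdup() so the control request
 * never references the caller's buffer directly.
 */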
static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
                                                   struct virtio_crypto_ctrl_header *header,
                                                   void *para, const uint8_t *key,
                                                   unsigned int keylen)
{
        struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        uint8_t *pkey;
        int err;
        unsigned int num_out = 0, num_in = 0;
        struct virtio_crypto_op_ctrl_req *ctrl;
        struct virtio_crypto_session_input *input;
        struct virtio_crypto_ctrl_request *vc_ctrl_req;

        pkey = kmemdup(key, keylen, GFP_ATOMIC);
        if (!pkey)
                return -ENOMEM;

        vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
        if (!vc_ctrl_req) {
                err = -ENOMEM;
                goto out;
        }

        ctrl = &vc_ctrl_req->ctrl;
        memcpy(&ctrl->header, header, sizeof(ctrl->header));
        memcpy(&ctrl->u, para, sizeof(ctrl->u));
        input = &vc_ctrl_req->input;
        input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);

        sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
        sgs[num_out++] = &outhdr_sg;

        sg_init_one(&key_sg, pkey, keylen);
        sgs[num_out++] = &key_sg;

        sg_init_one(&inhdr_sg, input, sizeof(*input));
        sgs[num_out + num_in++] = &inhdr_sg;

        err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
        if (err < 0)
                goto out;

        if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
                pr_err("virtio_crypto: Create session failed status: %u\n",
                       le32_to_cpu(input->status));
                err = -EINVAL;
                goto out;
        }

        ctx->session_id = le64_to_cpu(input->session_id);
        ctx->session_valid = true;
        err = 0;

out:
        kfree(vc_ctrl_req);
        kfree_sensitive(pkey);

        return err;
}

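/*
 * Destroy the device session, if any. Descriptor layout: OUT: control
 * header carrying the session id, IN: a single status byte written by
 * the device.
 */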
static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akcipher_ctx *ctx)
{
        struct scatterlist outhdr_sg, inhdr_sg, *sgs[2];
        struct virtio_crypto_destroy_session_req *destroy_session;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        unsigned int num_out = 0, num_in = 0;
        int err;
        struct virtio_crypto_op_ctrl_req *ctrl;
        struct virtio_crypto_inhdr *ctrl_status;
        struct virtio_crypto_ctrl_request *vc_ctrl_req;

        if (!ctx->session_valid)
                return 0;

        vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
        if (!vc_ctrl_req)
                return -ENOMEM;

        ctrl_status = &vc_ctrl_req->ctrl_status;
        ctrl_status->status = VIRTIO_CRYPTO_ERR;
        ctrl = &vc_ctrl_req->ctrl;
        ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
        ctrl->header.queue_id = 0;

        destroy_session = &ctrl->u.destroy_session;
        destroy_session->session_id = cpu_to_le64(ctx->session_id);

        sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
        sgs[num_out++] = &outhdr_sg;

        sg_init_one(&inhdr_sg, &ctrl_status->status, sizeof(ctrl_status->status));
        sgs[num_out + num_in++] = &inhdr_sg;

        err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
        if (err < 0)
                goto out;

        if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
                pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
                       ctrl_status->status, destroy_session->session_id);
                err = -EINVAL;
                goto out;
        }

        err = 0;
        ctx->session_valid = false;

out:
        kfree(vc_ctrl_req);

        return err;
}

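/*
 * Build and submit one data virtqueue request. Descriptor layout:
 * OUT: operation header, OUT: source data, IN: destination data (for
 * verify the signature and digest both travel in the OUT direction),
 * IN: a single status byte. The caller's src/dst scatterlists are
 * linearized into freshly allocated bounce buffers first.
 */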
static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request *vc_akcipher_req,
                struct akcipher_request *req, struct data_queue *data_vq)
{
        struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
        struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        struct virtio_crypto_op_data_req *req_data = vc_req->req_data;
        struct scatterlist *sgs[4], outhdr_sg, inhdr_sg, srcdata_sg, dstdata_sg;
        void *src_buf = NULL, *dst_buf = NULL;
        unsigned int num_out = 0, num_in = 0;
        int node = dev_to_node(&vcrypto->vdev->dev);
        unsigned long flags;
        int ret = -ENOMEM;
        bool verify = vc_akcipher_req->opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY;
        unsigned int src_len = verify ? req->src_len + req->dst_len : req->src_len;

        /* out header */
        sg_init_one(&outhdr_sg, req_data, sizeof(*req_data));
        sgs[num_out++] = &outhdr_sg;

        /* src data */
        src_buf = kcalloc_node(src_len, 1, GFP_KERNEL, node);
        if (!src_buf)
                goto err;

        if (verify) {
                /* for verify operation, both src and dst data work as OUT direction */
                sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
                sg_init_one(&srcdata_sg, src_buf, src_len);
                sgs[num_out++] = &srcdata_sg;
        } else {
                sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
                sg_init_one(&srcdata_sg, src_buf, src_len);
                sgs[num_out++] = &srcdata_sg;

                /* dst data */
                dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
                if (!dst_buf)
                        goto err;

                sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
                sgs[num_out + num_in++] = &dstdata_sg;
        }

        vc_akcipher_req->src_buf = src_buf;
        vc_akcipher_req->dst_buf = dst_buf;

        /* in header */
        sg_init_one(&inhdr_sg, &vc_req->status, sizeof(vc_req->status));
        sgs[num_out + num_in++] = &inhdr_sg;

        spin_lock_irqsave(&data_vq->lock, flags);
        ret = virtqueue_add_sgs(data_vq->vq, sgs, num_out, num_in, vc_req, GFP_ATOMIC);
        virtqueue_kick(data_vq->vq);
        spin_unlock_irqrestore(&data_vq->lock, flags);
        if (ret)
                goto err;

        return 0;

err:
        kfree(src_buf);
        kfree(dst_buf);

        /* don't mask a virtqueue_add_sgs() failure as -ENOMEM */
        return ret;
}

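/*
 * crypto-engine ->do_one_request() handler: allocate and fill the virtio
 * operation header (opcode, algo, session id) and the akcipher length
 * parameters, then hand the request to the data virtqueue.
 */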
static int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
{
        struct akcipher_request *req = container_of(vreq, struct akcipher_request, base);
        struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
        struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        struct data_queue *data_vq = vc_req->dataq;
        struct virtio_crypto_op_header *header;
        struct virtio_crypto_akcipher_data_req *akcipher_req;
        int ret;

        vc_req->sgs = NULL;
        vc_req->req_data = kzalloc_node(sizeof(*vc_req->req_data),
                                        GFP_KERNEL, dev_to_node(&vcrypto->vdev->dev));
        if (!vc_req->req_data)
                return -ENOMEM;

        /* build request header */
        header = &vc_req->req_data->header;
        header->opcode = cpu_to_le32(vc_akcipher_req->opcode);
        header->algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
        header->session_id = cpu_to_le64(ctx->session_id);

        /* build request akcipher data */
        akcipher_req = &vc_req->req_data->u.akcipher_req;
        akcipher_req->para.src_data_len = cpu_to_le32(req->src_len);
        akcipher_req->para.dst_data_len = cpu_to_le32(req->dst_len);

        ret = __virtio_crypto_akcipher_do_req(vc_akcipher_req, req, data_vq);
        if (ret < 0) {
                kfree_sensitive(vc_req->req_data);
                vc_req->req_data = NULL;
                return ret;
        }

        return 0;
}

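/*
 * Common entry point for encrypt/decrypt/sign/verify: record the request
 * context and opcode, then defer submission to the crypto engine bound
 * to the chosen data virtqueue.
 */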
static int virtio_crypto_rsa_req(struct akcipher_request *req, uint32_t opcode)
{
        struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req);
        struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm);
        struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        /* Use the first data virtqueue as default */
        struct data_queue *data_vq = &vcrypto->data_vq[0];

        vc_req->dataq = data_vq;
        vc_req->alg_cb = virtio_crypto_dataq_akcipher_callback;
        vc_akcipher_req->akcipher_ctx = ctx;
        vc_akcipher_req->akcipher_req = req;
        vc_akcipher_req->opcode = opcode;

        return crypto_transfer_akcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_rsa_encrypt(struct akcipher_request *req)
{
        return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_ENCRYPT);
}

static int virtio_crypto_rsa_decrypt(struct akcipher_request *req)
{
        return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_DECRYPT);
}

static int virtio_crypto_rsa_sign(struct akcipher_request *req)
{
        return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_SIGN);
}

static int virtio_crypto_rsa_verify(struct akcipher_request *req)
{
        return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_VERIFY);
}

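/*
 * Parse the BER-encoded RSA key, cache the modulus for ->max_size(),
 * bind the tfm to a capable virtio device on first use, and (re)create
 * a device session from the raw key bytes plus padding/hash selection.
 */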
static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
                                     const void *key,
                                     unsigned int keylen,
                                     bool private,
                                     int padding_algo,
                                     int hash_algo)
{
        struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
        struct virtio_crypto *vcrypto;
        struct virtio_crypto_ctrl_header header;
        struct virtio_crypto_akcipher_session_para para;
        struct rsa_key rsa_key = {0};
        int node = virtio_crypto_get_current_node();
        uint32_t keytype;
        int ret;

        /* mpi_free will test n, just free it. */
        mpi_free(rsa_ctx->n);
        rsa_ctx->n = NULL;

        if (private) {
                keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
                ret = rsa_parse_priv_key(&rsa_key, key, keylen);
        } else {
                keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
                ret = rsa_parse_pub_key(&rsa_key, key, keylen);
        }

        if (ret)
                return ret;

        rsa_ctx->n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz);
        if (!rsa_ctx->n)
                return -ENOMEM;

        if (!ctx->vcrypto) {
                vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_AKCIPHER,
                                                  VIRTIO_CRYPTO_AKCIPHER_RSA);
                if (!vcrypto) {
                        pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
                        return -ENODEV;
                }

                ctx->vcrypto = vcrypto;
        } else {
                virtio_crypto_alg_akcipher_close_session(ctx);
        }

        /* set ctrl header */
        header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION);
        header.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
        header.queue_id = 0;

        /* set RSA para */
        para.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
        para.keytype = cpu_to_le32(keytype);
        para.keylen = cpu_to_le32(keylen);
        para.u.rsa.padding_algo = cpu_to_le32(padding_algo);
        para.u.rsa.hash_algo = cpu_to_le32(hash_algo);

        return virtio_crypto_alg_akcipher_init_session(ctx, &header, &para, key, keylen);
}

static int virtio_crypto_rsa_raw_set_priv_key(struct crypto_akcipher *tfm,
                                              const void *key,
                                              unsigned int keylen)
{
        return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
                                         VIRTIO_CRYPTO_RSA_RAW_PADDING,
                                         VIRTIO_CRYPTO_RSA_NO_HASH);
}

static int virtio_crypto_p1pad_rsa_sha1_set_priv_key(struct crypto_akcipher *tfm,
                                                     const void *key,
                                                     unsigned int keylen)
{
        return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
                                         VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
                                         VIRTIO_CRYPTO_RSA_SHA1);
}

static int virtio_crypto_rsa_raw_set_pub_key(struct crypto_akcipher *tfm,
                                             const void *key,
                                             unsigned int keylen)
{
        return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
                                         VIRTIO_CRYPTO_RSA_RAW_PADDING,
                                         VIRTIO_CRYPTO_RSA_NO_HASH);
}

static int virtio_crypto_p1pad_rsa_sha1_set_pub_key(struct crypto_akcipher *tfm,
                                                    const void *key,
                                                    unsigned int keylen)
{
        return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
                                         VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
                                         VIRTIO_CRYPTO_RSA_SHA1);
}

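/* Report the modulus size in bytes; callers size their buffers from it. */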
static unsigned int virtio_crypto_rsa_max_size(struct crypto_akcipher *tfm)
{
        struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;

        return mpi_get_size(rsa_ctx->n);
}

static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
{
        struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);

        ctx->tfm = tfm;
        ctx->enginectx.op.do_one_request = virtio_crypto_rsa_do_req;
        ctx->enginectx.op.prepare_request = NULL;
        ctx->enginectx.op.unprepare_request = NULL;

        return 0;
}

static void virtio_crypto_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
        struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;

        virtio_crypto_alg_akcipher_close_session(ctx);
        virtcrypto_dev_put(ctx->vcrypto);
        mpi_free(rsa_ctx->n);
        rsa_ctx->n = NULL;
}

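/*
 * Two instances are exposed: raw RSA ("rsa") and PKCS#1 v1.5 with SHA-1
 * ("pkcs1pad(rsa,sha1)"), both backed by the same request path. As an
 * illustrative sketch (not part of this file), a kernel user reaches
 * these handlers through the generic akcipher API:
 *
 *	tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	if (!IS_ERR(tfm))
 *		err = crypto_akcipher_set_pub_key(tfm, key, keylen);
 */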
static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
        {
                .algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
                .service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
                .algo = {
                        .encrypt = virtio_crypto_rsa_encrypt,
                        .decrypt = virtio_crypto_rsa_decrypt,
                        .set_pub_key = virtio_crypto_rsa_raw_set_pub_key,
                        .set_priv_key = virtio_crypto_rsa_raw_set_priv_key,
                        .max_size = virtio_crypto_rsa_max_size,
                        .init = virtio_crypto_rsa_init_tfm,
                        .exit = virtio_crypto_rsa_exit_tfm,
                        .reqsize = sizeof(struct virtio_crypto_akcipher_request),
                        .base = {
                                .cra_name = "rsa",
                                .cra_driver_name = "virtio-crypto-rsa",
                                .cra_priority = 150,
                                .cra_module = THIS_MODULE,
                                .cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
                        },
                },
        },
        {
                .algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
                .service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
                .algo = {
                        .encrypt = virtio_crypto_rsa_encrypt,
                        .decrypt = virtio_crypto_rsa_decrypt,
                        .sign = virtio_crypto_rsa_sign,
                        .verify = virtio_crypto_rsa_verify,
                        .set_pub_key = virtio_crypto_p1pad_rsa_sha1_set_pub_key,
                        .set_priv_key = virtio_crypto_p1pad_rsa_sha1_set_priv_key,
                        .max_size = virtio_crypto_rsa_max_size,
                        .init = virtio_crypto_rsa_init_tfm,
                        .exit = virtio_crypto_rsa_exit_tfm,
                        .reqsize = sizeof(struct virtio_crypto_akcipher_request),
                        .base = {
                                .cra_name = "pkcs1pad(rsa,sha1)",
                                .cra_driver_name = "virtio-pkcs1-rsa-with-sha1",
                                .cra_priority = 150,
                                .cra_module = THIS_MODULE,
                                .cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
                        },
                },
        },
};

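/*
 * Register this device's algorithms. An algorithm is registered with the
 * crypto API only when its first capable device shows up; later devices
 * just bump active_devs.
 */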
int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto)
{
        int ret = 0;
        int i = 0;

        mutex_lock(&algs_lock);

        for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
                uint32_t service = virtio_crypto_akcipher_algs[i].service;
                uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;

                if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
                        continue;

                if (virtio_crypto_akcipher_algs[i].active_devs == 0) {
                        ret = crypto_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
                        if (ret)
                                goto unlock;
                }

                virtio_crypto_akcipher_algs[i].active_devs++;
                dev_info(&vcrypto->vdev->dev, "Registered akcipher algo %s\n",
                         virtio_crypto_akcipher_algs[i].algo.base.cra_name);
        }

unlock:
        mutex_unlock(&algs_lock);
        return ret;
}

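/*
 * Mirror of the register path: drop this device's reference and
 * unregister an algorithm once its last capable device goes away.
 */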
void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto)
{
        int i = 0;

        mutex_lock(&algs_lock);

        for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
                uint32_t service = virtio_crypto_akcipher_algs[i].service;
                uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;

                if (virtio_crypto_akcipher_algs[i].active_devs == 0 ||
                    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
                        continue;

                if (virtio_crypto_akcipher_algs[i].active_devs == 1)
                        crypto_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);

                virtio_crypto_akcipher_algs[i].active_devs--;
        }

        mutex_unlock(&algs_lock);
}