// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/virtio_config.h>
#include <linux/cpu.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

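/**
 * virtcrypto_clear_request() - free the memory attached to a finished request
 * @vc_req: request to clear, may be NULL
 *
 * req_data may hold keys or plaintext, so it is freed with kfree_sensitive();
 * the sgs array only stores scatterlist pointers and is freed normally.
 */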
void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
{
	if (vc_req) {
		kfree_sensitive(vc_req->req_data);
		kfree(vc_req->sgs);
	}
}

static void virtio_crypto_ctrlq_callback(struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
	complete(&vc_ctrl_req->compl);
}

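/*
 * Control-vq interrupt handler. ctrl_lock serializes access to the control
 * vring; it is dropped around each per-request completion so that
 * complete() is never called with the lock held. The virtqueue_disable_cb()/
 * virtqueue_enable_cb() loop re-polls the ring until no notification race
 * remains (or the device is found broken).
 */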
static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
{
	struct virtio_crypto *vcrypto = vq->vdev->priv;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {
			spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
			virtio_crypto_ctrlq_callback(vc_ctrl_req);
			spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
}

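/**
 * virtio_crypto_ctrl_vq_request() - send a request on the control virtqueue
 * @vcrypto: virtio crypto device
 * @sgs: scatterlist table describing the request and response buffers
 * @out_sgs: number of driver-to-device entries at the head of @sgs
 * @in_sgs: number of device-to-driver entries that follow
 * @vc_ctrl_req: per-request state; its completion is signalled by the
 *	control-vq callback once the device returns the buffers
 *
 * Sleeps until the device completes the request, so callers must be in
 * process context. A minimal calling sketch (illustrative only: the
 * ctrl/input member names are assumptions based on struct
 * virtio_crypto_ctrl_request in virtio_crypto_common.h, and real callers
 * in the algorithm glue also chain per-operation payload sgs):
 *
 *	struct scatterlist outhdr_sg, inhdr_sg, *sgs[2];
 *
 *	sg_init_one(&outhdr_sg, &vc_ctrl_req->ctrl, sizeof(vc_ctrl_req->ctrl));
 *	sg_init_one(&inhdr_sg, &vc_ctrl_req->input, sizeof(vc_ctrl_req->input));
 *	sgs[0] = &outhdr_sg;
 *	sgs[1] = &inhdr_sg;
 *	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, 1, 1, vc_ctrl_req);
 *
 * Return: 0 on success, or a negative errno if the buffers could not be
 * queued on the control virtqueue.
 */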
int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
				  unsigned int out_sgs, unsigned int in_sgs,
				  struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
	int err;
	unsigned long flags;

	init_completion(&vc_ctrl_req->compl);

	spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, out_sgs, in_sgs, vc_ctrl_req, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
		return err;
	}

	virtqueue_kick(vcrypto->ctrl_vq);
	spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);

	wait_for_completion(&vc_ctrl_req->compl);

	return 0;
}

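/*
 * Data-queue completion handling runs in tasklet context rather than in the
 * vring interrupt callback itself, keeping the interrupt path short; each
 * completed request's alg_cb finishes the corresponding crypto API request.
 */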
static void virtcrypto_done_task(unsigned long data)
{
	struct data_queue *data_vq = (struct data_queue *)data;
	struct virtqueue *vq = data_vq->vq;
	struct virtio_crypto_request *vc_req;
	unsigned int len;

	do {
		virtqueue_disable_cb(vq);
		while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
			if (vc_req->alg_cb)
				vc_req->alg_cb(vc_req, len);
		}
	} while (!virtqueue_enable_cb(vq));
}

static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
	struct virtio_crypto *vcrypto = vq->vdev->priv;
	struct data_queue *dq = &vcrypto->data_vq[vq->index];

	tasklet_schedule(&dq->done_task);
}

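/*
 * Set up all virtqueues: build the name/callback tables expected by
 * virtio_find_vqs(), then attach a crypto engine and a completion tasklet
 * to every data queue.
 */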
static int virtcrypto_find_vqs(struct virtio_crypto *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;
	struct device *dev = &vi->vdev->dev;

	/*
	 * We expect one data virtqueue, followed by up to N-1 further data
	 * queues used in multiqueue mode, followed by the control vq.
	 */
	total_vqs = vi->max_data_queues + 1;

	/* Allocate space for find_vqs parameters */
	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;

	/* Parameters for control virtqueue */
	callbacks[total_vqs - 1] = virtcrypto_ctrlq_callback;
	names[total_vqs - 1] = "controlq";

	/* Allocate/initialize parameters for data virtqueues */
	for (i = 0; i < vi->max_data_queues; i++) {
		callbacks[i] = virtcrypto_dataq_callback;
		snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
			 "dataq.%d", i);
		names[i] = vi->data_vq[i].name;
	}

	ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
	if (ret)
		goto err_find;

	vi->ctrl_vq = vqs[total_vqs - 1];

	for (i = 0; i < vi->max_data_queues; i++) {
		spin_lock_init(&vi->data_vq[i].lock);
		vi->data_vq[i].vq = vqs[i];
		/* Initialize crypto engine */
		vi->data_vq[i].engine = crypto_engine_alloc_init(dev, 1);
		if (!vi->data_vq[i].engine) {
			ret = -ENOMEM;
			goto err_engine;
		}
		tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
			     (unsigned long)&vi->data_vq[i]);
	}

	kfree(names);
	kfree(callbacks);
	kfree(vqs);

	return 0;

err_engine:
	/* Unwind the engines already allocated for earlier data queues */
	while (--i >= 0)
		if (vi->data_vq[i].engine)
			crypto_engine_exit(vi->data_vq[i].engine);
err_find:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}

static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
{
	vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
			      GFP_KERNEL);
	if (!vi->data_vq)
		return -ENOMEM;

	return 0;
}

static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
{
	int i;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_data_queues; i++)
			virtqueue_set_affinity(vi->data_vq[i].vq, NULL);

		vi->affinity_hint_set = false;
	}
}

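/*
 * Example mapping (assuming four data queues and at least four online CPUs):
 * dataq.0 gets an affinity hint for CPU 0, dataq.1 for CPU 1, and so on.
 * With more CPUs than queues, the remaining CPUs simply get no private queue.
 */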
static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
{
	int i = 0;
	int cpu;

	/*
	 * In single queue mode, we don't set the cpu affinity.
	 */
	if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
		virtcrypto_clean_affinity(vcrypto, -1);
		return;
	}

	/*
	 * In multiqueue mode, we let each queue be private to one CPU by
	 * setting the affinity hint, eliminating contention on the queue.
	 *
	 * TODO: add CPU hotplug support by registering a CPU notifier.
	 */
	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
		if (++i >= vcrypto->max_data_queues)
			break;
	}

	vcrypto->affinity_hint_set = true;
}

static void virtcrypto_free_queues(struct virtio_crypto *vi)
{
	kfree(vi->data_vq);
}

static int virtcrypto_init_vqs(struct virtio_crypto *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtcrypto_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtcrypto_find_vqs(vi);
	if (ret)
		goto err_free;

	get_online_cpus();
	virtcrypto_set_affinity(vi);
	put_online_cpus();

	return 0;

err_free:
	virtcrypto_free_queues(vi);
err:
	return ret;
}

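/*
 * The device reports its state in the status field of the config space.
 * VIRTIO_CRYPTO_S_HW_READY is the only defined status bit; a transition of
 * that bit starts or stops the accelerator accordingly.
 */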
static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
{
	u32 status;
	int err;

	virtio_cread_le(vcrypto->vdev,
			struct virtio_crypto_config, status, &status);

	/*
	 * Unknown status bits would be a host error and the driver
	 * should consider the device to be broken.
	 */
	if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
		dev_warn(&vcrypto->vdev->dev,
			 "Unknown status bits: 0x%x\n", status);

		virtio_break_device(vcrypto->vdev);
		return -EPERM;
	}

	if (vcrypto->status == status)
		return 0;

	vcrypto->status = status;

	if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
		err = virtcrypto_dev_start(vcrypto);
		if (err) {
			dev_err(&vcrypto->vdev->dev,
				"Failed to start virtio crypto device.\n");

			return -EPERM;
		}
		dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
	} else {
		virtcrypto_dev_stop(vcrypto);
		dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
	}

	return 0;
}

static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
{
	int32_t i;
	int ret;

	for (i = 0; i < vcrypto->max_data_queues; i++) {
		if (vcrypto->data_vq[i].engine) {
			ret = crypto_engine_start(vcrypto->data_vq[i].engine);
			if (ret)
				goto err;
		}
	}

	return 0;

err:
	while (--i >= 0)
		if (vcrypto->data_vq[i].engine)
			crypto_engine_exit(vcrypto->data_vq[i].engine);

	return ret;
}

static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
{
	u32 i;

	for (i = 0; i < vcrypto->max_data_queues; i++)
		if (vcrypto->data_vq[i].engine)
			crypto_engine_exit(vcrypto->data_vq[i].engine);
}

static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
{
	struct virtio_device *vdev = vcrypto->vdev;

	virtcrypto_clean_affinity(vcrypto, -1);

	vdev->config->del_vqs(vdev);

	virtcrypto_free_queues(vcrypto);
}

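/*
 * Probe flow: validate the transport (VIRTIO_F_VERSION_1 and config space
 * access are mandatory), read the device capabilities from the config space,
 * register the device with the device manager, bring up the virtqueues and
 * crypto engines, and only then mark the device ready and act on its status.
 */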
static int virtcrypto_probe(struct virtio_device *vdev)
{
	int err = -EFAULT;
	struct virtio_crypto *vcrypto;
	u32 max_data_queues = 0, max_cipher_key_len = 0;
	u32 max_auth_key_len = 0;
	u64 max_size = 0;
	u32 cipher_algo_l = 0;
	u32 cipher_algo_h = 0;
	u32 hash_algo = 0;
	u32 mac_algo_l = 0;
	u32 mac_algo_h = 0;
	u32 aead_algo = 0;
	u32 akcipher_algo = 0;
	u32 crypto_services = 0;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		return -ENODEV;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
		/*
		 * If the accelerator is connected to a node with no memory
		 * there is no point in using the accelerator since the remote
		 * memory transaction will be very slow.
		 */
		dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
		return -EINVAL;
	}

	vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
			       dev_to_node(&vdev->dev));
	if (!vcrypto)
		return -ENOMEM;

	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_dataqueues, &max_data_queues);
	if (max_data_queues < 1)
		max_data_queues = 1;

	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_cipher_key_len, &max_cipher_key_len);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_auth_key_len, &max_auth_key_len);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_size, &max_size);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			crypto_services, &crypto_services);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			cipher_algo_l, &cipher_algo_l);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			cipher_algo_h, &cipher_algo_h);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			hash_algo, &hash_algo);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			mac_algo_l, &mac_algo_l);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			mac_algo_h, &mac_algo_h);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			aead_algo, &aead_algo);
	if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
		virtio_cread_le(vdev, struct virtio_crypto_config,
				akcipher_algo, &akcipher_algo);

	/* Add virtio crypto device to global table */
	err = virtcrypto_devmgr_add_dev(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
		goto free;
	}
	vcrypto->owner = THIS_MODULE;
	vdev->priv = vcrypto;
	vcrypto->vdev = vdev;

	spin_lock_init(&vcrypto->ctrl_lock);

	/* Use a single data queue by default */
	vcrypto->curr_queue = 1;
	vcrypto->max_data_queues = max_data_queues;
	vcrypto->max_cipher_key_len = max_cipher_key_len;
	vcrypto->max_auth_key_len = max_auth_key_len;
	vcrypto->max_size = max_size;
	vcrypto->crypto_services = crypto_services;
	vcrypto->cipher_algo_l = cipher_algo_l;
	vcrypto->cipher_algo_h = cipher_algo_h;
	vcrypto->mac_algo_l = mac_algo_l;
	vcrypto->mac_algo_h = mac_algo_h;
	vcrypto->hash_algo = hash_algo;
	vcrypto->aead_algo = aead_algo;
	vcrypto->akcipher_algo = akcipher_algo;

	dev_info(&vdev->dev,
		 "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
		 vcrypto->max_data_queues,
		 vcrypto->max_cipher_key_len,
		 vcrypto->max_auth_key_len,
		 vcrypto->max_size);

	err = virtcrypto_init_vqs(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to initialize vqs.\n");
		goto free_dev;
	}

	err = virtcrypto_start_crypto_engines(vcrypto);
	if (err)
		goto free_vqs;

	virtio_device_ready(vdev);

	err = virtcrypto_update_status(vcrypto);
	if (err)
		goto free_engines;

	return 0;

free_engines:
	virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
	vcrypto->vdev->config->reset(vdev);
	virtcrypto_del_vqs(vcrypto);
free_dev:
	virtcrypto_devmgr_rm_dev(vcrypto);
free:
	kfree(vcrypto);
	return err;
}

static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
{
	struct virtio_crypto_request *vc_req;
	int i;
	struct virtqueue *vq;

	for (i = 0; i < vcrypto->max_data_queues; i++) {
		vq = vcrypto->data_vq[i].vq;
		while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL)
			virtcrypto_clear_request(vc_req);
	}
}

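/*
 * Teardown order matters here: stop the accelerator and kill the completion
 * tasklets first, then reset the device so it no longer touches the vrings
 * before unused buffers are detached and freed.
 */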
static void virtcrypto_remove(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;
	int i;

	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);
	for (i = 0; i < vcrypto->max_data_queues; i++)
		tasklet_kill(&vcrypto->data_vq[i].done_task);
	vdev->config->reset(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	virtcrypto_devmgr_rm_dev(vcrypto);
	kfree(vcrypto);
}

static void virtcrypto_config_changed(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	virtcrypto_update_status(vcrypto);
}

#ifdef CONFIG_PM_SLEEP
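/*
 * Suspend tears the virtqueues and engines down completely (the device is
 * reset across suspend/resume), so restore has to rebuild them from scratch
 * and restart the accelerator.
 */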
static int virtcrypto_freeze(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	vdev->config->reset(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);

	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	return 0;
}

static int virtcrypto_restore(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;
	int err;

	err = virtcrypto_init_vqs(vcrypto);
	if (err)
		return err;

	err = virtcrypto_start_crypto_engines(vcrypto);
	if (err)
		goto free_vqs;

	virtio_device_ready(vdev);

	err = virtcrypto_dev_start(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
		goto free_engines;
	}

	return 0;

free_engines:
	virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
	vcrypto->vdev->config->reset(vdev);
	virtcrypto_del_vqs(vcrypto);
	return err;
}
#endif

static const unsigned int features[] = {
	/* none */
};

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_crypto_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = virtcrypto_probe,
	.remove = virtcrypto_remove,
	.config_changed = virtcrypto_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtcrypto_freeze,
	.restore = virtcrypto_restore,
#endif
};

module_virtio_driver(virtio_crypto_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio crypto device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");