/* Source: /kernel/linux/linux-5.10/crypto/cryptd.c (revision 8c2ecf20) */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd max per-CPU queue depth");
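
/*
 * The queue depth can only be set at load time (the parameter perm is 0,
 * so there is no writable sysfs entry). Illustrative examples:
 *
 *	modprobe cryptd cryptd_max_cpu_qlen=1500
 *
 * or, with cryptd built in, the boot parameter
 * "cryptd.cryptd_max_cpu_qlen=1500" (values are made up for illustration).
 */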

static struct workqueue_struct *cryptd_wq;

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	/*
	 * Protected by disabling BH to allow enqueueing from softirq context
	 * and dequeuing from kworker context (cryptd_queue_worker()).
	 */
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
	refcount_t refcnt;
	struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
	struct skcipher_request req;
};

struct cryptd_hash_ctx {
	refcount_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	refcount_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %u\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int err;
	struct cryptd_cpu_queue *cpu_queue;
	refcount_t *refcnt;

	local_bh_disable();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out;

	queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);

	if (!refcount_read(refcnt))
		goto out;

	refcount_inc(refcnt);

out:
	local_bh_enable();

	return err;
}
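
/*
 * A note on the refcount handling above: the per-tfm refcount is only
 * "armed" for transforms obtained through the cryptd_alloc_*() helpers
 * below, which set it to 1; for instances allocated directly through the
 * crypto API it stays zero and the refcounting is skipped entirely. While
 * armed, every enqueue takes an extra reference so that a concurrent
 * cryptd_free_*() cannot release the transform under a request still in
 * flight; the matching decrement happens in the completion paths.
 */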

/*
 * Called in workqueue context; does one unit of real crypto work (via
 * req->complete) and reschedules itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.
	 */
	local_bh_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(cryptd_wq, &cpu_queue->work);
}
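
/*
 * The -EINPROGRESS backlog notification above follows the standard crypto
 * API convention: a request enqueued with CRYPTO_TFM_REQ_MAY_BACKLOG gets
 * -EBUSY at submission time, then its completion callback fires once with
 * -EINPROGRESS when the request leaves the backlog, and again later with
 * the final result.
 */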

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static void cryptd_type_and_mask(struct crypto_attr_type *algt,
				 u32 *type, u32 *mask)
{
	/*
	 * cryptd is allowed to wrap internal algorithms, but in that case the
	 * resulting cryptd instance will be marked as internal as well.
	 */
	*type = algt->type & CRYPTO_ALG_INTERNAL;
	*mask = algt->mask & CRYPTO_ALG_INTERNAL;

	/* No point in cryptd wrapping an algorithm that's already async. */
	*mask |= CRYPTO_ALG_ASYNC;

	*mask |= crypto_algt_inherited_mask(algt);
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}
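
/*
 * For example, wrapping an algorithm whose cra_driver_name is
 * "sha256-generic" yields an instance named "cryptd(sha256-generic)" with
 * the same cra_name ("sha256") and a priority 50 above that of the
 * wrapped algorithm (names illustrative).
 */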

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child,
				  crypto_skcipher_get_flags(parent) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq = &rctx->req;
	struct crypto_skcipher *child = ctx->child;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq = &rctx->req;
	struct crypto_skcipher *child = ctx->child;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx) +
		     crypto_skcipher_reqsize(cipher));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct crypto_attr_type *algt,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_skcipher_free(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	return crypto_shash_setkey(child, key, keylen);
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

static void cryptd_hash_free(struct ahash_instance *inst)
{
	struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);

	crypto_drop_shash(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
				crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_shash_alg(&ctx->spawn);

	err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
					CRYPTO_ALG_OPTIONAL_KEY));
	inst->alg.halg.digestsize = alg->digestsize;
	inst->alg.halg.statesize = alg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init   = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final  = cryptd_hash_final_enqueue;
	inst->alg.finup  = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(alg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	inst->free = cryptd_hash_free;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_hash_free(inst);
	}
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->aead_spawn);
	kfree(inst);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
			       crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	inst->free = cryptd_aead_free;

	err = aead_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_aead_free(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_HASH:
		return cryptd_create_hash(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, algt, &queue);
	}

	return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.module = THIS_MODULE,
};
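
/*
 * The template can be instantiated through the normal lookup path; a
 * minimal sketch (error handling mostly elided):
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cryptd(cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	// requests on tfm now complete asynchronously from the cryptd
 *	// workqueue
 *	crypto_free_skcipher(tfm);
 */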

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
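
/*
 * A hedged sketch of the intended use, loosely modelled on how SIMD
 * cipher drivers defer to cryptd when they cannot run in the current
 * context (the "__cbc-aes-hypothetical" driver name is made up for
 * illustration):
 *
 *	struct cryptd_skcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_skcipher("__cbc-aes-hypothetical",
 *				     CRYPTO_ALG_INTERNAL,
 *				     CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	// fast path: use cryptd_skcipher_child(ctfm) when SIMD is usable;
 *	// otherwise submit through &ctfm->base to defer to the queue
 */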

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
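
/*
 * Usage mirrors cryptd_alloc_skcipher() above; a hedged sketch (the
 * "__ghash-hypothetical" name is illustrative):
 *
 *	struct cryptd_ahash *chash;
 *
 *	chash = cryptd_alloc_ahash("__ghash-hypothetical",
 *				   CRYPTO_ALG_INTERNAL,
 *				   CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(chash))
 *		return PTR_ERR(chash);
 *	// cryptd_ahash_child()/cryptd_shash_desc() below expose the
 *	// wrapped shash for synchronous fast paths
 */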

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
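
/*
 * The AEAD helpers follow the same pattern; a hedged sketch (driver name
 * illustrative):
 *
 *	struct cryptd_aead *caead;
 *
 *	caead = cryptd_alloc_aead("__gcm-aes-hypothetical",
 *				  CRYPTO_ALG_INTERNAL,
 *				  CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(caead))
 *		return PTR_ERR(caead);
 *	// cryptd_aead_child(caead) returns the wrapped transform;
 *	// cryptd_free_aead(caead) drops the reference taken here
 */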

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;

	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				    1);
	if (!cryptd_wq)
		return -ENOMEM;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		goto err_destroy_wq;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:
	cryptd_fini_queue(&queue);
err_destroy_wq:
	destroy_workqueue(cryptd_wq);
	return err;
}

static void __exit cryptd_exit(void)
{
	destroy_workqueue(cryptd_wq);
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");