// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/hyperhold/hp_core.c
 *
 * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
 */

#define pr_fmt(fmt) "[HYPERHOLD] " fmt

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/version.h>

#include "hyperhold.h"
#include "hp_device.h"
#include "hp_space.h"
#include "hp_iotab.h"

#define HP_DFLT_DEVICE "/dev/by-name/hyperhold"
#define HP_DFLT_EXT_SIZE (1 << 15)
#define HP_DEV_NAME_LEN 256
#define HP_STATE_LEN 10

#define CHECK(cond, ...) ((cond) || (pr_err(__VA_ARGS__), false))
#define CHECK_BOUND(var, min, max) \
	CHECK((var) >= (min) && (var) <= (max), \
		"%s %u out of bounds %u ~ %u!\n", #var, (var), (min), (max))
#define CHECK_INITED CHECK(hyperhold.inited, "hyperhold is not enabled!\n")
#define CHECK_ENABLE (CHECK_INITED && CHECK(hyperhold.enable, "hyperhold is readonly!\n"))

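/*
 * Module-wide state. A single instance backs one block device and one
 * extent space; init_lock serializes enable/disable and sysctl writes.
 */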
struct hyperhold {
	bool enable;
	bool inited;

	char device_name[HP_DEV_NAME_LEN];
	u32 extent_size;
	u32 enable_soft_crypt;

	struct hp_device dev;
	struct hp_space spc;

	struct workqueue_struct *read_wq;
	struct workqueue_struct *write_wq;

	struct mutex init_lock;
};

struct hyperhold hyperhold;

atomic64_t mem_used = ATOMIC64_INIT(0);
#ifdef CONFIG_HYPERHOLD_DEBUG
/*
 * Return the memory overhead of the hyperhold module.
 */
u64 hyperhold_memory_used(void)
{
	return atomic64_read(&mem_used) + hpio_memory() + space_memory();
}
#endif

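/*
 * Disable hyperhold. Writing stops immediately; if the space cannot be
 * drained (non-force), the module stays initialized and read-only. Once
 * drained (or when @force is set), all resources are torn down.
 */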
void hyperhold_disable(bool force)
{
	if (!CHECK_INITED)
		return;
	if (!force && !CHECK_ENABLE)
		return;

	mutex_lock(&hyperhold.init_lock);
	hyperhold.enable = false;
	if (!wait_for_space_empty(&hyperhold.spc, force))
		goto out;
	hyperhold.inited = false;
	wait_for_iotab_empty();
	destroy_workqueue(hyperhold.read_wq);
	destroy_workqueue(hyperhold.write_wq);
	deinit_space(&hyperhold.spc);
	crypto_deinit(&hyperhold.dev);
	unbind_bdev(&hyperhold.dev);
out:
	if (hyperhold.inited)
		pr_info("hyperhold is disabled, read only.\n");
	else
		pr_info("hyperhold is totally disabled!\n");
	mutex_unlock(&hyperhold.init_lock);
}
EXPORT_SYMBOL(hyperhold_disable);

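/*
 * Enable hyperhold: bind the backing block device, set up crypto and the
 * extent space, then spawn the read/write workqueues. The error labels
 * unwind in reverse order on failure.
 */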
void hyperhold_enable(void)
{
	bool enable = true;

	if (hyperhold.inited)
		goto out;

	mutex_lock(&hyperhold.init_lock);
	if (hyperhold.inited)
		goto unlock;
	if (!bind_bdev(&hyperhold.dev, hyperhold.device_name))
		goto err1;
	if (!crypto_init(&hyperhold.dev, hyperhold.enable_soft_crypt))
		goto err2;
	if (!init_space(&hyperhold.spc, hyperhold.dev.dev_size, hyperhold.extent_size))
		goto err3;
	hyperhold.read_wq = alloc_workqueue("hyperhold_read", WQ_HIGHPRI | WQ_UNBOUND, 0);
	if (!hyperhold.read_wq)
		goto err4;
	hyperhold.write_wq = alloc_workqueue("hyperhold_write", 0, 0);
	if (!hyperhold.write_wq)
		goto err5;
	hyperhold.inited = true;
	goto unlock;
err5:
	destroy_workqueue(hyperhold.read_wq);
err4:
	deinit_space(&hyperhold.spc);
err3:
	crypto_deinit(&hyperhold.dev);
err2:
	unbind_bdev(&hyperhold.dev);
err1:
	enable = false;
unlock:
	mutex_unlock(&hyperhold.init_lock);
out:
	if (enable) {
		hyperhold.enable = true;
		pr_info("hyperhold is enabled.\n");
	} else {
		hyperhold.enable = false;
		pr_err("hyperhold enable failed!\n");
	}
}
EXPORT_SYMBOL(hyperhold_enable);

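/*
 * /proc/sys/kernel/hyperhold/enable: write "enable", "disable" or
 * "force_disable" to switch state (memmgr or root only); reads report
 * "enable", "readonly" or "disable".
 */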
static int enable_sysctl_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	const struct cred *cred = current_cred();
	char *filter_buf;

	if (write) {
		if (!uid_eq(cred->euid, GLOBAL_MEMMGR_UID) &&
		    !uid_eq(cred->euid, GLOBAL_ROOT_UID)) {
			pr_err("no permission to enable/disable hyperhold!\n");
			return 0;
		}
		filter_buf = strstrip((char *)buffer);
		if (!strcmp(filter_buf, "enable"))
			hyperhold_enable();
		else if (!strcmp(filter_buf, "disable"))
			hyperhold_disable(false);
		else if (!strcmp(filter_buf, "force_disable"))
			hyperhold_disable(true);
	} else {
		if (*lenp < HP_STATE_LEN || *ppos) {
			*lenp = 0;
			return 0;
		}
		if (hyperhold.enable)
			strcpy(buffer, "enable\n");
		else if (hyperhold.inited)
			strcpy(buffer, "readonly\n");
		else
			strcpy(buffer, "disable\n");
		*lenp = strlen(buffer);
		*ppos += *lenp;
#ifdef CONFIG_HYPERHOLD_DEBUG
		pr_info("hyperhold memory overhead = %llu.\n", hyperhold_memory_used());
#endif
	}
	return 0;
}

static int device_sysctl_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&hyperhold.init_lock);
	if (write && hyperhold.inited) {
		pr_err("hyperhold device is busy!\n");
		ret = -EBUSY;
		goto unlock;
	}
	ret = proc_dostring(table, write, buffer, lenp, ppos);
	if (write && !ret) {
		hyperhold.enable_soft_crypt = 1;
		pr_info("device changed, default enable soft crypt.\n");
	}
unlock:
	mutex_unlock(&hyperhold.init_lock);

	return ret;
}

static int extent_sysctl_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&hyperhold.init_lock);
	if (write && hyperhold.inited) {
		pr_err("hyperhold device is busy!\n");
		ret = -EBUSY;
		goto unlock;
	}
	ret = proc_douintvec(table, write, buffer, lenp, ppos);
unlock:
	mutex_unlock(&hyperhold.init_lock);

	return ret;
}

static int crypto_sysctl_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&hyperhold.init_lock);
	if (write && hyperhold.inited) {
		pr_err("hyperhold device is busy!\n");
		ret = -EBUSY;
		goto unlock;
	}
	ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
unlock:
	mutex_unlock(&hyperhold.init_lock);

	return ret;
}

static struct ctl_table_header *hp_sysctl_header;

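/*
 * The sysctl layout differs across the 6.6 gate used by this file: newer
 * kernels register the table directly under "kernel/hyperhold", while
 * older ones build the kernel -> hyperhold directory chain via .child.
 */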
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)
static struct ctl_table hp_sys_table[] = {
	{
		.procname = "enable",
		.mode = 0666,
		.proc_handler = enable_sysctl_handler,
	},
	{
		.procname = "device",
		.data = &hyperhold.device_name,
		.maxlen = sizeof(hyperhold.device_name),
		.mode = 0644,
		.proc_handler = device_sysctl_handler,
	},
	{
		.procname = "extent_size",
		.data = &hyperhold.extent_size,
		.maxlen = sizeof(hyperhold.extent_size),
		.mode = 0644,
		.proc_handler = extent_sysctl_handler,
	},
	{
		.procname = "soft_crypt",
		.data = &hyperhold.enable_soft_crypt,
		.maxlen = sizeof(hyperhold.enable_soft_crypt),
		.mode = 0644,
		.proc_handler = crypto_sysctl_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{}
};
#else
static struct ctl_table hp_table[] = {
	{
		.procname = "enable",
		.mode = 0666,
		.proc_handler = enable_sysctl_handler,
	},
	{
		.procname = "device",
		.data = &hyperhold.device_name,
		.maxlen = sizeof(hyperhold.device_name),
		.mode = 0644,
		.proc_handler = device_sysctl_handler,
	},
	{
		.procname = "extent_size",
		.data = &hyperhold.extent_size,
		.maxlen = sizeof(hyperhold.extent_size),
		.mode = 0644,
		.proc_handler = extent_sysctl_handler,
	},
	{
		.procname = "soft_crypt",
		.data = &hyperhold.enable_soft_crypt,
		.maxlen = sizeof(hyperhold.enable_soft_crypt),
		.mode = 0644,
		.proc_handler = crypto_sysctl_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{}
};
static struct ctl_table hp_kernel_table[] = {
	{
		.procname = "hyperhold",
		.mode = 0555,
		.child = hp_table,
	},
	{}
};
static struct ctl_table hp_sys_table[] = {
	{
		.procname = "kernel",
		.mode = 0555,
		.child = hp_kernel_table,
	},
	{}
};
#endif

bool is_hyperhold_enable(void)
{
	return hyperhold.enable;
}

static int __init hyperhold_init(void)
{
	strcpy(hyperhold.device_name, HP_DFLT_DEVICE);
	hyperhold.extent_size = HP_DFLT_EXT_SIZE;
	hyperhold.enable_soft_crypt = 1;
	mutex_init(&hyperhold.init_lock);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)
	hp_sysctl_header = register_sysctl("kernel/hyperhold", hp_sys_table);
#else
	hp_sysctl_header = register_sysctl_table(hp_sys_table);
#endif
	if (!hp_sysctl_header) {
		pr_err("register hyperhold sysctl table failed!\n");
		return -EINVAL;
	}

	return 0;
}

static void __exit hyperhold_exit(void)
{
	unregister_sysctl_table(hp_sysctl_header);
	hyperhold_disable(true);
}

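/* replace this func for multi devices */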
static struct hp_space *space_of(u32 eid)
{
	return &hyperhold.spc;
}

/* replace this func for multi devices */
static struct hp_device *device_of(u32 eid)
{
	return &hyperhold.dev;
}

/* replace this func for multi devices */
u32 hyperhold_nr_extent(void)
{
	if (!CHECK_INITED)
		return 0;

	return hyperhold.spc.nr_ext;
}
EXPORT_SYMBOL(hyperhold_nr_extent);

u32 hyperhold_extent_size(u32 eid)
{
	struct hp_space *spc = NULL;

	if (!CHECK_INITED)
		return 0;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return 0;

	return spc->ext_size;
}
EXPORT_SYMBOL(hyperhold_extent_size);

/* replace this func for multi devices */
long hyperhold_address(u32 eid, u32 offset)
{
	struct hp_space *spc = NULL;

	if (!CHECK_INITED)
		return -EINVAL;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return -EINVAL;
	if (!CHECK_BOUND(offset, 0, spc->ext_size - 1))
		return -EINVAL;

	return (u64)eid * spc->ext_size + offset;
}
EXPORT_SYMBOL(hyperhold_address);

/* replace this func for multi devices */
int hyperhold_addr_extent(u64 addr)
{
	struct hp_space *spc = NULL;
	u32 eid;

	if (!CHECK_INITED)
		return -EINVAL;
	eid = div_u64(addr, hyperhold.spc.ext_size);
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return -EINVAL;

	return eid;
}
EXPORT_SYMBOL(hyperhold_addr_extent);

/* replace this func for multi devices */
int hyperhold_addr_offset(u64 addr)
{
	if (!CHECK_INITED)
		return -EINVAL;

	return do_div(addr, hyperhold.spc.ext_size);
}
EXPORT_SYMBOL(hyperhold_addr_offset);

/* replace this func for multi devices */
int hyperhold_alloc_extent(void)
{
	if (!CHECK_ENABLE)
		return -EINVAL;

	return alloc_eid(&hyperhold.spc);
}
EXPORT_SYMBOL(hyperhold_alloc_extent);

void hyperhold_free_extent(u32 eid)
{
	struct hp_space *spc = NULL;

	if (!CHECK_INITED)
		return;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return;

	free_eid(spc, eid);
}
EXPORT_SYMBOL(hyperhold_free_extent);

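/*
 * Free extent @eid unless a read hpio for it is still live in the iotab;
 * in that case, defer the free by attaching hyperhold_free_extent() as
 * the hpio's free_extent callback, run when the last reference is put.
 */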
void hyperhold_should_free_extent(u32 eid)
{
	struct hpio *hpio = NULL;
	struct hp_space *spc = NULL;

	if (!CHECK_INITED)
		return;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return;

	hpio = hpio_get(eid);
	if (!hpio) {
		free_eid(spc, eid);
		return;
	}
	hpio->free_extent = hyperhold_free_extent;
	hpio_put(hpio);
}
EXPORT_SYMBOL(hyperhold_should_free_extent);

/*
 * Allocate an hpio struct for reading/writing the extent @eid. If
 * @new_page is true, fill the hpio with newly allocated pages. Returns
 * NULL on failure.
 */
struct hpio *hyperhold_io_alloc(u32 eid, gfp_t gfp, unsigned int op, bool new_page)
{
	struct hpio *hpio = NULL;
	struct hp_space *spc;
	u32 nr_page;

	if (!CHECK_ENABLE)
		return NULL;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return NULL;

	nr_page = spc->ext_size / PAGE_SIZE;
	hpio = hpio_alloc(nr_page, gfp, op, new_page);
	if (!hpio)
		return NULL;
	hpio->eid = eid;

	return hpio;
}
EXPORT_SYMBOL(hyperhold_io_alloc);

void hyperhold_io_free(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return;
	if (!CHECK(hpio, "hpio is null!\n"))
		return;

	hpio_free(hpio);
}
EXPORT_SYMBOL(hyperhold_io_free);

/*
 * Find an existing read hpio for extent @eid in the iotab and take a
 * reference; if none exists, allocate a new hpio and insert it.
 */
struct hpio *hyperhold_io_get(u32 eid, gfp_t gfp, unsigned int op)
{
	struct hp_space *spc = NULL;
	u32 nr_page;

	if (!CHECK_INITED)
		return NULL;
	spc = space_of(eid);
	if (!CHECK(spc, "invalid eid %u!\n", eid))
		return NULL;

	nr_page = spc->ext_size / PAGE_SIZE;
	return hpio_get_alloc(eid, nr_page, gfp, op);
}
EXPORT_SYMBOL(hyperhold_io_get);

bool hyperhold_io_put(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return false;
	if (!CHECK(hpio, "hpio is null!\n"))
		return false;

	return hpio_put(hpio);
}
EXPORT_SYMBOL(hyperhold_io_put);

/*
 * Notify all threads waiting for this hpio.
 */
void hyperhold_io_complete(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return;
	if (!CHECK(hpio, "hpio is null!\n"))
		return;

	hpio_complete(hpio);
}
EXPORT_SYMBOL(hyperhold_io_complete);

void hyperhold_io_wait(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return;
	if (!CHECK(hpio, "hpio is null!\n"))
		return;

	hpio_wait(hpio);
}
EXPORT_SYMBOL(hyperhold_io_wait);

bool hyperhold_io_success(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return false;
	if (!CHECK(hpio, "hpio is null!\n"))
		return false;

	return hpio_get_state(hpio) == HPIO_DONE;
}
EXPORT_SYMBOL(hyperhold_io_success);

int hyperhold_io_extent(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return -EINVAL;
	if (!CHECK(hpio, "hpio is null!\n"))
		return -EINVAL;

	return hpio->eid;
}
EXPORT_SYMBOL(hyperhold_io_extent);

int hyperhold_io_operate(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return -EINVAL;
	if (!CHECK(hpio, "hpio is null!\n"))
		return -EINVAL;

	return hpio->op;
}
EXPORT_SYMBOL(hyperhold_io_operate);

struct page *hyperhold_io_page(struct hpio *hpio, u32 index)
{
	if (!CHECK_INITED)
		return NULL;
	if (!CHECK(hpio, "hpio is null!\n"))
		return NULL;
	if (!CHECK_BOUND(index, 0, hpio->nr_page - 1))
		return NULL;

	return hpio->pages[index];
}
EXPORT_SYMBOL(hyperhold_io_page);

bool hyperhold_io_add_page(struct hpio *hpio, u32 index, struct page *page)
{
	if (!CHECK_INITED)
		return false;
	if (!CHECK(hpio, "hpio is null!\n"))
		return false;
	if (!CHECK(page, "page is null!\n"))
		return false;
	if (!CHECK_BOUND(index, 0, hpio->nr_page - 1))
		return false;

	get_page(page);
	atomic64_add(PAGE_SIZE, &mem_used);
	BUG_ON(hpio->pages[index]);
	hpio->pages[index] = page;

	return true;
}
EXPORT_SYMBOL(hyperhold_io_add_page);

u32 hyperhold_io_nr_page(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return 0;
	if (!CHECK(hpio, "hpio is null!\n"))
		return 0;

	return hpio->nr_page;
}
EXPORT_SYMBOL(hyperhold_io_nr_page);

void *hyperhold_io_private(struct hpio *hpio)
{
	if (!CHECK_INITED)
		return NULL;
	if (!CHECK(hpio, "hpio is null!\n"))
		return NULL;

	return hpio->private;
}
EXPORT_SYMBOL(hyperhold_io_private);

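/*
 * Return the page to place in the bio for @page. Without a soft-crypt
 * tfm this is @page itself (with an extra reference); otherwise a bounce
 * page is allocated, and for writes the data is encrypted into it here,
 * while reads are decrypted later in hp_endio_work().
 */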
static struct page *get_encrypted_page(struct hp_device *dev, struct page *page, unsigned int op)
{
	struct page *encrypted_page = NULL;

	if (!dev->ctfm) {
		encrypted_page = page;
		get_page(encrypted_page);
		goto out;
	}

	encrypted_page = alloc_page(GFP_NOIO);
	if (!encrypted_page) {
		pr_err("alloc encrypted page failed!\n");
		goto out;
	}
	encrypted_page->index = page->index;

	/* just alloc a new page for read */
	if (!op_is_write(op))
		goto out;

	/* encrypt page for write */
	if (soft_crypt_page(dev->ctfm, encrypted_page, page, HP_DEV_ENCRYPT)) {
		put_page(encrypted_page);
		encrypted_page = NULL;
	}
out:
	return encrypted_page;
}

static void put_encrypted_pages(struct bio *bio)
{
	struct bio_vec *bv = NULL;
	struct bvec_iter_all iter;

	bio_for_each_segment_all(bv, bio, iter)
		put_page(bv->bv_page);
}

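/*
 * Deferred completion: for reads, soft-decrypt each bounce page back
 * into the caller's pages before releasing the bio and firing the endio
 * callback.
 */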
static void hp_endio_work(struct work_struct *work)
{
	struct hpio *hpio = container_of(work, struct hpio, endio_work);
	struct hp_device *dev = NULL;
	struct bio_vec *bv = NULL;
	struct bvec_iter_all iter;
	struct page *page = NULL;
	int i;

	if (op_is_write(hpio->op))
		goto endio;
	dev = device_of(hpio->eid);
	i = 0;
	bio_for_each_segment_all(bv, hpio->bio, iter) {
		page = bv->bv_page;
		BUG_ON(i >= hpio->nr_page);
		BUG_ON(!hpio->pages[i]);
		if (dev->ctfm)
			BUG_ON(soft_crypt_page(dev->ctfm, hpio->pages[i], page, HP_DEV_DECRYPT));
		i++;
	}
endio:
	put_encrypted_pages(hpio->bio);
	bio_put(hpio->bio);
	if (hpio->endio)
		hpio->endio(hpio);
}

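/*
 * bi_end_io may run in atomic context, so only record the status here
 * and push decryption and callbacks onto the read/write workqueue.
 */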
static void hpio_endio(struct bio *bio)
{
	struct hpio *hpio = bio->bi_private;
	struct workqueue_struct *wq = NULL;

	pr_info("hpio %p for eid %u returned %d.\n",
			hpio, hpio->eid, bio->bi_status);
	hpio_set_state(hpio, bio->bi_status ? HPIO_FAIL : HPIO_DONE);
	wq = op_is_write(hpio->op) ? hyperhold.write_wq : hyperhold.read_wq;
	queue_work(wq, &hpio->endio_work);
	atomic64_sub(sizeof(struct bio), &mem_used);
}

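/*
 * Build and submit one bio covering the whole extent: map the hpio pages
 * (bounced through get_encrypted_page() when soft crypt is on), position
 * it at eid * ext_size on the device, and attach inline crypto if the
 * device carries a blk_key.
 */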
static int hpio_submit(struct hpio *hpio)
{
	struct hp_device *dev = NULL;
	struct bio *bio = NULL;
	struct page *page = NULL;
	u32 ext_size;
	sector_t sec;
	int i;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)
	dev = device_of(hpio->eid);
	bio = bio_alloc(dev->bdev, BIO_MAX_VECS, hpio->op, GFP_NOIO);
#else
	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
#endif
	if (!bio) {
		pr_err("bio alloc failed!\n");
		return -ENOMEM;
	}
	atomic64_add(sizeof(struct bio), &mem_used);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)
	bio->bi_opf = hpio->op;
#else
	dev = device_of(hpio->eid);
	bio_set_op_attrs(bio, hpio->op, 0);
#endif
	bio_set_dev(bio, dev->bdev);

	ext_size = space_of(hpio->eid)->ext_size;
	sec = div_u64((u64)hpio->eid * ext_size, dev->sec_size);
	bio->bi_iter.bi_sector = sec;
	for (i = 0; i < hpio->nr_page; i++) {
		if (!hpio->pages[i])
			break;
		hpio->pages[i]->index = sec;
		page = get_encrypted_page(dev, hpio->pages[i], hpio->op);
		if (!page)
			goto err;
		if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
			put_page(page);
			goto err;
		}
		sec += PAGE_SIZE / dev->sec_size;
	}

	if (dev->blk_key)
		inline_crypt_bio(dev->blk_key, bio);
	bio->bi_private = hpio;
	bio->bi_end_io = hpio_endio;
	hpio->bio = bio;
	submit_bio(bio);
	pr_info("submit hpio %p for eid %u.\n", hpio, hpio->eid);

	return 0;
err:
	put_encrypted_pages(bio);
	bio_put(bio);
	atomic64_sub(sizeof(struct bio), &mem_used);
	return -EIO;
}

static int rw_extent_async(struct hpio *hpio, hp_endio endio, void *priv, unsigned int op)
{
	int ret = 0;

	if (!hpio_change_state(hpio, HPIO_INIT, HPIO_SUBMIT))
		return -EAGAIN;

	hpio->private = priv;
	hpio->endio = endio;
	INIT_WORK(&hpio->endio_work, hp_endio_work);

	ret = hpio_submit(hpio);
	if (ret) {
		hpio_set_state(hpio, HPIO_FAIL);
		hpio_complete(hpio);
	}

	return ret;
}

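/*
 * A minimal write-path sketch for callers of this API (assuming the
 * caller supplies its own pages; my_endio/my_priv are caller-defined and
 * error checks are elided for brevity):
 *
 *	int eid = hyperhold_alloc_extent();
 *	struct hpio *hpio = hyperhold_io_alloc(eid, GFP_NOIO, REQ_OP_WRITE, false);
 *
 *	hyperhold_io_add_page(hpio, 0, page);	// one page per extent slot
 *	hyperhold_write_async(hpio, my_endio, my_priv);
 *	hyperhold_io_wait(hpio);		// or rely on my_endio
 *	if (!hyperhold_io_success(hpio))
 *		hyperhold_should_free_extent(eid);
 *	hyperhold_io_put(hpio);
 */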
int hyperhold_write_async(struct hpio *hpio, hp_endio endio, void *priv)
{
	if (!CHECK_ENABLE) {
		hpio_set_state(hpio, HPIO_FAIL);
		hpio_complete(hpio);
		return -EINVAL;
	}

	BUG_ON(!op_is_write(hpio->op));

	return rw_extent_async(hpio, endio, priv, REQ_OP_WRITE);
}
EXPORT_SYMBOL(hyperhold_write_async);

int hyperhold_read_async(struct hpio *hpio, hp_endio endio, void *priv)
{
	if (!CHECK_INITED) {
		hpio_set_state(hpio, HPIO_FAIL);
		hpio_complete(hpio);
		return -EINVAL;
	}

	if (op_is_write(hpio->op))
		return -EAGAIN;

	return rw_extent_async(hpio, endio, priv, REQ_OP_READ);
}
EXPORT_SYMBOL(hyperhold_read_async);

module_init(hyperhold_init);
module_exit(hyperhold_exit);

MODULE_LICENSE("GPL");