/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS	32768
#define DEFAULT_JOURNAL_SIZE_FACTOR	7
#define DEFAULT_SECTORS_PER_BITMAP_BIT	32768
#define DEFAULT_BUFFER_SECTORS		128
#define DEFAULT_JOURNAL_WATERMARK	50
#define DEFAULT_SYNC_MSEC		10000
#define DEFAULT_MAX_JOURNAL_SECTORS	(IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192)
#define MIN_LOG2_INTERLEAVE_SECTORS	3
#define MAX_LOG2_INTERLEAVE_SECTORS	31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16
#define RECALC_SECTORS			(IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048)
#define RECALC_WRITE_SUPER		16
#define BITMAP_BLOCK_SIZE		4096	/* don't change it */
#define BITMAP_FLUSH_INTERVAL		(10 * HZ)
#define DISCARD_FILLER			0xf6
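
/*
 * For illustration: with the defaults above, each interleaved data area
 * covers 32768 sectors (16 MiB with 512-byte sectors); when no journal
 * size is specified, it is sized at roughly 1/128 of the device
 * (DEFAULT_JOURNAL_SIZE_FACTOR = 7), capped at DEFAULT_MAX_JOURNAL_SECTORS.
 */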

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC			"integrt"
#define SB_VERSION_1			1
#define SB_VERSION_2			2
#define SB_VERSION_3			3
#define SB_VERSION_4			4
#define SB_SECTORS			8
#define MAX_SECTORS_PER_BLOCK		8

struct superblock {
	__u8 magic[8];
	__u8 version;
	__u8 log2_interleave_sectors;
	__u16 integrity_tag_size;
	__u32 journal_sections;
	__u64 provided_data_sectors;	/* userspace uses this value */
	__u32 flags;
	__u8 log2_sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;
	__u8 pad[2];
	__u64 recalc_sector;
};

#define SB_FLAG_HAVE_JOURNAL_MAC	0x1
#define SB_FLAG_RECALCULATING		0x2
#define SB_FLAG_DIRTY_BITMAP		0x4
#define SB_FLAG_FIXED_PADDING		0x8

#define	JOURNAL_ENTRY_ROUNDUP		8

typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR		8

struct journal_entry {
	union {
		struct {
			__u32 sector_lo;
			__u32 sector_hi;
		} s;
		__u64 sector;
	} u;
	commit_id_t last_bytes[];
	/* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je)		((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)		do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x)		do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
#define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je)		((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)		do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)		((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je)	do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)

#define JOURNAL_BLOCK_SECTORS		8
#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
	__u8 mac[JOURNAL_MAC_PER_SECTOR];
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))
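
/*
 * Worked example: with 512-byte sectors, JOURNAL_SECTOR_DATA is
 * 512 - 8 = 504 bytes; subtracting the 8-byte per-sector MAC and the
 * journal_entry header (8-byte sector union + 8 * 8-byte last_bytes =
 * 72 bytes) gives MAX_TAG_SIZE = 424 bytes.
 */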

#define METADATA_PADDING_SECTORS	8

#define N_COMMIT_IDS			4

static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}
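
/*
 * E.g. the commit sequence numbers cycle 0 -> 1 -> 2 -> 3 -> 0, so
 * next_commit_seq(3) == 0 and prev_commit_seq(0) == 3.
 */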

/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct alg_spec {
	char *alg_string;
	char *key_string;
	__u8 *key;
	unsigned key_size;
};

struct dm_integrity_c {
	struct dm_dev *dev;
	struct dm_dev *meta_dev;
	unsigned tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned journal_pages;
	unsigned n_bitmap_blocks;

	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;
	struct page_list *recalc_bitmap;
	struct page_list *may_write_bitmap;
	struct bitmap_block_status *bbs;
	unsigned bitmap_flush_interval;
	int synchronous_mode;
	struct bio_list synchronous_bios;
	struct delayed_work bitmap_flush_work;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned journal_sections;
	unsigned journal_entries;
	sector_t data_device_sectors;
	sector_t meta_device_sectors;
	unsigned initial_sectors;
	unsigned metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;

	unsigned char mode;

	int failed;

	struct crypto_shash *internal_hash;

	struct dm_target *ti;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	struct list_head wait_list;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;
	struct workqueue_struct *offload_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned committed_section;
	unsigned n_committed_sections;

	unsigned uncommitted_section;
	unsigned n_uncommitted_sections;

	unsigned free_section;
	unsigned char free_section_entry;
	unsigned free_sectors;

	unsigned free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct workqueue_struct *recalc_wq;
	struct work_struct recalc_work;
	u8 *recalc_buffer;
	u8 *recalc_tags;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool wrote_to_journal;
	bool journal_uptodate;
	bool just_formatted;
	bool recalculate_flag;
	bool discard;
	bool fix_padding;
	bool legacy_recalculate;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;

	atomic64_t number_of_mismatches;

	struct notifier_block reboot_notifier;
};

struct dm_integrity_range {
	sector_t logical_sector;
	sector_t n_sectors;
	bool waiting;
	union {
		struct rb_node node;
		struct {
			struct task_struct *task;
			struct list_head wait_entry;
		};
	};
};

struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	enum req_opf op;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned metadata_offset;

	atomic_t in_flight;
	blk_status_t bi_status;

	struct completion *completion;

	struct dm_bio_details bio_details;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

struct bitmap_block_status {
	struct work_struct work;
	struct dm_integrity_c *ic;
	unsigned idx;
	unsigned long *bitmap;
	struct bio_list bio_queue;
	spinlock_t bio_queue_lock;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL	32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)	printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
	va_list args;
	va_start(args, msg);
	vprintk(msg, args);
	va_end(args);
	if (len)
		pr_cont(":");
	while (len) {
		pr_cont(" %02x", *bytes);
		bytes++;
		len--;
	}
	pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...)	__DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...)			do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
#endif

static void dm_integrity_prepare(struct request *rq)
{
}

static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}

/*
 * DM Integrity profile, protection is performed at the layer above (dm-crypt)
 */
static const struct blk_integrity_profile dm_integrity_profile = {
	.name			= "DM-DIF-EXT-TAG",
	.generate_fn		= NULL,
	.verify_fn		= NULL,
	.prepare_fn		= dm_integrity_prepare,
	.complete_fn		= dm_integrity_complete,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (err == -EILSEQ)
		atomic64_inc(&ic->number_of_mismatches);
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return READ_ONCE(ic->failed);
}

static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
{
	if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) &&
	    !ic->legacy_recalculate)
		return true;
	return false;
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
					  unsigned j, unsigned char seq)
{
	/*
	 * Xor the number with the section and sector, so that if a piece of
	 * journal is written at the wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}

static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	if (!ic->meta_dev) {
		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
		*area = data_sector >> log2_interleave_sectors;
		*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
	} else {
		*area = 0;
		*offset = data_sector;
	}
}
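
/*
 * Worked example: with log2_interleave_sectors == 15 (32768-sector
 * areas) and no separate metadata device, data_sector 100000 maps to
 * area = 100000 >> 15 = 3 and offset = 100000 & 32767 = 1696.
 */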

#define sector_to_block(ic, n)						\
do {									\
	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
	(n) >>= (ic)->sb->log2_sectors_per_block;			\
} while (0)

static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned *metadata_offset)
{
	__u64 ms;
	unsigned mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}

static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	if (ic->meta_dev)
		return offset;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	result += ic->start;

	return result;
}
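
/*
 * Sketch of the interleaved layout this arithmetic implies (no separate
 * metadata device):
 *
 *   [sb + initial sectors][metadata 0][data 0][metadata 1][data 1]...
 *
 * Area N's data therefore starts after N interleave-sized data areas
 * plus N + 1 metadata runs, offset by ic->start.
 */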

static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
		ic->sb->version = SB_VERSION_4;
	else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
		ic->sb->version = SB_VERSION_3;
	else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
		ic->sb->version = SB_VERSION_2;
	else
		ic->sb->version = SB_VERSION_1;
}

static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	if (op == REQ_OP_WRITE)
		sb_set_version(ic);

	return dm_io(&io_req, 1, &io_loc, NULL);
}

#define BITMAP_OP_TEST_ALL_SET		0
#define BITMAP_OP_TEST_ALL_CLEAR	1
#define BITMAP_OP_SET			2
#define BITMAP_OP_CLEAR			3

static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
			    sector_t sector, sector_t n_sectors, int mode)
{
	unsigned long bit, end_bit, this_end_bit, page, end_page;
	unsigned long *data;

	if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
		DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
			sector,
			n_sectors,
			ic->sb->log2_sectors_per_block,
			ic->log2_blocks_per_bitmap_bit,
			mode);
		BUG();
	}

	if (unlikely(!n_sectors))
		return true;

	bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	end_bit = (sector + n_sectors - 1) >>
		(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

	page = bit / (PAGE_SIZE * 8);
	bit %= PAGE_SIZE * 8;

	end_page = end_bit / (PAGE_SIZE * 8);
	end_bit %= PAGE_SIZE * 8;

repeat:
	if (page < end_page) {
		this_end_bit = PAGE_SIZE * 8 - 1;
	} else {
		this_end_bit = end_bit;
	}

	data = lowmem_page_address(bitmap[page].page);

	if (mode == BITMAP_OP_TEST_ALL_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != -1)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (!test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != 0)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = -1;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__set_bit(bit, data);
			bit++;
		}
	} else if (mode == BITMAP_OP_CLEAR) {
		if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
			clear_page(data);
		else while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = 0;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__clear_bit(bit, data);
			bit++;
		}
	} else {
		BUG();
	}

	if (unlikely(page < end_page)) {
		bit = 0;
		page++;
		goto repeat;
	}

	return true;
}
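
/*
 * Granularity example: one bitmap bit covers
 * 1 << (log2_sectors_per_block + log2_blocks_per_bitmap_bit) sectors;
 * e.g. 8-sector blocks with 4096 blocks per bit track 32768 sectors
 * (DEFAULT_SECTORS_PER_BITMAP_BIT) per bit.
 */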

static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
	unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
	unsigned i;

	for (i = 0; i < n_bitmap_pages; i++) {
		unsigned long *dst_data = lowmem_page_address(dst[i].page);
		unsigned long *src_data = lowmem_page_address(src[i].page);
		copy_page(dst_data, src_data);
	}
}

static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
	unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
	return &ic->bbs[bitmap_block];
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
		       function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			       unsigned *pl_index, unsigned *pl_offset)
{
	unsigned sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned section, unsigned offset, unsigned *n_sectors)
{
	unsigned pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	unsigned rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}
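
/*
 * A journal section therefore begins with JOURNAL_BLOCK_SECTORS (8)
 * sectors holding the packed journal entries (entry n sits in sector
 * n % 8 at slot n / 8), followed by the journaled data blocks; e.g.
 * with 8-sector blocks, data block 0 occupies in-section sectors 8..15.
 */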

static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned j, size;

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);
		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[HASH_MAX_DIGESTSIZE];

		if (WARN_ON(size > sizeof(digest))) {
			dm_integrity_io_error(ic, "digest_size", -EINVAL);
			goto err;
		}
		r = crypto_shash_final(desc, digest);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}

static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
		}
	}
}

static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;
	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}

static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			unsigned n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned dummy;
			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			if (!n_sections)
				break;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}

static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
	struct journal_completion *comp = req->data;
	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}

static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			  unsigned n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			    unsigned n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;
	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
			       unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}

static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
		       unsigned n_sections, struct journal_completion *comp)
{
	unsigned sector, n_sectors;

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
}

static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned i;

	io_comp.ic = ic;
	init_completion(&io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
			   commit_sections, &io_comp);
	} else {
		unsigned to_end;
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				reinit_completion(&crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				init_completion(&crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}

static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = REQ_OP_WRITE;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}

static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
}
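
/*
 * Half-open interval test: e.g. [0, 8) and [4, 12) overlap,
 * while [0, 8) and [8, 16) do not.
 */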

static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

	if (likely(check_waiting)) {
		struct dm_integrity_range *range;
		list_for_each_entry(range, &ic->wait_list, wait_entry) {
			if (unlikely(ranges_overlap(range, new_range)))
				return false;
		}
	}

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
			n = &range->node.rb_left;
		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
			n = &range->node.rb_right;
		} else {
			return false;
		}
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	while (unlikely(!list_empty(&ic->wait_list))) {
		struct dm_integrity_range *last_range =
			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
		struct task_struct *last_range_task;
		last_range_task = last_range->task;
		list_del(&last_range->wait_entry);
		if (!add_new_range(ic, last_range, false)) {
			last_range->task = last_range_task;
			list_add(&last_range->wait_entry, &ic->wait_list);
			break;
		}
		last_range->waiting = false;
		wake_up_process(last_range_task);
	}
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	new_range->waiting = true;
	list_add_tail(&new_range->wait_entry, &ic->wait_list);
	new_range->task = current;
	do {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ic->endio_wait.lock);
		io_schedule();
		spin_lock_irq(&ic->endio_wait.lock);
	} while (unlikely(new_range->waiting));
}

static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	if (unlikely(!add_new_range(ic, new_range, true)))
		wait_and_add_new_range(ic, new_range);
}

static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;
		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}

#define NOT_FOUND	(-1U)

static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned found = NOT_FOUND;
	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);
		if (sector == j->sector) {
			found = j - ic->journal_tree;
		}
		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else {
			n = j->node.rb_right;
		}
	}

	return found;
}
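
/*
 * Duplicate sectors are linked to the right in add_journal_node, so on
 * a match this search keeps descending right and returns the rightmost
 * (most recently added) entry for the sector.
 */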

static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}

#define TAG_READ	0
#define TAG_WRITE	1
#define TAG_CMP		2

static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned *metadata_offset, unsigned total_size, int op)
{
#define MAY_BE_FILLER		1
#define MAY_BE_HASH		2
	unsigned hash_offset = 0;
	unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);

	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (IS_ERR(data))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			memcpy(dp, tag, to_copy);
			dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
		} else {
			/* e.g.: op == TAG_CMP */

			if (likely(is_power_of_2(ic->tag_size))) {
				if (unlikely(memcmp(dp, tag, to_copy)) &&
				    (unlikely(!ic->discard) ||
				     unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)))
					goto thorough_test;
			} else {
				unsigned i, ts;
thorough_test:
				ts = total_size;

				for (i = 0; i < to_copy; i++, ts--) {
					if (unlikely(dp[i] != tag[i]))
						may_be &= ~MAY_BE_HASH;
					if (likely(dp[i] != DISCARD_FILLER))
						may_be &= ~MAY_BE_FILLER;
					hash_offset++;
					if (unlikely(hash_offset == ic->tag_size)) {
						if (unlikely(!may_be)) {
							dm_bufio_release(b);
							return ts;
						}
						hash_offset = 0;
						may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
					}
				}
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}

		if (unlikely(!is_power_of_2(ic->tag_size))) {
			hash_offset = (hash_offset + to_copy) % ic->tag_size;
		}

		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
#undef MAY_BE_FILLER
#undef MAY_BE_HASH
}

struct flush_request {
	struct dm_io_request io_req;
	struct dm_io_region io_reg;
	struct dm_integrity_c *ic;
	struct completion comp;
};

static void flush_notify(unsigned long error, void *fr_)
{
	struct flush_request *fr = fr_;
	if (unlikely(error != 0))
		dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
	complete(&fr->comp);
}

static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
{
	int r;

	struct flush_request fr;

	if (!ic->meta_dev)
		flush_data = false;
	if (flush_data) {
		fr.io_req.bi_op = REQ_OP_WRITE;
		fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC;
		fr.io_req.mem.type = DM_IO_KMEM;
		fr.io_req.mem.ptr.addr = NULL;
		fr.io_req.notify.fn = flush_notify;
		fr.io_req.notify.context = &fr;
		fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio);
		fr.io_reg.bdev = ic->dev->bdev;
		fr.io_reg.sector = 0;
		fr.io_reg.count = 0;
		fr.ic = ic;
		init_completion(&fr.comp);
		r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
		BUG_ON(r);
	}

	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);

	if (flush_data)
		wait_for_completion(&fr.comp);
}

static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);
	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}

static void autocommit_fn(struct timer_list *t)
{
	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}

static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}

static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);

	queue_work(ic->commit_wq, &ic->commit_work);
}

static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_status)
		bio->bi_status = errno_to_blk_status(r);
	if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
		unsigned long flags;
		spin_lock_irqsave(&ic->endio_wait.lock, flags);
		bio_list_add(&ic->synchronous_bios, bio);
		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
		return;
	}
	bio_endio(bio);
}

static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}

static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

		if (unlikely(dio->bi_status) && !bio->bi_status)
			bio->bi_status = dio->bi_status;
		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->offload_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}

static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	dm_bio_restore(&dio->bio_details, bio);
	if (bio->bi_integrity)
		bio->bi_opf |= REQ_INTEGRITY;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}

static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__u64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned digest_size;

	req->tfm = ic->internal_hash;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* this shouldn't happen anyway, the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
}
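
/*
 * The tag is a hash over the little-endian sector number followed by
 * the sector data; on the (unexpected) failure path the result is
 * filled with random bytes so that a later verification deliberately
 * mismatches instead of silently passing.
 */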
1588 
integrity_metadata(struct work_struct *w)1589 static void integrity_metadata(struct work_struct *w)
1590 {
1591 	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1592 	struct dm_integrity_c *ic = dio->ic;
1593 
1594 	int r;
1595 
1596 	if (ic->internal_hash) {
1597 		struct bvec_iter iter;
1598 		struct bio_vec bv;
1599 		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1600 		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1601 		char *checksums;
1602 		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
1603 		char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1604 		sector_t sector;
1605 		unsigned sectors_to_process;
1606 
1607 		if (unlikely(ic->mode == 'R'))
1608 			goto skip_io;
1609 
1610 		if (likely(dio->op != REQ_OP_DISCARD))
1611 			checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
1612 					    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1613 		else
1614 			checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1615 		if (!checksums) {
1616 			checksums = checksums_onstack;
1617 			if (WARN_ON(extra_space &&
1618 				    digest_size > sizeof(checksums_onstack))) {
1619 				r = -EINVAL;
1620 				goto error;
1621 			}
1622 		}
1623 
1624 		if (unlikely(dio->op == REQ_OP_DISCARD)) {
1625 			sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
1626 			unsigned bi_size = dio->bio_details.bi_iter.bi_size;
1627 			unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
1628 			unsigned max_blocks = max_size / ic->tag_size;
1629 			memset(checksums, DISCARD_FILLER, max_size);
1630 
1631 			while (bi_size) {
1632 				unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1633 				this_step_blocks = min(this_step_blocks, max_blocks);
1634 				r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1635 							this_step_blocks * ic->tag_size, TAG_WRITE);
1636 				if (unlikely(r)) {
1637 					if (likely(checksums != checksums_onstack))
1638 						kfree(checksums);
1639 					goto error;
1640 				}
1641 
1642 				/*if (bi_size < this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block)) {
1643 					printk("BUGG: bi_sector: %llx, bi_size: %u\n", bi_sector, bi_size);
1644 					printk("BUGG: this_step_blocks: %u\n", this_step_blocks);
1645 					BUG();
1646 				}*/
1647 				bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1648 				bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
1649 			}
1650 
1651 			if (likely(checksums != checksums_onstack))
1652 				kfree(checksums);
1653 			goto skip_io;
1654 		}
1655 
1656 		sector = dio->range.logical_sector;
1657 		sectors_to_process = dio->range.n_sectors;
1658 
1659 		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
1660 			struct bio_vec bv_copy = bv;
1661 			unsigned pos;
1662 			char *mem, *checksums_ptr;
1663 
1664 again:
1665 			mem = (char *)kmap_atomic(bv_copy.bv_page) + bv_copy.bv_offset;
1666 			pos = 0;
1667 			checksums_ptr = checksums;
1668 			do {
1669 				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
1670 				checksums_ptr += ic->tag_size;
1671 				sectors_to_process -= ic->sectors_per_block;
1672 				pos += ic->sectors_per_block << SECTOR_SHIFT;
1673 				sector += ic->sectors_per_block;
1674 			} while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
1675 			kunmap_atomic(mem);
1676 
1677 			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1678 						checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
1679 			if (unlikely(r)) {
1680 				if (r > 0) {
1681 					char b[BDEVNAME_SIZE];
1682 					DMERR_LIMIT("%s: Checksum failed at sector 0x%llx", bio_devname(bio, b),
1683 						    (sector - ((r + ic->tag_size - 1) / ic->tag_size)));
1684 					r = -EILSEQ;
1685 					atomic64_inc(&ic->number_of_mismatches);
1686 				}
1687 				if (likely(checksums != checksums_onstack))
1688 					kfree(checksums);
1689 				goto error;
1690 			}
1691 
1692 			if (!sectors_to_process)
1693 				break;
1694 
1695 			if (unlikely(pos < bv_copy.bv_len)) {
1696 				bv_copy.bv_offset += pos;
1697 				bv_copy.bv_len -= pos;
1698 				goto again;
1699 			}
1700 		}
1701 
1702 		if (likely(checksums != checksums_onstack))
1703 			kfree(checksums);
1704 	} else {
1705 		struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
1706 
1707 		if (bip) {
1708 			struct bio_vec biv;
1709 			struct bvec_iter iter;
1710 			unsigned data_to_process = dio->range.n_sectors;
1711 			sector_to_block(ic, data_to_process);
1712 			data_to_process *= ic->tag_size;
1713 
1714 			bip_for_each_vec(biv, bip, iter) {
1715 				unsigned char *tag;
1716 				unsigned this_len;
1717 
1718 				BUG_ON(PageHighMem(biv.bv_page));
1719 				tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1720 				this_len = min(biv.bv_len, data_to_process);
1721 				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
1722 							this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
1723 				if (unlikely(r))
1724 					goto error;
1725 				data_to_process -= this_len;
1726 				if (!data_to_process)
1727 					break;
1728 			}
1729 		}
1730 	}
1731 skip_io:
1732 	dec_in_flight(dio);
1733 	return;
1734 error:
1735 	dio->bi_status = errno_to_blk_status(r);
1736 	dec_in_flight(dio);
1737 }
1738 
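/*
 * The target's map callback: split discards on max_io_len boundaries,
 * divert flush bios to submit_flush_bio(), strip REQ_FUA (a full cache
 * flush is issued instead), validate the range, the block alignment and
 * the presence or absence of an external integrity payload, then remap
 * the sector and continue in dm_integrity_map_continue().
 */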
1739 static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
1740 {
1741 	struct dm_integrity_c *ic = ti->private;
1742 	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1743 	struct bio_integrity_payload *bip;
1744 
1745 	sector_t area, offset;
1746 
1747 	dio->ic = ic;
1748 	dio->bi_status = 0;
1749 	dio->op = bio_op(bio);
1750 
1751 	if (unlikely(dio->op == REQ_OP_DISCARD)) {
1752 		if (ti->max_io_len) {
1753 			sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
1754 			unsigned log2_max_io_len = __fls(ti->max_io_len);
1755 			sector_t start_boundary = sec >> log2_max_io_len;
1756 			sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
1757 			if (start_boundary < end_boundary) {
1758 				sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
1759 				dm_accept_partial_bio(bio, len);
1760 			}
1761 		}
1762 	}
1763 
1764 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1765 		submit_flush_bio(ic, dio);
1766 		return DM_MAPIO_SUBMITTED;
1767 	}
1768 
1769 	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1770 	dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
1771 	if (unlikely(dio->fua)) {
1772 		/*
1773 		 * Don't pass down the FUA flag because we have to flush
1774 		 * the disk cache anyway.
1775 		 */
1776 		bio->bi_opf &= ~REQ_FUA;
1777 	}
1778 	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1779 		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
1780 		      dio->range.logical_sector, bio_sectors(bio),
1781 		      ic->provided_data_sectors);
1782 		return DM_MAPIO_KILL;
1783 	}
1784 	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
1785 		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
1786 		      ic->sectors_per_block,
1787 		      dio->range.logical_sector, bio_sectors(bio));
1788 		return DM_MAPIO_KILL;
1789 	}
1790 
1791 	if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
1792 		struct bvec_iter iter;
1793 		struct bio_vec bv;
1794 		bio_for_each_segment(bv, bio, iter) {
1795 			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
1796 				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
1797 					bv.bv_offset, bv.bv_len, ic->sectors_per_block);
1798 				return DM_MAPIO_KILL;
1799 			}
1800 		}
1801 	}
1802 
1803 	bip = bio_integrity(bio);
1804 	if (!ic->internal_hash) {
1805 		if (bip) {
1806 			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
1807 			if (ic->log2_tag_size >= 0)
1808 				wanted_tag_size <<= ic->log2_tag_size;
1809 			else
1810 				wanted_tag_size *= ic->tag_size;
1811 			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
1812 				DMERR("Invalid integrity data size %u, expected %u",
1813 				      bip->bip_iter.bi_size, wanted_tag_size);
1814 				return DM_MAPIO_KILL;
1815 			}
1816 		}
1817 	} else {
1818 		if (unlikely(bip != NULL)) {
1819 			DMERR("Unexpected integrity data when using internal hash");
1820 			return DM_MAPIO_KILL;
1821 		}
1822 	}
1823 
1824 	if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
1825 		return DM_MAPIO_KILL;
1826 
1827 	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1828 	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1829 	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
1830 
1831 	dm_integrity_map_continue(dio, true);
1832 	return DM_MAPIO_SUBMITTED;
1833 }
1834 
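/*
 * Copy bio data to or from the in-memory journal. On writes, the data is
 * copied into the journal; the last 8 bytes of each sector, which will be
 * overwritten by the commit id when the section is committed, are saved
 * in je->last_bytes[], the tag is computed (or taken from the bio
 * integrity payload), and setting the entry's sector number publishes the
 * entry. On reads, the data is copied back with last_bytes[] restored.
 * Returns true if the bio extends past the allocated entries and the
 * caller must allocate again.
 */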
1835 static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
1836 				 unsigned journal_section, unsigned journal_entry)
1837 {
1838 	struct dm_integrity_c *ic = dio->ic;
1839 	sector_t logical_sector;
1840 	unsigned n_sectors;
1841 
1842 	logical_sector = dio->range.logical_sector;
1843 	n_sectors = dio->range.n_sectors;
1844 	do {
1845 		struct bio_vec bv = bio_iovec(bio);
1846 		char *mem;
1847 
1848 		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
1849 			bv.bv_len = n_sectors << SECTOR_SHIFT;
1850 		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
1851 		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
1852 retry_kmap:
1853 		mem = kmap_atomic(bv.bv_page);
1854 		if (likely(dio->op == REQ_OP_WRITE))
1855 			flush_dcache_page(bv.bv_page);
1856 
1857 		do {
1858 			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
1859 
1860 			if (unlikely(dio->op == REQ_OP_READ)) {
1861 				struct journal_sector *js;
1862 				char *mem_ptr;
1863 				unsigned s;
1864 
1865 				if (unlikely(journal_entry_is_inprogress(je))) {
1866 					flush_dcache_page(bv.bv_page);
1867 					kunmap_atomic(mem);
1868 
1869 					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1870 					goto retry_kmap;
1871 				}
1872 				smp_rmb();
1873 				BUG_ON(journal_entry_get_sector(je) != logical_sector);
1874 				js = access_journal_data(ic, journal_section, journal_entry);
1875 				mem_ptr = mem + bv.bv_offset;
1876 				s = 0;
1877 				do {
1878 					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
1879 					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
1880 					js++;
1881 					mem_ptr += 1 << SECTOR_SHIFT;
1882 				} while (++s < ic->sectors_per_block);
1883 #ifdef INTERNAL_VERIFY
1884 				if (ic->internal_hash) {
1885 					char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1886 
1887 					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
1888 					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
1889 						DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
1890 							    logical_sector);
1891 					}
1892 				}
1893 #endif
1894 			}
1895 
1896 			if (!ic->internal_hash) {
1897 				struct bio_integrity_payload *bip = bio_integrity(bio);
1898 				unsigned tag_todo = ic->tag_size;
1899 				char *tag_ptr = journal_entry_tag(ic, je);
1900 
1901 				if (bip) do {
1902 					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
1903 					unsigned tag_now = min(biv.bv_len, tag_todo);
1904 					char *tag_addr;
1905 					BUG_ON(PageHighMem(biv.bv_page));
1906 					tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1907 					if (likely(dio->op == REQ_OP_WRITE))
1908 						memcpy(tag_ptr, tag_addr, tag_now);
1909 					else
1910 						memcpy(tag_addr, tag_ptr, tag_now);
1911 					bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
1912 					tag_ptr += tag_now;
1913 					tag_todo -= tag_now;
1914 				} while (unlikely(tag_todo)); else {
1915 					if (likely(dio->op == REQ_OP_WRITE))
1916 						memset(tag_ptr, 0, tag_todo);
1917 				}
1918 			}
1919 
1920 			if (likely(dio->op == REQ_OP_WRITE)) {
1921 				struct journal_sector *js;
1922 				unsigned s;
1923 
1924 				js = access_journal_data(ic, journal_section, journal_entry);
1925 				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
1926 
1927 				s = 0;
1928 				do {
1929 					je->last_bytes[s] = js[s].commit_id;
1930 				} while (++s < ic->sectors_per_block);
1931 
1932 				if (ic->internal_hash) {
1933 					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1934 					if (unlikely(digest_size > ic->tag_size)) {
1935 						char checksums_onstack[HASH_MAX_DIGESTSIZE];
1936 						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
1937 						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
1938 					} else
1939 						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
1940 				}
1941 
1942 				journal_entry_set_sector(je, logical_sector);
1943 			}
1944 			logical_sector += ic->sectors_per_block;
1945 
1946 			journal_entry++;
1947 			if (unlikely(journal_entry == ic->journal_section_entries)) {
1948 				journal_entry = 0;
1949 				journal_section++;
1950 				wraparound_section(ic, &journal_section);
1951 			}
1952 
1953 			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
1954 		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
1955 
1956 		if (unlikely(dio->op == REQ_OP_READ))
1957 			flush_dcache_page(bv.bv_page);
1958 		kunmap_atomic(mem);
1959 	} while (n_sectors);
1960 
1961 	if (likely(dio->op == REQ_OP_WRITE)) {
1962 		smp_mb();
1963 		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
1964 			wake_up(&ic->copy_to_journal_wait);
1965 		if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
1966 			queue_work(ic->commit_wq, &ic->commit_work);
1967 		} else {
1968 			schedule_autocommit(ic);
1969 		}
1970 	} else {
1971 		remove_range(ic, &dio->range);
1972 	}
1973 
1974 	if (unlikely(bio->bi_iter.bi_size)) {
1975 		sector_t area, offset;
1976 
1977 		dio->range.logical_sector = logical_sector;
1978 		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1979 		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1980 		return true;
1981 	}
1982 
1983 	return false;
1984 }
1985 
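/*
 * Main I/O path. In journal mode, writes allocate journal entries under
 * ic->endio_wait.lock and are copied in by __journal_read_write(); reads
 * that hit the journal are served from it. Otherwise the range is locked
 * against concurrent journal writeback, bitmap mode checks the may-write
 * bitmap, and the bio is remapped to the data device, with tag processing
 * done synchronously for reads and offloaded to ic->metadata_wq for
 * writes. from_map means we are on the submission path and must not
 * sleep, so anything that would block is deferred to a workqueue.
 */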
1986 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
1987 {
1988 	struct dm_integrity_c *ic = dio->ic;
1989 	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1990 	unsigned journal_section, journal_entry;
1991 	unsigned journal_read_pos;
1992 	struct completion read_comp;
1993 	bool discard_retried = false;
1994 	bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
1995 	if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
1996 		need_sync_io = true;
1997 
1998 	if (need_sync_io && from_map) {
1999 		INIT_WORK(&dio->work, integrity_bio_wait);
2000 		queue_work(ic->offload_wq, &dio->work);
2001 		return;
2002 	}
2003 
2004 lock_retry:
2005 	spin_lock_irq(&ic->endio_wait.lock);
2006 retry:
2007 	if (unlikely(dm_integrity_failed(ic))) {
2008 		spin_unlock_irq(&ic->endio_wait.lock);
2009 		do_endio(ic, bio);
2010 		return;
2011 	}
2012 	dio->range.n_sectors = bio_sectors(bio);
2013 	journal_read_pos = NOT_FOUND;
2014 	if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
2015 		if (dio->op == REQ_OP_WRITE) {
2016 			unsigned next_entry, i, pos;
2017 			unsigned ws, we, range_sectors;
2018 
2019 			dio->range.n_sectors = min(dio->range.n_sectors,
2020 						   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
2021 			if (unlikely(!dio->range.n_sectors)) {
2022 				if (from_map)
2023 					goto offload_to_thread;
2024 				sleep_on_endio_wait(ic);
2025 				goto retry;
2026 			}
2027 			range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
2028 			ic->free_sectors -= range_sectors;
2029 			journal_section = ic->free_section;
2030 			journal_entry = ic->free_section_entry;
2031 
2032 			next_entry = ic->free_section_entry + range_sectors;
2033 			ic->free_section_entry = next_entry % ic->journal_section_entries;
2034 			ic->free_section += next_entry / ic->journal_section_entries;
2035 			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
2036 			wraparound_section(ic, &ic->free_section);
2037 
2038 			pos = journal_section * ic->journal_section_entries + journal_entry;
2039 			ws = journal_section;
2040 			we = journal_entry;
2041 			i = 0;
2042 			do {
2043 				struct journal_entry *je;
2044 
2045 				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
2046 				pos++;
2047 				if (unlikely(pos >= ic->journal_entries))
2048 					pos = 0;
2049 
2050 				je = access_journal_entry(ic, ws, we);
2051 				BUG_ON(!journal_entry_is_unused(je));
2052 				journal_entry_set_inprogress(je);
2053 				we++;
2054 				if (unlikely(we == ic->journal_section_entries)) {
2055 					we = 0;
2056 					ws++;
2057 					wraparound_section(ic, &ws);
2058 				}
2059 			} while ((i += ic->sectors_per_block) < dio->range.n_sectors);
2060 
2061 			spin_unlock_irq(&ic->endio_wait.lock);
2062 			goto journal_read_write;
2063 		} else {
2064 			sector_t next_sector;
2065 			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2066 			if (likely(journal_read_pos == NOT_FOUND)) {
2067 				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
2068 					dio->range.n_sectors = next_sector - dio->range.logical_sector;
2069 			} else {
2070 				unsigned i;
2071 				unsigned jp = journal_read_pos + 1;
2072 				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
2073 					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
2074 						break;
2075 				}
2076 				dio->range.n_sectors = i;
2077 			}
2078 		}
2079 	}
2080 	if (unlikely(!add_new_range(ic, &dio->range, true))) {
2081 		/*
2082 		 * We must not sleep in the request routine because it could
2083 		 * stall bios on current->bio_list.
2084 		 * So, we offload the bio to a workqueue if we have to sleep.
2085 		 */
2086 		if (from_map) {
2087 offload_to_thread:
2088 			spin_unlock_irq(&ic->endio_wait.lock);
2089 			INIT_WORK(&dio->work, integrity_bio_wait);
2090 			queue_work(ic->wait_wq, &dio->work);
2091 			return;
2092 		}
2093 		if (journal_read_pos != NOT_FOUND)
2094 			dio->range.n_sectors = ic->sectors_per_block;
2095 		wait_and_add_new_range(ic, &dio->range);
2096 		/*
2097 		 * wait_and_add_new_range drops the spinlock, so the journal
2098 		 * may have been changed arbitrarily. We need to recheck.
2099 		 * To simplify the code, we restrict I/O size to just one block.
2100 		 */
2101 		if (journal_read_pos != NOT_FOUND) {
2102 			sector_t next_sector;
2103 			unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2104 			if (unlikely(new_pos != journal_read_pos)) {
2105 				remove_range_unlocked(ic, &dio->range);
2106 				goto retry;
2107 			}
2108 		}
2109 	}
2110 	if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
2111 		sector_t next_sector;
2112 		unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2113 		if (unlikely(new_pos != NOT_FOUND) ||
2114 		    unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
2115 			remove_range_unlocked(ic, &dio->range);
2116 			spin_unlock_irq(&ic->endio_wait.lock);
2117 			queue_work(ic->commit_wq, &ic->commit_work);
2118 			flush_workqueue(ic->commit_wq);
2119 			queue_work(ic->writer_wq, &ic->writer_work);
2120 			flush_workqueue(ic->writer_wq);
2121 			discard_retried = true;
2122 			goto lock_retry;
2123 		}
2124 	}
2125 	spin_unlock_irq(&ic->endio_wait.lock);
2126 
2127 	if (unlikely(journal_read_pos != NOT_FOUND)) {
2128 		journal_section = journal_read_pos / ic->journal_section_entries;
2129 		journal_entry = journal_read_pos % ic->journal_section_entries;
2130 		goto journal_read_write;
2131 	}
2132 
2133 	if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
2134 		if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2135 				     dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2136 			struct bitmap_block_status *bbs;
2137 
2138 			bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
2139 			spin_lock(&bbs->bio_queue_lock);
2140 			bio_list_add(&bbs->bio_queue, bio);
2141 			spin_unlock(&bbs->bio_queue_lock);
2142 			queue_work(ic->writer_wq, &bbs->work);
2143 			return;
2144 		}
2145 	}
2146 
2147 	dio->in_flight = (atomic_t)ATOMIC_INIT(2);
2148 
2149 	if (need_sync_io) {
2150 		init_completion(&read_comp);
2151 		dio->completion = &read_comp;
2152 	} else
2153 		dio->completion = NULL;
2154 
2155 	dm_bio_record(&dio->bio_details, bio);
2156 	bio_set_dev(bio, ic->dev->bdev);
2157 	bio->bi_integrity = NULL;
2158 	bio->bi_opf &= ~REQ_INTEGRITY;
2159 	bio->bi_end_io = integrity_end_io;
2160 	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
2161 
2162 	if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
2163 		integrity_metadata(&dio->work);
2164 		dm_integrity_flush_buffers(ic, false);
2165 
2166 		dio->in_flight = (atomic_t)ATOMIC_INIT(1);
2167 		dio->completion = NULL;
2168 
2169 		submit_bio_noacct(bio);
2170 
2171 		return;
2172 	}
2173 
2174 	submit_bio_noacct(bio);
2175 
2176 	if (need_sync_io) {
2177 		wait_for_completion_io(&read_comp);
2178 		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
2179 		    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
2180 			goto skip_check;
2181 		if (ic->mode == 'B') {
2182 			if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2183 					     dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2184 				goto skip_check;
2185 		}
2186 
2187 		if (likely(!bio->bi_status))
2188 			integrity_metadata(&dio->work);
2189 		else
2190 skip_check:
2191 			dec_in_flight(dio);
2192 
2193 	} else {
2194 		INIT_WORK(&dio->work, integrity_metadata);
2195 		queue_work(ic->metadata_wq, &dio->work);
2196 	}
2197 
2198 	return;
2199 
2200 journal_read_write:
2201 	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
2202 		goto lock_retry;
2203 
2204 	do_endio_flush(ic, dio);
2205 }
2206 
2207 
2208 static void integrity_bio_wait(struct work_struct *w)
2209 {
2210 	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2211 
2212 	dm_integrity_map_continue(dio, false);
2213 }
2214 
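/*
 * If the current journal section is only partially filled, give up its
 * remaining entries and advance to the next section, so that commits
 * always cover whole sections. The WARN_ON cross-checks the free-sector
 * accounting.
 */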
2215 static void pad_uncommitted(struct dm_integrity_c *ic)
2216 {
2217 	if (ic->free_section_entry) {
2218 		ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
2219 		ic->free_section_entry = 0;
2220 		ic->free_section++;
2221 		wraparound_section(ic, &ic->free_section);
2222 		ic->n_uncommitted_sections++;
2223 	}
2224 	if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
2225 		    (ic->n_uncommitted_sections + ic->n_committed_sections) *
2226 		    ic->journal_section_entries + ic->free_sectors)) {
2227 		DMCRIT("journal_sections %u, journal_section_entries %u, "
2228 		       "n_uncommitted_sections %u, n_committed_sections %u, "
2229 		       "journal_section_entries %u, free_sectors %u",
2230 		       ic->journal_sections, ic->journal_section_entries,
2231 		       ic->n_uncommitted_sections, ic->n_committed_sections,
2232 		       ic->journal_section_entries, ic->free_sectors);
2233 	}
2234 }
2235 
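/*
 * Commit work: wait for in-progress journal entries to be filled in,
 * stamp every sector of the uncommitted sections with the current commit
 * id, write them to the on-disk journal and move them to the committed
 * state. Queued flush bios are completed once the commit is durable.
 */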
2236 static void integrity_commit(struct work_struct *w)
2237 {
2238 	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
2239 	unsigned commit_start, commit_sections;
2240 	unsigned i, j, n;
2241 	struct bio *flushes;
2242 
2243 	del_timer(&ic->autocommit_timer);
2244 
2245 	spin_lock_irq(&ic->endio_wait.lock);
2246 	flushes = bio_list_get(&ic->flush_bio_list);
2247 	if (unlikely(ic->mode != 'J')) {
2248 		spin_unlock_irq(&ic->endio_wait.lock);
2249 		dm_integrity_flush_buffers(ic, true);
2250 		goto release_flush_bios;
2251 	}
2252 
2253 	pad_uncommitted(ic);
2254 	commit_start = ic->uncommitted_section;
2255 	commit_sections = ic->n_uncommitted_sections;
2256 	spin_unlock_irq(&ic->endio_wait.lock);
2257 
2258 	if (!commit_sections)
2259 		goto release_flush_bios;
2260 
2261 	ic->wrote_to_journal = true;
2262 
2263 	i = commit_start;
2264 	for (n = 0; n < commit_sections; n++) {
2265 		for (j = 0; j < ic->journal_section_entries; j++) {
2266 			struct journal_entry *je;
2267 			je = access_journal_entry(ic, i, j);
2268 			io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2269 		}
2270 		for (j = 0; j < ic->journal_section_sectors; j++) {
2271 			struct journal_sector *js;
2272 			js = access_journal(ic, i, j);
2273 			js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
2274 		}
2275 		i++;
2276 		if (unlikely(i >= ic->journal_sections))
2277 			ic->commit_seq = next_commit_seq(ic->commit_seq);
2278 		wraparound_section(ic, &i);
2279 	}
2280 	smp_rmb();
2281 
2282 	write_journal(ic, commit_start, commit_sections);
2283 
2284 	spin_lock_irq(&ic->endio_wait.lock);
2285 	ic->uncommitted_section += commit_sections;
2286 	wraparound_section(ic, &ic->uncommitted_section);
2287 	ic->n_uncommitted_sections -= commit_sections;
2288 	ic->n_committed_sections += commit_sections;
2289 	spin_unlock_irq(&ic->endio_wait.lock);
2290 
2291 	if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2292 		queue_work(ic->writer_wq, &ic->writer_work);
2293 
2294 release_flush_bios:
2295 	while (flushes) {
2296 		struct bio *next = flushes->bi_next;
2297 		flushes->bi_next = NULL;
2298 		do_endio(ic, flushes);
2299 		flushes = next;
2300 	}
2301 }
2302 
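/*
 * Completion callback for copy_from_journal(): unlock the range, free the
 * journal_io descriptor and drop one reference on the journal operation.
 */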
2303 static void complete_copy_from_journal(unsigned long error, void *context)
2304 {
2305 	struct journal_io *io = context;
2306 	struct journal_completion *comp = io->comp;
2307 	struct dm_integrity_c *ic = comp->ic;
2308 	remove_range(ic, &io->range);
2309 	mempool_free(io, &ic->journal_io_mempool);
2310 	if (unlikely(error != 0))
2311 		dm_integrity_io_error(ic, "copying from journal", -EIO);
2312 	complete_journal_op(comp);
2313 }
2314 
2315 static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
2316 			       struct journal_entry *je)
2317 {
2318 	unsigned s = 0;
2319 	do {
2320 		js->commit_id = je->last_bytes[s];
2321 		js++;
2322 	} while (++s < ic->sectors_per_block);
2323 }
2324 
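/*
 * Write committed journal sections to their final location on the data
 * device. Runs of entries with consecutive target sectors are merged into
 * a single copy. Outside of replay, entries superseded by a newer
 * committed entry for the same sector are skipped. The tags are written
 * to the metadata area and, on replay (or with INTERNAL_VERIFY), first
 * re-verified against the journal data.
 */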
2325 static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
2326 			     unsigned write_sections, bool from_replay)
2327 {
2328 	unsigned i, j, n;
2329 	struct journal_completion comp;
2330 	struct blk_plug plug;
2331 
2332 	blk_start_plug(&plug);
2333 
2334 	comp.ic = ic;
2335 	comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2336 	init_completion(&comp.comp);
2337 
2338 	i = write_start;
2339 	for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2340 #ifndef INTERNAL_VERIFY
2341 		if (unlikely(from_replay))
2342 #endif
2343 			rw_section_mac(ic, i, false);
2344 		for (j = 0; j < ic->journal_section_entries; j++) {
2345 			struct journal_entry *je = access_journal_entry(ic, i, j);
2346 			sector_t sec, area, offset;
2347 			unsigned k, l, next_loop;
2348 			sector_t metadata_block;
2349 			unsigned metadata_offset;
2350 			struct journal_io *io;
2351 
2352 			if (journal_entry_is_unused(je))
2353 				continue;
2354 			BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
2355 			sec = journal_entry_get_sector(je);
2356 			if (unlikely(from_replay)) {
2357 				if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
2358 					dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2359 					sec &= ~(sector_t)(ic->sectors_per_block - 1);
2360 				}
2361 				if (unlikely(sec >= ic->provided_data_sectors)) {
2362 					journal_entry_set_unused(je);
2363 					continue;
2364 				}
2365 			}
2366 			get_area_and_offset(ic, sec, &area, &offset);
2367 			restore_last_bytes(ic, access_journal_data(ic, i, j), je);
2368 			for (k = j + 1; k < ic->journal_section_entries; k++) {
2369 				struct journal_entry *je2 = access_journal_entry(ic, i, k);
2370 				sector_t sec2, area2, offset2;
2371 				if (journal_entry_is_unused(je2))
2372 					break;
2373 				BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
2374 				sec2 = journal_entry_get_sector(je2);
2375 				if (unlikely(sec2 >= ic->provided_data_sectors))
2376 					break;
2377 				get_area_and_offset(ic, sec2, &area2, &offset2);
2378 				if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
2379 					break;
2380 				restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
2381 			}
2382 			next_loop = k - 1;
2383 
2384 			io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
2385 			io->comp = &comp;
2386 			io->range.logical_sector = sec;
2387 			io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
2388 
2389 			spin_lock_irq(&ic->endio_wait.lock);
2390 			add_new_range_and_wait(ic, &io->range);
2391 
2392 			if (likely(!from_replay)) {
2393 				struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2394 
2395 				/* don't write if there is a newer committed sector */
2396 				while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2397 					struct journal_entry *je2 = access_journal_entry(ic, i, j);
2398 
2399 					journal_entry_set_unused(je2);
2400 					remove_journal_node(ic, &section_node[j]);
2401 					j++;
2402 					sec += ic->sectors_per_block;
2403 					offset += ic->sectors_per_block;
2404 				}
2405 				while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2406 					struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2407 
2408 					journal_entry_set_unused(je2);
2409 					remove_journal_node(ic, &section_node[k - 1]);
2410 					k--;
2411 				}
2412 				if (j == k) {
2413 					remove_range_unlocked(ic, &io->range);
2414 					spin_unlock_irq(&ic->endio_wait.lock);
2415 					mempool_free(io, &ic->journal_io_mempool);
2416 					goto skip_io;
2417 				}
2418 				for (l = j; l < k; l++) {
2419 					remove_journal_node(ic, &section_node[l]);
2420 				}
2421 			}
2422 			spin_unlock_irq(&ic->endio_wait.lock);
2423 
2424 			metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2425 			for (l = j; l < k; l++) {
2426 				int r;
2427 				struct journal_entry *je2 = access_journal_entry(ic, i, l);
2428 
2429 				if (
2430 #ifndef INTERNAL_VERIFY
2431 				    unlikely(from_replay) &&
2432 #endif
2433 				    ic->internal_hash) {
2434 					char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2435 
2436 					integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2437 								  (char *)access_journal_data(ic, i, l), test_tag);
2438 					if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
2439 						dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2440 				}
2441 
2442 				journal_entry_set_unused(je2);
2443 				r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2444 							ic->tag_size, TAG_WRITE);
2445 				if (unlikely(r)) {
2446 					dm_integrity_io_error(ic, "writing tags", r);
2447 				}
2448 			}
2449 
2450 			atomic_inc(&comp.in_flight);
2451 			copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2452 					  (k - j) << ic->sb->log2_sectors_per_block,
2453 					  get_data_sector(ic, area, offset),
2454 					  complete_copy_from_journal, io);
2455 skip_io:
2456 			j = next_loop;
2457 		}
2458 	}
2459 
2460 	dm_bufio_write_dirty_buffers_async(ic->bufio);
2461 
2462 	blk_finish_plug(&plug);
2463 
2464 	complete_journal_op(&comp);
2465 	wait_for_completion_io(&comp.comp);
2466 
2467 	dm_integrity_flush_buffers(ic, true);
2468 }
2469 
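/*
 * Writer work: flush the committed journal sections to the data device
 * via do_journal_write() and return their entries to the free pool,
 * waking up writers that were blocked on a full journal.
 */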
2470 static void integrity_writer(struct work_struct *w)
2471 {
2472 	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2473 	unsigned write_start, write_sections;
2474 
2475 	unsigned prev_free_sectors;
2476 
2477 	spin_lock_irq(&ic->endio_wait.lock);
2478 	write_start = ic->committed_section;
2479 	write_sections = ic->n_committed_sections;
2480 	spin_unlock_irq(&ic->endio_wait.lock);
2481 
2482 	if (!write_sections)
2483 		return;
2484 
2485 	do_journal_write(ic, write_start, write_sections, false);
2486 
2487 	spin_lock_irq(&ic->endio_wait.lock);
2488 
2489 	ic->committed_section += write_sections;
2490 	wraparound_section(ic, &ic->committed_section);
2491 	ic->n_committed_sections -= write_sections;
2492 
2493 	prev_free_sectors = ic->free_sectors;
2494 	ic->free_sectors += write_sections * ic->journal_section_entries;
2495 	if (unlikely(!prev_free_sectors))
2496 		wake_up_locked(&ic->endio_wait);
2497 
2498 	spin_unlock_irq(&ic->endio_wait.lock);
2499 }
2500 
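/*
 * Flush dirty metadata buffers and persist the superblock, which carries
 * the recalculation position in sb->recalc_sector.
 */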
2501 static void recalc_write_super(struct dm_integrity_c *ic)
2502 {
2503 	int r;
2504 
2505 	dm_integrity_flush_buffers(ic, false);
2506 	if (dm_integrity_failed(ic))
2507 		return;
2508 
2509 	r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
2510 	if (unlikely(r))
2511 		dm_integrity_io_error(ic, "writing superblock", r);
2512 }
2513 
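/*
 * Background recalculation of integrity tags, used when the target was
 * loaded over pre-existing data. Each pass locks a chunk starting at
 * sb->recalc_sector, reads the data, computes the tags and writes them to
 * the metadata area; every RECALC_WRITE_SUPER passes the superblock is
 * written so that an interrupted recalculation can resume. In bitmap
 * mode, blocks whose bits are already clear are skipped.
 */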
2514 static void integrity_recalc(struct work_struct *w)
2515 {
2516 	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2517 	struct dm_integrity_range range;
2518 	struct dm_io_request io_req;
2519 	struct dm_io_region io_loc;
2520 	sector_t area, offset;
2521 	sector_t metadata_block;
2522 	unsigned metadata_offset;
2523 	sector_t logical_sector, n_sectors;
2524 	__u8 *t;
2525 	unsigned i;
2526 	int r;
2527 	unsigned super_counter = 0;
2528 
2529 	DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2530 
2531 	spin_lock_irq(&ic->endio_wait.lock);
2532 
2533 next_chunk:
2534 
2535 	if (unlikely(dm_post_suspending(ic->ti)))
2536 		goto unlock_ret;
2537 
2538 	range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2539 	if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2540 		if (ic->mode == 'B') {
2541 			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2542 			DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
2543 			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2544 		}
2545 		goto unlock_ret;
2546 	}
2547 
2548 	get_area_and_offset(ic, range.logical_sector, &area, &offset);
2549 	range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
2550 	if (!ic->meta_dev)
2551 		range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
2552 
2553 	add_new_range_and_wait(ic, &range);
2554 	spin_unlock_irq(&ic->endio_wait.lock);
2555 	logical_sector = range.logical_sector;
2556 	n_sectors = range.n_sectors;
2557 
2558 	if (ic->mode == 'B') {
2559 		if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
2560 			goto advance_and_next;
2561 		}
2562 		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2563 				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2564 			logical_sector += ic->sectors_per_block;
2565 			n_sectors -= ic->sectors_per_block;
2566 			cond_resched();
2567 		}
2568 		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2569 				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2570 			n_sectors -= ic->sectors_per_block;
2571 			cond_resched();
2572 		}
2573 		get_area_and_offset(ic, logical_sector, &area, &offset);
2574 	}
2575 
2576 	DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);
2577 
2578 	if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2579 		recalc_write_super(ic);
2580 		if (ic->mode == 'B') {
2581 			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2582 		}
2583 		super_counter = 0;
2584 	}
2585 
2586 	if (unlikely(dm_integrity_failed(ic)))
2587 		goto err;
2588 
2589 	io_req.bi_op = REQ_OP_READ;
2590 	io_req.bi_op_flags = 0;
2591 	io_req.mem.type = DM_IO_VMA;
2592 	io_req.mem.ptr.addr = ic->recalc_buffer;
2593 	io_req.notify.fn = NULL;
2594 	io_req.client = ic->io;
2595 	io_loc.bdev = ic->dev->bdev;
2596 	io_loc.sector = get_data_sector(ic, area, offset);
2597 	io_loc.count = n_sectors;
2598 
2599 	r = dm_io(&io_req, 1, &io_loc, NULL);
2600 	if (unlikely(r)) {
2601 		dm_integrity_io_error(ic, "reading data", r);
2602 		goto err;
2603 	}
2604 
2605 	t = ic->recalc_tags;
2606 	for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2607 		integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
2608 		t += ic->tag_size;
2609 	}
2610 
2611 	metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2612 
2613 	r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
2614 	if (unlikely(r)) {
2615 		dm_integrity_io_error(ic, "writing tags", r);
2616 		goto err;
2617 	}
2618 
2619 	if (ic->mode == 'B') {
2620 		sector_t start, end;
2621 		start = (range.logical_sector >>
2622 			 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2623 			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2624 		end = ((range.logical_sector + range.n_sectors) >>
2625 		       (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2626 			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2627 		block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
2628 	}
2629 
2630 advance_and_next:
2631 	cond_resched();
2632 
2633 	spin_lock_irq(&ic->endio_wait.lock);
2634 	remove_range_unlocked(ic, &range);
2635 	ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2636 	goto next_chunk;
2637 
2638 err:
2639 	remove_range(ic, &range);
2640 	return;
2641 
2642 unlock_ret:
2643 	spin_unlock_irq(&ic->endio_wait.lock);
2644 
2645 	recalc_write_super(ic);
2646 }
2647 
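/*
 * Bitmap-mode write path for bios whose range is not yet marked in the
 * may-write bitmap: the bits are first set in the journal bitmap and
 * flushed to disk with FUA; only then is the may-write bitmap updated and
 * the bio resubmitted through integrity_bio_wait().
 */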
2648 static void bitmap_block_work(struct work_struct *w)
2649 {
2650 	struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
2651 	struct dm_integrity_c *ic = bbs->ic;
2652 	struct bio *bio;
2653 	struct bio_list bio_queue;
2654 	struct bio_list waiting;
2655 
2656 	bio_list_init(&waiting);
2657 
2658 	spin_lock(&bbs->bio_queue_lock);
2659 	bio_queue = bbs->bio_queue;
2660 	bio_list_init(&bbs->bio_queue);
2661 	spin_unlock(&bbs->bio_queue_lock);
2662 
2663 	while ((bio = bio_list_pop(&bio_queue))) {
2664 		struct dm_integrity_io *dio;
2665 
2666 		dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2667 
2668 		if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2669 				    dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2670 			remove_range(ic, &dio->range);
2671 			INIT_WORK(&dio->work, integrity_bio_wait);
2672 			queue_work(ic->offload_wq, &dio->work);
2673 		} else {
2674 			block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2675 					dio->range.n_sectors, BITMAP_OP_SET);
2676 			bio_list_add(&waiting, bio);
2677 		}
2678 	}
2679 
2680 	if (bio_list_empty(&waiting))
2681 		return;
2682 
2683 	rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
2684 			   bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
2685 			   BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
2686 
2687 	while ((bio = bio_list_pop(&waiting))) {
2688 		struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2689 
2690 		block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2691 				dio->range.n_sectors, BITMAP_OP_SET);
2692 
2693 		remove_range(ic, &dio->range);
2694 		INIT_WORK(&dio->work, integrity_bio_wait);
2695 		queue_work(ic->offload_wq, &dio->work);
2696 	}
2697 
2698 	queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2699 }
2700 
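/*
 * Periodic bitmap flush: lock the whole device, flush all data, clear the
 * journal and may-write bitmaps up to the recalculation boundary and
 * write them back with FUA, then complete any queued synchronous bios.
 */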
2701 static void bitmap_flush_work(struct work_struct *work)
2702 {
2703 	struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2704 	struct dm_integrity_range range;
2705 	unsigned long limit;
2706 	struct bio *bio;
2707 
2708 	dm_integrity_flush_buffers(ic, false);
2709 
2710 	range.logical_sector = 0;
2711 	range.n_sectors = ic->provided_data_sectors;
2712 
2713 	spin_lock_irq(&ic->endio_wait.lock);
2714 	add_new_range_and_wait(ic, &range);
2715 	spin_unlock_irq(&ic->endio_wait.lock);
2716 
2717 	dm_integrity_flush_buffers(ic, true);
2718 
2719 	limit = ic->provided_data_sectors;
2720 	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2721 		limit = le64_to_cpu(ic->sb->recalc_sector)
2722 			>> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2723 			<< (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2724 	}
2725 	/*DEBUG_print("zeroing journal\n");*/
2726 	block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2727 	block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2728 
2729 	rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2730 			   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2731 
2732 	spin_lock_irq(&ic->endio_wait.lock);
2733 	remove_range_unlocked(ic, &range);
2734 	while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2735 		bio_endio(bio);
2736 		spin_unlock_irq(&ic->endio_wait.lock);
2737 		spin_lock_irq(&ic->endio_wait.lock);
2738 	}
2739 	spin_unlock_irq(&ic->endio_wait.lock);
2740 }
2741 
2742 
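/*
 * Erase journal sections: zero each sector's entry area, mark every entry
 * unused, stamp the sectors with the commit id for commit_seq and write
 * the sections out.
 */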
2743 static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
2744 			 unsigned n_sections, unsigned char commit_seq)
2745 {
2746 	unsigned i, j, n;
2747 
2748 	if (!n_sections)
2749 		return;
2750 
2751 	for (n = 0; n < n_sections; n++) {
2752 		i = start_section + n;
2753 		wraparound_section(ic, &i);
2754 		for (j = 0; j < ic->journal_section_sectors; j++) {
2755 			struct journal_sector *js = access_journal(ic, i, j);
2756 			memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
2757 			js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2758 		}
2759 		for (j = 0; j < ic->journal_section_entries; j++) {
2760 			struct journal_entry *je = access_journal_entry(ic, i, j);
2761 			journal_entry_set_unused(je);
2762 		}
2763 	}
2764 
2765 	write_journal(ic, start_section, n_sections);
2766 }
2767 
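/*
 * Map an on-disk commit id back to its sequence number
 * (0 .. N_COMMIT_IDS - 1); an id that matches no sequence indicates a
 * corrupted journal.
 */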
2768 static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2769 {
2770 	unsigned char k;
2771 	for (k = 0; k < N_COMMIT_IDS; k++) {
2772 		if (dm_integrity_commit_id(ic, i, j, k) == id)
2773 			return k;
2774 	}
2775 	dm_integrity_io_error(ic, "journal commit id", -EIO);
2776 	return -EIO;
2777 }
2778 
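/*
 * Decide at activation time what to do with the on-disk journal. The
 * commit ids found in each sector identify the most recently used commit
 * sequence; the sections written with the last complete sequence are
 * replayed (unless the journal is empty), otherwise the journal is erased
 * and we start from a clean state. Finally the in-memory journal state
 * and the journal tree are reinitialized.
 */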
2779 static void replay_journal(struct dm_integrity_c *ic)
2780 {
2781 	unsigned i, j;
2782 	bool used_commit_ids[N_COMMIT_IDS];
2783 	unsigned max_commit_id_sections[N_COMMIT_IDS];
2784 	unsigned write_start, write_sections;
2785 	unsigned continue_section;
2786 	bool journal_empty;
2787 	unsigned char unused, last_used, want_commit_seq;
2788 
2789 	if (ic->mode == 'R')
2790 		return;
2791 
2792 	if (ic->journal_uptodate)
2793 		return;
2794 
2795 	last_used = 0;
2796 	write_start = 0;
2797 
2798 	if (!ic->just_formatted) {
2799 		DEBUG_print("reading journal\n");
2800 		rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
2801 		if (ic->journal_io)
2802 			DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2803 		if (ic->journal_io) {
2804 			struct journal_completion crypt_comp;
2805 			crypt_comp.ic = ic;
2806 			init_completion(&crypt_comp.comp);
2807 			crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2808 			encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2809 			wait_for_completion(&crypt_comp.comp);
2810 		}
2811 		DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2812 	}
2813 
2814 	if (dm_integrity_failed(ic))
2815 		goto clear_journal;
2816 
2817 	journal_empty = true;
2818 	memset(used_commit_ids, 0, sizeof used_commit_ids);
2819 	memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
2820 	for (i = 0; i < ic->journal_sections; i++) {
2821 		for (j = 0; j < ic->journal_section_sectors; j++) {
2822 			int k;
2823 			struct journal_sector *js = access_journal(ic, i, j);
2824 			k = find_commit_seq(ic, i, j, js->commit_id);
2825 			if (k < 0)
2826 				goto clear_journal;
2827 			used_commit_ids[k] = true;
2828 			max_commit_id_sections[k] = i;
2829 		}
2830 		if (journal_empty) {
2831 			for (j = 0; j < ic->journal_section_entries; j++) {
2832 				struct journal_entry *je = access_journal_entry(ic, i, j);
2833 				if (!journal_entry_is_unused(je)) {
2834 					journal_empty = false;
2835 					break;
2836 				}
2837 			}
2838 		}
2839 	}
2840 
2841 	if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2842 		unused = N_COMMIT_IDS - 1;
2843 		while (unused && !used_commit_ids[unused - 1])
2844 			unused--;
2845 	} else {
2846 		for (unused = 0; unused < N_COMMIT_IDS; unused++)
2847 			if (!used_commit_ids[unused])
2848 				break;
2849 		if (unused == N_COMMIT_IDS) {
2850 			dm_integrity_io_error(ic, "journal commit ids", -EIO);
2851 			goto clear_journal;
2852 		}
2853 	}
2854 	DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
2855 		    unused, used_commit_ids[0], used_commit_ids[1],
2856 		    used_commit_ids[2], used_commit_ids[3]);
2857 
2858 	last_used = prev_commit_seq(unused);
2859 	want_commit_seq = prev_commit_seq(last_used);
2860 
2861 	if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
2862 		journal_empty = true;
2863 
2864 	write_start = max_commit_id_sections[last_used] + 1;
2865 	if (unlikely(write_start >= ic->journal_sections))
2866 		want_commit_seq = next_commit_seq(want_commit_seq);
2867 	wraparound_section(ic, &write_start);
2868 
2869 	i = write_start;
2870 	for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2871 		for (j = 0; j < ic->journal_section_sectors; j++) {
2872 			struct journal_sector *js = access_journal(ic, i, j);
2873 
2874 			if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2875 				/*
2876 				 * This could be caused by a crash during writing.
2877 				 * We won't replay the inconsistent part of the
2878 				 * journal.
2879 				 */
2880 				DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
2881 					    i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2882 				goto brk;
2883 			}
2884 		}
2885 		i++;
2886 		if (unlikely(i >= ic->journal_sections))
2887 			want_commit_seq = next_commit_seq(want_commit_seq);
2888 		wraparound_section(ic, &i);
2889 	}
2890 brk:
2891 
2892 	if (!journal_empty) {
2893 		DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
2894 			    write_sections, write_start, want_commit_seq);
2895 		do_journal_write(ic, write_start, write_sections, true);
2896 	}
2897 
2898 	if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
2899 		continue_section = write_start;
2900 		ic->commit_seq = want_commit_seq;
2901 		DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
2902 	} else {
2903 		unsigned s;
2904 		unsigned char erase_seq;
2905 clear_journal:
2906 		DEBUG_print("clearing journal\n");
2907 
2908 		erase_seq = prev_commit_seq(prev_commit_seq(last_used));
2909 		s = write_start;
2910 		init_journal(ic, s, 1, erase_seq);
2911 		s++;
2912 		wraparound_section(ic, &s);
2913 		if (ic->journal_sections >= 2) {
2914 			init_journal(ic, s, ic->journal_sections - 2, erase_seq);
2915 			s += ic->journal_sections - 2;
2916 			wraparound_section(ic, &s);
2917 			init_journal(ic, s, 1, erase_seq);
2918 		}
2919 
2920 		continue_section = 0;
2921 		ic->commit_seq = next_commit_seq(erase_seq);
2922 	}
2923 
2924 	ic->committed_section = continue_section;
2925 	ic->n_committed_sections = 0;
2926 
2927 	ic->uncommitted_section = continue_section;
2928 	ic->n_uncommitted_sections = 0;
2929 
2930 	ic->free_section = continue_section;
2931 	ic->free_section_entry = 0;
2932 	ic->free_sectors = ic->journal_entries;
2933 
2934 	ic->journal_tree_root = RB_ROOT;
2935 	for (i = 0; i < ic->journal_entries; i++)
2936 		init_journal_node(&ic->journal_tree[i]);
2937 }
2938 
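/*
 * Switch bitmap mode to synchronous operation (used from the reboot
 * notifier): flush the bitmap immediately and shorten the flush interval
 * to roughly 10 ms, so that a reboot or power-off loses as little as
 * possible.
 */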
2939 static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
2940 {
2941 	DEBUG_print("dm_integrity_enter_synchronous_mode\n");
2942 
2943 	if (ic->mode == 'B') {
2944 		ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
2945 		ic->synchronous_mode = 1;
2946 
2947 		cancel_delayed_work_sync(&ic->bitmap_flush_work);
2948 		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2949 		flush_workqueue(ic->commit_wq);
2950 	}
2951 }
2952 
2953 static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
2954 {
2955 	struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
2956 
2957 	DEBUG_print("dm_integrity_reboot\n");
2958 
2959 	dm_integrity_enter_synchronous_mode(ic);
2960 
2961 	return NOTIFY_DONE;
2962 }
2963 
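/*
 * Postsuspend: stop the autocommit timer and all background work, flush
 * everything and leave the journal (mode 'J') or the bitmap (mode 'B') in
 * a clean, consistent state, so that the next resume has nothing to
 * replay.
 */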
2964 static void dm_integrity_postsuspend(struct dm_target *ti)
2965 {
2966 	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2967 	int r;
2968 
2969 	WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
2970 
2971 	del_timer_sync(&ic->autocommit_timer);
2972 
2973 	if (ic->recalc_wq)
2974 		drain_workqueue(ic->recalc_wq);
2975 
2976 	if (ic->mode == 'B')
2977 		cancel_delayed_work_sync(&ic->bitmap_flush_work);
2978 
2979 	queue_work(ic->commit_wq, &ic->commit_work);
2980 	drain_workqueue(ic->commit_wq);
2981 
2982 	if (ic->mode == 'J') {
2983 		queue_work(ic->writer_wq, &ic->writer_work);
2984 		drain_workqueue(ic->writer_wq);
2985 		dm_integrity_flush_buffers(ic, true);
2986 		if (ic->wrote_to_journal) {
2987 			init_journal(ic, ic->free_section,
2988 				     ic->journal_sections - ic->free_section, ic->commit_seq);
2989 			if (ic->free_section) {
2990 				init_journal(ic, 0, ic->free_section,
2991 					     next_commit_seq(ic->commit_seq));
2992 			}
2993 		}
2994 	}
2995 
2996 	if (ic->mode == 'B') {
2997 		dm_integrity_flush_buffers(ic, true);
2998 #if 1
2999 		/* set to 0 to test bitmap replay code */
3000 		init_journal(ic, 0, ic->journal_sections, 0);
3001 		ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3002 		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3003 		if (unlikely(r))
3004 			dm_integrity_io_error(ic, "writing superblock", r);
3005 #endif
3006 	}
3007 
3008 	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
3009 
3010 	ic->journal_uptodate = true;
3011 }
3012 
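/*
 * Resume: handle a grown device, replay the journal or recover from a
 * dirty bitmap (turning on SB_FLAG_RECALCULATING when blocks may be
 * stale), restart an interrupted recalculation and register the reboot
 * notifier that flips bitmap mode to synchronous operation.
 */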
3013 static void dm_integrity_resume(struct dm_target *ti)
3014 {
3015 	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3016 	__u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
3017 	int r;
3018 
3019 	DEBUG_print("resume\n");
3020 
3021 	ic->wrote_to_journal = false;
3022 
3023 	if (ic->provided_data_sectors != old_provided_data_sectors) {
3024 		if (ic->provided_data_sectors > old_provided_data_sectors &&
3025 		    ic->mode == 'B' &&
3026 		    ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
3027 			rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
3028 					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3029 			block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
3030 					ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
3031 			rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3032 					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3033 		}
3034 
3035 		ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3036 		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3037 		if (unlikely(r))
3038 			dm_integrity_io_error(ic, "writing superblock", r);
3039 	}
3040 
3041 	if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
3042 		DEBUG_print("resume dirty_bitmap\n");
3043 		rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
3044 				   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3045 		if (ic->mode == 'B') {
3046 			if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
3047 				block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
3048 				block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
3049 				if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
3050 						     BITMAP_OP_TEST_ALL_CLEAR)) {
3051 					ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3052 					ic->sb->recalc_sector = cpu_to_le64(0);
3053 				}
3054 			} else {
3055 				DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
3056 					    ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
3057 				ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3058 				block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3059 				block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3060 				block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3061 				rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3062 						   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3063 				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3064 				ic->sb->recalc_sector = cpu_to_le64(0);
3065 			}
3066 		} else {
3067 			if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3068 			      block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
3069 				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3070 				ic->sb->recalc_sector = cpu_to_le64(0);
3071 			}
3072 			init_journal(ic, 0, ic->journal_sections, 0);
3073 			replay_journal(ic);
3074 			ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3075 		}
3076 		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3077 		if (unlikely(r))
3078 			dm_integrity_io_error(ic, "writing superblock", r);
3079 	} else {
3080 		replay_journal(ic);
3081 		if (ic->mode == 'B') {
3082 			ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3083 			ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3084 			r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3085 			if (unlikely(r))
3086 				dm_integrity_io_error(ic, "writing superblock", r);
3087 
3088 			block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3089 			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3090 			block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3091 			if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
3092 			    le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
3093 				block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
3094 						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3095 				block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3096 						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3097 				block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3098 						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3099 			}
3100 			rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3101 					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3102 		}
3103 	}
3104 
3105 	DEBUG_print("testing recalc: %x\n", ic->sb->flags);
3106 	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
3107 		__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
3108 		DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
3109 		if (recalc_pos < ic->provided_data_sectors) {
3110 			queue_work(ic->recalc_wq, &ic->recalc_work);
3111 		} else if (recalc_pos > ic->provided_data_sectors) {
3112 			ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
3113 			recalc_write_super(ic);
3114 		}
3115 	}
3116 
3117 	ic->reboot_notifier.notifier_call = dm_integrity_reboot;
3118 	ic->reboot_notifier.next = NULL;
3119 	ic->reboot_notifier.priority = INT_MAX - 1;	/* be notified after md and before hardware drivers */
3120 	WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
3121 
3122 #if 0
3123 	/* set to 1 to stress test synchronous mode */
3124 	dm_integrity_enter_synchronous_mode(ic);
3125 #endif
3126 }
3127 
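/*
 * Report status. STATUSTYPE_INFO emits "<mismatches> <provided sectors>
 * <recalculation position or '-'>". STATUSTYPE_TABLE reconstructs the
 * table line; with hypothetical values, a journal-mode line could look
 * like:
 *
 *   /dev/sdb 0 4 J 5 journal_sectors:328 interleave_sectors:32768
 *   buffer_sectors:128 journal_watermark:50 commit_time:10000
 *
 * where optional arguments are emitted only when they apply.
 */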
3128 static void dm_integrity_status(struct dm_target *ti, status_type_t type,
3129 				unsigned status_flags, char *result, unsigned maxlen)
3130 {
3131 	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3132 	unsigned arg_count;
3133 	size_t sz = 0;
3134 
3135 	switch (type) {
3136 	case STATUSTYPE_INFO:
3137 		DMEMIT("%llu %llu",
3138 			(unsigned long long)atomic64_read(&ic->number_of_mismatches),
3139 			ic->provided_data_sectors);
3140 		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3141 			DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
3142 		else
3143 			DMEMIT(" -");
3144 		break;
3145 
3146 	case STATUSTYPE_TABLE: {
3147 		__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
3148 		watermark_percentage += ic->journal_entries / 2;
3149 		do_div(watermark_percentage, ic->journal_entries);
3150 		arg_count = 3;
3151 		arg_count += !!ic->meta_dev;
3152 		arg_count += ic->sectors_per_block != 1;
3153 		arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
3154 		arg_count += ic->discard;
3155 		arg_count += ic->mode == 'J';	/* journal_watermark */
3156 		arg_count += ic->mode == 'J';	/* commit_time */
3157 		arg_count += ic->mode == 'B';	/* sectors_per_bit */
3158 		arg_count += ic->mode == 'B';	/* bitmap_flush_interval */
3159 		arg_count += !!ic->internal_hash_alg.alg_string;
3160 		arg_count += !!ic->journal_crypt_alg.alg_string;
3161 		arg_count += !!ic->journal_mac_alg.alg_string;
3162 		arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
3163 		arg_count += ic->legacy_recalculate;
3164 		DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
3165 		       ic->tag_size, ic->mode, arg_count);
3166 		if (ic->meta_dev)
3167 			DMEMIT(" meta_device:%s", ic->meta_dev->name);
3168 		if (ic->sectors_per_block != 1)
3169 			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
3170 		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3171 			DMEMIT(" recalculate");
3172 		if (ic->discard)
3173 			DMEMIT(" allow_discards");
3174 		DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
3175 		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
3176 		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
3177 		if (ic->mode == 'J') {
3178 			DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
3179 			DMEMIT(" commit_time:%u", ic->autocommit_msec);
3180 		}
3181 		if (ic->mode == 'B') {
3182 			DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
3183 			DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
3184 		}
3185 		if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
3186 			DMEMIT(" fix_padding");
3187 		if (ic->legacy_recalculate)
3188 			DMEMIT(" legacy_recalculate");
3189 
3190 #define EMIT_ALG(a, n)							\
3191 		do {							\
3192 			if (ic->a.alg_string) {				\
3193 				DMEMIT(" %s:%s", n, ic->a.alg_string);	\
3194 				if (ic->a.key_string)			\
3195 					DMEMIT(":%s", ic->a.key_string);\
3196 			}						\
3197 		} while (0)
3198 		EMIT_ALG(internal_hash_alg, "internal_hash");
3199 		EMIT_ALG(journal_crypt_alg, "journal_crypt");
3200 		EMIT_ALG(journal_mac_alg, "journal_mac");
3201 		break;
3202 	}
3203 	}
3204 }
3205 
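/*
 * Illustrative STATUSTYPE_TABLE output (hypothetical device and sizes): for a
 * journaled target with a crc32c internal hash and otherwise default
 * parameters, the code above might emit, as a single line,
 *
 *	/dev/sdb 0 4 J 6 journal_sectors:8192 interleave_sectors:32768
 *	buffer_sectors:128 journal_watermark:50 commit_time:10000
 *	internal_hash:crc32c
 *
 * where the leading fields are device, start, tag_size, mode and the count
 * of optional arguments that follow.
 */
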
3206 static int dm_integrity_iterate_devices(struct dm_target *ti,
3207 					iterate_devices_callout_fn fn, void *data)
3208 {
3209 	struct dm_integrity_c *ic = ti->private;
3210 
3211 	if (!ic->meta_dev)
3212 		return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
3213 	else
3214 		return fn(ti, ic->dev, 0, ti->len, data);
3215 }
3216 
3217 static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
3218 {
3219 	struct dm_integrity_c *ic = ti->private;
3220 
3221 	if (ic->sectors_per_block > 1) {
3222 		limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3223 		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3224 		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
3225 	}
3226 }
3227 
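/*
 * For example (assuming a "block_size:4096" table argument), sectors_per_block
 * is 8, so the logical block size, the physical block size and the minimum
 * I/O size advertised to the block layer are all 8 << SECTOR_SHIFT == 4096
 * bytes.
 */
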
3228 static void calculate_journal_section_size(struct dm_integrity_c *ic)
3229 {
3230 	unsigned sector_space = JOURNAL_SECTOR_DATA;
3231 
3232 	ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
3233 	ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
3234 					 JOURNAL_ENTRY_ROUNDUP);
3235 
3236 	if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3237 		sector_space -= JOURNAL_MAC_PER_SECTOR;
3238 	ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3239 	ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
3240 	ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
3241 	ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
3242 }
3243 
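/*
 * Worked example (illustrative): with 512-byte blocks (sectors_per_block == 1)
 * and a 4-byte tag, a journal entry occupies
 * roundup(offsetof(struct journal_entry, last_bytes[1]) + 4, 8) == 24 bytes.
 * Assuming JOURNAL_SECTOR_DATA is a 512-byte sector minus the trailing 8-byte
 * commit id (504 bytes), that gives 21 entries per journal sector, or 20 when
 * JOURNAL_MAC_PER_SECTOR bytes are reserved for a journal MAC (496 / 24).
 */
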
3244 static int calculate_device_limits(struct dm_integrity_c *ic)
3245 {
3246 	__u64 initial_sectors;
3247 
3248 	calculate_journal_section_size(ic);
3249 	initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
3250 	if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
3251 		return -EINVAL;
3252 	ic->initial_sectors = initial_sectors;
3253 
3254 	if (!ic->meta_dev) {
3255 		sector_t last_sector, last_area, last_offset;
3256 
3257 		/* we have to maintain excessive padding for compatibility with existing volumes */
3258 		__u64 metadata_run_padding =
3259 			ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
3260 			(__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
3261 			(__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
3262 
3263 		ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3264 					    metadata_run_padding) >> SECTOR_SHIFT;
3265 		if (!(ic->metadata_run & (ic->metadata_run - 1)))
3266 			ic->log2_metadata_run = __ffs(ic->metadata_run);
3267 		else
3268 			ic->log2_metadata_run = -1;
3269 
3270 		get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3271 		last_sector = get_data_sector(ic, last_area, last_offset);
3272 		if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3273 			return -EINVAL;
3274 	} else {
3275 		__u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
3276 		meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3277 				>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3278 		meta_size <<= ic->log2_buffer_sectors;
3279 		if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3280 		    ic->initial_sectors + meta_size > ic->meta_device_sectors)
3281 			return -EINVAL;
3282 		ic->metadata_run = 1;
3283 		ic->log2_metadata_run = 0;
3284 	}
3285 
3286 	return 0;
3287 }
3288 
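/*
 * Example (illustrative, assuming a 4096-byte padding granularity): with
 * fix_padding, a 4-byte tag, 512-byte blocks and the default 2^15-sector
 * interleave, one data area needs 4 << 15 == 131072 tag bytes, which rounds
 * to 256 sectors of metadata per area; that is a power of two, so the cheaper
 * log2_metadata_run path is used when mapping sectors.
 */
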
3289 static void get_provided_data_sectors(struct dm_integrity_c *ic)
3290 {
3291 	if (!ic->meta_dev) {
3292 		int test_bit;
3293 		ic->provided_data_sectors = 0;
3294 		for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3295 			__u64 prev_data_sectors = ic->provided_data_sectors;
3296 
3297 			ic->provided_data_sectors |= (sector_t)1 << test_bit;
3298 			if (calculate_device_limits(ic))
3299 				ic->provided_data_sectors = prev_data_sectors;
3300 		}
3301 	} else {
3302 		ic->provided_data_sectors = ic->data_device_sectors;
3303 		ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
3304 	}
3305 }
3306 
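/*
 * The interleaved case above is a greedy bit-by-bit search: each candidate
 * bit is set, calculate_device_limits() checks whether data, metadata and
 * journal still fit on the device, and the bit is cleared again if they do
 * not. The loop stops at bit 3, so the result is the largest fitting size in
 * whole 8-sector units.
 */
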
3307 static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
3308 {
3309 	unsigned journal_sections;
3310 	int test_bit;
3311 
3312 	memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
3313 	memcpy(ic->sb->magic, SB_MAGIC, 8);
3314 	ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
3315 	ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
3316 	if (ic->journal_mac_alg.alg_string)
3317 		ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3318 
3319 	calculate_journal_section_size(ic);
3320 	journal_sections = journal_sectors / ic->journal_section_sectors;
3321 	if (!journal_sections)
3322 		journal_sections = 1;
3323 
3324 	if (!ic->meta_dev) {
3325 		if (ic->fix_padding)
3326 			ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
3327 		ic->sb->journal_sections = cpu_to_le32(journal_sections);
3328 		if (!interleave_sectors)
3329 			interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3330 		ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3331 		ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3332 		ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3333 
3334 		get_provided_data_sectors(ic);
3335 		if (!ic->provided_data_sectors)
3336 			return -EINVAL;
3337 	} else {
3338 		ic->sb->log2_interleave_sectors = 0;
3339 
3340 		get_provided_data_sectors(ic);
3341 		if (!ic->provided_data_sectors)
3342 			return -EINVAL;
3343 
3344 try_smaller_buffer:
3345 		ic->sb->journal_sections = cpu_to_le32(0);
3346 		for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
3347 			__u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3348 			__u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
3349 			if (test_journal_sections > journal_sections)
3350 				continue;
3351 			ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3352 			if (calculate_device_limits(ic))
3353 				ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
3354 
3355 		}
3356 		if (!le32_to_cpu(ic->sb->journal_sections)) {
3357 			if (ic->log2_buffer_sectors > 3) {
3358 				ic->log2_buffer_sectors--;
3359 				goto try_smaller_buffer;
3360 			}
3361 			return -EINVAL;
3362 		}
3363 	}
3364 
3365 	ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3366 
3367 	sb_set_version(ic);
3368 
3369 	return 0;
3370 }
3371 
3372 static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
3373 {
3374 	struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
3375 	struct blk_integrity bi;
3376 
3377 	memset(&bi, 0, sizeof(bi));
3378 	bi.profile = &dm_integrity_profile;
3379 	bi.tuple_size = ic->tag_size;
3380 	bi.tag_size = bi.tuple_size;
3381 	bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
3382 
3383 	blk_integrity_register(disk, &bi);
3384 	blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
3385 }
3386 
3387 static void dm_integrity_free_page_list(struct page_list *pl)
3388 {
3389 	unsigned i;
3390 
3391 	if (!pl)
3392 		return;
3393 	for (i = 0; pl[i].page; i++)
3394 		__free_page(pl[i].page);
3395 	kvfree(pl);
3396 }
3397 
3398 static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
3399 {
3400 	struct page_list *pl;
3401 	unsigned i;
3402 
3403 	pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
3404 	if (!pl)
3405 		return NULL;
3406 
3407 	for (i = 0; i < n_pages; i++) {
3408 		pl[i].page = alloc_page(GFP_KERNEL);
3409 		if (!pl[i].page) {
3410 			dm_integrity_free_page_list(pl);
3411 			return NULL;
3412 		}
3413 		if (i)
3414 			pl[i - 1].next = &pl[i];
3415 	}
3416 	pl[i].page = NULL;
3417 	pl[i].next = NULL;
3418 
3419 	return pl;
3420 }
3421 
3422 static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
3423 {
3424 	unsigned i;
3425 	for (i = 0; i < ic->journal_sections; i++)
3426 		kvfree(sl[i]);
3427 	kvfree(sl);
3428 }
3429 
3430 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
3431 								   struct page_list *pl)
3432 {
3433 	struct scatterlist **sl;
3434 	unsigned i;
3435 
3436 	sl = kvmalloc_array(ic->journal_sections,
3437 			    sizeof(struct scatterlist *),
3438 			    GFP_KERNEL | __GFP_ZERO);
3439 	if (!sl)
3440 		return NULL;
3441 
3442 	for (i = 0; i < ic->journal_sections; i++) {
3443 		struct scatterlist *s;
3444 		unsigned start_index, start_offset;
3445 		unsigned end_index, end_offset;
3446 		unsigned n_pages;
3447 		unsigned idx;
3448 
3449 		page_list_location(ic, i, 0, &start_index, &start_offset);
3450 		page_list_location(ic, i, ic->journal_section_sectors - 1,
3451 				   &end_index, &end_offset);
3452 
3453 		n_pages = (end_index - start_index + 1);
3454 
3455 		s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
3456 				   GFP_KERNEL);
3457 		if (!s) {
3458 			dm_integrity_free_journal_scatterlist(ic, sl);
3459 			return NULL;
3460 		}
3461 
3462 		sg_init_table(s, n_pages);
3463 		for (idx = start_index; idx <= end_index; idx++) {
3464 			char *va = lowmem_page_address(pl[idx].page);
3465 			unsigned start = 0, end = PAGE_SIZE;
3466 			if (idx == start_index)
3467 				start = start_offset;
3468 			if (idx == end_index)
3469 				end = end_offset + (1 << SECTOR_SHIFT);
3470 			sg_set_buf(&s[idx - start_index], va + start, end - start);
3471 		}
3472 
3473 		sl[i] = s;
3474 	}
3475 
3476 	return sl;
3477 }
3478 
3479 static void free_alg(struct alg_spec *a)
3480 {
3481 	kfree_sensitive(a->alg_string);
3482 	kfree_sensitive(a->key);
3483 	memset(a, 0, sizeof *a);
3484 }
3485 
3486 static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
3487 {
3488 	char *k;
3489 
3490 	free_alg(a);
3491 
3492 	a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
3493 	if (!a->alg_string)
3494 		goto nomem;
3495 
3496 	k = strchr(a->alg_string, ':');
3497 	if (k) {
3498 		*k = 0;
3499 		a->key_string = k + 1;
3500 		if (strlen(a->key_string) & 1)
3501 			goto inval;
3502 
3503 		a->key_size = strlen(a->key_string) / 2;
3504 		a->key = kmalloc(a->key_size, GFP_KERNEL);
3505 		if (!a->key)
3506 			goto nomem;
3507 		if (hex2bin(a->key, a->key_string, a->key_size))
3508 			goto inval;
3509 	}
3510 
3511 	return 0;
3512 inval:
3513 	*error = error_inval;
3514 	return -EINVAL;
3515 nomem:
3516 	*error = "Out of memory for an argument";
3517 	return -ENOMEM;
3518 }
3519 
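#if 0
	/*
	 * Usage sketch (hypothetical values, for illustration only):
	 * "journal_mac:hmac(sha256):0123ab" yields alg_string "hmac(sha256)",
	 * key_string "0123ab" and the 3-byte key { 0x01, 0x23, 0xab }. An odd
	 * number of hex digits or a non-hex character fails with -EINVAL.
	 */
	struct alg_spec spec = { };
	char *err;
	int r = get_alg_and_key("journal_mac:hmac(sha256):0123ab", &spec, &err,
				"Invalid journal_mac argument");
#endif
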
3520 static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
3521 		   char *error_alg, char *error_key)
3522 {
3523 	int r;
3524 
3525 	if (a->alg_string) {
3526 		*hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3527 		if (IS_ERR(*hash)) {
3528 			*error = error_alg;
3529 			r = PTR_ERR(*hash);
3530 			*hash = NULL;
3531 			return r;
3532 		}
3533 
3534 		if (a->key) {
3535 			r = crypto_shash_setkey(*hash, a->key, a->key_size);
3536 			if (r) {
3537 				*error = error_key;
3538 				return r;
3539 			}
3540 		} else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
3541 			*error = error_key;
3542 			return -ENOKEY;
3543 		}
3544 	}
3545 
3546 	return 0;
3547 }
3548 
3549 static int create_journal(struct dm_integrity_c *ic, char **error)
3550 {
3551 	int r = 0;
3552 	unsigned i;
3553 	__u64 journal_pages, journal_desc_size, journal_tree_size;
3554 	unsigned char *crypt_data = NULL, *crypt_iv = NULL;
3555 	struct skcipher_request *req = NULL;
3556 
3557 	ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
3558 	ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
3559 	ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
3560 	ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
3561 
3562 	journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
3563 				PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
3564 	journal_desc_size = journal_pages * sizeof(struct page_list);
3565 	if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
3566 		*error = "Journal doesn't fit into memory";
3567 		r = -ENOMEM;
3568 		goto bad;
3569 	}
3570 	ic->journal_pages = journal_pages;
3571 
3572 	ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
3573 	if (!ic->journal) {
3574 		*error = "Could not allocate memory for journal";
3575 		r = -ENOMEM;
3576 		goto bad;
3577 	}
3578 	if (ic->journal_crypt_alg.alg_string) {
3579 		unsigned ivsize, blocksize;
3580 		struct journal_completion comp;
3581 
3582 		comp.ic = ic;
3583 		ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3584 		if (IS_ERR(ic->journal_crypt)) {
3585 			*error = "Invalid journal cipher";
3586 			r = PTR_ERR(ic->journal_crypt);
3587 			ic->journal_crypt = NULL;
3588 			goto bad;
3589 		}
3590 		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
3591 		blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
3592 
3593 		if (ic->journal_crypt_alg.key) {
3594 			r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
3595 						   ic->journal_crypt_alg.key_size);
3596 			if (r) {
3597 				*error = "Error setting encryption key";
3598 				goto bad;
3599 			}
3600 		}
3601 		DEBUG_print("cipher %s, block size %u iv size %u\n",
3602 			    ic->journal_crypt_alg.alg_string, blocksize, ivsize);
3603 
3604 		ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
3605 		if (!ic->journal_io) {
3606 			*error = "Could not allocate memory for journal io";
3607 			r = -ENOMEM;
3608 			goto bad;
3609 		}
3610 
3611 		if (blocksize == 1) {
3612 			struct scatterlist *sg;
3613 
3614 			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3615 			if (!req) {
3616 				*error = "Could not allocate crypt request";
3617 				r = -ENOMEM;
3618 				goto bad;
3619 			}
3620 
3621 			crypt_iv = kzalloc(ivsize, GFP_KERNEL);
3622 			if (!crypt_iv) {
3623 				*error = "Could not allocate iv";
3624 				r = -ENOMEM;
3625 				goto bad;
3626 			}
3627 
3628 			ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
3629 			if (!ic->journal_xor) {
3630 				*error = "Could not allocate memory for journal xor";
3631 				r = -ENOMEM;
3632 				goto bad;
3633 			}
3634 
3635 			sg = kvmalloc_array(ic->journal_pages + 1,
3636 					    sizeof(struct scatterlist),
3637 					    GFP_KERNEL);
3638 			if (!sg) {
3639 				*error = "Unable to allocate sg list";
3640 				r = -ENOMEM;
3641 				goto bad;
3642 			}
3643 			sg_init_table(sg, ic->journal_pages + 1);
3644 			for (i = 0; i < ic->journal_pages; i++) {
3645 				char *va = lowmem_page_address(ic->journal_xor[i].page);
3646 				clear_page(va);
3647 				sg_set_buf(&sg[i], va, PAGE_SIZE);
3648 			}
3649 			sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
3650 
3651 			skcipher_request_set_crypt(req, sg, sg,
3652 						   PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
3653 			init_completion(&comp.comp);
3654 			comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3655 			if (do_crypt(true, req, &comp))
3656 				wait_for_completion(&comp.comp);
3657 			kvfree(sg);
3658 			r = dm_integrity_failed(ic);
3659 			if (r) {
3660 				*error = "Unable to encrypt journal";
3661 				goto bad;
3662 			}
3663 			DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
3664 
3665 			crypto_free_skcipher(ic->journal_crypt);
3666 			ic->journal_crypt = NULL;
3667 		} else {
3668 			unsigned crypt_len = roundup(ivsize, blocksize);
3669 
3670 			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3671 			if (!req) {
3672 				*error = "Could not allocate crypt request";
3673 				r = -ENOMEM;
3674 				goto bad;
3675 			}
3676 
3677 			crypt_iv = kmalloc(ivsize, GFP_KERNEL);
3678 			if (!crypt_iv) {
3679 				*error = "Could not allocate iv";
3680 				r = -ENOMEM;
3681 				goto bad;
3682 			}
3683 
3684 			crypt_data = kmalloc(crypt_len, GFP_KERNEL);
3685 			if (!crypt_data) {
3686 				*error = "Unable to allocate crypt data";
3687 				r = -ENOMEM;
3688 				goto bad;
3689 			}
3690 
3691 			ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
3692 			if (!ic->journal_scatterlist) {
3693 				*error = "Unable to allocate sg list";
3694 				r = -ENOMEM;
3695 				goto bad;
3696 			}
3697 			ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
3698 			if (!ic->journal_io_scatterlist) {
3699 				*error = "Unable to allocate sg list";
3700 				r = -ENOMEM;
3701 				goto bad;
3702 			}
3703 			ic->sk_requests = kvmalloc_array(ic->journal_sections,
3704 							 sizeof(struct skcipher_request *),
3705 							 GFP_KERNEL | __GFP_ZERO);
3706 			if (!ic->sk_requests) {
3707 				*error = "Unable to allocate sk requests";
3708 				r = -ENOMEM;
3709 				goto bad;
3710 			}
3711 			for (i = 0; i < ic->journal_sections; i++) {
3712 				struct scatterlist sg;
3713 				struct skcipher_request *section_req;
3714 				__u32 section_le = cpu_to_le32(i);
3715 
3716 				memset(crypt_iv, 0x00, ivsize);
3717 				memset(crypt_data, 0x00, crypt_len);
3718 				memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
3719 
3720 				sg_init_one(&sg, crypt_data, crypt_len);
3721 				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
3722 				init_completion(&comp.comp);
3723 				comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3724 				if (do_crypt(true, req, &comp))
3725 					wait_for_completion(&comp.comp);
3726 
3727 				r = dm_integrity_failed(ic);
3728 				if (r) {
3729 					*error = "Unable to generate iv";
3730 					goto bad;
3731 				}
3732 
3733 				section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3734 				if (!section_req) {
3735 					*error = "Unable to allocate crypt request";
3736 					r = -ENOMEM;
3737 					goto bad;
3738 				}
3739 				section_req->iv = kmalloc_array(ivsize, 2,
3740 								GFP_KERNEL);
3741 				if (!section_req->iv) {
3742 					skcipher_request_free(section_req);
3743 					*error = "Unable to allocate iv";
3744 					r = -ENOMEM;
3745 					goto bad;
3746 				}
3747 				memcpy(section_req->iv + ivsize, crypt_data, ivsize);
3748 				section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
3749 				ic->sk_requests[i] = section_req;
3750 				DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
3751 			}
3752 		}
3753 	}
3754 
3755 	for (i = 0; i < N_COMMIT_IDS; i++) {
3756 		unsigned j;
3757 retest_commit_id:
3758 		for (j = 0; j < i; j++) {
3759 			if (ic->commit_ids[j] == ic->commit_ids[i]) {
3760 				ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
3761 				goto retest_commit_id;
3762 			}
3763 		}
3764 		DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
3765 	}
3766 
3767 	journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
3768 	if (journal_tree_size > ULONG_MAX) {
3769 		*error = "Journal doesn't fit into memory";
3770 		r = -ENOMEM;
3771 		goto bad;
3772 	}
3773 	ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
3774 	if (!ic->journal_tree) {
3775 		*error = "Could not allocate memory for journal tree";
3776 		r = -ENOMEM;
3777 	}
3778 bad:
3779 	kfree(crypt_data);
3780 	kfree(crypt_iv);
3781 	skcipher_request_free(req);
3782 
3783 	return r;
3784 }
3785 
3786 /*
3787  * Construct an integrity mapping
3788  *
3789  * Arguments:
3790  *	device
3791  *	offset from the start of the device
3792  *	tag size
3793  *	D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
3794  *	number of optional arguments
3795  *	optional arguments:
3796  *		journal_sectors
3797  *		interleave_sectors
3798  *		buffer_sectors
3799  *		journal_watermark
3800  *		commit_time
3801  *		meta_device
3802  *		block_size
3803  *		sectors_per_bit
3804  *		bitmap_flush_interval
3805  *		internal_hash
3806  *		journal_crypt
3807  *		journal_mac
3808  *		recalculate
 *		allow_discards
 *		fix_padding
 *		legacy_recalculate
3809  */
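/*
 * Illustrative example (device name and length are hypothetical): a journaled
 * target with a crc32c internal hash could be created with
 *
 *	dmsetup create ix --table \
 *		"0 1953792 integrity /dev/sdb 0 4 J 1 internal_hash:crc32c"
 *
 * where 4 is the tag size in bytes (the crc32c digest size) and 1 is the
 * number of optional arguments.
 */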
3810 static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3811 {
3812 	struct dm_integrity_c *ic;
3813 	char dummy;
3814 	int r;
3815 	unsigned extra_args;
3816 	struct dm_arg_set as;
3817 	static const struct dm_arg _args[] = {
3818 		{0, 16, "Invalid number of feature args"},
3819 	};
3820 	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
3821 	bool should_write_sb;
3822 	__u64 threshold;
3823 	unsigned long long start;
3824 	__s8 log2_sectors_per_bitmap_bit = -1;
3825 	__s8 log2_blocks_per_bitmap_bit;
3826 	__u64 bits_in_journal;
3827 	__u64 n_bitmap_bits;
3828 
3829 #define DIRECT_ARGUMENTS	4
3830 
3831 	if (argc <= DIRECT_ARGUMENTS) {
3832 		ti->error = "Invalid argument count";
3833 		return -EINVAL;
3834 	}
3835 
3836 	ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
3837 	if (!ic) {
3838 		ti->error = "Cannot allocate integrity context";
3839 		return -ENOMEM;
3840 	}
3841 	ti->private = ic;
3842 	ti->per_io_data_size = sizeof(struct dm_integrity_io);
3843 	ic->ti = ti;
3844 
3845 	ic->in_progress = RB_ROOT;
3846 	INIT_LIST_HEAD(&ic->wait_list);
3847 	init_waitqueue_head(&ic->endio_wait);
3848 	bio_list_init(&ic->flush_bio_list);
3849 	init_waitqueue_head(&ic->copy_to_journal_wait);
3850 	init_completion(&ic->crypto_backoff);
3851 	atomic64_set(&ic->number_of_mismatches, 0);
3852 	ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
3853 
3854 	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
3855 	if (r) {
3856 		ti->error = "Device lookup failed";
3857 		goto bad;
3858 	}
3859 
3860 	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
3861 		ti->error = "Invalid starting offset";
3862 		r = -EINVAL;
3863 		goto bad;
3864 	}
3865 	ic->start = start;
3866 
3867 	if (strcmp(argv[2], "-")) {
3868 		if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
3869 			ti->error = "Invalid tag size";
3870 			r = -EINVAL;
3871 			goto bad;
3872 		}
3873 	}
3874 
3875 	if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
3876 	    !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
3877 		ic->mode = argv[3][0];
3878 	} else {
3879 		ti->error = "Invalid mode (expecting J, B, D, R)";
3880 		r = -EINVAL;
3881 		goto bad;
3882 	}
3883 
3884 	journal_sectors = 0;
3885 	interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3886 	buffer_sectors = DEFAULT_BUFFER_SECTORS;
3887 	journal_watermark = DEFAULT_JOURNAL_WATERMARK;
3888 	sync_msec = DEFAULT_SYNC_MSEC;
3889 	ic->sectors_per_block = 1;
3890 
3891 	as.argc = argc - DIRECT_ARGUMENTS;
3892 	as.argv = argv + DIRECT_ARGUMENTS;
3893 	r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
3894 	if (r)
3895 		goto bad;
3896 
3897 	while (extra_args--) {
3898 		const char *opt_string;
3899 		unsigned val;
3900 		unsigned long long llval;
3901 		opt_string = dm_shift_arg(&as);
3902 		if (!opt_string) {
3903 			r = -EINVAL;
3904 			ti->error = "Not enough feature arguments";
3905 			goto bad;
3906 		}
3907 		if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
3908 			journal_sectors = val ? val : 1;
3909 		else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
3910 			interleave_sectors = val;
3911 		else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
3912 			buffer_sectors = val;
3913 		else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
3914 			journal_watermark = val;
3915 		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
3916 			sync_msec = val;
3917 		else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
3918 			if (ic->meta_dev) {
3919 				dm_put_device(ti, ic->meta_dev);
3920 				ic->meta_dev = NULL;
3921 			}
3922 			r = dm_get_device(ti, strchr(opt_string, ':') + 1,
3923 					  dm_table_get_mode(ti->table), &ic->meta_dev);
3924 			if (r) {
3925 				ti->error = "Device lookup failed";
3926 				goto bad;
3927 			}
3928 		} else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
3929 			if (val < 1 << SECTOR_SHIFT ||
3930 			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
3931 			    (val & (val - 1))) {
3932 				r = -EINVAL;
3933 				ti->error = "Invalid block_size argument";
3934 				goto bad;
3935 			}
3936 			ic->sectors_per_block = val >> SECTOR_SHIFT;
3937 		} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
3938 			log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
3939 		} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
3940 			if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
3941 				r = -EINVAL;
3942 				ti->error = "Invalid bitmap_flush_interval argument";
3943 				goto bad;
3944 			}
3945 			ic->bitmap_flush_interval = msecs_to_jiffies(val);
3946 		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
3947 			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
3948 					    "Invalid internal_hash argument");
3949 			if (r)
3950 				goto bad;
3951 		} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
3952 			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
3953 					    "Invalid journal_crypt argument");
3954 			if (r)
3955 				goto bad;
3956 		} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
3957 			r = get_alg_and_key(opt_string, &ic->journal_mac_alg,  &ti->error,
3958 					    "Invalid journal_mac argument");
3959 			if (r)
3960 				goto bad;
3961 		} else if (!strcmp(opt_string, "recalculate")) {
3962 			ic->recalculate_flag = true;
3963 		} else if (!strcmp(opt_string, "allow_discards")) {
3964 			ic->discard = true;
3965 		} else if (!strcmp(opt_string, "fix_padding")) {
3966 			ic->fix_padding = true;
3967 		} else if (!strcmp(opt_string, "legacy_recalculate")) {
3968 			ic->legacy_recalculate = true;
3969 		} else {
3970 			r = -EINVAL;
3971 			ti->error = "Invalid argument";
3972 			goto bad;
3973 		}
3974 	}
3975 
3976 	ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
3977 	if (!ic->meta_dev)
3978 		ic->meta_device_sectors = ic->data_device_sectors;
3979 	else
3980 		ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
3981 
3982 	if (!journal_sectors) {
3983 		journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
3984 				      ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
3985 	}
3986 
3987 	if (!buffer_sectors)
3988 		buffer_sectors = 1;
3989 	ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
3990 
3991 	r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
3992 		    "Invalid internal hash", "Error setting internal hash key");
3993 	if (r)
3994 		goto bad;
3995 
3996 	r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
3997 		    "Invalid journal mac", "Error setting journal mac key");
3998 	if (r)
3999 		goto bad;
4000 
4001 	if (!ic->tag_size) {
4002 		if (!ic->internal_hash) {
4003 			ti->error = "Unknown tag size";
4004 			r = -EINVAL;
4005 			goto bad;
4006 		}
4007 		ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
4008 	}
4009 	if (ic->tag_size > MAX_TAG_SIZE) {
4010 		ti->error = "Too big tag size";
4011 		r = -EINVAL;
4012 		goto bad;
4013 	}
4014 	if (!(ic->tag_size & (ic->tag_size - 1)))
4015 		ic->log2_tag_size = __ffs(ic->tag_size);
4016 	else
4017 		ic->log2_tag_size = -1;
4018 
4019 	if (ic->mode == 'B' && !ic->internal_hash) {
4020 		r = -EINVAL;
4021 		ti->error = "Bitmap mode can be only used with internal hash";
4022 		goto bad;
4023 	}
4024 
4025 	if (ic->discard && !ic->internal_hash) {
4026 		r = -EINVAL;
4027 		ti->error = "Discard can be only used with internal hash";
4028 		goto bad;
4029 	}
4030 
4031 	ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
4032 	ic->autocommit_msec = sync_msec;
4033 	timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
4034 
4035 	ic->io = dm_io_client_create();
4036 	if (IS_ERR(ic->io)) {
4037 		r = PTR_ERR(ic->io);
4038 		ic->io = NULL;
4039 		ti->error = "Cannot allocate dm io";
4040 		goto bad;
4041 	}
4042 
4043 	r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
4044 	if (r) {
4045 		ti->error = "Cannot allocate mempool";
4046 		goto bad;
4047 	}
4048 
4049 	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
4050 					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
4051 	if (!ic->metadata_wq) {
4052 		ti->error = "Cannot allocate workqueue";
4053 		r = -ENOMEM;
4054 		goto bad;
4055 	}
4056 
4057 	/*
4058 	 * If this workqueue were percpu, it would cause bio reordering
4059 	 * and reduced performance.
4060 	 */
4061 	ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
4062 	if (!ic->wait_wq) {
4063 		ti->error = "Cannot allocate workqueue";
4064 		r = -ENOMEM;
4065 		goto bad;
4066 	}
4067 
4068 	ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
4069 					  METADATA_WORKQUEUE_MAX_ACTIVE);
4070 	if (!ic->offload_wq) {
4071 		ti->error = "Cannot allocate workqueue";
4072 		r = -ENOMEM;
4073 		goto bad;
4074 	}
4075 
4076 	ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
4077 	if (!ic->commit_wq) {
4078 		ti->error = "Cannot allocate workqueue";
4079 		r = -ENOMEM;
4080 		goto bad;
4081 	}
4082 	INIT_WORK(&ic->commit_work, integrity_commit);
4083 
4084 	if (ic->mode == 'J' || ic->mode == 'B') {
4085 		ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
4086 		if (!ic->writer_wq) {
4087 			ti->error = "Cannot allocate workqueue";
4088 			r = -ENOMEM;
4089 			goto bad;
4090 		}
4091 		INIT_WORK(&ic->writer_work, integrity_writer);
4092 	}
4093 
4094 	ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
4095 	if (!ic->sb) {
4096 		r = -ENOMEM;
4097 		ti->error = "Cannot allocate superblock area";
4098 		goto bad;
4099 	}
4100 
4101 	r = sync_rw_sb(ic, REQ_OP_READ, 0);
4102 	if (r) {
4103 		ti->error = "Error reading superblock";
4104 		goto bad;
4105 	}
4106 	should_write_sb = false;
4107 	if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
4108 		if (ic->mode != 'R') {
4109 			if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
4110 				r = -EINVAL;
4111 				ti->error = "The device is not initialized";
4112 				goto bad;
4113 			}
4114 		}
4115 
4116 		r = initialize_superblock(ic, journal_sectors, interleave_sectors);
4117 		if (r) {
4118 			ti->error = "Could not initialize superblock";
4119 			goto bad;
4120 		}
4121 		if (ic->mode != 'R')
4122 			should_write_sb = true;
4123 	}
4124 
4125 	if (!ic->sb->version || ic->sb->version > SB_VERSION_4) {
4126 		r = -EINVAL;
4127 		ti->error = "Unknown version";
4128 		goto bad;
4129 	}
4130 	if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
4131 		r = -EINVAL;
4132 		ti->error = "Tag size doesn't match the information in superblock";
4133 		goto bad;
4134 	}
4135 	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
4136 		r = -EINVAL;
4137 		ti->error = "Block size doesn't match the information in superblock";
4138 		goto bad;
4139 	}
4140 	if (!le32_to_cpu(ic->sb->journal_sections)) {
4141 		r = -EINVAL;
4142 		ti->error = "Corrupted superblock, journal_sections is 0";
4143 		goto bad;
4144 	}
4145 	/* make sure that ti->max_io_len doesn't overflow */
4146 	if (!ic->meta_dev) {
4147 		if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
4148 		    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
4149 			r = -EINVAL;
4150 			ti->error = "Invalid interleave_sectors in the superblock";
4151 			goto bad;
4152 		}
4153 	} else {
4154 		if (ic->sb->log2_interleave_sectors) {
4155 			r = -EINVAL;
4156 			ti->error = "Invalid interleave_sectors in the superblock";
4157 			goto bad;
4158 		}
4159 	}
4160 	if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
4161 		r = -EINVAL;
4162 		ti->error = "Journal mac mismatch";
4163 		goto bad;
4164 	}
4165 
4166 	get_provided_data_sectors(ic);
4167 	if (!ic->provided_data_sectors) {
4168 		r = -EINVAL;
4169 		ti->error = "The device is too small";
4170 		goto bad;
4171 	}
4172 
4173 try_smaller_buffer:
4174 	r = calculate_device_limits(ic);
4175 	if (r) {
4176 		if (ic->meta_dev) {
4177 			if (ic->log2_buffer_sectors > 3) {
4178 				ic->log2_buffer_sectors--;
4179 				goto try_smaller_buffer;
4180 			}
4181 		}
4182 		ti->error = "The device is too small";
4183 		goto bad;
4184 	}
4185 
4186 	if (log2_sectors_per_bitmap_bit < 0)
4187 		log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
4188 	if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
4189 		log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
4190 
4191 	bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
4192 	if (bits_in_journal > UINT_MAX)
4193 		bits_in_journal = UINT_MAX;
4194 	while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
4195 		log2_sectors_per_bitmap_bit++;
4196 
4197 	log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
4198 	ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4199 	if (should_write_sb) {
4200 		ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4201 	}
4202 	n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
4203 				+ (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
4204 	ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
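	/*
	 * Example (illustrative): with 2^30 provided 512-byte sectors, one
	 * sector per block and the default 2^15 sectors per bitmap bit, the
	 * bitmap needs 2^15 bits, which fit in a single 4096-byte (32768-bit)
	 * bitmap block.
	 */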
4205 
4206 	if (!ic->meta_dev)
4207 		ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
4208 
4209 	if (ti->len > ic->provided_data_sectors) {
4210 		r = -EINVAL;
4211 		ti->error = "Not enough provided sectors for requested mapping size";
4212 		goto bad;
4213 	}
4214 
4215 
4216 	threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
4217 	threshold += 50;
4218 	do_div(threshold, 100);
4219 	ic->free_sectors_threshold = threshold;
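	/*
	 * Illustrative arithmetic (hypothetical numbers): journal_watermark:50
	 * with 1000 journal entries gives threshold == (1000 * 50 + 50) / 100
	 * == 500, so journal writeback is triggered once fewer than 500
	 * journal sectors remain free.
	 */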
4220 
4221 	DEBUG_print("initialized:\n");
4222 	DEBUG_print("	integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
4223 	DEBUG_print("	journal_entry_size %u\n", ic->journal_entry_size);
4224 	DEBUG_print("	journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
4225 	DEBUG_print("	journal_section_entries %u\n", ic->journal_section_entries);
4226 	DEBUG_print("	journal_section_sectors %u\n", ic->journal_section_sectors);
4227 	DEBUG_print("	journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
4228 	DEBUG_print("	journal_entries %u\n", ic->journal_entries);
4229 	DEBUG_print("	log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
4230 	DEBUG_print("	data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
4231 	DEBUG_print("	initial_sectors 0x%x\n", ic->initial_sectors);
4232 	DEBUG_print("	metadata_run 0x%x\n", ic->metadata_run);
4233 	DEBUG_print("	log2_metadata_run %d\n", ic->log2_metadata_run);
4234 	DEBUG_print("	provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
4235 	DEBUG_print("	log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
4236 	DEBUG_print("	bits_in_journal %llu\n", bits_in_journal);
4237 
4238 	if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
4239 		ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
4240 		ic->sb->recalc_sector = cpu_to_le64(0);
4241 	}
4242 
4243 	if (ic->internal_hash) {
4244 		size_t recalc_tags_size;
4245 		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
4246 		if (!ic->recalc_wq) {
4247 			ti->error = "Cannot allocate workqueue";
4248 			r = -ENOMEM;
4249 			goto bad;
4250 		}
4251 		INIT_WORK(&ic->recalc_work, integrity_recalc);
4252 		ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
4253 		if (!ic->recalc_buffer) {
4254 			ti->error = "Cannot allocate buffer for recalculating";
4255 			r = -ENOMEM;
4256 			goto bad;
4257 		}
4258 		recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size;
4259 		if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
4260 			recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
4261 		ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL);
4262 		if (!ic->recalc_tags) {
4263 			ti->error = "Cannot allocate tags for recalculating";
4264 			r = -ENOMEM;
4265 			goto bad;
4266 		}
4267 	} else {
4268 		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
4269 			ti->error = "Recalculate can only be specified with internal_hash";
4270 			r = -EINVAL;
4271 			goto bad;
4272 		}
4273 	}
4274 
4275 	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
4276 	    le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
4277 	    dm_integrity_disable_recalculate(ic)) {
4278 		ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
4279 		r = -EOPNOTSUPP;
4280 		goto bad;
4281 	}
4282 
4283 	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
4284 			1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
4285 	if (IS_ERR(ic->bufio)) {
4286 		r = PTR_ERR(ic->bufio);
4287 		ti->error = "Cannot initialize dm-bufio";
4288 		ic->bufio = NULL;
4289 		goto bad;
4290 	}
4291 	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
4292 
4293 	if (ic->mode != 'R') {
4294 		r = create_journal(ic, &ti->error);
4295 		if (r)
4296 			goto bad;
4297 
4298 	}
4299 
4300 	if (ic->mode == 'B') {
4301 		unsigned i;
4302 		unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
4303 
4304 		ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4305 		if (!ic->recalc_bitmap) {
4306 			r = -ENOMEM;
4307 			goto bad;
4308 		}
4309 		ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4310 		if (!ic->may_write_bitmap) {
4311 			r = -ENOMEM;
4312 			goto bad;
4313 		}
4314 		ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
4315 		if (!ic->bbs) {
4316 			r = -ENOMEM;
4317 			goto bad;
4318 		}
4319 		INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
4320 		for (i = 0; i < ic->n_bitmap_blocks; i++) {
4321 			struct bitmap_block_status *bbs = &ic->bbs[i];
4322 			unsigned sector, pl_index, pl_offset;
4323 
4324 			INIT_WORK(&bbs->work, bitmap_block_work);
4325 			bbs->ic = ic;
4326 			bbs->idx = i;
4327 			bio_list_init(&bbs->bio_queue);
4328 			spin_lock_init(&bbs->bio_queue_lock);
4329 
4330 			sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
4331 			pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
4332 			pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
4333 
4334 			bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
4335 		}
4336 	}
4337 
4338 	if (should_write_sb) {
4339 		init_journal(ic, 0, ic->journal_sections, 0);
4340 		r = dm_integrity_failed(ic);
4341 		if (unlikely(r)) {
4342 			ti->error = "Error initializing journal";
4343 			goto bad;
4344 		}
4345 		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
4346 		if (r) {
4347 			ti->error = "Error initializing superblock";
4348 			goto bad;
4349 		}
4350 		ic->just_formatted = true;
4351 	}
4352 
4353 	if (!ic->meta_dev) {
4354 		r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
4355 		if (r)
4356 			goto bad;
4357 	}
4358 	if (ic->mode == 'B') {
4359 		unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
4360 		if (!max_io_len)
4361 			max_io_len = 1U << 31;
4362 		DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
4363 		if (!ti->max_io_len || ti->max_io_len > max_io_len) {
4364 			r = dm_set_target_max_io_len(ti, max_io_len);
4365 			if (r)
4366 				goto bad;
4367 		}
4368 	}
4369 
4370 	if (!ic->internal_hash)
4371 		dm_integrity_set(ti, ic);
4372 
4373 	ti->num_flush_bios = 1;
4374 	ti->flush_supported = true;
4375 	if (ic->discard)
4376 		ti->num_discard_bios = 1;
4377 
4378 	return 0;
4379 
4380 bad:
4381 	dm_integrity_dtr(ti);
4382 	return r;
4383 }
4384 
4385 static void dm_integrity_dtr(struct dm_target *ti)
4386 {
4387 	struct dm_integrity_c *ic = ti->private;
4388 
4389 	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
4390 	BUG_ON(!list_empty(&ic->wait_list));
4391 
4392 	if (ic->mode == 'B')
4393 		cancel_delayed_work_sync(&ic->bitmap_flush_work);
4394 	if (ic->metadata_wq)
4395 		destroy_workqueue(ic->metadata_wq);
4396 	if (ic->wait_wq)
4397 		destroy_workqueue(ic->wait_wq);
4398 	if (ic->offload_wq)
4399 		destroy_workqueue(ic->offload_wq);
4400 	if (ic->commit_wq)
4401 		destroy_workqueue(ic->commit_wq);
4402 	if (ic->writer_wq)
4403 		destroy_workqueue(ic->writer_wq);
4404 	if (ic->recalc_wq)
4405 		destroy_workqueue(ic->recalc_wq);
4406 	vfree(ic->recalc_buffer);
4407 	kvfree(ic->recalc_tags);
4408 	kvfree(ic->bbs);
4409 	if (ic->bufio)
4410 		dm_bufio_client_destroy(ic->bufio);
4411 	mempool_exit(&ic->journal_io_mempool);
4412 	if (ic->io)
4413 		dm_io_client_destroy(ic->io);
4414 	if (ic->dev)
4415 		dm_put_device(ti, ic->dev);
4416 	if (ic->meta_dev)
4417 		dm_put_device(ti, ic->meta_dev);
4418 	dm_integrity_free_page_list(ic->journal);
4419 	dm_integrity_free_page_list(ic->journal_io);
4420 	dm_integrity_free_page_list(ic->journal_xor);
4421 	dm_integrity_free_page_list(ic->recalc_bitmap);
4422 	dm_integrity_free_page_list(ic->may_write_bitmap);
4423 	if (ic->journal_scatterlist)
4424 		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
4425 	if (ic->journal_io_scatterlist)
4426 		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
4427 	if (ic->sk_requests) {
4428 		unsigned i;
4429 
4430 		for (i = 0; i < ic->journal_sections; i++) {
4431 			struct skcipher_request *req = ic->sk_requests[i];
4432 			if (req) {
4433 				kfree_sensitive(req->iv);
4434 				skcipher_request_free(req);
4435 			}
4436 		}
4437 		kvfree(ic->sk_requests);
4438 	}
4439 	kvfree(ic->journal_tree);
4440 	if (ic->sb)
4441 		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
4442 
4443 	if (ic->internal_hash)
4444 		crypto_free_shash(ic->internal_hash);
4445 	free_alg(&ic->internal_hash_alg);
4446 
4447 	if (ic->journal_crypt)
4448 		crypto_free_skcipher(ic->journal_crypt);
4449 	free_alg(&ic->journal_crypt_alg);
4450 
4451 	if (ic->journal_mac)
4452 		crypto_free_shash(ic->journal_mac);
4453 	free_alg(&ic->journal_mac_alg);
4454 
4455 	kfree(ic);
4456 }
4457 
4458 static struct target_type integrity_target = {
4459 	.name			= "integrity",
4460 	.version		= {1, 6, 0},
4461 	.module			= THIS_MODULE,
4462 	.features		= DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
4463 	.ctr			= dm_integrity_ctr,
4464 	.dtr			= dm_integrity_dtr,
4465 	.map			= dm_integrity_map,
4466 	.postsuspend		= dm_integrity_postsuspend,
4467 	.resume			= dm_integrity_resume,
4468 	.status			= dm_integrity_status,
4469 	.iterate_devices	= dm_integrity_iterate_devices,
4470 	.io_hints		= dm_integrity_io_hints,
4471 };
4472 
4473 static int __init dm_integrity_init(void)
4474 {
4475 	int r;
4476 
4477 	journal_io_cache = kmem_cache_create("integrity_journal_io",
4478 					     sizeof(struct journal_io), 0, 0, NULL);
4479 	if (!journal_io_cache) {
4480 		DMERR("can't allocate journal io cache");
4481 		return -ENOMEM;
4482 	}
4483 
4484 	r = dm_register_target(&integrity_target);
4485 	if (r < 0) {
4486 		DMERR("register failed %d", r);
4487 		kmem_cache_destroy(journal_io_cache);
4488 		return r;
4489 	}
4490 
4491 	return 0;
4492 }
4493 
4494 static void __exit dm_integrity_exit(void)
4495 {
4496 	dm_unregister_target(&integrity_target);
4497 	kmem_cache_destroy(journal_io_cache);
4498 }
4499 
4500 module_init(dm_integrity_init);
4501 module_exit(dm_integrity_exit);
4502 
4503 MODULE_AUTHOR("Milan Broz");
4504 MODULE_AUTHOR("Mikulas Patocka");
4505 MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
4506 MODULE_LICENSE("GPL");
4507