Lines matching refs: ic (the struct dm_integrity_c *ic context pointer in drivers/md/dm-integrity.c)
99 #define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])
290 struct dm_integrity_c *ic;
308 struct dm_integrity_c *ic;
320 struct dm_integrity_c *ic;
378 static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
381 atomic64_inc(&ic->number_of_mismatches);
382 if (!cmpxchg(&ic->failed, 0, err))
386 static int dm_integrity_failed(struct dm_integrity_c *ic)
388 return READ_ONCE(ic->failed);
391 static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
393 if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) &&
394 !ic->legacy_recalculate)
399 static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
406 return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
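
A minimal sketch (userspace names, endianness conversion omitted) of the commit-ID scheme visible above: each journal sector's ID is a per-sequence base value XOR-ed with its (section, sector) coordinates, so a torn or stale section shows up as sectors whose IDs decode to different commit sequences.

    #include <stdint.h>

    /* Sketch, not kernel code: 'base' stands in for ic->commit_ids. */
    static uint64_t commit_id(const uint64_t base[4], unsigned seq,
                              unsigned section, unsigned sector)
    {
            return base[seq & 3] ^ (((uint64_t)section << 32) ^ sector);
    }
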
409 static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
412 if (!ic->meta_dev) {
413 __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
422 #define sector_to_block(ic, n) \
424 BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1)); \
425 (n) >>= (ic)->sb->log2_sectors_per_block; \
428 static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
434 ms = area << ic->sb->log2_interleave_sectors;
435 if (likely(ic->log2_metadata_run >= 0))
436 ms += area << ic->log2_metadata_run;
438 ms += area * ic->metadata_run;
439 ms >>= ic->log2_buffer_sectors;
441 sector_to_block(ic, offset);
443 if (likely(ic->log2_tag_size >= 0)) {
444 ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
445 mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
447 ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
448 mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
454 static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
458 if (ic->meta_dev)
461 result = area << ic->sb->log2_interleave_sectors;
462 if (likely(ic->log2_metadata_run >= 0))
463 result += (area + 1) << ic->log2_metadata_run;
465 result += (area + 1) * ic->metadata_run;
467 result += (sector_t)ic->initial_sectors + offset;
468 result += ic->start;
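
A compilable userspace sketch of the interleaved layout implied by get_area_and_offset() and get_data_sector() above (the struct and field names are the sketch's own): data is split into areas of 2^log2_interleave_sectors sectors, each area's tags live in a metadata run ahead of it, and everything sits after the superblock and journal.

    #include <stdint.h>

    struct layout {
            uint8_t  log2_interleave_sectors;
            int      log2_metadata_run;   /* -1 when metadata_run is not a power of 2 */
            uint64_t metadata_run;        /* metadata sectors per area */
            uint64_t initial_sectors;     /* superblock + journal */
            uint64_t start;               /* device offset */
    };

    static uint64_t data_sector(const struct layout *l, uint64_t logical)
    {
            uint64_t area   = logical >> l->log2_interleave_sectors;
            uint64_t offset = logical & ((1ULL << l->log2_interleave_sectors) - 1);
            uint64_t result = area << l->log2_interleave_sectors;

            /* skip the metadata runs of this area and all earlier ones */
            if (l->log2_metadata_run >= 0)
                    result += (area + 1) << l->log2_metadata_run;
            else
                    result += (area + 1) * l->metadata_run;
            return result + l->initial_sectors + offset + l->start;
    }
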
473 static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
475 if (unlikely(*sec_ptr >= ic->journal_sections))
476 *sec_ptr -= ic->journal_sections;
479 static void sb_set_version(struct dm_integrity_c *ic)
481 if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
482 ic->sb->version = SB_VERSION_4;
483 else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
484 ic->sb->version = SB_VERSION_3;
485 else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
486 ic->sb->version = SB_VERSION_2;
488 ic->sb->version = SB_VERSION_1;
491 static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
499 io_req.mem.ptr.addr = ic->sb;
501 io_req.client = ic->io;
502 io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
503 io_loc.sector = ic->start;
507 sb_set_version(ic);
517 static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
523 if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
527 ic->sb->log2_sectors_per_block,
528 ic->log2_blocks_per_bitmap_bit,
536 bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
538 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
622 static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
624 unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
634 static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
636 unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
639 BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
640 return &ic->bbs[bitmap_block];
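
A short sketch of the sector-to-bitmap mapping used by the two functions above: one bit covers 2^(log2_sectors_per_block + log2_blocks_per_bitmap_bit) sectors, and bits are grouped into fixed-size on-disk bitmap blocks (the 4096-byte block size is this sketch's assumption).

    #include <stdint.h>

    #define SKETCH_BITMAP_BLOCK_SIZE 4096   /* assumed, for illustration */

    static unsigned bitmap_block_of(uint64_t sector,
                                    unsigned log2_sectors_per_block,
                                    unsigned log2_blocks_per_bitmap_bit)
    {
            uint64_t bit = sector >> (log2_sectors_per_block + log2_blocks_per_bitmap_bit);
            return (unsigned)(bit / (SKETCH_BITMAP_BLOCK_SIZE * 8));  /* 8 bits per byte */
    }
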
643 static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
647 unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
649 if (unlikely(section >= ic->journal_sections) ||
652 function, section, offset, ic->journal_sections, limit);
658 static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
663 access_journal_check(ic, section, offset, false, "page_list_location");
665 sector = section * ic->journal_section_sectors + offset;
671 static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
677 page_list_location(ic, section, offset, &pl_index, &pl_offset);
687 static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
689 return access_page_list(ic, ic->journal, section, offset, NULL);
692 static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
697 access_journal_check(ic, section, n, true, "access_journal_entry");
702 js = access_journal(ic, section, rel_sector);
703 return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
706 static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
708 n <<= ic->sb->log2_sectors_per_block;
712 access_journal_check(ic, section, n, false, "access_journal_data");
714 return access_journal(ic, section, n);
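
From the accessors above together with calculate_journal_section_size() further down, a journal section appears to begin with JOURNAL_BLOCK_SECTORS sectors of packed journal_entry descriptors followed by one data block per entry; a sketch, under that layout assumption, of where entry n's data sits inside its section:

    static unsigned data_offset_in_section(unsigned entry,
                                           unsigned log2_sectors_per_block,
                                           unsigned journal_block_sectors)
    {
            return journal_block_sectors + (entry << log2_sectors_per_block);
    }
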
717 static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
719 SHASH_DESC_ON_STACK(desc, ic->journal_mac);
723 desc->tfm = ic->journal_mac;
727 dm_integrity_io_error(ic, "crypto_shash_init", r);
731 for (j = 0; j < ic->journal_section_entries; j++) {
732 struct journal_entry *je = access_journal_entry(ic, section, j);
735 dm_integrity_io_error(ic, "crypto_shash_update", r);
740 size = crypto_shash_digestsize(ic->journal_mac);
745 dm_integrity_io_error(ic, "crypto_shash_final", r);
753 dm_integrity_io_error(ic, "digest_size", -EINVAL);
758 dm_integrity_io_error(ic, "crypto_shash_final", r);
769 static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
774 if (!ic->journal_mac)
777 section_mac(ic, section, result);
780 struct journal_sector *js = access_journal(ic, section, j);
786 dm_integrity_io_error(ic, "journal mac", -EILSEQ);
799 static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
803 size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
808 source_pl = ic->journal;
809 target_pl = ic->journal_io;
811 source_pl = ic->journal_io;
812 target_pl = ic->journal;
815 page_list_location(ic, section, 0, &pl_index, &pl_offset);
831 rw_section_mac(ic, section, true);
836 page_list_location(ic, section, 0, &section_index, &dummy);
842 src_pages[1] = ic->journal_xor[pl_index].page;
861 complete(&comp->ic->crypto_backoff);
864 dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
883 wait_for_completion(&comp->ic->crypto_backoff);
884 reinit_completion(&comp->ic->crypto_backoff);
887 dm_integrity_io_error(comp->ic, "encrypt", r);
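
A self-contained sketch of the asynchronous-crypto handshake visible above (submit() and wait_for_backoff() are hypothetical stand-ins): 0 means the request finished synchronously, -EINPROGRESS means the completion callback will finish it, and -EBUSY means it was backlogged, so the submitter waits on the crypto_backoff completion that the callback signals.

    #include <errno.h>
    #include <stdio.h>

    static int submit(void) { return -EINPROGRESS; }    /* pretend-async engine */
    static void wait_for_backoff(void) { /* block until the callback signals */ }

    /* Returns 1 if the request is still in flight, 0 if it is done. */
    static int do_crypt_sketch(void)
    {
            int r = submit();
            if (r == 0)
                    return 0;                   /* finished synchronously */
            if (r == -EINPROGRESS)
                    return 1;                   /* callback will complete it */
            if (r == -EBUSY) {
                    wait_for_backoff();         /* backlogged: wait, then treat as queued */
                    return 1;
            }
            fprintf(stderr, "encrypt error %d\n", r);
            return 0;
    }
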
891 static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
900 source_sg = ic->journal_scatterlist;
901 target_sg = ic->journal_io_scatterlist;
903 source_sg = ic->journal_io_scatterlist;
904 target_sg = ic->journal_scatterlist;
913 rw_section_mac(ic, section, true);
915 req = ic->sk_requests[section];
916 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
935 static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
938 if (ic->journal_xor)
939 return xor_journal(ic, encrypt, section, n_sections, comp);
941 return crypt_journal(ic, encrypt, section, n_sections, comp);
948 dm_integrity_io_error(comp->ic, "writing journal", -EIO);
952 static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
960 if (unlikely(dm_integrity_failed(ic))) {
972 if (ic->journal_io)
973 io_req.mem.ptr.pl = &ic->journal_io[pl_index];
975 io_req.mem.ptr.pl = &ic->journal[pl_index];
983 io_req.client = ic->io;
984 io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
985 io_loc.sector = ic->start + SB_SECTORS + sector;
990 dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
998 static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
1003 sector = section * ic->journal_section_sectors;
1004 n_sectors = n_sections * ic->journal_section_sectors;
1006 rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
1009 static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
1016 io_comp.ic = ic;
1019 if (commit_start + commit_sections <= ic->journal_sections) {
1021 if (ic->journal_io) {
1022 crypt_comp_1.ic = ic;
1025 encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
1029 rw_section_mac(ic, commit_start + i, true);
1031 rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
1036 to_end = ic->journal_sections - commit_start;
1037 if (ic->journal_io) {
1038 crypt_comp_1.ic = ic;
1041 encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
1043 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
1046 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
1049 crypt_comp_2.ic = ic;
1052 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
1054 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
1059 rw_section_mac(ic, commit_start + i, true);
1060 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
1062 rw_section_mac(ic, i, true);
1064 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
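
The branch structure above handles a commit range that wraps around the circular journal by issuing two writes; a minimal sketch of that split:

    /* Sketch: write [start, start+count) sections of a circular log. */
    static void write_wrapped(unsigned start, unsigned count, unsigned sections,
                              void (*write)(unsigned first, unsigned n))
    {
            if (start + count <= sections) {
                    write(start, count);               /* contiguous case */
            } else {
                    unsigned to_end = sections - start;
                    write(start, to_end);              /* tail of the journal */
                    write(0, count - to_end);          /* wrapped head */
            }
    }
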
1070 static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
1078 BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));
1080 if (unlikely(dm_integrity_failed(ic))) {
1085 sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;
1093 io_req.mem.ptr.pl = &ic->journal[pl_index];
1097 io_req.client = ic->io;
1098 io_loc.bdev = ic->dev->bdev;
1115 static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
1117 struct rb_node **n = &ic->in_progress.rb_node;
1120 BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));
1124 list_for_each_entry(range, &ic->wait_list, wait_entry) {
1146 rb_insert_color(&new_range->node, &ic->in_progress);
1151 static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
1153 rb_erase(&range->node, &ic->in_progress);
1154 while (unlikely(!list_empty(&ic->wait_list))) {
1156 list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
1160 if (!add_new_range(ic, last_range, false)) {
1162 list_add(&last_range->wait_entry, &ic->wait_list);
1170 static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
1174 spin_lock_irqsave(&ic->endio_wait.lock, flags);
1175 remove_range_unlocked(ic, range);
1176 spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1179 static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
1182 list_add_tail(&new_range->wait_entry, &ic->wait_list);
1186 spin_unlock_irq(&ic->endio_wait.lock);
1188 spin_lock_irq(&ic->endio_wait.lock);
1192 static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
1194 if (unlikely(!add_new_range(ic, new_range, true)))
1195 wait_and_add_new_range(ic, new_range);
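
The functions above serialize overlapping I/O: a request inserts its sector range into the in_progress tree and, if it collides, parks itself on wait_list until the blocking range is removed. A sketch of the half-open interval overlap test such a tree needs:

    #include <stdint.h>
    #include <stdbool.h>

    struct range { uint64_t start, n_sectors; };

    /* [a, a+n) and [b, b+m) overlap iff each starts before the other ends. */
    static bool ranges_overlap(const struct range *a, const struct range *b)
    {
            return a->start < b->start + b->n_sectors &&
                   b->start < a->start + a->n_sectors;
    }
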
1204 static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
1212 link = &ic->journal_tree_root.rb_node;
1226 rb_insert_color(&node->node, &ic->journal_tree_root);
1229 static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
1232 rb_erase(&node->node, &ic->journal_tree_root);
1238 static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
1240 struct rb_node *n = ic->journal_tree_root.rb_node;
1246 found = j - ic->journal_tree;
1259 static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
1264 if (unlikely(pos >= ic->journal_entries))
1266 node = &ic->journal_tree[pos];
1280 static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
1297 next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
1298 if (next_section >= ic->committed_section &&
1299 next_section < ic->committed_section + ic->n_committed_sections)
1301 if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
1311 static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
1317 unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
1325 r = dm_integrity_failed(ic);
1329 data = dm_bufio_read(ic->bufio, *metadata_block, &b);
1333 to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
1343 if (likely(is_power_of_2(ic->tag_size))) {
1345 if (unlikely(!ic->discard) ||
1360 if (unlikely(hash_offset == ic->tag_size)) {
1366 may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
1375 if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
1380 if (unlikely(!is_power_of_2(ic->tag_size))) {
1381 hash_offset = (hash_offset + to_copy) % ic->tag_size;
1395 struct dm_integrity_c *ic;
1403 dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
1407 static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
1413 if (!ic->meta_dev)
1422 fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
1423 fr.io_reg.bdev = ic->dev->bdev,
1426 fr.ic = ic;
1432 r = dm_bufio_write_dirty_buffers(ic->bufio);
1434 dm_integrity_io_error(ic, "writing tags", r);
1440 static void sleep_on_endio_wait(struct dm_integrity_c *ic)
1443 __add_wait_queue(&ic->endio_wait, &wait);
1445 spin_unlock_irq(&ic->endio_wait.lock);
1447 spin_lock_irq(&ic->endio_wait.lock);
1448 __remove_wait_queue(&ic->endio_wait, &wait);
1453 struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
1455 if (likely(!dm_integrity_failed(ic)))
1456 queue_work(ic->commit_wq, &ic->commit_work);
1459 static void schedule_autocommit(struct dm_integrity_c *ic)
1461 if (!timer_pending(&ic->autocommit_timer))
1462 mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
1465 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1470 spin_lock_irqsave(&ic->endio_wait.lock, flags);
1472 bio_list_add(&ic->flush_bio_list, bio);
1473 spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1475 queue_work(ic->commit_wq, &ic->commit_work);
1478 static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
1480 int r = dm_integrity_failed(ic);
1483 if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
1485 spin_lock_irqsave(&ic->endio_wait.lock, flags);
1486 bio_list_add(&ic->synchronous_bios, bio);
1487 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
1488 spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1494 static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1498 if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
1499 submit_flush_bio(ic, dio);
1501 do_endio(ic, bio);
1507 struct dm_integrity_c *ic = dio->ic;
1510 remove_range(ic, &dio->range);
1513 schedule_autocommit(ic);
1523 queue_work(ic->offload_wq, &dio->work);
1526 do_endio_flush(ic, dio);
1544 static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
1548 SHASH_DESC_ON_STACK(req, ic->internal_hash);
1552 req->tfm = ic->internal_hash;
1556 dm_integrity_io_error(ic, "crypto_shash_init", r);
1562 dm_integrity_io_error(ic, "crypto_shash_update", r);
1566 r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
1568 dm_integrity_io_error(ic, "crypto_shash_update", r);
1574 dm_integrity_io_error(ic, "crypto_shash_final", r);
1578 digest_size = crypto_shash_digestsize(ic->internal_hash);
1579 if (unlikely(digest_size < ic->tag_size))
1580 memset(result + digest_size, 0, ic->tag_size - digest_size);
1586 get_random_bytes(result, ic->tag_size);
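
A sketch of the sector-salted checksum computed above (the hash_* helpers are hypothetical stand-ins for the crypto_shash calls): the tag is a hash over the little-endian sector number followed by the block data, zero-padded when the digest is shorter than tag_size, so identical data at different sectors yields different tags and misdirected writes are caught.

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical streaming hash, standing in for crypto_shash_*. */
    void hash_init(void);
    void hash_update(const void *data, size_t len);
    size_t hash_final(uint8_t *digest);             /* returns digest length */

    static void sector_checksum(uint64_t sector_le, const uint8_t *data,
                                size_t block_len, uint8_t *result, size_t tag_size)
    {
            size_t digest_len;

            hash_init();
            hash_update(&sector_le, sizeof(sector_le)); /* salt: sector number */
            hash_update(data, block_len);               /* then the block data */
            digest_len = hash_final(result);
            if (digest_len < tag_size)                  /* zero-pad short digests */
                    memset(result + digest_len, 0, tag_size - digest_len);
    }
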
1592 struct dm_integrity_c *ic = dio->ic;
1596 if (ic->internal_hash) {
1599 unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1602 unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
1607 if (unlikely(ic->mode == 'R'))
1611 checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
1628 unsigned max_blocks = max_size / ic->tag_size;
1632 unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1634 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1635 this_step_blocks * ic->tag_size, TAG_WRITE);
1642 /*if (bi_size < this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block)) {
1647 bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1648 bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
1669 integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
1670 checksums_ptr += ic->tag_size;
1671 sectors_to_process -= ic->sectors_per_block;
1672 pos += ic->sectors_per_block << SECTOR_SHIFT;
1673 sector += ic->sectors_per_block;
1677 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1683 (sector - ((r + ic->tag_size - 1) / ic->tag_size)));
1685 atomic64_inc(&ic->number_of_mismatches);
1711 sector_to_block(ic, data_to_process);
1712 data_to_process *= ic->tag_size;
1721 r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
1741 struct dm_integrity_c *ic = ti->private;
1747 dio->ic = ic;
1765 submit_flush_bio(ic, dio);
1778 if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1781 ic->provided_data_sectors);
1784 if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
1786 ic->sectors_per_block,
1791 if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
1795 if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
1797 bv.bv_offset, bv.bv_len, ic->sectors_per_block);
1804 if (!ic->internal_hash) {
1806 unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
1807 if (ic->log2_tag_size >= 0)
1808 wanted_tag_size <<= ic->log2_tag_size;
1810 wanted_tag_size *= ic->tag_size;
1824 if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
1827 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1828 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1829 bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
1838 struct dm_integrity_c *ic = dio->ic;
1858 struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
1869 __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1874 js = access_journal_data(ic, journal_section, journal_entry);
1882 } while (++s < ic->sectors_per_block);
1884 if (ic->internal_hash) {
1887 integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
1888 if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
1896 if (!ic->internal_hash) {
1898 unsigned tag_todo = ic->tag_size;
1899 char *tag_ptr = journal_entry_tag(ic, je);
1924 js = access_journal_data(ic, journal_section, journal_entry);
1925 memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
1930 } while (++s < ic->sectors_per_block);
1932 if (ic->internal_hash) {
1933 unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1934 if (unlikely(digest_size > ic->tag_size)) {
1936 integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
1937 memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
1939 integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
1944 logical_sector += ic->sectors_per_block;
1947 if (unlikely(journal_entry == ic->journal_section_entries)) {
1950 wraparound_section(ic, &journal_section);
1953 bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
1954 } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
1963 if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
1964 wake_up(&ic->copy_to_journal_wait);
1965 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
1966 queue_work(ic->commit_wq, &ic->commit_work);
1968 schedule_autocommit(ic);
1971 remove_range(ic, &dio->range);
1978 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1979 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1988 struct dm_integrity_c *ic = dio->ic;
1994 bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
1995 if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
2000 queue_work(ic->offload_wq, &dio->work);
2005 spin_lock_irq(&ic->endio_wait.lock);
2007 if (unlikely(dm_integrity_failed(ic))) {
2008 spin_unlock_irq(&ic->endio_wait.lock);
2009 do_endio(ic, bio);
2014 if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
2020 (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
2024 sleep_on_endio_wait(ic);
2027 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
2028 ic->free_sectors -= range_sectors;
2029 journal_section = ic->free_section;
2030 journal_entry = ic->free_section_entry;
2032 next_entry = ic->free_section_entry + range_sectors;
2033 ic->free_section_entry = next_entry % ic->journal_section_entries;
2034 ic->free_section += next_entry / ic->journal_section_entries;
2035 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
2036 wraparound_section(ic, &ic->free_section);
2038 pos = journal_section * ic->journal_section_entries + journal_entry;
2045 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
2047 if (unlikely(pos >= ic->journal_entries))
2050 je = access_journal_entry(ic, ws, we);
2054 if (unlikely(we == ic->journal_section_entries)) {
2057 wraparound_section(ic, &ws);
2059 } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
2061 spin_unlock_irq(&ic->endio_wait.lock);
2065 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2072 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
2073 if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
2080 if (unlikely(!add_new_range(ic, &dio->range, true))) {
2088 spin_unlock_irq(&ic->endio_wait.lock);
2090 queue_work(ic->wait_wq, &dio->work);
2094 dio->range.n_sectors = ic->sectors_per_block;
2095 wait_and_add_new_range(ic, &dio->range);
2103 unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2105 remove_range_unlocked(ic, &dio->range);
2110 if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
2112 unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2115 remove_range_unlocked(ic, &dio->range);
2116 spin_unlock_irq(&ic->endio_wait.lock);
2117 queue_work(ic->commit_wq, &ic->commit_work);
2118 flush_workqueue(ic->commit_wq);
2119 queue_work(ic->writer_wq, &ic->writer_work);
2120 flush_workqueue(ic->writer_wq);
2125 spin_unlock_irq(&ic->endio_wait.lock);
2128 journal_section = journal_read_pos / ic->journal_section_entries;
2129 journal_entry = journal_read_pos % ic->journal_section_entries;
2133 if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
2134 if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2138 bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
2142 queue_work(ic->writer_wq, &bbs->work);
2156 bio_set_dev(bio, ic->dev->bdev);
2162 if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
2164 dm_integrity_flush_buffers(ic, false);
2178 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
2179 dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
2181 if (ic->mode == 'B') {
2182 if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2195 queue_work(ic->metadata_wq, &dio->work);
2204 do_endio_flush(ic, dio);
2215 static void pad_uncommitted(struct dm_integrity_c *ic)
2217 if (ic->free_section_entry) {
2218 ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
2219 ic->free_section_entry = 0;
2220 ic->free_section++;
2221 wraparound_section(ic, &ic->free_section);
2222 ic->n_uncommitted_sections++;
2224 if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
2225 (ic->n_uncommitted_sections + ic->n_committed_sections) *
2226 ic->journal_section_entries + ic->free_sectors)) {
2230 ic->journal_sections, ic->journal_section_entries,
2231 ic->n_uncommitted_sections, ic->n_committed_sections,
2232 ic->journal_section_entries, ic->free_sectors);
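
The WARN_ON above asserts the journal-space invariant that every entry is accounted for exactly once; stated as a standalone check:

    #include <assert.h>

    static void check_journal_accounting(unsigned sections, unsigned entries_per_section,
                                         unsigned n_uncommitted, unsigned n_committed,
                                         unsigned free_entries)
    {
            /* total entries = entries in uncommitted/committed sections + free ones */
            assert(sections * entries_per_section ==
                   (n_uncommitted + n_committed) * entries_per_section + free_entries);
    }
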
2238 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
2243 del_timer(&ic->autocommit_timer);
2245 spin_lock_irq(&ic->endio_wait.lock);
2246 flushes = bio_list_get(&ic->flush_bio_list);
2247 if (unlikely(ic->mode != 'J')) {
2248 spin_unlock_irq(&ic->endio_wait.lock);
2249 dm_integrity_flush_buffers(ic, true);
2253 pad_uncommitted(ic);
2254 commit_start = ic->uncommitted_section;
2255 commit_sections = ic->n_uncommitted_sections;
2256 spin_unlock_irq(&ic->endio_wait.lock);
2261 ic->wrote_to_journal = true;
2265 for (j = 0; j < ic->journal_section_entries; j++) {
2267 je = access_journal_entry(ic, i, j);
2268 io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2270 for (j = 0; j < ic->journal_section_sectors; j++) {
2272 js = access_journal(ic, i, j);
2273 js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
2276 if (unlikely(i >= ic->journal_sections))
2277 ic->commit_seq = next_commit_seq(ic->commit_seq);
2278 wraparound_section(ic, &i);
2282 write_journal(ic, commit_start, commit_sections);
2284 spin_lock_irq(&ic->endio_wait.lock);
2285 ic->uncommitted_section += commit_sections;
2286 wraparound_section(ic, &ic->uncommitted_section);
2287 ic->n_uncommitted_sections -= commit_sections;
2288 ic->n_committed_sections += commit_sections;
2289 spin_unlock_irq(&ic->endio_wait.lock);
2291 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2292 queue_work(ic->writer_wq, &ic->writer_work);
2298 do_endio(ic, flushes);
2307 struct dm_integrity_c *ic = comp->ic;
2308 remove_range(ic, &io->range);
2309 mempool_free(io, &ic->journal_io_mempool);
2311 dm_integrity_io_error(ic, "copying from journal", -EIO);
2315 static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
2322 } while (++s < ic->sectors_per_block);
2325 static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
2334 comp.ic = ic;
2339 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2343 rw_section_mac(ic, i, false);
2344 for (j = 0; j < ic->journal_section_entries; j++) {
2345 struct journal_entry *je = access_journal_entry(ic, i, j);
2357 if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
2358 dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2359 sec &= ~(sector_t)(ic->sectors_per_block - 1);
2361 if (unlikely(sec >= ic->provided_data_sectors)) {
2366 get_area_and_offset(ic, sec, &area, &offset);
2367 restore_last_bytes(ic, access_journal_data(ic, i, j), je);
2368 for (k = j + 1; k < ic->journal_section_entries; k++) {
2369 struct journal_entry *je2 = access_journal_entry(ic, i, k);
2375 if (unlikely(sec2 >= ic->provided_data_sectors))
2377 get_area_and_offset(ic, sec2, &area2, &offset2);
2378 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
2380 restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
2384 io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
2387 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
2389 spin_lock_irq(&ic->endio_wait.lock);
2390 add_new_range_and_wait(ic, &io->range);
2393 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2396 while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2397 struct journal_entry *je2 = access_journal_entry(ic, i, j);
2400 remove_journal_node(ic, &section_node[j]);
2402 sec += ic->sectors_per_block;
2403 offset += ic->sectors_per_block;
2405 while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2406 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2409 remove_journal_node(ic, &section_node[k - 1]);
2413 remove_range_unlocked(ic, &io->range);
2414 spin_unlock_irq(&ic->endio_wait.lock);
2415 mempool_free(io, &ic->journal_io_mempool);
2419 remove_journal_node(ic, &section_node[l]);
2422 spin_unlock_irq(&ic->endio_wait.lock);
2424 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2427 struct journal_entry *je2 = access_journal_entry(ic, i, l);
2433 ic->internal_hash) {
2436 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2437 (char *)access_journal_data(ic, i, l), test_tag);
2438 if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
2439 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2443 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2444 ic->tag_size, TAG_WRITE);
2446 dm_integrity_io_error(ic, "reading tags", r);
2451 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2452 (k - j) << ic->sb->log2_sectors_per_block,
2453 get_data_sector(ic, area, offset),
2460 dm_bufio_write_dirty_buffers_async(ic->bufio);
2467 dm_integrity_flush_buffers(ic, true);
2472 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2477 spin_lock_irq(&ic->endio_wait.lock);
2478 write_start = ic->committed_section;
2479 write_sections = ic->n_committed_sections;
2480 spin_unlock_irq(&ic->endio_wait.lock);
2485 do_journal_write(ic, write_start, write_sections, false);
2487 spin_lock_irq(&ic->endio_wait.lock);
2489 ic->committed_section += write_sections;
2490 wraparound_section(ic, &ic->committed_section);
2491 ic->n_committed_sections -= write_sections;
2493 prev_free_sectors = ic->free_sectors;
2494 ic->free_sectors += write_sections * ic->journal_section_entries;
2496 wake_up_locked(&ic->endio_wait);
2498 spin_unlock_irq(&ic->endio_wait.lock);
2501 static void recalc_write_super(struct dm_integrity_c *ic)
2505 dm_integrity_flush_buffers(ic, false);
2506 if (dm_integrity_failed(ic))
2509 r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
2511 dm_integrity_io_error(ic, "writing superblock", r);
2516 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2529 DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2531 spin_lock_irq(&ic->endio_wait.lock);
2535 if (unlikely(dm_post_suspending(ic->ti)))
2538 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2539 if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2540 if (ic->mode == 'B') {
2541 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2543 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2548 get_area_and_offset(ic, range.logical_sector, &area, &offset);
2549 range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
2550 if (!ic->meta_dev)
2551 range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
2553 add_new_range_and_wait(ic, &range);
2554 spin_unlock_irq(&ic->endio_wait.lock);
2558 if (ic->mode == 'B') {
2559 if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
2562 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2563 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2564 logical_sector += ic->sectors_per_block;
2565 n_sectors -= ic->sectors_per_block;
2568 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2569 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2570 n_sectors -= ic->sectors_per_block;
2573 get_area_and_offset(ic, logical_sector, &area, &offset);
2579 recalc_write_super(ic);
2580 if (ic->mode == 'B') {
2581 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2586 if (unlikely(dm_integrity_failed(ic)))
2592 io_req.mem.ptr.addr = ic->recalc_buffer;
2594 io_req.client = ic->io;
2595 io_loc.bdev = ic->dev->bdev;
2596 io_loc.sector = get_data_sector(ic, area, offset);
2601 dm_integrity_io_error(ic, "reading data", r);
2605 t = ic->recalc_tags;
2606 for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2607 integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
2608 t += ic->tag_size;
2611 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2613 r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
2615 dm_integrity_io_error(ic, "writing tags", r);
2619 if (ic->mode == 'B') {
2622 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2623 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2625 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2626 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2627 block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
2633 spin_lock_irq(&ic->endio_wait.lock);
2634 remove_range_unlocked(ic, &range);
2635 ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2639 remove_range(ic, &range);
2643 spin_unlock_irq(&ic->endio_wait.lock);
2645 recalc_write_super(ic);
2651 struct dm_integrity_c *ic = bbs->ic;
2668 if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2670 remove_range(ic, &dio->range);
2672 queue_work(ic->offload_wq, &dio->work);
2674 block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2683 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
2690 block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2693 remove_range(ic, &dio->range);
2695 queue_work(ic->offload_wq, &dio->work);
2698 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2703 struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2708 dm_integrity_flush_buffers(ic, false);
2711 range.n_sectors = ic->provided_data_sectors;
2713 spin_lock_irq(&ic->endio_wait.lock);
2714 add_new_range_and_wait(ic, &range);
2715 spin_unlock_irq(&ic->endio_wait.lock);
2717 dm_integrity_flush_buffers(ic, true);
2719 limit = ic->provided_data_sectors;
2720 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2721 limit = le64_to_cpu(ic->sb->recalc_sector)
2722 >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2723 << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2726 block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2727 block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2729 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2730 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2732 spin_lock_irq(&ic->endio_wait.lock);
2733 remove_range_unlocked(ic, &range);
2734 while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2736 spin_unlock_irq(&ic->endio_wait.lock);
2737 spin_lock_irq(&ic->endio_wait.lock);
2739 spin_unlock_irq(&ic->endio_wait.lock);
2743 static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
2753 wraparound_section(ic, &i);
2754 for (j = 0; j < ic->journal_section_sectors; j++) {
2755 struct journal_sector *js = access_journal(ic, i, j);
2757 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2759 for (j = 0; j < ic->journal_section_entries; j++) {
2760 struct journal_entry *je = access_journal_entry(ic, i, j);
2765 write_journal(ic, start_section, n_sections);
2768 static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2772 if (dm_integrity_commit_id(ic, i, j, k) == id)
2775 dm_integrity_io_error(ic, "journal commit id", -EIO);
2779 static void replay_journal(struct dm_integrity_c *ic)
2789 if (ic->mode == 'R')
2792 if (ic->journal_uptodate)
2798 if (!ic->just_formatted) {
2800 rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
2801 if (ic->journal_io)
2802 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2803 if (ic->journal_io) {
2805 crypt_comp.ic = ic;
2808 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2811 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2814 if (dm_integrity_failed(ic))
2820 for (i = 0; i < ic->journal_sections; i++) {
2821 for (j = 0; j < ic->journal_section_sectors; j++) {
2823 struct journal_sector *js = access_journal(ic, i, j);
2824 k = find_commit_seq(ic, i, j, js->commit_id);
2831 for (j = 0; j < ic->journal_section_entries; j++) {
2832 struct journal_entry *je = access_journal_entry(ic, i, j);
2850 dm_integrity_io_error(ic, "journal commit ids", -EIO);
2865 if (unlikely(write_start >= ic->journal_sections))
2867 wraparound_section(ic, &write_start);
2870 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2871 for (j = 0; j < ic->journal_section_sectors; j++) {
2872 struct journal_sector *js = access_journal(ic, i, j);
2874 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2881 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2886 if (unlikely(i >= ic->journal_sections))
2888 wraparound_section(ic, &i);
2895 do_journal_write(ic, write_start, write_sections, true);
2898 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
2900 ic->commit_seq = want_commit_seq;
2901 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
2910 init_journal(ic, s, 1, erase_seq);
2912 wraparound_section(ic, &s);
2913 if (ic->journal_sections >= 2) {
2914 init_journal(ic, s, ic->journal_sections - 2, erase_seq);
2915 s += ic->journal_sections - 2;
2916 wraparound_section(ic, &s);
2917 init_journal(ic, s, 1, erase_seq);
2921 ic->commit_seq = next_commit_seq(erase_seq);
2924 ic->committed_section = continue_section;
2925 ic->n_committed_sections = 0;
2927 ic->uncommitted_section = continue_section;
2928 ic->n_uncommitted_sections = 0;
2930 ic->free_section = continue_section;
2931 ic->free_section_entry = 0;
2932 ic->free_sectors = ic->journal_entries;
2934 ic->journal_tree_root = RB_ROOT;
2935 for (i = 0; i < ic->journal_entries; i++)
2936 init_journal_node(&ic->journal_tree[i]);
2939 static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
2943 if (ic->mode == 'B') {
2944 ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
2945 ic->synchronous_mode = 1;
2947 cancel_delayed_work_sync(&ic->bitmap_flush_work);
2948 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2949 flush_workqueue(ic->commit_wq);
2955 struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
2959 dm_integrity_enter_synchronous_mode(ic);
2966 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2969 WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
2971 del_timer_sync(&ic->autocommit_timer);
2973 if (ic->recalc_wq)
2974 drain_workqueue(ic->recalc_wq);
2976 if (ic->mode == 'B')
2977 cancel_delayed_work_sync(&ic->bitmap_flush_work);
2979 queue_work(ic->commit_wq, &ic->commit_work);
2980 drain_workqueue(ic->commit_wq);
2982 if (ic->mode == 'J') {
2983 queue_work(ic->writer_wq, &ic->writer_work);
2984 drain_workqueue(ic->writer_wq);
2985 dm_integrity_flush_buffers(ic, true);
2986 if (ic->wrote_to_journal) {
2987 init_journal(ic, ic->free_section,
2988 ic->journal_sections - ic->free_section, ic->commit_seq);
2989 if (ic->free_section) {
2990 init_journal(ic, 0, ic->free_section,
2991 next_commit_seq(ic->commit_seq));
2996 if (ic->mode == 'B') {
2997 dm_integrity_flush_buffers(ic, true);
3000 init_journal(ic, 0, ic->journal_sections, 0);
3001 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3002 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3004 dm_integrity_io_error(ic, "writing superblock", r);
3008 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
3010 ic->journal_uptodate = true;
3015 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3016 __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
3021 ic->wrote_to_journal = false;
3023 if (ic->provided_data_sectors != old_provided_data_sectors) {
3024 if (ic->provided_data_sectors > old_provided_data_sectors &&
3025 ic->mode == 'B' &&
3026 ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
3027 rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
3028 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3029 block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
3030 ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
3031 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3032 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3035 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3036 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3038 dm_integrity_io_error(ic, "writing superblock", r);
3041 if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
3043 rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
3044 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3045 if (ic->mode == 'B') {
3046 if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
3047 block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
3048 block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
3049 if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
3051 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3052 ic->sb->recalc_sector = cpu_to_le64(0);
3056 ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
3057 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3058 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3059 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3060 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3061 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3062 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3063 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3064 ic->sb->recalc_sector = cpu_to_le64(0);
3067 if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3068 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
3069 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3070 ic->sb->recalc_sector = cpu_to_le64(0);
3072 init_journal(ic, 0, ic->journal_sections, 0);
3073 replay_journal(ic);
3074 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3076 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3078 dm_integrity_io_error(ic, "writing superblock", r);
3080 replay_journal(ic);
3081 if (ic->mode == 'B') {
3082 ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3083 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3084 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3086 dm_integrity_io_error(ic, "writing superblock", r);
3088 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3089 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3090 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3091 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
3092 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
3093 block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
3094 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3095 block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3096 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3097 block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3098 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3100 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3101 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3105 DEBUG_print("testing recalc: %x\n", ic->sb->flags);
3106 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
3107 __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
3108 DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
3109 if (recalc_pos < ic->provided_data_sectors) {
3110 queue_work(ic->recalc_wq, &ic->recalc_work);
3111 } else if (recalc_pos > ic->provided_data_sectors) {
3112 ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
3113 recalc_write_super(ic);
3117 ic->reboot_notifier.notifier_call = dm_integrity_reboot;
3118 ic->reboot_notifier.next = NULL;
3119 ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */
3120 WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
3124 dm_integrity_enter_synchronous_mode(ic);
3131 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3138 (unsigned long long)atomic64_read(&ic->number_of_mismatches),
3139 ic->provided_data_sectors);
3140 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3141 DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
3147 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
3148 watermark_percentage += ic->journal_entries / 2;
3149 do_div(watermark_percentage, ic->journal_entries);
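
The three lines above are round-to-nearest integer division: add half the divisor before dividing. As a standalone helper:

    /* rounded percentage, e.g. part = used journal entries, whole = all entries */
    static unsigned percent_rounded(unsigned long long part, unsigned long long whole)
    {
            return (unsigned)((part * 100 + whole / 2) / whole);
    }
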
3151 arg_count += !!ic->meta_dev;
3152 arg_count += ic->sectors_per_block != 1;
3153 arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
3154 arg_count += ic->discard;
3155 arg_count += ic->mode == 'J';
3156 arg_count += ic->mode == 'J';
3157 arg_count += ic->mode == 'B';
3158 arg_count += ic->mode == 'B';
3159 arg_count += !!ic->internal_hash_alg.alg_string;
3160 arg_count += !!ic->journal_crypt_alg.alg_string;
3161 arg_count += !!ic->journal_mac_alg.alg_string;
3162 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
3163 arg_count += ic->legacy_recalculate;
3164 DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
3165 ic->tag_size, ic->mode, arg_count);
3166 if (ic->meta_dev)
3167 DMEMIT(" meta_device:%s", ic->meta_dev->name);
3168 if (ic->sectors_per_block != 1)
3169 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
3170 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3172 if (ic->discard)
3174 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
3175 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
3176 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
3177 if (ic->mode == 'J') {
3179 DMEMIT(" commit_time:%u", ic->autocommit_msec);
3181 if (ic->mode == 'B') {
3182 DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
3183 DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
3185 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
3187 if (ic->legacy_recalculate)
3192 if (ic->a.alg_string) { \
3193 DMEMIT(" %s:%s", n, ic->a.alg_string); \
3194 if (ic->a.key_string) \
3195 DMEMIT(":%s", ic->a.key_string);\
3209 struct dm_integrity_c *ic = ti->private;
3211 if (!ic->meta_dev)
3212 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
3214 return fn(ti, ic->dev, 0, ti->len, data);
3219 struct dm_integrity_c *ic = ti->private;
3221 if (ic->sectors_per_block > 1) {
3222 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3223 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3224 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
3228 static void calculate_journal_section_size(struct dm_integrity_c *ic)
3232 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
3233 ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
3236 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3238 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3239 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
3240 ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
3241 ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
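
A sketch of the sizing chain above: the entry size is rounded up to an alignment, as many entries as fit are packed into each descriptor sector (the usable per-sector space shrinks when a journal MAC is stored, per the flag test above), and a section is JOURNAL_BLOCK_SECTORS descriptor sectors plus one data block per entry.

    static void journal_section_size(unsigned usable_bytes_per_sector,
                                     unsigned journal_entry_size,
                                     unsigned journal_block_sectors,
                                     unsigned log2_sectors_per_block,
                                     unsigned *section_entries,
                                     unsigned *section_sectors)
    {
            unsigned entries_per_sector = usable_bytes_per_sector / journal_entry_size;

            *section_entries = entries_per_sector * journal_block_sectors;
            *section_sectors = (*section_entries << log2_sectors_per_block)
                               + journal_block_sectors;
    }
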
3244 static int calculate_device_limits(struct dm_integrity_c *ic)
3248 calculate_journal_section_size(ic);
3249 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
3250 if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
3252 ic->initial_sectors = initial_sectors;
3254 if (!ic->meta_dev) {
3259 ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
3263 ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3265 if (!(ic->metadata_run & (ic->metadata_run - 1)))
3266 ic->log2_metadata_run = __ffs(ic->metadata_run);
3268 ic->log2_metadata_run = -1;
3270 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3271 last_sector = get_data_sector(ic, last_area, last_offset);
3272 if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3275 __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
3276 meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3277 >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3278 meta_size <<= ic->log2_buffer_sectors;
3279 if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3280 ic->initial_sectors + meta_size > ic->meta_device_sectors)
3282 ic->metadata_run = 1;
3283 ic->log2_metadata_run = 0;
3289 static void get_provided_data_sectors(struct dm_integrity_c *ic)
3291 if (!ic->meta_dev) {
3293 ic->provided_data_sectors = 0;
3294 for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3295 __u64 prev_data_sectors = ic->provided_data_sectors;
3297 ic->provided_data_sectors |= (sector_t)1 << test_bit;
3298 if (calculate_device_limits(ic))
3299 ic->provided_data_sectors = prev_data_sectors;
3302 ic->provided_data_sectors = ic->data_device_sectors;
3303 ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
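
get_provided_data_sectors() above grows the data size bit by bit from the top, keeping a bit only when the resulting size still passes calculate_device_limits(); this works because "fits on the device" is monotone in the size. A generic sketch:

    #include <stdint.h>
    #include <stdbool.h>

    /* Largest value with bits >= 3 set (matching the loop above) that
     * still satisfies a monotone predicate. */
    static uint64_t largest_fitting(bool (*fits)(uint64_t), int high_bit)
    {
            uint64_t v = 0;
            for (int bit = high_bit; bit >= 3; bit--) {
                    uint64_t t = v | ((uint64_t)1 << bit);
                    if (fits(t))
                            v = t;    /* keep the bit */
            }
            return v;
    }
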
3307 static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
3312 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
3313 memcpy(ic->sb->magic, SB_MAGIC, 8);
3314 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
3315 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
3316 if (ic->journal_mac_alg.alg_string)
3317 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3319 calculate_journal_section_size(ic);
3320 journal_sections = journal_sectors / ic->journal_section_sectors;
3324 if (!ic->meta_dev) {
3325 if (ic->fix_padding)
3326 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
3327 ic->sb->journal_sections = cpu_to_le32(journal_sections);
3330 ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3331 ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3332 ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3334 get_provided_data_sectors(ic);
3335 if (!ic->provided_data_sectors)
3338 ic->sb->log2_interleave_sectors = 0;
3340 get_provided_data_sectors(ic);
3341 if (!ic->provided_data_sectors)
3345 ic->sb->journal_sections = cpu_to_le32(0);
3347 __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3351 ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3352 if (calculate_device_limits(ic))
3353 ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
3356 if (!le32_to_cpu(ic->sb->journal_sections)) {
3357 if (ic->log2_buffer_sectors > 3) {
3358 ic->log2_buffer_sectors--;
3365 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3367 sb_set_version(ic);
3372 static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
3379 bi.tuple_size = ic->tag_size;
3381 bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
3422 static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
3425 for (i = 0; i < ic->journal_sections; i++)
3430 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
3436 sl = kvmalloc_array(ic->journal_sections,
3442 for (i = 0; i < ic->journal_sections; i++) {
3449 page_list_location(ic, i, 0, &start_index, &start_offset);
3450 page_list_location(ic, i, ic->journal_section_sectors - 1,
3458 dm_integrity_free_journal_scatterlist(ic, sl);
3549 static int create_journal(struct dm_integrity_c *ic, char **error)
3557 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
3558 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
3559 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
3560 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
3562 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
3570 ic->journal_pages = journal_pages;
3572 ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
3573 if (!ic->journal) {
3578 if (ic->journal_crypt_alg.alg_string) {
3582 comp.ic = ic;
3583 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3584 if (IS_ERR(ic->journal_crypt)) {
3586 r = PTR_ERR(ic->journal_crypt);
3587 ic->journal_crypt = NULL;
3590 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
3591 blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
3593 if (ic->journal_crypt_alg.key) {
3594 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
3595 ic->journal_crypt_alg.key_size);
3602 ic->journal_crypt_alg.alg_string, blocksize, ivsize);
3604 ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
3605 if (!ic->journal_io) {
3614 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3628 ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
3629 if (!ic->journal_xor) {
3635 sg = kvmalloc_array(ic->journal_pages + 1,
3643 sg_init_table(sg, ic->journal_pages + 1);
3644 for (i = 0; i < ic->journal_pages; i++) {
3645 char *va = lowmem_page_address(ic->journal_xor[i].page);
3649 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
3652 PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
3658 r = dm_integrity_failed(ic);
3663 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
3665 crypto_free_skcipher(ic->journal_crypt);
3666 ic->journal_crypt = NULL;
3670 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3691 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
3692 if (!ic->journal_scatterlist) {
3697 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
3698 if (!ic->journal_io_scatterlist) {
3703 ic->sk_requests = kvmalloc_array(ic->journal_sections,
3706 if (!ic->sk_requests) {
3711 for (i = 0; i < ic->journal_sections; i++) {
3727 r = dm_integrity_failed(ic);
3733 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3748 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
3749 ic->sk_requests[i] = section_req;
3759 if (ic->commit_ids[j] == ic->commit_ids[i]) {
3760 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
3764 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
3767 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
3773 ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
3774 if (!ic->journal_tree) {
3812 struct dm_integrity_c *ic;
3836 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
3837 if (!ic) {
3841 ti->private = ic;
3843 ic->ti = ti;
3845 ic->in_progress = RB_ROOT;
3846 INIT_LIST_HEAD(&ic->wait_list);
3847 init_waitqueue_head(&ic->endio_wait);
3848 bio_list_init(&ic->flush_bio_list);
3849 init_waitqueue_head(&ic->copy_to_journal_wait);
3850 init_completion(&ic->crypto_backoff);
3851 atomic64_set(&ic->number_of_mismatches, 0);
3852 ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
3854 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
3865 ic->start = start;
3868 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1) {
3877 ic->mode = argv[3][0];
3889 ic->sectors_per_block = 1;
3918 if (ic->meta_dev) {
3919 dm_put_device(ti, ic->meta_dev);
3920 ic->meta_dev = NULL;
3923 dm_table_get_mode(ti->table), &ic->meta_dev);
3936 ic->sectors_per_block = val >> SECTOR_SHIFT;
3945 ic->bitmap_flush_interval = msecs_to_jiffies(val);
3947 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
3952 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
3957 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
3962 ic->recalculate_flag = true;
3964 ic->discard = true;
3966 ic->fix_padding = true;
3968 ic->legacy_recalculate = true;
3976 ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
3977 if (!ic->meta_dev)
3978 ic->meta_device_sectors = ic->data_device_sectors;
3980 ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
3984 ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
3989 ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
3991 r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
3996 r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
4001 if (!ic->tag_size) {
4002 if (!ic->internal_hash) {
4007 ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
4009 if (ic->tag_size > MAX_TAG_SIZE) {
4014 if (!(ic->tag_size & (ic->tag_size - 1)))
4015 ic->log2_tag_size = __ffs(ic->tag_size);
4017 ic->log2_tag_size = -1;
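The three lines above pick a fast path: a power-of-two tag size lets the later metadata-offset math use shifts (log2_tag_size >= 0) instead of multiply/divide (-1). A standalone sketch (tag_size is known to be nonzero at this point; __builtin_ctz stands in for the kernel's __ffs()):

static int log2_tag_size(unsigned tag_size)
{
	if (!(tag_size & (tag_size - 1)))   /* power of two? */
		return __builtin_ctz(tag_size);
	return -1;                          /* slow multiply/divide path */
}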
4019 if (ic->mode == 'B' && !ic->internal_hash) {
4025 if (ic->discard && !ic->internal_hash) {
4031 ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
4032 ic->autocommit_msec = sync_msec;
4033 timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
4035 ic->io = dm_io_client_create();
4036 if (IS_ERR(ic->io)) {
4037 r = PTR_ERR(ic->io);
4038 ic->io = NULL;
4043 r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
4049 ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
4051 if (!ic->metadata_wq) {
4061 ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
4062 if (!ic->wait_wq) {
4068 ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
4070 if (!ic->offload_wq) {
4076 ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
4077 if (!ic->commit_wq) {
4082 INIT_WORK(&ic->commit_work, integrity_commit);
4084 if (ic->mode == 'J' || ic->mode == 'B') {
4085 ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
4086 if (!ic->writer_wq) {
4091 INIT_WORK(&ic->writer_work, integrity_writer);
4094 ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
4095 if (!ic->sb) {
4101 r = sync_rw_sb(ic, REQ_OP_READ, 0);
4107 if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
4108 if (ic->mode != 'R') {
4109 if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
4116 r = initialize_superblock(ic, journal_sectors, interleave_sectors);
4121 if (ic->mode != 'R')
4125 if (!ic->sb->version || ic->sb->version > SB_VERSION_4) {
4130 if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
4135 if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
4140 if (!le32_to_cpu(ic->sb->journal_sections)) {
4146 if (!ic->meta_dev) {
4147 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
4148 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
4154 if (ic->sb->log2_interleave_sectors) {
4160 if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
4166 get_provided_data_sectors(ic);
4167 if (!ic->provided_data_sectors) {
4174 r = calculate_device_limits(ic);
4176 if (ic->meta_dev) {
4177 if (ic->log2_buffer_sectors > 3) {
4178 ic->log2_buffer_sectors--;
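When the computed limits do not fit on a separate metadata device, the fragment above shrinks the buffer granularity and retries (the driver jumps back and recomputes the limits). A sketch of that retry shape with hypothetical names; fits_device() is a placeholder stand-in for the limit calculation, not the driver's function:

/* Placeholder criterion so the sketch compiles; the real check is elsewhere. */
static int fits_device(unsigned log2_buffer_sectors)
{
	return log2_buffer_sectors <= 7;
}

static int pick_log2_buffer_sectors(unsigned log2_buffer_sectors)
{
	for (;;) {
		if (fits_device(log2_buffer_sectors))
			return log2_buffer_sectors;
		if (log2_buffer_sectors <= 3)
			return -1;        /* "The device is too small" */
		log2_buffer_sectors--;    /* halve the buffer and retry */
	}
}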
4188 if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
4189 log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
4191 bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
4194 while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
4197 log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
4198 ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4200 ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4202 n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
4204 ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
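In bitmap mode the journal area is reused to hold the bitmap, so the fragments above widen the region each bitmap bit covers until the bitmap fits into the journal's capacity in bits, then count bitmap blocks with DIV_ROUND_UP. A userspace sketch of the widening loop (hypothetical helper; the starting value is at least one block per bit, per the clamp above):

#include <stdint.h>

static unsigned pick_log2_sectors_per_bit(uint64_t data_sectors,
					  uint64_t bits_in_journal,
					  unsigned log2_start)
{
	unsigned log2_spb = log2_start;

	/* Each step doubles the sectors per bit, halving the bits needed. */
	while (bits_in_journal <
	       (data_sectors + ((uint64_t)1 << log2_spb) - 1) >> log2_spb)
		log2_spb++;

	return log2_spb;
}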
4206 if (!ic->meta_dev)
4207 ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
4209 if (ti->len > ic->provided_data_sectors) {
4216 threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
4219 ic->free_sectors_threshold = threshold;
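The watermark math above converts a percentage into an absolute count: with a watermark of W, commit is kicked once free journal entries fall below (100 - W)% of the total, rounded to nearest (the adjacent lines, elided by this refs listing, add 50 before dividing by 100). As one expression, in a hypothetical helper:

#include <stdint.h>

static uint64_t free_sectors_threshold(uint64_t journal_entries,
				       unsigned journal_watermark)
{
	return (journal_entries * (100 - journal_watermark) + 50) / 100;
}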
4222 DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
4223 DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size);
4224 DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
4225 DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
4226 DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
4227 DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
4228 DEBUG_print(" journal_entries %u\n", ic->journal_entries);
4229 DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
4230 DEBUG_print(" data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
4231 DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
4232 DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
4233 DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
4234 DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
4235 DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
4238 if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
4239 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
4240 ic->sb->recalc_sector = cpu_to_le64(0);
4243 if (ic->internal_hash) {
4245 ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
4246 if (!ic->recalc_wq) {
4251 INIT_WORK(&ic->recalc_work, integrity_recalc);
4252 ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
4253 if (!ic->recalc_buffer) {
4258 recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size;
4259 if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
4260 recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
4261 ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL);
4262 if (!ic->recalc_tags) {
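The sizing above allocates one tag per block in the recalculation window, plus slack when the internal hash digest is wider than the stored tag, so the final digest can be written in full and then truncated. A standalone sketch (hypothetical helper name):

#include <stddef.h>

static size_t recalc_tags_bytes(unsigned recalc_sectors, unsigned log2_spb,
				unsigned tag_size, unsigned digest_size)
{
	size_t size = (size_t)(recalc_sectors >> log2_spb) * tag_size;

	if (digest_size > tag_size)
		size += digest_size - tag_size; /* room for one full digest */
	return size;
}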
4268 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
4275 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
4276 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
4277 dm_integrity_disable_recalculate(ic)) {
4283 ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
4284 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
4285 if (IS_ERR(ic->bufio)) {
4286 r = PTR_ERR(ic->bufio);
4288 ic->bufio = NULL;
4291 dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
4293 if (ic->mode != 'R') {
4294 r = create_journal(ic, &ti->error);
4300 if (ic->mode == 'B') {
4302 unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
4304 ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4305 if (!ic->recalc_bitmap) {
4309 ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4310 if (!ic->may_write_bitmap) {
4314 ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
4315 if (!ic->bbs) {
4319 INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
4320 for (i = 0; i < ic->n_bitmap_blocks; i++) {
4321 struct bitmap_block_status *bbs = &ic->bbs[i];
4325 bbs->ic = ic;
4334 bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
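Each bitmap_block_status above points directly into the journal page list. A userspace sketch of that index math (assumes 4 KiB pages, 512-byte sectors, and a BITMAP_BLOCK_SIZE of 4096; per the DIV_ROUND_UP above it cannot exceed PAGE_SIZE):

#include <stdint.h>

#define SECTOR_SHIFT       9
#define PAGE_SHIFT         12
#define PAGE_SIZE          (1U << PAGE_SHIFT)
#define BITMAP_BLOCK_SIZE  4096

static void locate_bitmap_block(unsigned i, unsigned *pl_index,
				unsigned *pl_offset)
{
	unsigned sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);

	*pl_index  = sector >> (PAGE_SHIFT - SECTOR_SHIFT);      /* which page */
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); /* byte offset */
}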
4339 init_journal(ic, 0, ic->journal_sections, 0);
4340 r = dm_integrity_failed(ic);
4345 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
4350 ic->just_formatted = true;
4353 if (!ic->meta_dev) {
4354 r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
4358 if (ic->mode == 'B') {
4359 unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
4370 if (!ic->internal_hash)
4371 dm_integrity_set(ti, ic);
4375 if (ic->discard)
4387 struct dm_integrity_c *ic = ti->private;
4389 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
4390 BUG_ON(!list_empty(&ic->wait_list));
4392 if (ic->mode == 'B')
4393 cancel_delayed_work_sync(&ic->bitmap_flush_work);
4394 if (ic->metadata_wq)
4395 destroy_workqueue(ic->metadata_wq);
4396 if (ic->wait_wq)
4397 destroy_workqueue(ic->wait_wq);
4398 if (ic->offload_wq)
4399 destroy_workqueue(ic->offload_wq);
4400 if (ic->commit_wq)
4401 destroy_workqueue(ic->commit_wq);
4402 if (ic->writer_wq)
4403 destroy_workqueue(ic->writer_wq);
4404 if (ic->recalc_wq)
4405 destroy_workqueue(ic->recalc_wq);
4406 vfree(ic->recalc_buffer);
4407 kvfree(ic->recalc_tags);
4408 kvfree(ic->bbs);
4409 if (ic->bufio)
4410 dm_bufio_client_destroy(ic->bufio);
4411 mempool_exit(&ic->journal_io_mempool);
4412 if (ic->io)
4413 dm_io_client_destroy(ic->io);
4414 if (ic->dev)
4415 dm_put_device(ti, ic->dev);
4416 if (ic->meta_dev)
4417 dm_put_device(ti, ic->meta_dev);
4418 dm_integrity_free_page_list(ic->journal);
4419 dm_integrity_free_page_list(ic->journal_io);
4420 dm_integrity_free_page_list(ic->journal_xor);
4421 dm_integrity_free_page_list(ic->recalc_bitmap);
4422 dm_integrity_free_page_list(ic->may_write_bitmap);
4423 if (ic->journal_scatterlist)
4424 dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
4425 if (ic->journal_io_scatterlist)
4426 dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
4427 if (ic->sk_requests) {
4430 for (i = 0; i < ic->journal_sections; i++) {
4431 struct skcipher_request *req = ic->sk_requests[i];
4437 kvfree(ic->sk_requests);
4439 kvfree(ic->journal_tree);
4440 if (ic->sb)
4441 free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
4443 if (ic->internal_hash)
4444 crypto_free_shash(ic->internal_hash);
4445 free_alg(&ic->internal_hash_alg);
4447 if (ic->journal_crypt)
4448 crypto_free_skcipher(ic->journal_crypt);
4449 free_alg(&ic->journal_crypt_alg);
4451 if (ic->journal_mac)
4452 crypto_free_shash(ic->journal_mac);
4453 free_alg(&ic->journal_mac_alg);
4455 kfree(ic);
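The destructor above frees everything in roughly reverse order of construction and tolerates a half-built context (every workqueue and client pointer is checked, and kvfree()/vfree() accept NULL), so the same path serves both a failed constructor and a normal destroy. A userspace analog of the idiom, with hypothetical fields:

#include <stdlib.h>

struct ctx {
	void *journal;
	void *tags;
	void *tree;
};

static void ctx_destroy(struct ctx *c)
{
	/* free(NULL) is a no-op, like kvfree()/vfree() in the kernel. */
	free(c->tree);     /* last-allocated first */
	free(c->tags);
	free(c->journal);
	free(c);
}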