Lines Matching defs:log

18 #include "raid5-log.h"
28 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
30 * In write through mode, reclaim runs each time log->max_free_space of reclaimable space has accumulated.
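
A minimal sketch of how that cap is derived, mirroring the shift-and-clamp fragment near the end of this listing (source lines 3013-3016); the two constant definitions here are assumptions stated only for illustration:

	#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2)	/* 10 GiB in 512-byte sectors */
	#define RECLAIM_MAX_FREE_SPACE_SHIFT 2			/* i.e. 1/4 of the device */

	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
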
70 * writes are committed from the log device. Therefore, a stripe in
72 * - write to log device
87 sector_t device_size; /* log device size, rounded to
92 sector_t last_checkpoint; /* log tail. where recovery scan
94 u64 last_cp_seq; /* log tail sequence */
96 sector_t log_start; /* log head. where new data appends */
97 u64 seq; /* log head sequence */
107 * written to the log */
109 * written to the log but not yet written
111 struct list_head flushing_ios; /* io_units which are waiting for log
113 struct list_head finished_ios; /* io_units which settle down in log disk */
133 struct list_head no_space_stripes; /* pending stripes, log has no space */
205 * unit is written to the log disk with a normal write, as we always flush the log disk
210 struct r5l_log *log;
221 struct list_head log_sibling; /* log->running_ios */
244 IO_UNIT_IO_START = 1, /* io_unit bio starts writing to the log,
246 IO_UNIT_IO_END = 2, /* io_unit bio has finished writing to the log */
250 bool r5c_is_writeback(struct r5l_log *log)
252 return (log != NULL &&
253 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
256 static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
259 if (start >= log->device_size)
260 start = start - log->device_size;
264 static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
270 return end + log->device_size - start;
273 static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
277 used_size = r5l_ring_distance(log, log->last_checkpoint,
278 log->log_start);
280 return log->device_size > used_size + size;
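
Pieced together from the fragments above, the circular-log arithmetic reduces to the following self-contained sketch (sector_t simplified to a 64-bit unsigned type; the non-wrapping branch of r5l_ring_distance is inferred from the visible wrap case):

	typedef unsigned long long sector_t;

	/* advance a position on the ring, wrapping at device_size */
	static sector_t ring_add(sector_t device_size, sector_t start, sector_t inc)
	{
		start += inc;
		if (start >= device_size)
			start -= device_size;
		return start;
	}

	/* forward distance from start to end around the ring */
	static sector_t ring_distance(sector_t device_size, sector_t start,
				      sector_t end)
	{
		return end >= start ? end - start : end + device_size - start;
	}

The free-space test then just checks that the used region (last_checkpoint up to log_start) plus the request still fits: device_size > used_size + size.
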
324 void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
331 if (!r5c_is_writeback(conf->log))
347 r5l_wake_reclaim(conf->log, 0);
356 if (!r5c_is_writeback(conf->log))
366 r5l_wake_reclaim(conf->log, 0);
370 * Total log space (in sectors) needed to flush all data in cache
372 * To avoid deadlock due to log space exhaustion, it is necessary to reserve log
373 * space to flush critical stripes (stripes that occupy log space near
374 * last_checkpoint). This function helps check how much log space is
377 * To reduce log space requirements, two mechanisms are used to give cache
399 struct r5l_log *log = conf->log;
401 if (!r5c_is_writeback(log))
405 ((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
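
For reference, a hedged reconstruction of the whole helper around that visible operand; the BLOCK_SECTORS scale factor and the second addend (room for the stripes each handling group may be flushing) do not appear in the fragment and are assumptions:

	static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
	{
		struct r5l_log *log = conf->log;

		if (!r5c_is_writeback(log))
			return 0;	/* nothing is cached in write-through mode */

		/* roughly: parity plus one meta block per cached stripe, plus
		 * data blocks for one in-flight stripe per handling group */
		return BLOCK_SECTORS *
		       ((conf->max_degraded + 1) *
			atomic_read(&log->stripe_in_journal_count) +
			(conf->raid_disks - conf->max_degraded) *
			(conf->group_cnt + 1));
	}
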
410 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
412 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x
413 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
416 static inline void r5c_update_log_state(struct r5l_log *log)
418 struct r5conf *conf = log->rdev->mddev->private;
423 if (!r5c_is_writeback(log))
426 free_space = r5l_ring_distance(log, log->log_start,
427 log->last_checkpoint);
442 r5l_wake_reclaim(log, 0);
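
Condensed, the state update is two comparisons against that reserve; a sketch assembled from the fragments above (conf->cache_state as the flag word is an assumption):

	free_space    = r5l_ring_distance(log, log->log_start, log->last_checkpoint);
	reclaim_space = r5c_log_required_to_flush_cache(conf);

	if (free_space < 2 * reclaim_space)
		set_bit(R5C_LOG_CRITICAL, &conf->cache_state);	/* below 2x: critical */
	else
		clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);

	if (free_space < 3 * reclaim_space)
		set_bit(R5C_LOG_TIGHT, &conf->cache_state);	/* below 3x: tight */
	else
		clear_bit(R5C_LOG_TIGHT, &conf->cache_state);
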
452 struct r5l_log *log = conf->log;
454 BUG_ON(!r5c_is_writeback(log));
490 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
494 struct r5l_log *log = sh->raid_conf->log;
496 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
527 static void r5l_log_run_stripes(struct r5l_log *log)
531 lockdep_assert_held(&log->io_list_lock);
533 list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
538 list_move_tail(&io->log_sibling, &log->finished_ios);
543 static void r5l_move_to_end_ios(struct r5l_log *log)
547 lockdep_assert_held(&log->io_list_lock);
549 list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
553 list_move_tail(&io->log_sibling, &log->io_end_ios);
562 struct r5l_log *log = io->log;
568 md_error(log->rdev->mddev, log->rdev);
571 mempool_free(io->meta_page, &log->meta_pool);
573 spin_lock_irqsave(&log->io_list_lock, flags);
585 if (log->need_cache_flush && !list_empty(&io->stripe_list))
586 r5l_move_to_end_ios(log);
588 r5l_log_run_stripes(log);
589 if (!list_empty(&log->running_ios)) {
594 io_deferred = list_first_entry(&log->running_ios,
597 schedule_work(&log->deferred_io_work);
600 spin_unlock_irqrestore(&log->io_list_lock, flags);
602 if (log->need_cache_flush)
603 md_wakeup_thread(log->rdev->mddev->thread);
624 static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
628 spin_lock_irqsave(&log->io_list_lock, flags);
630 spin_unlock_irqrestore(&log->io_list_lock, flags);
661 struct r5l_log *log = container_of(work, struct r5l_log,
666 spin_lock_irqsave(&log->io_list_lock, flags);
667 if (!list_empty(&log->running_ios)) {
668 io = list_first_entry(&log->running_ios, struct r5l_io_unit,
675 spin_unlock_irqrestore(&log->io_list_lock, flags);
677 r5l_do_submit_io(log, io);
682 struct r5l_log *log = container_of(work, struct r5l_log,
684 struct mddev *mddev = log->rdev->mddev;
688 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
695 conf->log == NULL ||
700 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
706 static void r5l_submit_current_io(struct r5l_log *log)
708 struct r5l_io_unit *io = log->current_io;
719 crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
722 log->current_io = NULL;
723 spin_lock_irqsave(&log->io_list_lock, flags);
725 if (io != list_first_entry(&log->running_ios,
731 spin_unlock_irqrestore(&log->io_list_lock, flags);
733 r5l_do_submit_io(log, io);
736 static struct bio *r5l_bio_alloc(struct r5l_log *log)
738 struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, &log->bs);
741 bio_set_dev(bio, log->rdev->bdev);
742 bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
747 static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
749 log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
751 r5c_update_log_state(log);
753 * If we filled up the log device, start from the beginning again,
756 * Note: for this to work properly the log size needs to be a multiple
759 if (log->log_start == 0)
762 io->log_end = log->log_start;
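
The body of that wrap test is cut off by the match; upstream it sets a split flag along these lines (the need_split_bio field name is an assumption):

	if (log->log_start == 0)		/* ring wrapped around */
		io->need_split_bio = true;	/* next page must go into a fresh bio */
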
765 static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
770 io = mempool_alloc(&log->io_pool, GFP_ATOMIC);
775 io->log = log;
781 io->meta_page = mempool_alloc(&log->meta_pool, GFP_NOIO);
786 block->seq = cpu_to_le64(log->seq);
787 block->position = cpu_to_le64(log->log_start);
789 io->log_start = log->log_start;
791 io->seq = log->seq++;
793 io->current_bio = r5l_bio_alloc(log);
798 r5_reserve_log_entry(log, io);
800 spin_lock_irq(&log->io_list_lock);
801 list_add_tail(&io->log_sibling, &log->running_ios);
802 spin_unlock_irq(&log->io_list_lock);
807 static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
809 if (log->current_io &&
810 log->current_io->meta_offset + payload_size > PAGE_SIZE)
811 r5l_submit_current_io(log);
813 if (!log->current_io) {
814 log->current_io = r5l_new_meta(log);
815 if (!log->current_io)
822 static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
827 struct r5l_io_unit *io = log->current_io;
844 static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
846 struct r5l_io_unit *io = log->current_io;
851 io->current_bio = r5l_bio_alloc(log);
859 r5_reserve_log_entry(log, io);
862 static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
864 struct mddev *mddev = log->rdev->mddev;
878 mutex_lock(&log->io_mutex);
881 if (r5l_get_meta(log, meta_size)) {
882 mutex_unlock(&log->io_mutex);
887 io = log->current_io;
899 mutex_unlock(&log->io_mutex);
902 static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
916 ret = r5l_get_meta(log, meta_size);
920 io = log->current_io;
932 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
940 r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
943 r5l_append_payload_page(log, sh->dev[i].page);
947 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
950 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
951 r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
953 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
956 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
964 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
970 spin_lock_irq(&log->stripe_in_journal_lock);
972 &log->stripe_in_journal_list);
973 spin_unlock_irq(&log->stripe_in_journal_lock);
974 atomic_inc(&log->stripe_in_journal_count);
980 static inline void r5l_add_no_space_stripe(struct r5l_log *log,
983 spin_lock(&log->no_space_stripes_lock);
984 list_add_tail(&sh->log_list, &log->no_space_stripes);
985 spin_unlock(&log->no_space_stripes_lock);
990 * data from log to raid disks), so we shouldn't wait for reclaim here
992 int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
1002 if (!log)
1007 /* the stripe is written to the log; we start writing it to raid */
1026 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
1041 mutex_lock(&log->io_mutex);
1045 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
1046 if (!r5l_has_free_space(log, reserve)) {
1047 r5l_add_no_space_stripe(log, sh);
1050 ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
1052 spin_lock_irq(&log->io_list_lock);
1054 &log->no_mem_stripes);
1055 spin_unlock_irq(&log->io_list_lock);
1060 * log space critical, do not process stripes that are
1065 r5l_add_no_space_stripe(log, sh);
1068 } else if (!r5l_has_free_space(log, reserve)) {
1069 if (sh->log_start == log->last_checkpoint)
1072 r5l_add_no_space_stripe(log, sh);
1074 ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
1076 spin_lock_irq(&log->io_list_lock);
1078 &log->no_mem_stripes);
1079 spin_unlock_irq(&log->io_list_lock);
1084 mutex_unlock(&log->io_mutex);
1086 r5l_wake_reclaim(log, reserve);
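
Assembling the two branches above, the space policy of r5l_write_stripe is roughly the following decision tree; the critical-mode test and the BUG() on the tail stripe are inferred from the surrounding comments (source lines 1060 and 1069) and are assumptions:

	bool log_critical;	/* R5C_LOG_CRITICAL set; exact test not in the fragments */
	bool in_journal;	/* stripe already has entries in the journal */

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		if (!r5l_has_free_space(log, reserve))
			r5l_add_no_space_stripe(log, sh);	/* park until reclaim */
		else
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
	} else {						/* write back */
		if (log_critical && !in_journal)
			r5l_add_no_space_stripe(log, sh);	/* don't grow the cache */
		else if (!r5l_has_free_space(log, reserve)) {
			if (sh->log_start == log->last_checkpoint)
				BUG();	/* the stripe at the ring tail must always fit */
			r5l_add_no_space_stripe(log, sh);
		} else
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
	}
	r5l_wake_reclaim(log, reserve);		/* kick reclaim in case we parked */
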
1090 void r5l_write_stripe_run(struct r5l_log *log)
1092 if (!log)
1094 mutex_lock(&log->io_mutex);
1095 r5l_submit_current_io(log);
1096 mutex_unlock(&log->io_mutex);
1099 int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
1101 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
1104 * we flush the log disk cache first, then write stripe data to
1105 * raid disks. So if the bio has finished, the log disk cache is
1107 * the bio from the log disk, so we don't need to flush again
1117 mutex_lock(&log->io_mutex);
1118 r5l_get_meta(log, 0);
1119 bio_list_add(&log->current_io->flush_barriers, bio);
1120 log->current_io->has_flush = 1;
1121 log->current_io->has_null_flush = 1;
1122 atomic_inc(&log->current_io->pending_stripe);
1123 r5l_submit_current_io(log);
1124 mutex_unlock(&log->io_mutex);
1131 /* This will run after log space is reclaimed */
1132 static void r5l_run_no_space_stripes(struct r5l_log *log)
1136 spin_lock(&log->no_space_stripes_lock);
1137 while (!list_empty(&log->no_space_stripes)) {
1138 sh = list_first_entry(&log->no_space_stripes,
1144 spin_unlock(&log->no_space_stripes_lock);
1149 * for write through mode, returns log->next_checkpoint
1155 struct r5l_log *log = conf->log;
1159 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
1160 return log->next_checkpoint;
1162 spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1163 if (list_empty(&conf->log->stripe_in_journal_list)) {
1165 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1166 return log->next_checkpoint;
1168 sh = list_first_entry(&conf->log->stripe_in_journal_list,
1171 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1175 static sector_t r5l_reclaimable_space(struct r5l_log *log)
1177 struct r5conf *conf = log->rdev->mddev->private;
1179 return r5l_ring_distance(log, log->last_checkpoint,
1183 static void r5l_run_no_mem_stripe(struct r5l_log *log)
1187 lockdep_assert_held(&log->io_list_lock);
1189 if (!list_empty(&log->no_mem_stripes)) {
1190 sh = list_first_entry(&log->no_mem_stripes,
1198 static bool r5l_complete_finished_ios(struct r5l_log *log)
1203 lockdep_assert_held(&log->io_list_lock);
1205 list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
1210 log->next_checkpoint = io->log_start;
1213 mempool_free(io, &log->io_pool);
1214 r5l_run_no_mem_stripe(log);
1224 struct r5l_log *log = io->log;
1225 struct r5conf *conf = log->rdev->mddev->private;
1228 spin_lock_irqsave(&log->io_list_lock, flags);
1231 if (!r5l_complete_finished_ios(log)) {
1232 spin_unlock_irqrestore(&log->io_list_lock, flags);
1236 if (r5l_reclaimable_space(log) > log->max_free_space ||
1238 r5l_wake_reclaim(log, 0);
1240 spin_unlock_irqrestore(&log->io_list_lock, flags);
1241 wake_up(&log->iounit_wait);
1257 struct r5l_log *log = container_of(bio, struct r5l_log,
1263 md_error(log->rdev->mddev, log->rdev);
1265 spin_lock_irqsave(&log->io_list_lock, flags);
1266 list_for_each_entry(io, &log->flushing_ios, log_sibling)
1268 list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
1269 spin_unlock_irqrestore(&log->io_list_lock, flags);
1274 * io_units (each led by a meta block) make up the log. There is one situation we want to avoid: a
1275 * broken meta in the middle of the log prevents recovery from finding the meta at the
1276 * head of the log. If an operation requires the meta at the head to be persistent in the log, we
1277 * must make sure the meta before it is persistent in the log too. A case is:
1279 * stripe data/parity is in the log and we start writing the stripe to the raid disks. The stripe
1280 * data/parity must be persistent in the log before we do the write to the raid disks.
1284 * one whose data/parity is in the log.
1286 void r5l_flush_stripe_to_raid(struct r5l_log *log)
1290 if (!log || !log->need_cache_flush)
1293 spin_lock_irq(&log->io_list_lock);
1295 if (!list_empty(&log->flushing_ios)) {
1296 spin_unlock_irq(&log->io_list_lock);
1299 list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
1300 do_flush = !list_empty(&log->flushing_ios);
1301 spin_unlock_irq(&log->io_list_lock);
1305 bio_reset(&log->flush_bio);
1306 bio_set_dev(&log->flush_bio, log->rdev->bdev);
1307 log->flush_bio.bi_end_io = r5l_log_flush_endio;
1308 log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
1309 submit_bio(&log->flush_bio);
1312 static void r5l_write_super(struct r5l_log *log, sector_t cp);
1313 static void r5l_write_super_and_discard_space(struct r5l_log *log,
1316 struct block_device *bdev = log->rdev->bdev;
1319 r5l_write_super(log, end);
1324 mddev = log->rdev->mddev;
1327 * superblock is updated to the new log tail. Updating the superblock (either
1344 if (log->last_checkpoint < end) {
1346 log->last_checkpoint + log->rdev->data_offset,
1347 end - log->last_checkpoint, GFP_NOIO, 0);
1350 log->last_checkpoint + log->rdev->data_offset,
1351 log->device_size - log->last_checkpoint,
1353 blkdev_issue_discard(bdev, log->rdev->data_offset, end,
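
Those three blkdev_issue_discard calls form one wrap-aware discard of the reclaimed region; assembled (only the else keyword and braces are supplied here, the arguments are from the fragments):

	if (log->last_checkpoint < end) {
		/* contiguous region: [last_checkpoint, end) */
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				end - log->last_checkpoint, GFP_NOIO, 0);
	} else {
		/* wrapped: discard to the end of the device, then from the start */
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				log->device_size - log->last_checkpoint,
				GFP_NOIO, 0);
		blkdev_issue_discard(bdev, log->rdev->data_offset, end,
				GFP_NOIO, 0);
	}
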
1403 if (!conf->log)
1424 struct r5l_log *log = conf->log;
1432 if (!r5c_is_writeback(log))
1466 /* if log space is tight, flush stripes on stripe_in_journal_list */
1468 spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1470 list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
1488 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1492 r5l_run_no_space_stripes(log);
1497 static void r5l_do_reclaim(struct r5l_log *log)
1499 struct r5conf *conf = log->rdev->mddev->private;
1500 sector_t reclaim_target = xchg(&log->reclaim_target, 0);
1505 spin_lock_irq(&log->io_list_lock);
1506 write_super = r5l_reclaimable_space(log) > log->max_free_space ||
1507 reclaim_target != 0 || !list_empty(&log->no_space_stripes);
1514 reclaimable = r5l_reclaimable_space(log);
1516 (list_empty(&log->running_ios) &&
1517 list_empty(&log->io_end_ios) &&
1518 list_empty(&log->flushing_ios) &&
1519 list_empty(&log->finished_ios)))
1522 md_wakeup_thread(log->rdev->mddev->thread);
1523 wait_event_lock_irq(log->iounit_wait,
1524 r5l_reclaimable_space(log) > reclaimable,
1525 log->io_list_lock);
1529 spin_unlock_irq(&log->io_list_lock);
1536 * here, because the log area might be reused soon and we don't want to
1539 r5l_write_super_and_discard_space(log, next_checkpoint);
1541 mutex_lock(&log->io_mutex);
1542 log->last_checkpoint = next_checkpoint;
1543 r5c_update_log_state(log);
1544 mutex_unlock(&log->io_mutex);
1546 r5l_run_no_space_stripes(log);
1553 struct r5l_log *log = conf->log;
1555 if (!log)
1558 r5l_do_reclaim(log);
1561 void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
1566 if (!log)
1569 target = log->reclaim_target;
1572 } while (cmpxchg(&log->reclaim_target, target, new) != target);
1573 md_wakeup_thread(log->reclaim_thread);
1576 void r5l_quiesce(struct r5l_log *log, int quiesce)
1582 mddev = log->rdev->mddev;
1584 kthread_park(log->reclaim_thread->tsk);
1585 r5l_wake_reclaim(log, MaxSector);
1586 r5l_do_reclaim(log);
1588 kthread_unpark(log->reclaim_thread->tsk);
1593 struct r5l_log *log;
1597 log = rcu_dereference(conf->log);
1599 if (!log)
1602 ret = test_bit(Faulty, &log->rdev->flags);
1620 * in recovery, the log is read sequentially. It is not efficient to
1622 * reads multiple pages with one IO, so further log reads can
1632 static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
1637 ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, &log->bs);
1661 static void r5l_recovery_free_ra_pool(struct r5l_log *log,
1677 static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
1682 bio_set_dev(ctx->ra_bio, log->rdev->bdev);
1684 ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
1694 offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
1707 static int r5l_recovery_read_page(struct r5l_log *log,
1716 ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
1731 static int r5l_recovery_read_meta_block(struct r5l_log *log,
1739 ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
1753 crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
1766 r5l_recovery_create_empty_meta_block(struct r5l_log *log,
1781 static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
1790 r5l_recovery_create_empty_meta_block(log, page, pos, seq);
1792 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
1794 if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
1810 static void r5l_recovery_load_data(struct r5l_log *log,
1816 struct mddev *mddev = log->rdev->mddev;
1823 r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
1832 static void r5l_recovery_load_parity(struct r5l_log *log,
1838 struct mddev *mddev = log->rdev->mddev;
1842 r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
1849 log, ctx, sh->dev[sh->qd_idx].page,
1850 r5l_ring_add(log, log_offset, BLOCK_SECTORS));
1983 r5l_recovery_verify_data_checksum(struct r5l_log *log,
1991 r5l_recovery_read_page(log, ctx, page, log_offset);
1993 checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
2003 r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
2006 struct mddev *mddev = log->rdev->mddev;
2010 sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2025 log, ctx, page, log_offset,
2030 log, ctx, page, log_offset,
2035 log, ctx, page,
2036 r5l_ring_add(log, log_offset,
2050 log_offset = r5l_ring_add(log, log_offset,
2076 r5c_recovery_analyze_meta_block(struct r5l_log *log,
2080 struct mddev *mddev = log->rdev->mddev;
2096 ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
2104 log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2187 r5l_recovery_load_data(log, sh, ctx, payload,
2190 r5l_recovery_load_parity(log, sh, ctx, payload,
2195 log_offset = r5l_ring_add(log, log_offset,
2210 static void r5c_recovery_load_one_stripe(struct r5l_log *log,
2226 * Scan through the log for all to-be-flushed data
2241 static int r5c_recovery_flush_log(struct r5l_log *log,
2247 /* scan through the log */
2249 if (r5l_recovery_read_meta_block(log, ctx))
2252 ret = r5c_recovery_analyze_meta_block(log, ctx,
2261 ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
2275 r5c_recovery_load_one_stripe(log, sh);
2284 * log will start here, but we can't let the superblock point to the last valid
2285 * meta block. The log might look like:
2297 * Before recovery, the log looks like the following
2300 * | valid log | invalid log |
2303 * |- log->last_checkpoint
2304 * |- log->last_cp_seq
2306 * Now we scan through the log until we see invalid entry
2309 * | valid log | invalid log |
2312 * |- log->last_checkpoint |- ctx->pos
2313 * |- log->last_cp_seq |- ctx->seq
2319 * | valid log | invalid log |
2322 * |- log->last_checkpoint |- ctx->pos+1
2323 * |- log->last_cp_seq |- ctx->seq+10001
2330 * | valid log | data only stripes | invalid log |
2333 * |- log->last_checkpoint |- ctx->pos+n
2334 * |- log->last_cp_seq |- ctx->seq+10000+n
2337 * again from log->last_checkpoint.
2342 * | old log | data only stripes | invalid log |
2345 * |- log->last_checkpoint |- ctx->pos+n
2346 * |- log->last_cp_seq |- ctx->seq+10000+n
2349 * point on, the recovery will start from the new log->last_checkpoint.
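
Condensing the pictures above: recovery bumps the sequence past a 10000-wide gap so rewritten blocks outrank stale ones, optionally rewrites the data-only stripes, then moves the checkpoint. A sketch using the identifiers from the r5l_recovery_log fragments below (the data_only_stripes counter is an assumption):

	pos = ctx->pos;			/* where the invalid log began */
	ctx->seq += 10000;		/* sequence gap, cf. seq+10001 above */

	if (ctx->data_only_stripes == 0) {
		/* nothing to rewrite: just terminate the log cleanly */
		log->next_checkpoint = ctx->pos;
		r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
		ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
	} else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
		return -EIO;		/* rewrite failed; don't move the checkpoint */
	}

	log->log_start = ctx->pos;	/* new head, past the rewritten stripes */
	log->seq = ctx->seq;
	log->last_checkpoint = pos;	/* new tail: start of the rewritten region */
	r5l_write_super(log, pos);
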
2352 r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2356 struct mddev *mddev = log->rdev->mddev;
2376 r5l_recovery_create_empty_meta_block(log, page,
2380 write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2396 crc32c_le(log->uuid_checksum, addr,
2399 sync_page_io(log->rdev, write_pos, PAGE_SIZE,
2401 write_pos = r5l_ring_add(log, write_pos,
2409 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
2411 sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
2414 list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
2415 atomic_inc(&log->stripe_in_journal_count);
2420 log->next_checkpoint = next_checkpoint;
2425 static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
2428 struct mddev *mddev = log->rdev->mddev;
2440 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
2453 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
2458 static int r5l_recovery_log(struct r5l_log *log)
2460 struct mddev *mddev = log->rdev->mddev;
2469 ctx->pos = log->last_checkpoint;
2470 ctx->seq = log->last_cp_seq;
2479 if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
2484 ret = r5c_recovery_flush_log(log, ctx);
2501 log->next_checkpoint = ctx->pos;
2502 r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
2503 ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2504 } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
2511 log->log_start = ctx->pos;
2512 log->seq = ctx->seq;
2513 log->last_checkpoint = pos;
2514 r5l_write_super(log, pos);
2516 r5c_recovery_flush_data_only_stripes(log, ctx);
2519 r5l_recovery_free_ra_pool(log, ctx);
2527 static void r5l_write_super(struct r5l_log *log, sector_t cp)
2529 struct mddev *mddev = log->rdev->mddev;
2531 log->rdev->journal_tail = cp;
2542 if (!conf || !conf->log) {
2547 switch (conf->log->r5c_journal_mode) {
2582 if (!conf || !conf->log)
2590 conf->log->r5c_journal_mode = mode;
2641 struct r5l_log *log = conf->log;
2650 BUG_ON(!r5c_is_writeback(log));
2700 spin_lock(&log->tree_lock);
2701 pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2705 pslot, &log->tree_lock) >>
2708 &log->big_stripe_tree, pslot,
2716 &log->big_stripe_tree, tree_index,
2719 spin_unlock(&log->tree_lock);
2724 spin_unlock(&log->tree_lock);
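
The fragments above use the radix tree slot itself as a reference counter for big stripes: the count lives in the upper bits of the stored pointer. A hedged sketch of the increment path (R5C_RADIX_COUNT_SHIFT and the deref helper are assumptions based on the visible call shapes):

	void __rcu **pslot;
	uintptr_t refcount;

	spin_lock(&log->tree_lock);
	pslot = radix_tree_lookup_slot(&log->big_stripe_tree, tree_index);
	if (pslot) {
		/* slot present: bump the count stored in the pointer bits */
		refcount = (uintptr_t)radix_tree_deref_slot_protected(
				pslot, &log->tree_lock) >> R5C_RADIX_COUNT_SHIFT;
		radix_tree_replace_slot(&log->big_stripe_tree, pslot,
				(void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT));
	} else {
		/* first reference to this big stripe */
		radix_tree_insert(&log->big_stripe_tree, tree_index,
				(void *)(1UL << R5C_RADIX_COUNT_SHIFT));
	}
	spin_unlock(&log->tree_lock);
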
2808 struct r5l_log *log = conf->log;
2815 if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
2821 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
2843 spin_lock_irq(&log->stripe_in_journal_lock);
2845 spin_unlock_irq(&log->stripe_in_journal_lock);
2848 atomic_dec(&log->stripe_in_journal_count);
2849 r5c_update_log_state(log);
2855 spin_lock(&log->tree_lock);
2856 pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2860 pslot, &log->tree_lock) >>
2863 radix_tree_delete(&log->big_stripe_tree, tree_index);
2866 &log->big_stripe_tree, pslot,
2868 spin_unlock(&log->tree_lock);
2883 r5l_append_flush_payload(log, sh->sector);
2889 int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
2897 BUG_ON(!log);
2905 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
2919 mutex_lock(&log->io_mutex);
2925 r5l_add_no_space_stripe(log, sh);
2926 else if (!r5l_has_free_space(log, reserve)) {
2927 if (sh->log_start == log->last_checkpoint)
2930 r5l_add_no_space_stripe(log, sh);
2932 ret = r5l_log_stripe(log, sh, pages, 0);
2934 spin_lock_irq(&log->io_list_lock);
2935 list_add_tail(&sh->log_list, &log->no_mem_stripes);
2936 spin_unlock_irq(&log->io_list_lock);
2940 mutex_unlock(&log->io_mutex);
2947 struct r5l_log *log = conf->log;
2951 if (!log)
2956 slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
2960 static int r5l_load_log(struct r5l_log *log)
2962 struct md_rdev *rdev = log->rdev;
2965 sector_t cp = log->rdev->journal_tail;
2990 expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
3001 log->last_cp_seq = prandom_u32();
3003 r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
3006 * data very soon. If the super block doesn't have the correct log tail address,
3007 * recovery can't find the log
3009 r5l_write_super(log, cp);
3011 log->last_cp_seq = le64_to_cpu(mb->seq);
3013 log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
3014 log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
3015 if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
3016 log->max_free_space = RECLAIM_MAX_FREE_SPACE;
3017 log->last_checkpoint = cp;
3022 log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
3023 log->seq = log->last_cp_seq + 1;
3024 log->next_checkpoint = cp;
3026 ret = r5l_recovery_log(log);
3028 r5c_update_log_state(log);
3035 int r5l_start(struct r5l_log *log)
3039 if (!log)
3042 ret = r5l_load_log(log);
3044 struct mddev *mddev = log->rdev->mddev;
3055 struct r5l_log *log = conf->log;
3057 if (!log)
3062 conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
3063 schedule_work(&log->disable_writeback_work);
3069 struct r5l_log *log;
3094 log = kzalloc(sizeof(*log), GFP_KERNEL);
3095 if (!log)
3097 log->rdev = rdev;
3099 log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;
3101 log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
3104 mutex_init(&log->io_mutex);
3106 spin_lock_init(&log->io_list_lock);
3107 INIT_LIST_HEAD(&log->running_ios);
3108 INIT_LIST_HEAD(&log->io_end_ios);
3109 INIT_LIST_HEAD(&log->flushing_ios);
3110 INIT_LIST_HEAD(&log->finished_ios);
3111 bio_init(&log->flush_bio, NULL, 0);
3113 log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
3114 if (!log->io_kc)
3117 ret = mempool_init_slab_pool(&log->io_pool, R5L_POOL_SIZE, log->io_kc);
3121 ret = bioset_init(&log->bs, R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
3125 ret = mempool_init_page_pool(&log->meta_pool, R5L_POOL_SIZE, 0);
3129 spin_lock_init(&log->tree_lock);
3130 INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);
3132 log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
3133 log->rdev->mddev, "reclaim");
3134 if (!log->reclaim_thread)
3136 log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
3138 init_waitqueue_head(&log->iounit_wait);
3140 INIT_LIST_HEAD(&log->no_mem_stripes);
3142 INIT_LIST_HEAD(&log->no_space_stripes);
3143 spin_lock_init(&log->no_space_stripes_lock);
3145 INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
3146 INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
3148 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
3149 INIT_LIST_HEAD(&log->stripe_in_journal_list);
3150 spin_lock_init(&log->stripe_in_journal_lock);
3151 atomic_set(&log->stripe_in_journal_count, 0);
3153 rcu_assign_pointer(conf->log, log);
3159 mempool_exit(&log->meta_pool);
3161 bioset_exit(&log->bs);
3163 mempool_exit(&log->io_pool);
3165 kmem_cache_destroy(log->io_kc);
3167 kfree(log);
3173 struct r5l_log *log = conf->log;
3175 conf->log = NULL;
3180 flush_work(&log->disable_writeback_work);
3181 md_unregister_thread(&log->reclaim_thread);
3182 mempool_exit(&log->meta_pool);
3183 bioset_exit(&log->bs);
3184 mempool_exit(&log->io_pool);
3185 kmem_cache_destroy(log->io_kc);
3186 kfree(log);