Lines matching defs:log in drivers/md/raid5-ppl.c (md/RAID5 partial parity log, PPL)
15 #include "raid5-log.h"
76 * data+parity is written). The log->io_list tracks all io_units of a log
97 atomic64_t seq; /* current log write sequence number */
116 struct ppl_conf *ppl_conf; /* shared between all log instances */
119 * this log instance */
124 struct list_head io_list; /* all io_units of this log */
136 struct ppl_log *log;
143 u64 seq; /* sequence number of this log write */
144 struct list_head log_sibling; /* log->io_list */
150 bool submitted; /* true if write to log started */
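
Read together, the struct members matched above give the shape of the three core types. The following is a sketch reconstructed from these matches alone; members not matched by defs:log are elided and the ordering is assumed:

	/* sketch only: unmatched members elided, ordering assumed */
	struct ppl_conf {
		/* ... */
		atomic64_t seq;			/* current log write sequence number */
	};

	struct ppl_log {
		struct ppl_conf *ppl_conf;	/* shared between all log instances */
		/* ... */
		struct list_head io_list;	/* all io_units of this log */
	};

	struct ppl_io_unit {
		struct ppl_log *log;
		/* ... */
		u64 seq;			/* sequence number of this log write */
		struct list_head log_sibling;	/* log->io_list */
		/* ... */
		bool submitted;			/* true if write to log started */
	};
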
232 static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
235 struct ppl_conf *ppl_conf = log->ppl_conf;
248 io->log = log;
253 bio_init(&io->bio, log->rdev->bdev, io->biovec, PPL_IO_INLINE_BVECS,
267 static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
269 struct ppl_io_unit *io = log->current_io;
280 if (io && (io->pp_size == log->entry_space ||
289 io = ppl_new_iounit(log, sh);
292 spin_lock_irq(&log->io_list_lock);
293 list_add_tail(&io->log_sibling, &log->io_list);
294 spin_unlock_irq(&log->io_list_lock);
296 log->current_io = io;
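
Lines 267-296 show how log writes are batched: stripes accumulate in log->current_io until its partial-parity space (log->entry_space) is exhausted, and a fresh io_unit is linked onto log->io_list under io_list_lock. A hedged consolidation of that flow, with the unmatched error handling assumed:

	struct ppl_io_unit *io = log->current_io;

	/* start a new io_unit when the current one is full */
	if (io && (io->pp_size == log->entry_space /* || ... */))
		io = NULL;

	if (!io) {
		io = ppl_new_iounit(log, sh);
		if (!io)			/* failure path assumed */
			return -ENOMEM;
		spin_lock_irq(&log->io_list_lock);
		list_add_tail(&io->log_sibling, &log->io_list);
		spin_unlock_irq(&log->io_list_lock);
		log->current_io = io;
	}
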
365 struct ppl_log *log;
374 log = &ppl_conf->child_logs[sh->pd_idx];
376 mutex_lock(&log->io_mutex);
378 if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
379 mutex_unlock(&log->io_mutex);
387 if (ppl_log_stripe(log, sh)) {
393 mutex_unlock(&log->io_mutex);
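
The matches from lines 365-393 show the per-disk fan-out: each parity disk gets its own child log, selected by the stripe's pd_idx, and all state for that log is serialized on io_mutex. In outline, with the unmatched lines and the return value assumed:

	log = &ppl_conf->child_logs[sh->pd_idx];

	mutex_lock(&log->io_mutex);

	/* no PPL without a functional member disk to hold it */
	if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
		mutex_unlock(&log->io_mutex);
		return -ENODEV;		/* return value assumed */
	}

	if (ppl_log_stripe(log, sh)) {
		/* ... unmatched failure handling ... */
	}

	mutex_unlock(&log->io_mutex);
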
401 struct ppl_log *log = io->log;
402 struct ppl_conf *ppl_conf = log->ppl_conf;
408 md_error(ppl_conf->mddev, log->rdev);
430 struct ppl_log *log = io->log;
431 struct ppl_conf *ppl_conf = log->ppl_conf;
439 if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
460 if (log->use_multippl &&
461 log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
463 log->next_io_sector = log->rdev->ppl.sector;
467 bio->bi_iter.bi_sector = log->next_io_sector;
470 pr_debug("%s: log->next_io_sector: %llu\n", __func__,
471 (unsigned long long)log->next_io_sector);
473 if (log->use_multippl)
474 log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;
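
The pointer arithmetic at lines 460-474 is in units of 512-byte sectors while the header and partial-parity sizes are in bytes, hence the >> 9. A worked example, assuming a 4 KiB PPL header and a 60 KiB partial-parity payload:

	/* bytes to 512 B sectors: (4096 + 61440) >> 9 = 65536 >> 9 = 128 */
	log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;

	/* in multi-PPL mode, when fewer sectors than this remain before
	 * rdev->ppl.sector + rdev->ppl.size, the write pointer wraps back
	 * to rdev->ppl.sector (lines 460-463) */
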
476 WARN_ON(log->disk_flush_bitmap != 0);
484 set_bit(i, &log->disk_flush_bitmap);
509 static void ppl_submit_current_io(struct ppl_log *log)
513 spin_lock_irq(&log->io_list_lock);
515 io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
520 spin_unlock_irq(&log->io_list_lock);
525 if (io == log->current_io)
526 log->current_io = NULL;
535 struct ppl_log *log;
539 log = &ppl_conf->child_logs[i];
541 mutex_lock(&log->io_mutex);
542 ppl_submit_current_io(log);
543 mutex_unlock(&log->io_mutex);
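
Lines 535-543 are from the run stage: every child log is visited in turn and its pending io_unit submitted, each under its own io_mutex. A minimal sketch, with the loop bound assumed:

	for (i = 0; i < ppl_conf->count; i++) {	/* bound assumed */
		log = &ppl_conf->child_logs[i];

		mutex_lock(&log->io_mutex);
		ppl_submit_current_io(log);
		mutex_unlock(&log->io_mutex);
	}
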
549 struct ppl_log *log = io->log;
550 struct ppl_conf *ppl_conf = log->ppl_conf;
558 spin_lock(&log->io_list_lock);
560 spin_unlock(&log->io_list_lock);
584 struct ppl_log *log = io->log;
585 struct ppl_conf *ppl_conf = log->ppl_conf;
610 struct ppl_log *log = io->log;
611 struct ppl_conf *ppl_conf = log->ppl_conf;
619 for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
645 log->disk_flush_bitmap = 0;
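
disk_flush_bitmap, set per member disk at line 484 while the io_unit is built, drives the post-write flush at lines 610-645: a flush is issued to each marked disk, and the bitmap is cleared once they are all in flight. In outline, with the bio construction assumed:

	for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
		/* issue a REQ_PREFLUSH bio to member disk i
		 * (not matched by defs:log; assumed) */
	}

	log->disk_flush_bitmap = 0;
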
654 struct ppl_log *log)
658 io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
671 struct ppl_log *log = &ppl_conf->child_logs[i];
673 spin_lock_irq(&log->io_list_lock);
675 ppl_no_io_unit_submitted(conf, log),
676 log->io_list_lock);
677 spin_unlock_irq(&log->io_list_lock);
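
Quiescing (lines 654-677) waits, per child log, until the head of io_list is either gone or not yet submitted; wait_event_lock_irq drops and retakes io_list_lock around each check. A hedged reconstruction:

	static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
						    struct ppl_log *log)
	{
		struct ppl_io_unit *io;

		io = list_first_entry_or_null(&log->io_list,
					      struct ppl_io_unit, log_sibling);

		/* condition assumed from the 'submitted' flag at line 150 */
		return !io || !io->submitted;
	}

	spin_lock_irq(&log->io_list_lock);
	wait_event_lock_irq(conf->wait_for_quiescent,	/* waitqueue assumed */
			    ppl_no_io_unit_submitted(conf, log),
			    log->io_list_lock);
	spin_unlock_irq(&log->io_list_lock);
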
700 if (io->log->disk_flush_bitmap)
786 static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
789 struct ppl_conf *ppl_conf = log->ppl_conf;
920 if (!sync_page_io(log->rdev,
921 ppl_sector - log->rdev->data_offset + i,
926 md_error(mddev, log->rdev);
943 BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
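
The subtraction at line 921 compensates for sync_page_io(), which adds rdev->data_offset itself for non-metadata I/O; ppl_sector here is already a device sector. Illustrative arithmetic, with a 2048-sector data_offset assumed:

	/* sector passed in:  ppl_sector - data_offset + i
	 * sector read by sync_page_io():  (that sector) + data_offset
	 *                              =  ppl_sector + i
	 * e.g. (10240 - 2048 + 0) + 2048 = 10240 */
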
965 static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
968 struct ppl_conf *ppl_conf = log->ppl_conf;
969 struct md_rdev *rdev = log->rdev;
1025 ret = ppl_recover_entry(log, e, ppl_sector);
1041 static int ppl_write_empty_header(struct ppl_log *log)
1045 struct md_rdev *rdev = log->rdev;
1058 log->rdev->ppl.size, GFP_NOIO, 0);
1060 pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
1074 static int ppl_load_distributed(struct ppl_log *log)
1076 struct ppl_conf *ppl_conf = log->ppl_conf;
1077 struct md_rdev *rdev = log->rdev;
1171 /* attempt to recover from log if we are starting a dirty array */
1173 ret = ppl_recover(log, pplhdr, pplhdr_offset);
1177 ret = ppl_write_empty_header(log);
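
Lines 1171-1177 give the load-time policy: replay the log only when the array is being started dirty, then overwrite the header so the stale entries cannot be replayed again. In outline, with the dirtiness check assumed:

	/* attempt to recover from log if we are starting a dirty array */
	if (/* array dirty; condition not matched by defs:log */)
		ret = ppl_recover(log, pplhdr, pplhdr_offset);

	/* write an empty header to invalidate the just-replayed log */
	if (!ret)
		ret = ppl_write_empty_header(log);
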
1196 struct ppl_log *log = &ppl_conf->child_logs[i];
1199 if (!log->rdev)
1202 ret = ppl_load_distributed(log);
1302 static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
1306 log->use_multippl = true;
1308 &log->ppl_conf->mddev->flags);
1309 log->entry_space = PPL_SPACE_SIZE;
1311 log->use_multippl = false;
1312 log->entry_space = (log->rdev->ppl.size << 9) -
1315 log->next_io_sector = rdev->ppl.sector;
1318 log->wb_cache_on = true;
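
ppl_init_child_log (lines 1302-1318) sizes the per-write entry space: rdev->ppl.size is in 512-byte sectors, so << 9 converts it to bytes before the header is deducted at line 1312. Worked numbers, assuming a 1 MiB PPL area (2048 sectors) and a 4 KiB header:

	/* single-PPL mode:
	 *   entry_space = (2048 << 9) - 4096
	 *               = 1048576 - 4096 = 1044480 bytes
	 * multi-PPL mode caps each write at PPL_SPACE_SIZE instead and
	 * starts writing at rdev->ppl.sector (line 1315) */
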
1406 struct ppl_log *log = &ppl_conf->child_logs[i];
1411 mutex_init(&log->io_mutex);
1412 spin_lock_init(&log->io_list_lock);
1413 INIT_LIST_HEAD(&log->io_list);
1415 log->ppl_conf = ppl_conf;
1416 log->rdev = rdev;
1423 ppl_init_child_log(log, rdev);
1459 struct ppl_log *log;
1475 log = &ppl_conf->child_logs[rdev->raid_disk];
1477 mutex_lock(&log->io_mutex);
1481 log->rdev = rdev;
1482 ret = ppl_write_empty_header(log);
1483 ppl_init_child_log(log, rdev);
1486 log->rdev = NULL;
1488 mutex_unlock(&log->io_mutex);
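
ppl_modify_log (lines 1459-1488) handles member-disk hot add/remove under the child log's io_mutex: on add, a fresh empty header is written before the log is (re)initialized; on remove, log->rdev is simply cleared. A sketch with the branch condition assumed:

	log = &ppl_conf->child_logs[rdev->raid_disk];

	mutex_lock(&log->io_mutex);
	if (/* adding the disk; condition not matched by defs:log */) {
		log->rdev = rdev;
		ret = ppl_write_empty_header(log);
		ppl_init_child_log(log, rdev);
	} else {
		log->rdev = NULL;
	}
	mutex_unlock(&log->io_mutex);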