Lines Matching defs:log (md RAID5 partial parity log, drivers/md/raid5-ppl.c)
15 #include "raid5-log.h"
76 * data+parity is written). The log->io_list tracks all io_units of a log
97 atomic64_t seq; /* current log write sequence number */
116 struct ppl_conf *ppl_conf; /* shared between all log instances */
119 * this log instance */
124 struct list_head io_list; /* all io_units of this log */
136 struct ppl_log *log;
143 u64 seq; /* sequence number of this log write */
144 struct list_head log_sibling; /* log->io_list */
150 bool submitted; /* true if write to log started */
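
Taken together, the struct fragments above describe three cooperating objects: one ppl_conf per array, one ppl_log per member disk, and a stream of ppl_io_unit log writes per ppl_log. A minimal consolidated sketch, reconstructed from the matched lines; fields that do not appear in the matches are trimmed or marked:

    struct ppl_conf {
            struct mddev *mddev;
            struct ppl_log *child_logs;     /* one log per array member disk */
            atomic64_t seq;                 /* current log write sequence number */
            /* ... mempools, bio_sets, recovery state (not in the matches) ... */
    };

    struct ppl_log {
            struct ppl_conf *ppl_conf;      /* shared between all log instances */
            struct md_rdev *rdev;           /* array member disk associated with
                                             * this log instance */
            struct mutex io_mutex;
            struct ppl_io_unit *current_io; /* io_unit accepting new stripes */
            spinlock_t io_list_lock;
            struct list_head io_list;       /* all io_units of this log */
            sector_t next_io_sector;
            unsigned int entry_space;
            bool use_multippl;
            bool wb_cache_on;
            unsigned long disk_flush_bitmap;
    };

    struct ppl_io_unit {
            struct ppl_log *log;
            u64 seq;                        /* sequence number of this log write */
            struct list_head log_sibling;   /* linked on log->io_list */
            unsigned int pp_size;           /* partial parity bytes accumulated */
            bool submitted;                 /* true if write to log started */
            /* ... header page, stripe list, pending counters ... */
    };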
232 static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
235 struct ppl_conf *ppl_conf = log->ppl_conf;
248 io->log = log;
266 static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
268 struct ppl_io_unit *io = log->current_io;
279 if (io && (io->pp_size == log->entry_space ||
288 io = ppl_new_iounit(log, sh);
291 spin_lock_irq(&log->io_list_lock);
292 list_add_tail(&io->log_sibling, &log->io_list);
293 spin_unlock_irq(&log->io_list_lock);
295 log->current_io = io;
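
The ppl_log_stripe() fragments show the roll-over pattern for the current io_unit: retire it once full, then allocate a fresh one and link it onto io_list under io_list_lock. A condensed sketch; the header-entry limit (entries_count, PPL_HDR_MAX_ENTRIES) is not in the matches and is reconstructed from context:

    struct ppl_io_unit *io = log->current_io;

    /* retire the current io_unit if it has no room left */
    if (io && (io->pp_size == log->entry_space ||
               io->entries_count == PPL_HDR_MAX_ENTRIES))   /* assumed limit */
            io = NULL;

    if (!io) {
            io = ppl_new_iounit(log, sh);   /* may fail under memory pressure */
            if (!io)
                    return -ENOMEM;

            spin_lock_irq(&log->io_list_lock);
            list_add_tail(&io->log_sibling, &log->io_list);
            spin_unlock_irq(&log->io_list_lock);

            log->current_io = io;
    }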
364 struct ppl_log *log;
373 log = &ppl_conf->child_logs[sh->pd_idx];
375 mutex_lock(&log->io_mutex);
377 if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
378 mutex_unlock(&log->io_mutex);
386 if (ppl_log_stripe(log, sh)) {
392 mutex_unlock(&log->io_mutex);
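
ppl_write_stripe() selects the child log by the stripe's parity disk index and serializes on that log's io_mutex; the NULL/Faulty check bails out early so nothing is logged to a missing or failed member. A sketch of the control flow (the -EAGAIN return and the no-memory retry queue are assumptions, they do not appear in the matches):

    log = &ppl_conf->child_logs[sh->pd_idx];

    mutex_lock(&log->io_mutex);

    if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
            mutex_unlock(&log->io_mutex);
            return -EAGAIN;                 /* assumed error path */
    }

    if (ppl_log_stripe(log, sh)) {
            /* out of io_units: park the stripe for a later retry
             * (retry list handling is outside these matches) */
    }

    mutex_unlock(&log->io_mutex);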
400 struct ppl_log *log = io->log;
401 struct ppl_conf *ppl_conf = log->ppl_conf;
407 md_error(ppl_conf->mddev, log->rdev);
431 struct ppl_log *log = io->log;
432 struct ppl_conf *ppl_conf = log->ppl_conf;
440 if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
461 if (log->use_multippl &&
462 log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
464 log->next_io_sector = log->rdev->ppl.sector;
469 bio_set_dev(bio, log->rdev->bdev);
470 bio->bi_iter.bi_sector = log->next_io_sector;
474 pr_debug("%s: log->next_io_sector: %llu\n", __func__,
475 (unsigned long long)log->next_io_sector);
477 if (log->use_multippl)
478 log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;
480 WARN_ON(log->disk_flush_bitmap != 0);
488 set_bit(i, &log->disk_flush_bitmap);
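
File lines 461-478 implement a small circular allocator over the per-disk PPL area: if the header plus partial parity of this io_unit would run past rdev->ppl.sector + rdev->ppl.size, the write position rewinds to the start of the area; otherwise it advances by the size just written, in 512-byte sectors. Schematically (the two local variables are introduced here for readability only):

    sector_t area_end   = log->rdev->ppl.sector + log->rdev->ppl.size;
    sector_t io_sectors = (PPL_HEADER_SIZE + io->pp_size) >> 9;

    if (log->use_multippl && area_end - log->next_io_sector < io_sectors)
            log->next_io_sector = log->rdev->ppl.sector;    /* rewind */

    bio->bi_iter.bi_sector = log->next_io_sector;

    if (log->use_multippl)
            log->next_io_sector += io_sectors;              /* advance */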
515 static void ppl_submit_current_io(struct ppl_log *log)
519 spin_lock_irq(&log->io_list_lock);
521 io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
526 spin_unlock_irq(&log->io_list_lock);
531 if (io == log->current_io)
532 log->current_io = NULL;
541 struct ppl_log *log;
545 log = &ppl_conf->child_logs[i];
547 mutex_lock(&log->io_mutex);
548 ppl_submit_current_io(log);
549 mutex_unlock(&log->io_mutex);
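
ppl_submit_current_io() only ever submits the head of io_list, and only if it has not been submitted yet, so io_units hit the disk strictly in sequence order; ppl_write_stripe_run() repeats this for every child log under its io_mutex. The matched lines assemble into:

    spin_lock_irq(&log->io_list_lock);
    io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
                                  log_sibling);
    if (io && io->submitted)
            io = NULL;                      /* head is already in flight */
    spin_unlock_irq(&log->io_list_lock);

    if (io) {
            io->submitted = true;
            if (io == log->current_io)
                    log->current_io = NULL; /* stop accepting new stripes */
            ppl_submit_iounit(io);
    }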
555 struct ppl_log *log = io->log;
556 struct ppl_conf *ppl_conf = log->ppl_conf;
564 spin_lock(&log->io_list_lock);
566 spin_unlock(&log->io_list_lock);
590 struct ppl_log *log = io->log;
591 struct ppl_conf *ppl_conf = log->ppl_conf;
617 struct ppl_log *log = io->log;
618 struct ppl_conf *ppl_conf = log->ppl_conf;
626 for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
654 log->disk_flush_bitmap = 0;
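
The flush path walks disk_flush_bitmap, issues one PREFLUSH bio per member disk that has write-back-cached data outstanding, and clears the bitmap. A reduced sketch; the rdev lookup, the completion handler name, and the bio_alloc_bioset() signature (as in pre-5.18 kernels) are assumptions, not part of the matches:

    for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
            struct md_rdev *rdev = conf->disks[i].rdev;     /* lookup simplified */

            if (rdev && !test_bit(Faulty, &rdev->flags)) {
                    struct bio *bio = bio_alloc_bioset(GFP_NOIO, 0,
                                                       &ppl_conf->flush_bs);

                    bio_set_dev(bio, rdev->bdev);
                    bio->bi_private = io;
                    bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
                    bio->bi_end_io = ppl_flush_endio;       /* assumed name */
                    submit_bio(bio);
            }
    }

    log->disk_flush_bitmap = 0;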
663 struct ppl_log *log)
667 io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
680 struct ppl_log *log = &ppl_conf->child_logs[i];
682 spin_lock_irq(&log->io_list_lock);
684 ppl_no_io_unit_submitted(conf, log),
685 log->io_list_lock);
686 spin_unlock_irq(&log->io_list_lock);
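
Quiesce waits, per child log, until the head of io_list is either absent or not yet submitted, i.e. until every in-flight log write has completed; wait_event_lock_irq() drops and retakes io_list_lock around each sleep. Assembled from the matches (the waitqueue name is an assumption):

    static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
                                                struct ppl_log *log)
    {
            struct ppl_io_unit *io;

            io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
                                          log_sibling);

            return !io || !io->submitted;
    }

    /* for each child log, on quiesce: */
    spin_lock_irq(&log->io_list_lock);
    wait_event_lock_irq(conf->wait_for_quiescent,           /* assumed waitqueue */
                        ppl_no_io_unit_submitted(conf, log),
                        log->io_list_lock);
    spin_unlock_irq(&log->io_list_lock);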
691 int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio)
709 if (io->log->disk_flush_bitmap)
795 static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
798 struct ppl_conf *ppl_conf = log->ppl_conf;
928 if (!sync_page_io(log->rdev,
929 ppl_sector - log->rdev->data_offset + i,
934 md_error(mddev, log->rdev);
948 BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
970 static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
973 struct ppl_conf *ppl_conf = log->ppl_conf;
974 struct md_rdev *rdev = log->rdev;
1030 ret = ppl_recover_entry(log, e, ppl_sector);
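
During recovery each referenced block is read back with sync_page_io(); the sector passed is relative to the member's data_offset, which is why it is subtracted, and a failed read marks the log device Faulty via md_error(). A sketch of the read step around the matched lines (block size, op flags, and the error path are assumptions):

    if (!sync_page_io(log->rdev,
                      ppl_sector - log->rdev->data_offset + i,
                      block_size, page, REQ_OP_READ, 0, false)) {
            md_error(mddev, log->rdev);
            ret = -EIO;                     /* assumed error handling */
            goto out;
    }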
1046 static int ppl_write_empty_header(struct ppl_log *log)
1050 struct md_rdev *rdev = log->rdev;
1063 log->rdev->ppl.size, GFP_NOIO, 0);
1065 pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
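
Writing an "empty" header serves two purposes: zeroing the whole PPL area so stale headers cannot be mistaken for valid ones, and stamping a signed header at ppl.sector. A sketch reconstructed around the matched lines; the checksum and the FUA write flags are assumptions:

    blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
                         log->rdev->ppl.size, GFP_NOIO, 0);

    pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
    pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE)); /* assumed */

    if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
                      PPL_HEADER_SIZE, page,
                      REQ_OP_WRITE | REQ_SYNC | REQ_FUA, 0, false))    /* assumed flags */
            md_error(rdev->mddev, rdev);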
1079 static int ppl_load_distributed(struct ppl_log *log)
1081 struct ppl_conf *ppl_conf = log->ppl_conf;
1082 struct md_rdev *rdev = log->rdev;
1178 /* attempt to recover from log if we are starting a dirty array */
1180 ret = ppl_recover(log, pplhdr, pplhdr_offset);
1184 ret = ppl_write_empty_header(log);
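
ppl_load_distributed() ends by either replaying the newest header (when starting a dirty array) or invalidating the log with a fresh empty header (when starting clean). The conditions around the two matched calls are reconstructed and should be treated as assumptions:

    /* attempt to recover from log if we are starting a dirty array */
    if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)     /* assumed */
            ret = ppl_recover(log, pplhdr, pplhdr_offset);

    /* write empty header if we are starting the array */
    if (!ret && !mddev->pers)                                          /* assumed */
            ret = ppl_write_empty_header(log);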
1203 struct ppl_log *log = &ppl_conf->child_logs[i];
1206 if (!log->rdev)
1209 ret = ppl_load_distributed(log);
1310 static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
1316 log->use_multippl = true;
1318 &log->ppl_conf->mddev->flags);
1319 log->entry_space = PPL_SPACE_SIZE;
1321 log->use_multippl = false;
1322 log->entry_space = (log->rdev->ppl.size << 9) -
1325 log->next_io_sector = rdev->ppl.sector;
1329 log->wb_cache_on = true;
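
ppl_init_child_log() chooses between the two on-disk layouts from the size of the reserved PPL area: with room for at least two (space + header) slots it enables the rotating multi-PPL layout with a fixed entry_space of PPL_SPACE_SIZE; otherwise a single PPL is used and entry_space is whatever remains after one header. wb_cache_on records whether the member device has a volatile write cache, which later feeds disk_flush_bitmap. Sketch (the size threshold and the flag name are reconstructed; treat both as assumptions):

    if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE + PPL_HEADER_SIZE) * 2) { /* assumed */
            log->use_multippl = true;
            set_bit(MD_HAS_MULTIPLE_PPLS,                   /* assumed flag name */
                    &log->ppl_conf->mddev->flags);
            log->entry_space = PPL_SPACE_SIZE;
    } else {
            log->use_multippl = false;
            log->entry_space = (log->rdev->ppl.size << 9) - PPL_HEADER_SIZE;
    }
    log->next_io_sector = rdev->ppl.sector;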
1418 struct ppl_log *log = &ppl_conf->child_logs[i];
1421 mutex_init(&log->io_mutex);
1422 spin_lock_init(&log->io_list_lock);
1423 INIT_LIST_HEAD(&log->io_list);
1425 log->ppl_conf = ppl_conf;
1426 log->rdev = rdev;
1433 ppl_init_child_log(log, rdev);
1469 struct ppl_log *log;
1486 log = &ppl_conf->child_logs[rdev->raid_disk];
1488 mutex_lock(&log->io_mutex);
1492 log->rdev = rdev;
1493 ret = ppl_write_empty_header(log);
1494 ppl_init_child_log(log, rdev);
1497 log->rdev = NULL;
1499 mutex_unlock(&log->io_mutex);
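
ppl_modify_log() handles member hot-add and removal: on add it writes a fresh empty header and re-initializes the child log before exposing log->rdev to the write path; on removal it simply clears log->rdev, and the NULL/Faulty checks in ppl_write_stripe() do the rest. Everything happens under io_mutex, so it cannot race with in-flight logging. Sketch (the validation step is assumed, it is not in the matches):

    mutex_lock(&log->io_mutex);
    if (add) {
            /* rdev layout validation assumed here */
            log->rdev = rdev;
            ret = ppl_write_empty_header(log);
            ppl_init_child_log(log, rdev);
    } else {
            log->rdev = NULL;
    }
    mutex_unlock(&log->io_mutex);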