Lines Matching defs:rdev (each entry is the source line number followed by the matching line)

118 	struct md_rdev *rdev;		/* array member disk associated with
253 bio_init(&io->bio, log->rdev->bdev, io->biovec, PPL_IO_INLINE_BVECS,
378 if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
408 md_error(ppl_conf->mddev, log->rdev);
439 if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
461 log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
463 log->next_io_sector = log->rdev->ppl.sector;
591 struct md_rdev *rdev;
594 rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
595 if (rdev)
596 md_error(rdev->mddev, rdev);
620 struct md_rdev *rdev;
624 rdev = rcu_dereference(conf->disks[i].rdev);
625 if (rdev && !test_bit(Faulty, &rdev->flags))
626 bdev = rdev->bdev;
858 struct md_rdev *rdev;
886 rdev = rcu_dereference_protected(
887 conf->disks[dd_idx].rdev, 1);
888 if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
889 sector >= rdev->recovery_offset)) {
897 __func__, indent, "", rdev->bdev,
899 if (!sync_page_io(rdev, sector, block_size, page2,
901 md_error(mddev, rdev);
920 if (!sync_page_io(log->rdev,
921 ppl_sector - log->rdev->data_offset + i,
926 md_error(mddev, log->rdev);
941 conf->disks[sh.pd_idx].rdev, 1);
943 BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
969 struct md_rdev *rdev = log->rdev;
970 struct mddev *mddev = rdev->mddev;
971 sector_t ppl_sector = rdev->ppl.sector + offset +
990 __func__, rdev->raid_disk, i,
1000 if (!sync_page_io(rdev, sector - rdev->data_offset,
1002 md_error(mddev, rdev);
1035 ret = blkdev_issue_flush(rdev->bdev);
1045 struct md_rdev *rdev = log->rdev;
1049 rdev->raid_disk, (unsigned long long)rdev->ppl.sector);
1057 blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
1058 log->rdev->ppl.size, GFP_NOIO, 0);
1063 if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
1066 md_error(rdev->mddev, rdev);
1077 struct md_rdev *rdev = log->rdev;
1078 struct mddev *mddev = rdev->mddev;
1086 pr_debug("%s: disk: %d\n", __func__, rdev->raid_disk);
1099 while (pplhdr_offset < rdev->ppl.size - (PPL_HEADER_SIZE >> 9)) {
1100 if (!sync_page_io(rdev,
1101 rdev->ppl.sector - rdev->data_offset +
1104 md_error(mddev, rdev);
1199 if (!log->rdev)
1255 static int ppl_validate_rdev(struct md_rdev *rdev)
1266 ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9);
1270 RAID5_STRIPE_SECTORS((struct r5conf *)rdev->mddev->private));
1274 mdname(rdev->mddev), rdev->bdev);
1280 if ((rdev->ppl.sector < rdev->data_offset &&
1281 rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
1282 (rdev->ppl.sector >= rdev->data_offset &&
1283 rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
1285 mdname(rdev->mddev), rdev->bdev);
1289 if (!rdev->mddev->external &&
1290 ((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
1291 (rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
1293 mdname(rdev->mddev), rdev->bdev);
1297 rdev->ppl.size = ppl_size_new;
1302 static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
1304 if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
1312 log->entry_space = (log->rdev->ppl.size << 9) -
1315 log->next_io_sector = rdev->ppl.sector;
1317 if (bdev_write_cache(rdev->bdev))
1408 struct md_rdev *rdev =
1409 rcu_dereference_protected(conf->disks[i].rdev, 1);
1416 log->rdev = rdev;
1418 if (rdev) {
1419 ret = ppl_validate_rdev(rdev);
1423 ppl_init_child_log(log, rdev);
1456 int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
1462 if (!rdev)
1466 __func__, rdev->raid_disk, add ? "add" : "remove",
1467 rdev->bdev);
1469 if (rdev->raid_disk < 0)
1472 if (rdev->raid_disk >= ppl_conf->count)
1475 log = &ppl_conf->child_logs[rdev->raid_disk];
1479 ret = ppl_validate_rdev(rdev);
1481 log->rdev = rdev;
1483 ppl_init_child_log(log, rdev);
1486 log->rdev = NULL;
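
The hits at lines 1280-1283 and 1289-1291 show the two layout checks that ppl_validate_rdev applies before accepting a member disk's PPL area: it must not overlap the array data area, and with internal metadata it must not overlap the superblock either. The sketch below reproduces only that arithmetic as a standalone user-space program; struct rdev_layout, ppl_layout_invalid and the field names are illustrative stand-ins for the struct md_rdev members touched by the listed lines (ppl_size plays the role of ppl_size_new), not kernel code.

/*
 * Minimal sketch of the overlap checks visible at lines 1280-1283 and
 * 1289-1291 above. All quantities are in 512-byte sectors except
 * sb_size_bytes, which is shifted down by 9 as in the listed code.
 */
#include <stdbool.h>
#include <stdio.h>

struct rdev_layout {             /* illustrative stand-in for struct md_rdev */
	long long data_offset;   /* start of array data on the member disk */
	long long sectors;       /* length of the data area */
	long long ppl_sector;    /* absolute start sector of the PPL area */
	long long ppl_offset;    /* PPL start relative to the superblock */
	long long sb_size_bytes; /* superblock size in bytes */
	bool external_metadata;  /* true if metadata is managed externally */
};

/* Return true if a PPL of ppl_size sectors would collide with data or sb. */
static bool ppl_layout_invalid(const struct rdev_layout *r, long long ppl_size)
{
	/* PPL must not overlap the data area (cf. lines 1280-1283). */
	if ((r->ppl_sector < r->data_offset &&
	     r->ppl_sector + ppl_size > r->data_offset) ||
	    (r->ppl_sector >= r->data_offset &&
	     r->data_offset + r->sectors > r->ppl_sector))
		return true;

	/* With internal metadata, PPL must not overlap the superblock
	 * either (cf. lines 1289-1291). */
	if (!r->external_metadata &&
	    ((r->ppl_offset > 0 && r->ppl_offset < (r->sb_size_bytes >> 9)) ||
	     (r->ppl_offset <= 0 && r->ppl_offset + ppl_size > 0)))
		return true;

	return false;
}

int main(void)
{
	struct rdev_layout r = {
		.data_offset = 2048, .sectors = 1 << 20,
		.ppl_sector = 1024, .ppl_offset = 8,
		.sb_size_bytes = 4096, .external_metadata = false,
	};

	/* 1024 PPL sectors end exactly at data_offset: accepted. */
	printf("ppl_size 1024: %s\n",
	       ppl_layout_invalid(&r, 1024) ? "invalid" : "ok");
	/* 1536 PPL sectors run past data_offset: rejected. */
	printf("ppl_size 1536: %s\n",
	       ppl_layout_invalid(&r, 1536) ? "invalid" : "ok");
	return 0;
}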