Lines matching refs: era (drivers/md/dm-era-target.c)
15 #define DM_MSG_PREFIX "era"
45 * after digesting into the era array.
273 * We preallocate 2 writesets. When an era rolls over we
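
The comment at line 273 describes dm-era's double-buffered writesets: two are preallocated so an era rollover never has to allocate on the I/O path. A minimal userspace sketch of the swap, with hypothetical names rather than the kernel's structures:

    #include <stdio.h>

    struct writeset { unsigned era; };

    struct era_md {
        struct writeset writesets[2];      /* preallocated pair */
        struct writeset *current_writeset;
        unsigned current_era;
    };

    static void rollover(struct era_md *md)
    {
        /* archive md->current_writeset first (elided), then flip */
        md->current_era++;
        md->current_writeset = (md->current_writeset == &md->writesets[0])
            ? &md->writesets[1] : &md->writesets[0];
        md->current_writeset->era = md->current_era;
    }

    int main(void)
    {
        struct era_md md = { .current_writeset = &md.writesets[0] };

        rollover(&md);
        printf("era %u uses writeset %ld\n", md.current_era,
               (long)(md.current_writeset - md.writesets));
        return 0;
    }
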
452 DMERR("couldn't create era array");
658 * Writesets get 'digested' into the main era array.
665 uint32_t era;
681 uint64_t key = d->era;
742 d->era = key;
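
The digest fragments above (lines 658-742) fold an archived writeset, keyed by its era, into the main era array. A toy model of that fold and of the resulting "changed since era E?" query; all names here are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    #define NR_BLOCKS 8

    /* Fold a writeset (a bitmap tagged with its era) into the flat
     * per-block array recording the last era each block was written. */
    static void digest_writeset(uint32_t era_array[NR_BLOCKS],
                                unsigned long bits, uint32_t era)
    {
        for (unsigned b = 0; b < NR_BLOCKS; b++)
            if (bits & (1UL << b))
                era_array[b] = era;
    }

    int main(void)
    {
        uint32_t era_array[NR_BLOCKS] = { 0 };
        unsigned long ws = (1UL << 2) | (1UL << 5); /* blocks 2, 5 dirty */

        digest_writeset(era_array, ws, 7);

        /* "changed since era 5?" becomes era_array[b] >= 5 */
        for (unsigned b = 0; b < NR_BLOCKS; b++)
            printf("block %u: era %u%s\n", b, era_array[b],
                   era_array[b] >= 5 ? " (changed)" : "");
        return 0;
    }
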
943 DMERR("%s: new era failed", __func__);
1011 * Metadata snapshots allow userland to access era data.
1025 DMERR("%s: era rollover failed", __func__);
1059 DMERR("%s: couldn't inc era tree root", __func__);
1107 DMERR("%s: error deleting era array clone", __func__);
1122 uint32_t era;
1146 s->era = md->current_era;
1153 struct era {
1191 static bool block_size_is_power_of_two(struct era *era)
1193 return era->sectors_per_block_shift >= 0;
1196 static dm_block_t get_block(struct era *era, struct bio *bio)
1200 if (!block_size_is_power_of_two(era))
1201 (void) sector_div(block_nr, era->sectors_per_block);
1203 block_nr >>= era->sectors_per_block_shift;
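
get_block() (lines 1196-1203) converts a bio's sector to a block number, shifting when the block size is a power of two and dividing otherwise. A compilable userspace equivalent of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t dm_block_t;

    /* shift < 0 means the block size is not a power of two */
    static dm_block_t get_block(uint64_t sector, uint32_t sectors_per_block,
                                int sectors_per_block_shift)
    {
        if (sectors_per_block_shift < 0)
            return sector / sectors_per_block;  /* sector_div() path */
        return sector >> sectors_per_block_shift;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long) get_block(1000, 8, 3));   /* 125 */
        printf("%llu\n", (unsigned long long) get_block(1000, 24, -1)); /* 41 */
        return 0;
    }
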
1208 static void remap_to_origin(struct era *era, struct bio *bio)
1210 bio_set_dev(bio, era->origin_dev->bdev);
1216 static void wake_worker(struct era *era)
1218 if (!atomic_read(&era->suspended))
1219 queue_work(era->wq, &era->worker);
1222 static void process_old_eras(struct era *era)
1226 if (!era->digest.step)
1229 r = era->digest.step(era->md, &era->digest);
1232 era->digest.step = NULL;
1234 } else if (era->digest.step)
1235 wake_worker(era);
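
process_old_eras() (lines 1222-1235) runs the digest incrementally: one bounded step per worker pass, the step hook clearing itself when finished and the worker re-waking itself while the hook remains installed. A userspace model of that resumable-step pattern, with hypothetical names:

    #include <stdio.h>

    struct digest {
        unsigned remaining;              /* work left, in steps */
        int (*step)(struct digest *);
    };

    static int digest_step(struct digest *d)
    {
        printf("digest step, %u left\n", d->remaining);
        if (--d->remaining == 0)
            d->step = NULL;              /* done: uninstall the hook */
        return 0;
    }

    static void process_old_eras(struct digest *d)
    {
        if (!d->step)
            return;
        if (d->step(d) < 0)
            d->step = NULL;              /* error: give up */
        else if (d->step)
            printf("wake_worker()\n");   /* more to do next pass */
    }

    int main(void)
    {
        struct digest d = { .remaining = 3, .step = digest_step };

        while (d.step)
            process_old_eras(&d);
        return 0;
    }
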
1238 static void process_deferred_bios(struct era *era)
1246 struct writeset *ws = era->md->current_writeset;
1251 spin_lock(&era->deferred_lock);
1252 bio_list_merge(&deferred_bios, &era->deferred_bios);
1253 bio_list_init(&era->deferred_bios);
1254 spin_unlock(&era->deferred_lock);
1260 r = writeset_test_and_set(&era->md->bitset_info, ws,
1261 get_block(era, bio));
1275 r = metadata_commit(era->md);
1291 set_bit(get_block(era, bio), ws->bits);
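
process_deferred_bios() (lines 1238-1291) enforces an ordering: newly written blocks are test-and-set in the on-disk writeset and committed before the bios are issued, and only afterwards mirrored into the in-core bitset. A toy model of that mark/commit/issue sequence; the error and requeue paths are elided:

    #include <stdbool.h>
    #include <stdio.h>

    struct bio_model { unsigned block; };

    static bool test_and_set(unsigned long *bits, unsigned block)
    {
        unsigned long mask = 1UL << block;
        bool was_set = *bits & mask;

        *bits |= mask;
        return was_set;
    }

    int main(void)
    {
        unsigned long disk_bits = 0;   /* on-disk writeset */
        unsigned long core_bits = 0;   /* in-core copy, updated post-commit */
        struct bio_model bios[] = { {3}, {5}, {3} };
        unsigned marked = 0;

        for (unsigned i = 0; i < 3; i++)
            if (!test_and_set(&disk_bits, bios[i].block))
                marked++;

        if (marked)
            puts("metadata_commit() before issuing the writes");

        for (unsigned i = 0; i < 3; i++) {
            core_bits |= 1UL << bios[i].block;   /* set_bit() */
            printf("submit bio for block %u\n", bios[i].block);
        }
        printf("in-core bits: %#lx\n", core_bits);
        return 0;
    }
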
1298 static void process_rpc_calls(struct era *era)
1306 spin_lock(&era->rpc_lock);
1307 list_splice_init(&era->rpc_calls, &calls);
1308 spin_unlock(&era->rpc_lock);
1311 rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg);
1316 r = metadata_commit(era->md);
1326 static void kick_off_digest(struct era *era)
1328 if (era->md->archived_writesets) {
1329 era->md->archived_writesets = false;
1330 metadata_digest_start(era->md, &era->digest);
1336 struct era *era = container_of(ws, struct era, worker);
1338 kick_off_digest(era);
1339 process_old_eras(era);
1340 process_deferred_bios(era);
1341 process_rpc_calls(era);
1344 static void defer_bio(struct era *era, struct bio *bio)
1346 spin_lock(&era->deferred_lock);
1347 bio_list_add(&era->deferred_bios, bio);
1348 spin_unlock(&era->deferred_lock);
1350 wake_worker(era);
1356 static int perform_rpc(struct era *era, struct rpc *rpc)
1361 spin_lock(&era->rpc_lock);
1362 list_add(&rpc->list, &era->rpc_calls);
1363 spin_unlock(&era->rpc_lock);
1365 wake_worker(era);
1371 static int in_worker0(struct era *era, int (*fn)(struct era_metadata *))
1377 return perform_rpc(era, &rpc);
1380 static int in_worker1(struct era *era,
1388 return perform_rpc(era, &rpc);
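
perform_rpc() and the in_worker0/in_worker1 wrappers (lines 1298-1388) funnel all metadata operations through the single worker: callers queue an rpc naming a zero- or one-argument metadata function, and the worker drains the queue, runs each call, then commits once for the batch. A synchronous userspace model; the kernel's completion/wait machinery is elided:

    #include <stdio.h>

    struct md { unsigned era; };

    struct rpc {
        int (*fn0)(struct md *);
        int (*fn1)(struct md *, void *);
        void *arg;
        int result;
        struct rpc *next;
    };

    static int era_rollover(struct md *md) { md->era++; return 0; }

    static void process_rpc_calls(struct md *md, struct rpc *calls)
    {
        for (struct rpc *r = calls; r; r = r->next)
            r->result = r->fn0 ? r->fn0(md) : r->fn1(md, r->arg);
        printf("metadata_commit() for the batch, era now %u\n", md->era);
    }

    int main(void)
    {
        struct md md = { .era = 4 };
        struct rpc b = { .fn0 = era_rollover };
        struct rpc a = { .fn0 = era_rollover, .next = &b };

        process_rpc_calls(&md, &a);
        printf("results: %d %d\n", a.result, b.result);
        return 0;
    }
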
1391 static void start_worker(struct era *era)
1393 atomic_set(&era->suspended, 0);
1396 static void stop_worker(struct era *era)
1398 atomic_set(&era->suspended, 1);
1399 drain_workqueue(era->wq);
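
start_worker()/stop_worker() (lines 1391-1399) gate the workqueue with an atomic flag: suspension is set before draining, so wake_worker() cannot queue new work behind the drain. A single-threaded userspace sketch of that gate, standing in for queue_work()/drain_workqueue():

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int suspended;
    static int queued;

    static void wake_worker(void)
    {
        if (!atomic_load(&suspended))
            queued++;                    /* queue_work() */
        else
            printf("wake ignored while suspended\n");
    }

    static void stop_worker(void)
    {
        atomic_store(&suspended, 1);     /* gate first... */
        queued = 0;                      /* ...then drain_workqueue() */
    }

    int main(void)
    {
        wake_worker();
        stop_worker();
        wake_worker();                   /* ignored */
        printf("queued = %d\n", queued);
        return 0;
    }
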
1405 static void era_destroy(struct era *era)
1407 if (era->md)
1408 metadata_close(era->md);
1410 if (era->wq)
1411 destroy_workqueue(era->wq);
1413 if (era->origin_dev)
1414 dm_put_device(era->ti, era->origin_dev);
1416 if (era->metadata_dev)
1417 dm_put_device(era->ti, era->metadata_dev);
1419 kfree(era);
1422 static dm_block_t calc_nr_blocks(struct era *era)
1424 return dm_sector_div_up(era->ti->len, era->sectors_per_block);
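
calc_nr_blocks() (line 1424) rounds up, so a partial block at the end of the target still gets a bitset slot. dm_sector_div_up() behaves like this plain-C round-up division:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t sector_div_up(uint64_t len, uint32_t sectors_per_block)
    {
        return (len + sectors_per_block - 1) / sectors_per_block;
    }

    int main(void)
    {
        /* 1000 sectors at 128 sectors/block -> 8 blocks, not 7 */
        printf("%llu\n", (unsigned long long) sector_div_up(1000, 128));
        return 0;
    }
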
1442 struct era *era;
1450 era = kzalloc(sizeof(*era), GFP_KERNEL);
1451 if (!era) {
1452 ti->error = "Error allocating era structure";
1456 era->ti = ti;
1458 r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev);
1461 era_destroy(era);
1465 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev);
1468 era_destroy(era);
1472 r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy);
1475 era_destroy(era);
1479 r = dm_set_target_max_io_len(ti, era->sectors_per_block);
1482 era_destroy(era);
1486 if (!valid_block_size(era->sectors_per_block)) {
1488 era_destroy(era);
1491 if (era->sectors_per_block & (era->sectors_per_block - 1))
1492 era->sectors_per_block_shift = -1;
1494 era->sectors_per_block_shift = __ffs(era->sectors_per_block);
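
Lines 1491-1494 use the classic power-of-two test, (x & (x - 1)) == 0 for x > 0, then derive the shift with __ffs(). A userspace equivalent using __builtin_ctz() in place of __ffs():

    #include <stdio.h>

    /* Returns the shift for a power-of-two block size, else -1.
     * Assumes sectors_per_block > 0 (valid_block_size() guards this). */
    static int block_shift(unsigned sectors_per_block)
    {
        if (sectors_per_block & (sectors_per_block - 1))
            return -1;                          /* not a power of two */
        return __builtin_ctz(sectors_per_block);
    }

    int main(void)
    {
        printf("%d %d\n", block_shift(8), block_shift(24)); /* 3 -1 */
        return 0;
    }
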
1496 md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true);
1499 era_destroy(era);
1502 era->md = md;
1504 era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
1505 if (!era->wq) {
1507 era_destroy(era);
1510 INIT_WORK(&era->worker, do_work);
1512 spin_lock_init(&era->deferred_lock);
1513 bio_list_init(&era->deferred_bios);
1515 spin_lock_init(&era->rpc_lock);
1516 INIT_LIST_HEAD(&era->rpc_calls);
1518 ti->private = era;
1534 struct era *era = ti->private;
1535 dm_block_t block = get_block(era, bio);
1540 * block is marked in this era.
1542 remap_to_origin(era, bio);
1549 !metadata_current_marked(era->md, block)) {
1550 defer_bio(era, bio);
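
era_map() (lines 1534-1550) remaps every bio to the origin, but only writes to blocks not already marked in the current era are deferred to the worker; everything else is remapped immediately. A boolean model of that decision, with flush handling (REQ_PREFLUSH) elided:

    #include <stdbool.h>
    #include <stdio.h>

    enum map_ret { REMAPPED, DEFERRED };

    static enum map_ret era_map(bool is_write, bool already_marked)
    {
        /* remap_to_origin() happens unconditionally before this */
        if (is_write && !already_marked)
            return DEFERRED;   /* worker marks the block, commits, issues */
        return REMAPPED;       /* fast path: no metadata work needed */
    }

    int main(void)
    {
        printf("read: %d\n", era_map(false, false));      /* REMAPPED */
        printf("new write: %d\n", era_map(true, false));  /* DEFERRED */
        printf("rewrite: %d\n", era_map(true, true));     /* REMAPPED */
        return 0;
    }
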
1560 struct era *era = ti->private;
1562 r = in_worker0(era, metadata_era_archive);
1564 DMERR("%s: couldn't archive current era", __func__);
1568 stop_worker(era);
1570 r = metadata_commit(era->md);
1580 struct era *era = ti->private;
1581 dm_block_t new_size = calc_nr_blocks(era);
1583 if (era->nr_blocks != new_size) {
1584 r = metadata_resize(era->md, &new_size);
1590 r = metadata_commit(era->md);
1596 era->nr_blocks = new_size;
1599 start_worker(era);
1601 r = in_worker0(era, metadata_era_rollover);
1614 * <current era> <held metadata root | '-'>
1620 struct era *era = ti->private;
1627 r = in_worker1(era, metadata_get_stats, &stats);
1635 (unsigned) stats.era);
1644 format_dev_t(buf, era->metadata_dev->bdev->bd_dev);
1646 format_dev_t(buf, era->origin_dev->bdev->bd_dev);
1647 DMEMIT("%s %u", buf, era->sectors_per_block);
1660 struct era *era = ti->private;
1668 return in_worker0(era, metadata_checkpoint);
1671 return in_worker0(era, metadata_take_snap);
1674 return in_worker0(era, metadata_drop_snap);
1688 struct era *era = ti->private;
1689 return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
1694 struct era *era = ti->private;
1699 * era device's blocksize (io_opt is a factor) do not override them.
1701 if (io_opt_sectors < era->sectors_per_block ||
1702 do_div(io_opt_sectors, era->sectors_per_block)) {
1704 blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT);
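
era_io_hints() (lines 1694-1704) keeps the stacked io_opt only when it is a non-zero multiple of the era block size; do_div() returns the remainder used in that test. A userspace model of the choice:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t choose_io_opt(uint64_t io_opt_sectors,
                                  uint32_t sectors_per_block)
    {
        if (io_opt_sectors < sectors_per_block ||
            io_opt_sectors % sectors_per_block)
            return sectors_per_block;   /* override with the block size */
        return io_opt_sectors;          /* compatible: keep the stacked hint */
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long) choose_io_opt(0, 128));   /* 128 */
        printf("%llu\n", (unsigned long long) choose_io_opt(256, 128)); /* 256 */
        printf("%llu\n", (unsigned long long) choose_io_opt(200, 128)); /* 128 */
        return 0;
    }
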
1711 .name = "era",
1731 DMERR("era target registration failed: %d", r);
1746 MODULE_DESCRIPTION(DM_NAME " era target");