Lines matching defs:dmz (Linux dm-zoned device-mapper target)
116 static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
128 clone = bio_alloc_clone(dev->bdev, bio, GFP_NOIO, &dmz->bio_set);
134 dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
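
The dmz_submit_bio() fragments above (file lines 116-134) clone the incoming bio onto the backing zoned device and remap its start sector into the target zone. Below is a minimal sketch of that clone-and-remap pattern, assuming a pre-initialized struct bio_set; bio_alloc_clone(), submit_bio_noacct(), bio_put() and bio_endio() are the real block-layer API, while the example_* names and the completion wiring are illustrative, not the dm-zoned code.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical completion: propagate errors to the parent, free the clone. */
static void example_clone_endio(struct bio *clone)
{
        struct bio *parent = clone->bi_private;

        if (clone->bi_status)
                parent->bi_status = clone->bi_status;
        bio_put(clone);
        bio_endio(parent);
}

/* Clone `bio` onto `bdev`, redirected to start at `dst_sector`. */
static int example_submit_clone(struct block_device *bdev, struct bio *bio,
                                sector_t dst_sector, struct bio_set *bs)
{
        struct bio *clone;

        clone = bio_alloc_clone(bdev, bio, GFP_NOIO, bs);
        if (!clone)
                return -ENOMEM;

        clone->bi_iter.bi_sector = dst_sector;  /* remapped start sector */
        clone->bi_private = bio;
        clone->bi_end_io = example_clone_endio;

        submit_bio_noacct(clone);
        return 0;
}
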
153 static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
169 static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
172 struct dmz_metadata *zmd = dmz->metadata;
229 ret = dmz_submit_bio(dmz, rzone, bio,
236 dmz_handle_read_zero(dmz, bio, chunk_block, 1);
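
dmz_handle_read() (file lines 169-236) satisfies reads of unmapped chunk blocks by zero-filling the corresponding part of the bio via dmz_handle_read_zero() rather than issuing I/O. A sketch of that idiom, assuming the caller has already computed how many bytes are unmapped; zero_fill_bio(), bio_advance() and swap() are the real kernel interfaces, the function name is illustrative.

#include <linux/bio.h>
#include <linux/kernel.h>

/*
 * Zero the first `size` bytes of the bio's remaining payload without doing
 * any I/O. zero_fill_bio() zeroes bi_iter.bi_size bytes, so that size is
 * swapped in temporarily; the bio is then advanced past the zeroed range.
 */
static void example_read_zero(struct bio *bio, unsigned int size)
{
        swap(bio->bi_iter.bi_size, size);
        zero_fill_bio(bio);
        swap(bio->bi_iter.bi_size, size);

        bio_advance(bio, size);
}
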
249 static int dmz_handle_direct_write(struct dmz_target *dmz,
254 struct dmz_metadata *zmd = dmz->metadata;
262 ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
282 static int dmz_handle_buffered_write(struct dmz_target *dmz,
287 struct dmz_metadata *zmd = dmz->metadata;
300 ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
318 static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
321 struct dmz_metadata *zmd = dmz->metadata;
343 return dmz_handle_direct_write(dmz, zone, bio,
351 return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
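
dmz_handle_write() (file lines 318-351) picks between dmz_handle_direct_write() and dmz_handle_buffered_write(). The sketch below gives the usual routing rule for a zoned target: conventional (randomly writable) zones and writes landing exactly on a sequential zone's write pointer can be written in place, while out-of-order sequential writes must be staged in a buffer zone. The struct, enum and field names are illustrative, not the dm-zoned definitions.

#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative zone state; not the dm-zoned definitions. */
struct example_zone {
        sector_t wp_block;      /* next writable block of a sequential zone */
        bool     conventional;  /* accepts random (in-place) writes */
        bool     readonly;
};

enum example_write_route {
        EXAMPLE_WRITE_DIRECT,   /* write straight into the data zone */
        EXAMPLE_WRITE_BUFFERED, /* stage in a buffer zone, merge later */
};

static int example_route_write(const struct example_zone *zone,
                               sector_t chunk_block)
{
        if (zone->readonly)
                return -EROFS;

        /* In-order sequential writes and conventional zones go direct. */
        if (zone->conventional || chunk_block == zone->wp_block)
                return EXAMPLE_WRITE_DIRECT;

        /* Anything else would violate the zone's write pointer. */
        return EXAMPLE_WRITE_BUFFERED;
}
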
357 static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
360 struct dmz_metadata *zmd = dmz->metadata;
374 dmz_metadata_label(dmz->metadata),
395 static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
400 struct dmz_metadata *zmd = dmz->metadata;
427 ret = dmz_handle_read(dmz, zone, bio);
430 ret = dmz_handle_write(dmz, zone, bio);
434 ret = dmz_handle_discard(dmz, zone, bio);
438 dmz_metadata_label(dmz->metadata), bio_op(bio));
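
dmz_handle_bio() (file lines 395-438) maps the chunk to a zone and then dispatches on the bio operation. A sketch of that dispatch; bio_op() and the REQ_OP_* values are the real block-layer interface, and the three example handlers are empty stand-ins for the dmz_handle_read/write/discard() paths.

#include <linux/bio.h>
#include <linux/errno.h>
#include <linux/printk.h>

/* Empty stand-ins for the real per-operation handlers. */
static int example_handle_read(struct bio *bio)    { return 0; }
static int example_handle_write(struct bio *bio)   { return 0; }
static int example_handle_discard(struct bio *bio) { return 0; }

static int example_dispatch_bio(struct bio *bio)
{
        switch (bio_op(bio)) {
        case REQ_OP_READ:
                return example_handle_read(bio);
        case REQ_OP_WRITE:
                return example_handle_write(bio);
        case REQ_OP_DISCARD:
        case REQ_OP_WRITE_ZEROES:
                return example_handle_discard(bio);
        default:
                pr_err("Ignoring unsupported BIO operation 0x%x\n",
                       bio_op(bio));
                return -EIO;
        }
}
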
481 struct dmz_target *dmz = cw->target;
484 mutex_lock(&dmz->chunk_lock);
488 mutex_unlock(&dmz->chunk_lock);
489 dmz_handle_bio(dmz, cw, bio);
490 mutex_lock(&dmz->chunk_lock);
497 mutex_unlock(&dmz->chunk_lock);
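
The chunk-work fragments (file lines 481-497) show the drain loop: the chunk mutex protects the bio list but is dropped around the handling of each bio and re-taken to pop the next one. A sketch of that unlock-handle-relock pattern; the lock/list pairing and the per-bio handler are illustrative.

#include <linux/bio.h>
#include <linux/mutex.h>

/* Hypothetical per-bio handler; the real one clones and submits I/O. */
static void example_handle_one(struct bio *bio)
{
        bio_endio(bio);
}

/* Pop and handle queued bios, dropping the lock around each one. */
static void example_drain_bios(struct mutex *lock, struct bio_list *list)
{
        struct bio *bio;

        mutex_lock(lock);
        while ((bio = bio_list_pop(list))) {
                mutex_unlock(lock);
                example_handle_one(bio);
                mutex_lock(lock);
        }
        mutex_unlock(lock);
}
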
505 struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work);
510 ret = dmz_flush_metadata(dmz->metadata);
513 dmz_metadata_label(dmz->metadata), ret);
517 spin_lock(&dmz->flush_lock);
518 bio = bio_list_pop(&dmz->flush_list);
519 spin_unlock(&dmz->flush_lock);
527 queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
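
dmz_flush_work() (file lines 505-527) is periodic delayed work: flush the metadata, complete any queued flush bios with the flush result, then re-arm itself. A sketch of that shape over an illustrative context; the metadata flush is a stub and the period constant is an assumption, while errno_to_blk_status(), bio_endio() and the workqueue calls are the real APIs.

#include <linux/bio.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define EXAMPLE_FLUSH_PERIOD    (HZ * 10)       /* illustrative period */

/* Illustrative flush context; not the dm-zoned target struct. */
struct example_flush_ctx {
        spinlock_t               lock;
        struct bio_list          flush_list;
        struct delayed_work      flush_work;
        struct workqueue_struct *flush_wq;
};

/* Stub for the metadata flush; returns 0 or a negative errno. */
static int example_flush_metadata(struct example_flush_ctx *ctx)
{
        return 0;
}

static void example_flush_work_fn(struct work_struct *work)
{
        struct example_flush_ctx *ctx =
                container_of(work, struct example_flush_ctx, flush_work.work);
        struct bio *bio;
        int ret;

        ret = example_flush_metadata(ctx);

        /* Complete queued flush bios with the flush result. */
        while (1) {
                spin_lock(&ctx->lock);
                bio = bio_list_pop(&ctx->flush_list);
                spin_unlock(&ctx->lock);
                if (!bio)
                        break;
                bio->bi_status = errno_to_blk_status(ret);
                bio_endio(bio);
        }

        /* Re-arm the periodic flush. */
        queue_delayed_work(ctx->flush_wq, &ctx->flush_work,
                           EXAMPLE_FLUSH_PERIOD);
}
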
534 static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
536 unsigned int chunk = dmz_bio_chunk(dmz->metadata, bio);
540 mutex_lock(&dmz->chunk_lock);
543 cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
556 cw->target = dmz;
560 ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
569 if (queue_work(dmz->chunk_wq, &cw->work))
572 mutex_unlock(&dmz->chunk_lock);
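
dmz_queue_chunk_work() (file lines 534-572) keeps one work item per active chunk in a radix tree: look the chunk up under the chunk mutex, allocate and insert a work item if there is none, append the bio, and queue the work. A sketch of that lookup-or-create pattern; the structures are illustrative, and the refcounting and freeing of the work item once its bio list drains are omitted.

#include <linux/bio.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Illustrative structures; not the dm-zoned definitions. */
struct example_chunk_ctx {
        struct mutex             chunk_lock;
        struct radix_tree_root   chunk_rxtree;
        struct workqueue_struct *chunk_wq;
};

struct example_chunk_work {
        struct work_struct        work;
        struct example_chunk_ctx *ctx;
        unsigned int              chunk;
        struct bio_list           bio_list;
};

static void example_chunk_work_fn(struct work_struct *work)
{
        /* Would drain cw->bio_list here, as in the drain sketch above. */
}

static int example_queue_chunk_work(struct example_chunk_ctx *ctx,
                                    struct bio *bio, unsigned int chunk)
{
        struct example_chunk_work *cw;
        int ret = 0;

        mutex_lock(&ctx->chunk_lock);

        cw = radix_tree_lookup(&ctx->chunk_rxtree, chunk);
        if (!cw) {
                cw = kmalloc(sizeof(*cw), GFP_NOIO);
                if (!cw) {
                        ret = -ENOMEM;
                        goto out;
                }
                INIT_WORK(&cw->work, example_chunk_work_fn);
                cw->ctx = ctx;
                cw->chunk = chunk;
                bio_list_init(&cw->bio_list);

                ret = radix_tree_insert(&ctx->chunk_rxtree, chunk, cw);
                if (ret) {
                        kfree(cw);
                        goto out;
                }
        }

        bio_list_add(&cw->bio_list, bio);
        queue_work(ctx->chunk_wq, &cw->work);
out:
        mutex_unlock(&ctx->chunk_lock);
        return ret;
}

Queuing on every bio is safe: queue_work() does nothing when the work item is already pending, and a drain loop that re-takes the same mutex will pick up bios added while it runs.
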
627 struct dmz_target *dmz = ti->private;
628 struct dmz_metadata *zmd = dmz->metadata;
660 spin_lock(&dmz->flush_lock);
661 bio_list_add(&dmz->flush_list, bio);
662 spin_unlock(&dmz->flush_lock);
663 mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0);
673 ret = dmz_queue_chunk_work(dmz, bio);
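
In dmz_map() (file lines 627-673), a flush, i.e. an empty WRITE bio, bypasses the chunk works: it is appended to flush_list and the flush work is kicked immediately with mod_delayed_work(..., 0), while data bios go through dmz_queue_chunk_work(). A sketch of that split, reusing the illustrative flush context from the flush-work sketch above; the handling of data bios is elided.

#include <linux/bio.h>
#include <linux/device-mapper.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static int example_map(struct dm_target *ti, struct bio *bio)
{
        struct example_flush_ctx *ctx = ti->private;

        /* An empty WRITE bio is a flush request: hand it to the flush work. */
        if (!bio_sectors(bio) && bio_op(bio) == REQ_OP_WRITE) {
                spin_lock(&ctx->lock);
                bio_list_add(&ctx->flush_list, bio);
                spin_unlock(&ctx->lock);
                /* Run the flush work as soon as possible. */
                mod_delayed_work(ctx->flush_wq, &ctx->flush_work, 0);
                return DM_MAPIO_SUBMITTED;
        }

        /*
         * A data bio would be queued on its chunk work here (see the
         * lookup-or-create sketch above) and complete asynchronously.
         */
        return DM_MAPIO_SUBMITTED;
}
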
691 struct dmz_target *dmz = ti->private;
714 if (dmz->ddev[0]) {
718 dev = &dmz->dev[idx];
721 if (dmz->ddev[idx]) {
729 dev = &dmz->dev[idx];
740 dmz->ddev[idx] = ddev;
753 struct dmz_target *dmz = ti->private;
756 for (i = 0; i < dmz->nr_ddevs; i++)
757 if (dmz->ddev[i])
758 dm_put_device(ti, dmz->ddev[i]);
760 kfree(dmz->ddev);
765 struct dmz_target *dmz = ti->private;
774 if (dmz->nr_ddevs > 1) {
775 reg_dev = &dmz->dev[0];
780 for (i = 1; i < dmz->nr_ddevs; i++) {
781 struct dmz_dev *zoned_dev = &dmz->dev[i];
798 struct dmz_dev *zoned_dev = &dmz->dev[0];
818 for (i = 1; i < dmz->nr_ddevs; i++) {
819 dmz->dev[i].zone_offset = zone_offset;
820 zone_offset += dmz->dev[i].nr_zones;
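
dmz_fixup_devices() (file lines 765-820) gives every secondary device a zone_offset so that zone numbers stay contiguous across devices: each device starts where the previous one ended. A small sketch of that accumulation over an illustrative device array (assumes at least one device).

/* Illustrative device descriptor; not struct dmz_dev. */
struct example_dev {
        unsigned int nr_zones;
        unsigned int zone_offset;       /* first global zone id on this dev */
};

/* Give each device the running total of zones on the devices before it. */
static void example_fixup_zone_offsets(struct example_dev *devs,
                                       unsigned int nr_devs)
{
        unsigned int i, zone_offset = devs[0].nr_zones;

        devs[0].zone_offset = 0;
        for (i = 1; i < nr_devs; i++) {
                devs[i].zone_offset = zone_offset;
                zone_offset += devs[i].nr_zones;
        }
}
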
831 struct dmz_target *dmz;
841 dmz = kzalloc(sizeof(struct dmz_target), GFP_KERNEL);
842 if (!dmz) {
846 dmz->dev = kcalloc(argc, sizeof(struct dmz_dev), GFP_KERNEL);
847 if (!dmz->dev) {
849 kfree(dmz);
852 dmz->ddev = kcalloc(argc, sizeof(struct dm_dev *), GFP_KERNEL);
853 if (!dmz->ddev) {
858 dmz->nr_ddevs = argc;
860 ti->private = dmz;
873 ret = dmz_ctr_metadata(dmz->dev, argc, &dmz->metadata,
881 ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata);
890 ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) <<
891 dmz_zone_nr_sectors_shift(dmz->metadata);
894 ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
901 mutex_init(&dmz->chunk_lock);
902 INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
903 dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s",
905 dmz_metadata_label(dmz->metadata));
906 if (!dmz->chunk_wq) {
913 spin_lock_init(&dmz->flush_lock);
914 bio_list_init(&dmz->flush_list);
915 INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
916 dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
917 dmz_metadata_label(dmz->metadata));
918 if (!dmz->flush_wq) {
923 mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
926 for (i = 0; i < dmz->nr_ddevs; i++) {
927 ret = dmz_ctr_reclaim(dmz->metadata, &dmz->dev[i].reclaim, i);
935 dmz_metadata_label(dmz->metadata),
941 destroy_workqueue(dmz->flush_wq);
943 destroy_workqueue(dmz->chunk_wq);
945 mutex_destroy(&dmz->chunk_lock);
946 bioset_exit(&dmz->bio_set);
948 dmz_dtr_metadata(dmz->metadata);
952 kfree(dmz->dev);
953 kfree(dmz);
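
The dmz_ctr() fragments (file lines 831-953) show the constructor's resource chain: allocate the target context, set up a bio_set, the chunk workqueue with its radix tree and mutex, and an ordered flush workqueue with its delayed work, unwinding in reverse order on any failure. A compressed sketch of that skeleton using the real allocation APIs; the context struct, pool size, workqueue names and labels are illustrative, and the dm-zoned-specific steps (argument parsing, device lookup, metadata, reclaim) are elided.

#include <linux/bio.h>
#include <linux/device-mapper.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define EXAMPLE_MIN_BIOS        64      /* illustrative bio_set pool size */

struct example_target {                 /* illustrative context */
        struct bio_set           bio_set;
        struct mutex             chunk_lock;
        struct radix_tree_root   chunk_rxtree;
        struct workqueue_struct *chunk_wq;
        spinlock_t               flush_lock;
        struct bio_list          flush_list;
        struct delayed_work      flush_work;
        struct workqueue_struct *flush_wq;
};

/* Stub: would flush metadata and complete flush bios, as sketched earlier. */
static void example_target_flush_fn(struct work_struct *work)
{
}

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct example_target *t;
        int ret;

        t = kzalloc(sizeof(*t), GFP_KERNEL);
        if (!t)
                return -ENOMEM;
        ti->private = t;

        ret = bioset_init(&t->bio_set, EXAMPLE_MIN_BIOS, 0, 0);
        if (ret)
                goto err_free;

        mutex_init(&t->chunk_lock);
        INIT_RADIX_TREE(&t->chunk_rxtree, GFP_NOIO);
        t->chunk_wq = alloc_workqueue("example_cwq",
                                      WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
        if (!t->chunk_wq) {
                ret = -ENOMEM;
                goto err_bio_set;
        }

        spin_lock_init(&t->flush_lock);
        bio_list_init(&t->flush_list);
        INIT_DELAYED_WORK(&t->flush_work, example_target_flush_fn);
        t->flush_wq = alloc_ordered_workqueue("example_fwq", WQ_MEM_RECLAIM);
        if (!t->flush_wq) {
                ret = -ENOMEM;
                goto err_chunk_wq;
        }

        return 0;

err_chunk_wq:
        destroy_workqueue(t->chunk_wq);
err_bio_set:
        mutex_destroy(&t->chunk_lock);
        bioset_exit(&t->bio_set);
err_free:
        kfree(t);
        return ret;
}

The dmz_dtr() fragments below release the same kinds of resources in roughly reverse order, after cancelling the delayed flush work and flushing metadata one last time.
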
963 struct dmz_target *dmz = ti->private;
966 destroy_workqueue(dmz->chunk_wq);
968 for (i = 0; i < dmz->nr_ddevs; i++)
969 dmz_dtr_reclaim(dmz->dev[i].reclaim);
971 cancel_delayed_work_sync(&dmz->flush_work);
972 destroy_workqueue(dmz->flush_wq);
974 (void) dmz_flush_metadata(dmz->metadata);
976 dmz_dtr_metadata(dmz->metadata);
978 bioset_exit(&dmz->bio_set);
982 mutex_destroy(&dmz->chunk_lock);
984 kfree(dmz->dev);
985 kfree(dmz);
993 struct dmz_target *dmz = ti->private;
994 unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata);
1021 struct dmz_target *dmz = ti->private;
1022 struct dmz_dev *dev = &dmz->dev[0];
1037 struct dmz_target *dmz = ti->private;
1040 flush_workqueue(dmz->chunk_wq);
1041 for (i = 0; i < dmz->nr_ddevs; i++)
1042 dmz_suspend_reclaim(dmz->dev[i].reclaim);
1043 cancel_delayed_work_sync(&dmz->flush_work);
1051 struct dmz_target *dmz = ti->private;
1054 queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
1055 for (i = 0; i < dmz->nr_ddevs; i++)
1056 dmz_resume_reclaim(dmz->dev[i].reclaim);
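
The suspend/resume fragments (file lines 1037-1056) form a pair: suspend flushes the chunk workqueue, pauses reclaim and cancels the delayed flush work; resume re-arms the flush work and restarts reclaim. A sketch of the cancel/re-arm half, reusing the illustrative struct example_target and EXAMPLE_FLUSH_PERIOD from the sketches above and omitting the reclaim calls.

#include <linux/workqueue.h>

/* Quiesce: finish in-flight chunk work, then stop the periodic flush. */
static void example_suspend(struct example_target *t)
{
        flush_workqueue(t->chunk_wq);
        cancel_delayed_work_sync(&t->flush_work);
}

/* Resume: re-arm the periodic metadata flush. */
static void example_resume(struct example_target *t)
{
        queue_delayed_work(t->flush_wq, &t->flush_work, EXAMPLE_FLUSH_PERIOD);
}
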
1062 struct dmz_target *dmz = ti->private;
1063 unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata);
1067 for (i = 0; i < dmz->nr_ddevs; i++) {
1068 capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1);
1069 r = fn(ti, dmz->ddev[i], 0, capacity, data);
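
dmz_iterate_devices() (file lines 1062-1069) reports each backing device with its capacity rounded down to a whole number of zones; with a power-of-two zone size the rounding is a single mask. A tiny sketch of that alignment, assuming a power-of-two zone size; the helper name is illustrative.

#include <linux/types.h>

/* Round a capacity down to a whole number of zones; zone_nr_sectors must
 * be a power of two. */
static inline sector_t example_zone_aligned_capacity(sector_t capacity,
                                                     sector_t zone_nr_sectors)
{
        return capacity & ~(zone_nr_sectors - 1);
}

The iterate_devices callback then reports each component device over [0, aligned capacity) to the dm core, as the fragment above shows.
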
1080 struct dmz_target *dmz = ti->private;
1089 dmz_nr_zones(dmz->metadata),
1090 dmz_nr_unmap_cache_zones(dmz->metadata),
1091 dmz_nr_cache_zones(dmz->metadata));
1092 for (i = 0; i < dmz->nr_ddevs; i++) {
1098 (dmz_nr_cache_zones(dmz->metadata) > 0))
1101 dmz_nr_unmap_rnd_zones(dmz->metadata, i),
1102 dmz_nr_rnd_zones(dmz->metadata, i),
1103 dmz_nr_unmap_seq_zones(dmz->metadata, i),
1104 dmz_nr_seq_zones(dmz->metadata, i));
1108 dev = &dmz->dev[0];
1111 for (i = 1; i < dmz->nr_ddevs; i++) {
1112 dev = &dmz->dev[i];
1126 struct dmz_target *dmz = ti->private;
1132 for (i = 0; i < dmz->nr_ddevs; i++)
1133 dmz_schedule_reclaim(dmz->dev[i].reclaim);