Lines Matching defs:dmz

116 static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
128 clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
135 dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
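The three matches above are the heart of dmz_submit_bio(): the incoming BIO is cloned out of the target's bio_set and the clone is redirected to the chosen zone, at the zone's start sector plus the offset of the chunk block. A condensed sketch of that remapping, assuming the zone's backing device in dev->bdev, the dmz_is_seq()/wp_block write-pointer bookkeeping, and the submit call from the surrounding driver (none of which appear in the matched lines):

	clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
	if (!clone)
		return -ENOMEM;

	/* Redirect the clone to the zone holding this chunk block */
	bio_set_dev(clone, dev->bdev);
	clone->bi_iter.bi_sector =
		dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
	clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
	submit_bio_noacct(clone);	/* generic_make_request() on older kernels */

	/* Writes to sequential zones advance the software write pointer */
	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
		zone->wp_block += nr_blocks;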
154 static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
170 static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
173 struct dmz_metadata *zmd = dmz->metadata;
230 ret = dmz_submit_bio(dmz, rzone, bio,
237 dmz_handle_read_zero(dmz, bio, chunk_block, 1);
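dmz_handle_read() walks the requested block range and serves each run from wherever the valid data lives: the chunk's data zone first, then its buffer zone, and zero-fill for holes. A simplified sketch of that loop, assuming zone->bzone, dmz_block_valid() returning the number of valid blocks at chunk_block, and zero_fill_bio() for unmapped chunks (error handling and the sequential-zone write-pointer check are dropped):

	if (!zone) {
		/* Chunk not mapped: the whole read returns zeroes */
		zero_fill_bio(bio);
		return 0;
	}

	bzone = zone->bzone;
	while (chunk_block < end_block) {
		/* Prefer valid blocks in the data zone... */
		nr_blocks = dmz_block_valid(zmd, zone, chunk_block);
		rzone = zone;
		/* ...then fall back to the buffer zone, if there is one */
		if (!nr_blocks && bzone) {
			nr_blocks = dmz_block_valid(zmd, bzone, chunk_block);
			rzone = bzone;
		}
		if (nr_blocks) {
			dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
			chunk_block += nr_blocks;
		} else {
			/* No valid data anywhere: zero out one block of the BIO */
			dmz_handle_read_zero(dmz, bio, chunk_block, 1);
			chunk_block++;
		}
	}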
250 static int dmz_handle_direct_write(struct dmz_target *dmz,
255 struct dmz_metadata *zmd = dmz->metadata;
263 ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
283 static int dmz_handle_buffered_write(struct dmz_target *dmz,
288 struct dmz_metadata *zmd = dmz->metadata;
301 ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
319 static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
322 struct dmz_metadata *zmd = dmz->metadata;
344 return dmz_handle_direct_write(dmz, zone, bio,
352 return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
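dmz_handle_write() is the fork between the two strategies matched above: cache and random zones, or a write that lands exactly on a sequential zone's write pointer, go straight to the data zone; an unaligned write into a sequential zone is staged in a buffer zone instead. Roughly (unmapped-zone and read-only checks omitted; dmz_is_cache()/dmz_is_rnd() and wp_block are assumed from the surrounding driver):

	if (dmz_is_cache(zone) || dmz_is_rnd(zone) ||
	    chunk_block == zone->wp_block)
		/* Conventional/cache zone, or write aligned to the write
		 * pointer: write directly into the data zone. */
		return dmz_handle_direct_write(dmz, zone, bio,
					       chunk_block, nr_blocks);

	/* Unaligned write to a sequential zone: go through a buffer zone */
	return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);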
358 static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
361 struct dmz_metadata *zmd = dmz->metadata;
375 dmz_metadata_label(dmz->metadata),
396 static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
401 struct dmz_metadata *zmd = dmz->metadata;
428 ret = dmz_handle_read(dmz, zone, bio);
431 ret = dmz_handle_write(dmz, zone, bio);
435 ret = dmz_handle_discard(dmz, zone, bio);
439 dmz_metadata_label(dmz->metadata), bio_op(bio));
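dmz_handle_bio() is the per-chunk dispatcher behind those three calls: once the chunk's data zone has been looked up, the BIO is routed by operation. The matched lines imply a switch of this shape; REQ_OP_WRITE_ZEROES sharing the discard path is an assumption from context, not something the matches show:

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		ret = dmz_handle_read(dmz, zone, bio);
		break;
	case REQ_OP_WRITE:
		ret = dmz_handle_write(dmz, zone, bio);
		break;
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		ret = dmz_handle_discard(dmz, zone, bio);
		break;
	default:
		DMERR("(%s): Unsupported BIO operation 0x%x",
		      dmz_metadata_label(dmz->metadata), bio_op(bio));
		ret = -EIO;
	}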
482 struct dmz_target *dmz = cw->target;
485 mutex_lock(&dmz->chunk_lock);
489 mutex_unlock(&dmz->chunk_lock);
490 dmz_handle_bio(dmz, cw, bio);
491 mutex_lock(&dmz->chunk_lock);
498 mutex_unlock(&dmz->chunk_lock);
506 struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work);
511 ret = dmz_flush_metadata(dmz->metadata);
514 dmz_metadata_label(dmz->metadata), ret);
518 spin_lock(&dmz->flush_lock);
519 bio = bio_list_pop(&dmz->flush_list);
520 spin_unlock(&dmz->flush_lock);
528 queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
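The flush worker ties these matches together: it runs periodically on dmz->flush_wq, writes out dirty metadata, completes every flush BIO queued on dmz->flush_list under flush_lock, and re-arms itself. A condensed sketch; dmz_bio_endio() and errno_to_blk_status() are assumed from context rather than taken from the matches:

	/* Flush dirty metadata blocks first */
	ret = dmz_flush_metadata(dmz->metadata);

	/* Complete the queued flush BIOs with the flush result */
	while (1) {
		spin_lock(&dmz->flush_lock);
		bio = bio_list_pop(&dmz->flush_list);
		spin_unlock(&dmz->flush_lock);
		if (!bio)
			break;
		dmz_bio_endio(bio, errno_to_blk_status(ret));
	}

	/* Re-arm the periodic flush */
	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);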
535 static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
537 unsigned int chunk = dmz_bio_chunk(dmz->metadata, bio);
541 mutex_lock(&dmz->chunk_lock);
544 cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
557 cw->target = dmz;
561 ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
570 if (queue_work(dmz->chunk_wq, &cw->work))
573 mutex_unlock(&dmz->chunk_lock);
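dmz_queue_chunk_work() serializes BIOs per chunk: under chunk_lock it looks the chunk's work item up in the chunk_rxtree radix tree, creates and inserts one if none is active, appends the BIO to the work's list, and kicks the work on chunk_wq. A sketch with allocation-failure handling and some refcounting trimmed away; cw->chunk, cw->bio_list and dmz_get_chunk_work() are assumptions from the surrounding driver:

	mutex_lock(&dmz->chunk_lock);

	cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
	if (!cw) {
		/* No active work for this chunk yet: create and insert one */
		cw = kmalloc(sizeof(*cw), GFP_NOIO);
		INIT_WORK(&cw->work, dmz_chunk_work);
		cw->target = dmz;
		cw->chunk = chunk;
		bio_list_init(&cw->bio_list);
		ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
	}

	bio_list_add(&cw->bio_list, bio);

	/* Take an extra reference only if the work was actually queued */
	if (queue_work(dmz->chunk_wq, &cw->work))
		dmz_get_chunk_work(cw);

	mutex_unlock(&dmz->chunk_lock);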
628 struct dmz_target *dmz = ti->private;
629 struct dmz_metadata *zmd = dmz->metadata;
661 spin_lock(&dmz->flush_lock);
662 bio_list_add(&dmz->flush_list, bio);
663 spin_unlock(&dmz->flush_lock);
664 mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0);
674 ret = dmz_queue_chunk_work(dmz, bio);
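dmz_map() splits incoming BIOs into the two paths visible in the matches: an empty write (a flush) is added to dmz->flush_list and the flush work is kicked immediately with a zero delay, while regular I/O is deferred to the chunk's work via dmz_queue_chunk_work(). Roughly, with nr_sectors standing for bio_sectors(bio) (an assumption from context):

	if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
		/* Flush: queue it for the flush worker and kick it now */
		spin_lock(&dmz->flush_lock);
		bio_list_add(&dmz->flush_list, bio);
		spin_unlock(&dmz->flush_lock);
		mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0);
		return DM_MAPIO_SUBMITTED;
	}

	/* Regular I/O: hand it to the per-chunk work for deferred handling */
	ret = dmz_queue_chunk_work(dmz, bio);
	if (ret)
		return DM_MAPIO_REQUEUE;

	return DM_MAPIO_SUBMITTED;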
692 struct dmz_target *dmz = ti->private;
715 if (dmz->ddev[0]) {
719 dev = &dmz->dev[idx];
722 if (dmz->ddev[idx]) {
730 dev = &dmz->dev[idx];
742 dmz->ddev[idx] = ddev;
755 struct dmz_target *dmz = ti->private;
758 for (i = 0; i < dmz->nr_ddevs; i++)
759 if (dmz->ddev[i])
760 dm_put_device(ti, dmz->ddev[i]);
762 kfree(dmz->ddev);
767 struct dmz_target *dmz = ti->private;
777 if (dmz->nr_ddevs > 1) {
778 reg_dev = &dmz->dev[0];
783 for (i = 1; i < dmz->nr_ddevs; i++) {
784 zoned_dev = &dmz->dev[i];
802 zoned_dev = &dmz->dev[0];
821 for (i = 1; i < dmz->nr_ddevs; i++) {
822 dmz->dev[i].zone_offset = zone_offset;
823 zone_offset += dmz->dev[i].nr_zones;
834 struct dmz_target *dmz;
844 dmz = kzalloc(sizeof(struct dmz_target), GFP_KERNEL);
845 if (!dmz) {
849 dmz->dev = kcalloc(argc, sizeof(struct dmz_dev), GFP_KERNEL);
850 if (!dmz->dev) {
852 kfree(dmz);
855 dmz->ddev = kcalloc(argc, sizeof(struct dm_dev *), GFP_KERNEL);
856 if (!dmz->ddev) {
861 dmz->nr_ddevs = argc;
863 ti->private = dmz;
876 ret = dmz_ctr_metadata(dmz->dev, argc, &dmz->metadata,
884 ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata);
893 ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) <<
894 dmz_zone_nr_sectors_shift(dmz->metadata);
897 ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
904 mutex_init(&dmz->chunk_lock);
905 INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
906 dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s",
908 dmz_metadata_label(dmz->metadata));
909 if (!dmz->chunk_wq) {
916 spin_lock_init(&dmz->flush_lock);
917 bio_list_init(&dmz->flush_list);
918 INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
919 dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
920 dmz_metadata_label(dmz->metadata));
921 if (!dmz->flush_wq) {
926 mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
929 for (i = 0; i < dmz->nr_ddevs; i++) {
930 ret = dmz_ctr_reclaim(dmz->metadata, &dmz->dev[i].reclaim, i);
938 dmz_metadata_label(dmz->metadata),
944 destroy_workqueue(dmz->flush_wq);
946 destroy_workqueue(dmz->chunk_wq);
948 mutex_destroy(&dmz->chunk_lock);
949 bioset_exit(&dmz->bio_set);
951 dmz_dtr_metadata(dmz->metadata);
955 kfree(dmz->dev);
956 kfree(dmz);
966 struct dmz_target *dmz = ti->private;
969 flush_workqueue(dmz->chunk_wq);
970 destroy_workqueue(dmz->chunk_wq);
972 for (i = 0; i < dmz->nr_ddevs; i++)
973 dmz_dtr_reclaim(dmz->dev[i].reclaim);
975 cancel_delayed_work_sync(&dmz->flush_work);
976 destroy_workqueue(dmz->flush_wq);
978 (void) dmz_flush_metadata(dmz->metadata);
980 dmz_dtr_metadata(dmz->metadata);
982 bioset_exit(&dmz->bio_set);
986 mutex_destroy(&dmz->chunk_lock);
988 kfree(dmz->dev);
989 kfree(dmz);
997 struct dmz_target *dmz = ti->private;
998 unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata);
1025 struct dmz_target *dmz = ti->private;
1026 struct dmz_dev *dev = &dmz->dev[0];
1041 struct dmz_target *dmz = ti->private;
1044 flush_workqueue(dmz->chunk_wq);
1045 for (i = 0; i < dmz->nr_ddevs; i++)
1046 dmz_suspend_reclaim(dmz->dev[i].reclaim);
1047 cancel_delayed_work_sync(&dmz->flush_work);
1055 struct dmz_target *dmz = ti->private;
1058 queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
1059 for (i = 0; i < dmz->nr_ddevs; i++)
1060 dmz_resume_reclaim(dmz->dev[i].reclaim);
1066 struct dmz_target *dmz = ti->private;
1067 unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata);
1071 for (i = 0; i < dmz->nr_ddevs; i++) {
1072 capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1);
1073 r = fn(ti, dmz->ddev[i], 0, capacity, data);
1084 struct dmz_target *dmz = ti->private;
1093 dmz_nr_zones(dmz->metadata),
1094 dmz_nr_unmap_cache_zones(dmz->metadata),
1095 dmz_nr_cache_zones(dmz->metadata));
1096 for (i = 0; i < dmz->nr_ddevs; i++) {
1102 (dmz_nr_cache_zones(dmz->metadata) > 0))
1105 dmz_nr_unmap_rnd_zones(dmz->metadata, i),
1106 dmz_nr_rnd_zones(dmz->metadata, i),
1107 dmz_nr_unmap_seq_zones(dmz->metadata, i),
1108 dmz_nr_seq_zones(dmz->metadata, i));
1112 dev = &dmz->dev[0];
1115 for (i = 1; i < dmz->nr_ddevs; i++) {
1116 dev = &dmz->dev[i];
1128 struct dmz_target *dmz = ti->private;
1134 for (i = 0; i < dmz->nr_ddevs; i++)
1135 dmz_schedule_reclaim(dmz->dev[i].reclaim);