Lines Matching refs: md (drivers/md/dm-rq.c)

19 struct mapped_device *md;
59 int dm_request_based(struct mapped_device *md)
61 return queue_is_mq(md->queue);
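
The hits at lines 59-61 are the whole request-based predicate. A minimal sketch of how they read in context, assuming only queue_is_mq() from the block layer:

/* A mapped device is request based when it sits on a blk-mq queue. */
int dm_request_based(struct mapped_device *md)
{
        return queue_is_mq(md->queue);
}
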
127 static void rq_end_stats(struct mapped_device *md, struct request *orig)
129 if (unlikely(dm_stats_used(&md->stats))) {
133 dm_stats_account_io(&md->stats, rq_data_dir(orig),
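
Lines 127-133 are the completion-side statistics hook. A sketch of the pattern; the duration/n_sectors bookkeeping on the per-request tio and the trailing dm_stats_account_io() arguments are assumptions, since the listing truncates the call:

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(orig);

                /* close the interval opened in dm_start_request() (assumed) */
                tio->duration_jiffies = jiffies - tio->duration_jiffies;
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors,
                                    true, tio->duration_jiffies,
                                    &tio->stats_aux);
        }
}
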
140 * Don't touch any member of the md after calling this function because
141 * the md may be freed in dm_put() at the end of this function.
144 static void rq_completed(struct mapped_device *md)
149 dm_put(md);
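
Lines 140-149 document the reference drop at completion. A sketch; only the ordering comment and the dm_put() appear in the listing, the suspend-waiter wake-up is an assumption:

static void rq_completed(struct mapped_device *md)
{
        /* nudge anyone waiting on the suspend queue (assumed detail) */
        if (unlikely(wq_has_sleeper(&md->wait)))
                wake_up(&md->wait);

        /*
         * Don't touch any member of the md after this point: the md may
         * be freed by the dm_put() below.
         */
        dm_put(md);
}

This dm_put() pairs with the dm_get() taken in dm_start_request() (line 454).
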
160 struct mapped_device *md = tio->md;
166 rq_end_stats(md, rq);
168 rq_completed(md);
176 void dm_mq_kick_requeue_list(struct mapped_device *md)
178 __dm_mq_kick_requeue_list(md->queue, 0);
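
Lines 176-178: the exported kick helper defers to blk-mq's requeue list. The body of __dm_mq_kick_requeue_list() is not in the listing; blk_mq_delay_kick_requeue_list() is an assumption here:

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
        blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
        __dm_mq_kick_requeue_list(md->queue, 0);
}
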
190 struct mapped_device *md = tio->md;
194 rq_end_stats(md, rq);
201 rq_completed(md);
220 disable_discard(tio->md);
223 disable_write_zeroes(tio->md);
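
Lines 220-223 sit in the clone completion path: if the lower device rejects an operation, that operation is switched off on the DM device so it is not issued to it again. A hedged sketch of the surrounding checks; the BLK_STS_TARGET condition and the queue-limit tests are assumptions and vary across kernel versions:

/* inside dm_done(), after the target's rq_end_io hook has run */
if (unlikely(error == BLK_STS_TARGET)) {
        if (req_op(clone) == REQ_OP_DISCARD &&
            !clone->q->limits.max_discard_sectors)
                disable_discard(tio->md);
        else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
                 !clone->q->limits.max_write_zeroes_sectors)
                disable_write_zeroes(tio->md);
}
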
258 struct mapped_device *md = tio->md;
260 rq_end_stats(md, rq);
262 rq_completed(md);
325 r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
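
Line 325 is the clone setup: the original request's bios are cloned out of the md's bio_set. A sketch of setup_clone(); only the blk_rq_prep_clone() call is in the listing, the end_io wiring and the dm_rq_bio_constructor callback are assumptions:

static int setup_clone(struct request *clone, struct request *rq,
                       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
        int r;

        r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
                              dm_rq_bio_constructor, tio);
        if (r)
                return r;

        /* complete through DM so the original request can be finished (assumed) */
        clone->end_io = end_clone_request;
        clone->end_io_data = tio;

        tio->clone = clone;
        return 0;
}
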
339 struct mapped_device *md)
341 tio->md = md;
352 if (!md->init_tio_pdu)
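
Lines 339-352: per-request context (re)initialization before each dispatch. A sketch; apart from tio->md and the init_tio_pdu guard, the cleared members are assumptions based on struct dm_rq_target_io:

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
                     struct mapped_device *md)
{
        tio->md = md;
        tio->ti = NULL;
        tio->clone = NULL;
        tio->orig = rq;
        tio->error = 0;
        tio->completed = 0;
        /*
         * Leave tio->info alone when the target passes per-io data
         * through info.ptr (set up once in .init_request).
         */
        if (!md->init_tio_pdu)
                memset(&tio->info, 0, sizeof(tio->info));
}
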
366 struct mapped_device *md = tio->md;
384 trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
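
Lines 366 and 384 come from map_request(): once a target returns DM_MAPIO_REMAPPED, the clone is traced against the DM disk before it is dispatched. A minimal fragment; the blk_rq_pos(rq) argument is an assumption:

/* inside map_request(), DM_MAPIO_REMAPPED case */
trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
                     blk_rq_pos(rq));
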
422 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
427 ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
433 static void dm_start_request(struct mapped_device *md, struct request *orig)
437 if (unlikely(dm_stats_used(&md->stats))) {
442 dm_stats_account_io(&md->stats, rq_data_dir(orig),
448 * Hold the md reference here for the in-flight I/O.
454 dm_get(md);
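
Lines 433-454: start-of-request accounting plus the md reference that keeps the device alive for the in-flight I/O. A sketch; the blk_mq_start_request() call and the exact dm_stats_account_io() arguments are assumptions:

static void dm_start_request(struct mapped_device *md, struct request *orig)
{
        blk_mq_start_request(orig);

        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(orig);

                tio->duration_jiffies = jiffies;
                tio->n_sectors = blk_rq_sectors(orig);
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors,
                                    false, 0, &tio->stats_aux);
        }

        /*
         * Hold the md reference here for the in-flight I/O.  The opener's
         * reference is not enough: the device may be closed while requests
         * are still completing.  Dropped again in rq_completed().
         */
        dm_get(md);
}
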
460 struct mapped_device *md = set->driver_data;
464 * Must initialize md member of tio, otherwise it won't
467 tio->md = md;
469 if (md->init_tio_pdu) {
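
Lines 460-469 are from the .init_request hook, which runs once per preallocated request. A sketch using the standard blk_mq_ops .init_request signature; tio->info.ptr pointing just past the tio follows the cmd_size layout at lines 552-556 and is otherwise an assumption:

static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
                              unsigned int hctx_idx, unsigned int numa_node)
{
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
        struct mapped_device *md = set->driver_data;

        /*
         * Must initialize md member of tio, otherwise it won't
         * be available in dm_mq_queue_rq.
         */
        tio->md = md;

        if (md->init_tio_pdu) {
                /* target-specific per-io data sits right after the tio */
                tio->info.ptr = tio + 1;
        }
        return 0;
}
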
482 struct mapped_device *md = tio->md;
483 struct dm_target *ti = md->immutable_target;
490 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)))
497 map = dm_get_live_table(md, &srcu_idx);
499 dm_put_live_table(md, srcu_idx);
503 dm_put_live_table(md, srcu_idx);
509 dm_start_request(md, rq);
511 /* Init tio using md established in .init_request */
512 init_tio(tio, rq, md);
522 rq_end_stats(md, rq);
523 rq_completed(md);
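
Lines 482-523 are the core of the driver, the .queue_rq hook. A condensed sketch of the flow the fragments trace: bail out during suspend, fall back to the live table when there is no immutable target, account and reference the md, then map and dispatch. The ti->type->busy check, the tio->ti assignment and the exact return codes are assumptions:

static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
        struct mapped_device *md = tio->md;
        struct dm_target *ti = md->immutable_target;

        /* requests may still arrive while suspending; let blk-mq retry them */
        if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)))
                return BLK_STS_RESOURCE;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                if (unlikely(!map)) {
                        dm_put_live_table(md, srcu_idx);
                        return BLK_STS_RESOURCE;
                }
                ti = dm_table_find_target(map, 0);
                dm_put_live_table(md, srcu_idx);
        }

        if (ti->type->busy && ti->type->busy(ti))
                return BLK_STS_RESOURCE;

        dm_start_request(md, rq);

        /* Init tio using md established in .init_request */
        init_tio(tio, rq, md);

        /* Establish tio->ti before calling map_request() */
        tio->ti = ti;

        if (map_request(tio) == DM_MAPIO_REQUEUE) {
                /* Undo dm_start_request() before asking blk-mq to requeue */
                rq_end_stats(md, rq);
                rq_completed(md);
                return BLK_STS_RESOURCE;
        }

        return BLK_STS_OK;
}
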
536 int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
541 md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
542 if (!md->tag_set)
545 md->tag_set->ops = &dm_mq_ops;
546 md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
547 md->tag_set->numa_node = md->numa_node_id;
548 md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
549 md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
550 md->tag_set->driver_data = md;
552 md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
556 md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
557 md->init_tio_pdu = true;
560 err = blk_mq_alloc_tag_set(md->tag_set);
564 err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
570 blk_mq_free_tag_set(md->tag_set);
572 kfree(md->tag_set);
573 md->tag_set = NULL;
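
Lines 536-573 show the tag-set setup for the whole mapped device. A sketch of how the listed assignments, the allocation and the error unwinding hang together; the immutable_tgt lookup via dm_table_get_immutable_target() and the goto labels are assumptions:

int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
        struct dm_target *immutable_tgt;
        int err;

        md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL,
                                   md->numa_node_id);
        if (!md->tag_set)
                return -ENOMEM;

        md->tag_set->ops = &dm_mq_ops;
        md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
        md->tag_set->numa_node = md->numa_node_id;
        md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
        md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
        md->tag_set->driver_data = md;

        md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
        immutable_tgt = dm_table_get_immutable_target(t);
        if (immutable_tgt && immutable_tgt->per_io_data_size) {
                /* any target-specific per-io data follows the tio */
                md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
                md->init_tio_pdu = true;
        }

        err = blk_mq_alloc_tag_set(md->tag_set);
        if (err)
                goto out_kfree_tag_set;

        err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
        if (err)
                goto out_tag_set;
        return 0;

out_tag_set:
        blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
        kfree(md->tag_set);
        md->tag_set = NULL;
        return err;
}

Because DM already owns md->queue, the tag set is attached with blk_mq_init_allocated_queue() rather than by allocating a fresh queue.
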
578 void dm_mq_cleanup_mapped_device(struct mapped_device *md)
580 if (md->tag_set) {
581 blk_mq_free_tag_set(md->tag_set);
582 kfree(md->tag_set);
583 md->tag_set = NULL;
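
Lines 578-583 are the matching teardown; apart from the braces, everything is already in the listing:

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
        if (md->tag_set) {
                blk_mq_free_tag_set(md->tag_set);
                kfree(md->tag_set);
                md->tag_set = NULL;
        }
}
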