Lines Matching refs:mapped_device (drivers/md/dm.c)

97 	struct mapped_device *md;
337 int dm_deleting_md(struct mapped_device *md)
344 struct mapped_device *md;
368 struct mapped_device *md;
385 int dm_open_count(struct mapped_device *md)
393 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
413 int dm_cancel_deferred_remove(struct mapped_device *md)
436 struct mapped_device *md = bdev->bd_disk->private_data;
475 struct mapped_device *md = disk->private_data;
519 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
553 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
561 struct mapped_device *md = bdev->bd_disk->private_data;
599 struct mapped_device *md = io->md;
609 static void end_io_acct(struct mapped_device *md, struct bio *bio,
628 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
655 static void free_io(struct mapped_device *md, struct dm_io *io)
695 static void queue_io(struct mapped_device *md, struct bio *bio)
710 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
717 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
722 void dm_sync_table(struct mapped_device *md)
732 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
738 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
749 struct mapped_device *md)
775 static void close_table_device(struct table_device *td, struct mapped_device *md)
799 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
837 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
867 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
877 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
891 static int __noflush_suspending(struct mapped_device *md)
905 struct mapped_device *md = io->md;
958 void disable_discard(struct mapped_device *md)
967 void disable_write_same(struct mapped_device *md)
975 void disable_write_zeroes(struct mapped_device *md)
993 struct mapped_device *md = tio->io->md;
1039 struct mapped_device *md = io->md;
1095 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
1116 struct mapped_device *md = dax_get_private(dax_dev);
1143 struct mapped_device *md = dax_get_private(dax_dev);
1163 struct mapped_device *md = dax_get_private(dax_dev);
1187 struct mapped_device *md = dax_get_private(dax_dev);
1211 struct mapped_device *md = dax_get_private(dax_dev);
1280 static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
1316 struct mapped_device *md = io->md;
1335 struct mapped_device *md = io->md;
1343 struct mapped_device *md = io->md;
1618 static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
1632 static blk_qc_t __split_and_process_bio(struct mapped_device *md,
1693 struct mapped_device *md = bio->bi_disk->private_data;
1780 static void cleanup_mapped_device(struct mapped_device *md)
1822 static struct mapped_device *alloc_dev(int minor)
1825 struct mapped_device *md;
1937 static void unlock_fs(struct mapped_device *md);
1939 static void free_dev(struct mapped_device *md)
1955 static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
2004 struct mapped_device *md = (struct mapped_device *) context;
2020 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2073 static struct dm_table *__unbind(struct mapped_device *md)
2090 int dm_create(int minor, struct mapped_device **result)
2093 struct mapped_device *md;
2113 void dm_lock_md_type(struct mapped_device *md)
2118 void dm_unlock_md_type(struct mapped_device *md)
2123 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
2129 enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
2134 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2143 struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2153 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
2187 struct mapped_device *dm_get_md(dev_t dev)
2189 struct mapped_device *md;
2211 void *dm_get_mdptr(struct mapped_device *md)
2216 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2221 void dm_get(struct mapped_device *md)
2227 int dm_hold(struct mapped_device *md)
2240 const char *dm_device_name(struct mapped_device *md)
2246 static void __dm_destroy(struct mapped_device *md, bool wait)
2279 * No one should increment the reference count of the mapped_device,
2280 * after the mapped_device state becomes DMF_FREEING.
2286 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2294 void dm_destroy(struct mapped_device *md)
2299 void dm_destroy_immediate(struct mapped_device *md)
2304 void dm_put(struct mapped_device *md)
2310 static bool md_in_flight_bios(struct mapped_device *md)
2324 static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state)
2349 static int dm_wait_for_completion(struct mapped_device *md, long task_state)
2376 struct mapped_device *md = container_of(work, struct mapped_device, work);
2392 static void dm_queue_flush(struct mapped_device *md)
2402 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2447 static int lock_fs(struct mapped_device *md)
2465 static void unlock_fs(struct mapped_device *md)
2484 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2592 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2629 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2652 int dm_resume(struct mapped_device *md)
2694 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
2724 static void __dm_internal_resume(struct mapped_device *md)
2746 void dm_internal_suspend_noflush(struct mapped_device *md)
2754 void dm_internal_resume(struct mapped_device *md)
2767 void dm_internal_suspend_fast(struct mapped_device *md)
2780 void dm_internal_resume_fast(struct mapped_device *md)
2795 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2819 uint32_t dm_next_uevent_seq(struct mapped_device *md)
2824 uint32_t dm_get_event_nr(struct mapped_device *md)
2829 int dm_wait_event(struct mapped_device *md, int event_nr)
2835 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2848 struct gendisk *dm_disk(struct mapped_device *md)
2854 struct kobject *dm_kobject(struct mapped_device *md)
2859 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2861 struct mapped_device *md;
2863 md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
2877 int dm_suspended_md(struct mapped_device *md)
2882 static int dm_post_suspending_md(struct mapped_device *md)
2887 int dm_suspended_internally_md(struct mapped_device *md)
2892 int dm_test_deferred_remove_flag(struct mapped_device *md)
2915 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
2984 struct mapped_device *md = bdev->bd_disk->private_data;
3054 struct mapped_device *md = bdev->bd_disk->private_data;
3074 struct mapped_device *md = bdev->bd_disk->private_data;
3095 struct mapped_device *md = bdev->bd_disk->private_data;
3115 struct mapped_device *md = bdev->bd_disk->private_data;
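
The dm_get_live_table()/dm_put_live_table() pair (lines 710/717 above) is the SRCU-protected accessor for a device's active table, with dm_sync_table() (line 722) as the matching write-side barrier. A minimal sketch of the read-side pattern; the body is illustrative only (dm_table_get_size() and dm_device_name() are real device-mapper helpers, but their use here is just an example):

	int srcu_idx;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);

	if (map) {
		/* A concurrent dm_swap_table() cannot free this table
		 * until the SRCU read lock is dropped below. */
		sector_t size = dm_table_get_size(map);

		DMINFO("%s: %llu sectors", dm_device_name(md),
		       (unsigned long long)size);
	}
	dm_put_live_table(md, srcu_idx);

The _fast variants at lines 732/738 follow the same shape but take plain RCU instead of SRCU, for callers that cannot sleep.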
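
dm_get_md() (line 2187) looks a minor number up in the device IDR and, on success, returns the mapped_device with a reference already taken; that reference is what keeps the device from reaching DMF_FREEING (see the comment at lines 2279-2280). The caller drops it with dm_put() (line 2304). A sketch of the lookup pattern; the -ENXIO return value is illustrative:

	struct mapped_device *md = dm_get_md(dev);	/* reference taken on success */

	if (!md)
		return -ENXIO;	/* no such minor, or device being deleted/freed */

	/* ... safe to use md while the reference is held ... */

	dm_put(md);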
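
dm_suspend() (line 2592), dm_swap_table() (line 2402) and dm_resume() (line 2652) together implement the table-replacement sequence driven from dm-ioctl. A hedged sketch of that sequence, assuming DM_SUSPEND_LOCKFS_FLAG from drivers/md/dm.h and a caller-built new_table; error handling is abbreviated (a real caller must not leave the device suspended on failure):

	struct dm_table *old_map;
	int r;

	/* Quiesce I/O, freezing the filesystem on top via the
	 * lock_fs()/unlock_fs() helpers at lines 2447/2465. */
	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
	if (r)
		return r;

	/* Install new_table; the previous live table is returned
	 * (NULL if none, ERR_PTR on failure) and is the caller's
	 * responsibility to release. */
	old_map = dm_swap_table(md, new_table);
	if (IS_ERR(old_map))
		return PTR_ERR(old_map);
	if (old_map)
		dm_table_destroy(old_map);

	return dm_resume(md);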