Lines Matching defs:map
(Definitions and uses of the live-table pointer map in the device-mapper core, drivers/md/dm.c; the leading number on each matched line below is its line number in that source file.)

69 struct dm_table *map;
476 struct dm_table *map;
487 map = dm_get_live_table(md, &srcu_idx);
488 if (!map) {
496 tgt = dm_table_find_target(map, args.next_sector);
523 struct dm_table *map;
528 map = dm_get_live_table(md, srcu_idx);
529 if (!map || !dm_table_get_size(map))
533 if (dm_table_get_num_targets(map) != 1)
536 tgt = dm_table_get_target(map, 0);
707 * function to access the md->map field, and make sure they call
714 return srcu_dereference(md->map, &md->io_barrier);
735 return rcu_dereference(md->map);
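
Lines 707-735 are the reader side of how DM publishes its live table: every reader goes through an accessor that takes the SRCU read lock (or plain RCU, for the fast variant) and dereferences md->map under it. A minimal sketch of the accessor pair as it appears in dm.c, with the __acquires/__releases sparse annotations elided:

	struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx)
	{
		*srcu_idx = srcu_read_lock(&md->io_barrier);
		return srcu_dereference(md->map, &md->io_barrier);
	}

	void dm_put_live_table(struct mapped_device *md, int srcu_idx)
	{
		srcu_read_unlock(&md->io_barrier, srcu_idx);
	}

Every reader cluster in this listing follows the same shape around that pair: get the table, bail out if it is NULL, use it, then dm_put_live_table() (see lines 476-496, 1099-1106, 1144-1152 and 1696-1719).
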
746 * Open a table device so we can use it as a map destination.
1099 struct dm_table *map;
1102 map = dm_get_live_table(md, srcu_idx);
1103 if (!map)
1106 ti = dm_table_find_target(map, sector);
1144 struct dm_table *map;
1148 map = dm_get_live_table(md, &srcu_idx);
1149 if (!map)
1152 ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize);
1236 * A target may call dm_accept_partial_bio only from the map routine. It is
1323 r = ti->type->map(ti, clone);
1350 DMWARN("unimplemented target map return value: %d", r);
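
Line 1323 dispatches into the target's map routine; line 1350 is the core warning about a return value it does not understand. A hedged sketch of what a trivial remapping target returns, modeled on dm-linear (struct example_c and its fields are illustrative, not from the listing; the DM_MAPIO_* values are the real contract from include/linux/device-mapper.h):

	/* Illustrative per-target state, dm-linear style. */
	struct example_c {
		struct dm_dev *dev;	/* backing device */
		sector_t start;		/* offset into it */
	};

	static int example_map(struct dm_target *ti, struct bio *bio)
	{
		struct example_c *ec = ti->private;

		/* Point the bio at the backing device, shifted by our offset. */
		bio_set_dev(bio, ec->dev->bdev);
		bio->bi_iter.bi_sector = ec->start +
			dm_target_offset(ti, bio->bi_iter.bi_sector);

		/*
		 * DM_MAPIO_REMAPPED: the core submits the clone for us.
		 * Also valid: DM_MAPIO_SUBMITTED, DM_MAPIO_REQUEUE and
		 * DM_MAPIO_KILL; anything else trips the DMWARN() above.
		 */
		return DM_MAPIO_REMAPPED;
	}
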
1496 while ((ti = dm_table_get_target(ci->map, target_nr++)))
1599 ti = dm_table_find_target(ci->map, ci->sector);
1619 struct dm_table *map, struct bio *bio)
1621 ci->map = map;
1633 struct dm_table *map, struct bio *bio)
1639 init_clone_info(&ci, md, map, bio);
1696 struct dm_table *map;
1698 map = dm_get_live_table(md, &srcu_idx);
1700 /* If suspended, or map not yet available, queue this IO for later */
1702 unlikely(!map)) {
1719 ret = __split_and_process_bio(md, map, bio);
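
Lines 1696-1719 are the bio entry point: take the live table, then either defer the bio or split and process it against that table. A sketch of the decision (the function name is mine; queue_io() and DMF_BLOCK_IO_FOR_SUSPEND are dm.c names, though the exact flag handling varies by kernel version):

	static void dm_submit_bio_sketch(struct mapped_device *md, struct bio *bio)
	{
		int srcu_idx;
		struct dm_table *map;

		map = dm_get_live_table(md, &srcu_idx);

		/* If suspended, or map not yet available, queue this IO for later */
		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
		    unlikely(!map))
			queue_io(md, bio);	/* replayed once the device resumes */
		else
			__split_and_process_bio(md, map, bio);

		dm_put_live_table(md, srcu_idx);
	}
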
2018 * Returns old map, which caller must destroy.
2058 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2059 rcu_assign_pointer(md->map, (void *)t);
2075 struct dm_table *map = rcu_dereference_protected(md->map, 1);
2077 if (!map)
2080 dm_table_event_callback(map, NULL, NULL);
2081 RCU_INIT_POINTER(md->map, NULL);
2084 return map;
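
Lines 2058-2084 are the writer side of the same scheme: __bind() publishes a new table and __unbind() retracts the old one, both called with md->suspend_lock held, which is what makes the _protected dereference legal. A sketch of the publish step (the function name is mine; dm_sync_table() is dm.c's internal wrapper around synchronize_srcu()/synchronize_rcu(), and its exact placement varies by version):

	static struct dm_table *bind_sketch(struct mapped_device *md,
					    struct dm_table *t)
	{
		struct dm_table *old_map;

		old_map = rcu_dereference_protected(md->map,
					lockdep_is_held(&md->suspend_lock));
		rcu_assign_pointer(md->map, (void *)t);	/* publish the new table */

		if (old_map)
			dm_sync_table(md);	/* wait out readers of old_map */

		/* Per the comment at line 2018: the caller must destroy this. */
		return old_map;
	}
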
2248 struct dm_table *map;
2265 map = dm_get_live_table(md, &srcu_idx);
2267 dm_table_presuspend_targets(map);
2270 dm_table_postsuspend_targets(map);
2404 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2430 map = ERR_PTR(r);
2435 map = __bind(md, table, &limits);
2440 return map;
2484 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2507 dm_table_presuspend_targets(map);
2518 dm_table_presuspend_undo_targets(map);
2535 if (map)
2558 if (map)
2569 dm_table_presuspend_undo_targets(map);
2594 struct dm_table *map = NULL;
2614 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2616 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
2621 dm_table_postsuspend_targets(map);
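
Lines 2594-2621 give the shape of dm_suspend(): everything runs under md->suspend_lock, and __dm_suspend() does the actual quiescing between the presuspend and postsuspend target hooks. Roughly (retry and early-exit paths elided; a sketch, not the full function):

	static int dm_suspend_sketch(struct mapped_device *md,
				     unsigned int suspend_flags)
	{
		struct dm_table *map;
		int r;

		mutex_lock(&md->suspend_lock);

		map = rcu_dereference_protected(md->map,
					lockdep_is_held(&md->suspend_lock));

		/* Presuspends targets, blocks new I/O, then waits for
		 * in-flight I/O in TASK_INTERRUPTIBLE state. */
		r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE,
				 DMF_SUSPENDED);
		if (!r)
			dm_table_postsuspend_targets(map);

		mutex_unlock(&md->suspend_lock);
		return r;
	}

The internal variant at lines 2696-2720 is the same pattern with TASK_UNINTERRUPTIBLE and a different completion flag.
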
2629 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2631 if (map) {
2632 int r = dm_table_resume_targets(map);
2655 struct dm_table *map = NULL;
2673 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2674 if (!map || !dm_table_get_size(map))
2677 r = __dm_resume(md, map);
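
Resume (lines 2655-2677) mirrors this under the same lock: a missing or zero-length table fails the resume, otherwise __dm_resume() resumes the targets and restarts queued I/O. Sketched (the function name is mine, and the exact errno dm.c returns may differ):

	static int dm_resume_sketch(struct mapped_device *md)
	{
		struct dm_table *map;

		map = rcu_dereference_protected(md->map,
					lockdep_is_held(&md->suspend_lock));
		if (!map || !dm_table_get_size(map))
			return -EINVAL;	/* no usable table bound */

		/* dm_table_resume_targets(), then queued bios are replayed. */
		return __dm_resume(md, map);
	}
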
2696 struct dm_table *map = NULL;
2708 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2716 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2720 dm_table_postsuspend_targets(map);
2736 * (which may fail -- so best to avoid it for now by passing NULL map)