Lines matching refs: lo (references to struct loop_device *lo in the kernel loop block driver, drivers/block/loop.c)

102 * @lo: struct loop_device
103 * @global: true if @lo is about to bind another "struct loop_device", false otherwise
111 static int loop_global_lock_killable(struct loop_device *lo, bool global)
120 err = mutex_lock_killable(&lo->lo_mutex);
129 * @lo: struct loop_device
130 * @global: true if @lo was about to bind another "struct loop_device", false otherwise
132 static void loop_global_unlock(struct loop_device *lo, bool global)
134 mutex_unlock(&lo->lo_mutex);
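
The fragments at lines 111-134 show only the lo_mutex half of a paired lock/unlock helper. Below is a minimal reconstruction of the pair; the outer mutex taken when @global is true (loop_validate_mutex in mainline loop.c) does not appear in the matched lines and is filled in here as an assumption.

    /* Assumed driver-global mutex guarding loop-on-loop binding. */
    static DEFINE_MUTEX(loop_validate_mutex);

    static int loop_global_lock_killable(struct loop_device *lo, bool global)
    {
        int err;

        if (global) {
            err = mutex_lock_killable(&loop_validate_mutex);
            if (err)
                return err;
        }
        err = mutex_lock_killable(&lo->lo_mutex);   /* matched line 120 */
        if (err && global)
            mutex_unlock(&loop_validate_mutex);     /* unwind on failure */
        return err;
    }

    static void loop_global_unlock(struct loop_device *lo, bool global)
    {
        mutex_unlock(&lo->lo_mutex);                /* matched line 134 */
        if (global)
            mutex_unlock(&loop_validate_mutex);
    }
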
163 static loff_t get_loop_size(struct loop_device *lo, struct file *file)
165 return get_size(lo->lo_offset, lo->lo_sizelimit, file);
173 static bool lo_bdev_can_use_dio(struct loop_device *lo,
178 if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
180 if (lo->lo_offset & (sb_bsize - 1))
185 static void __loop_update_dio(struct loop_device *lo, bool dio)
187 struct file *file = lo->lo_backing_file;
198 (!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev));
200 if (lo->use_dio == use_dio)
211 if (lo->lo_state == Lo_bound)
212 blk_mq_freeze_queue(lo->lo_queue);
213 lo->use_dio = use_dio;
215 blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
216 lo->lo_flags |= LO_FLAGS_DIRECT_IO;
218 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
219 lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
221 if (lo->lo_state == Lo_bound)
222 blk_mq_unfreeze_queue(lo->lo_queue);
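
Lines 185-222 give most of __loop_update_dio(). A condensed sketch of the flow they imply follows; the use_dio predicate and the backing_bdev resolution are assumptions consistent with lo_bdev_can_use_dio() above, while the freeze/flag/unfreeze sequence comes straight from the matched lines.

    static void __loop_update_dio(struct loop_device *lo, bool dio)
    {
        struct file *file = lo->lo_backing_file;
        struct inode *inode = file->f_mapping->host;
        struct block_device *backing_bdev = NULL;
        bool use_dio;

        if (S_ISBLK(inode->i_mode))
            backing_bdev = I_BDEV(inode);
        else if (inode->i_sb->s_bdev)
            backing_bdev = inode->i_sb->s_bdev;

        /* Assumed predicate: dio requested, the file supports O_DIRECT,
         * and any backing bdev passes the alignment checks of
         * lo_bdev_can_use_dio() (lines 173-180). */
        use_dio = dio && (file->f_mode & FMODE_CAN_ODIRECT) &&
            (!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev));

        if (lo->use_dio == use_dio)
            return;

        /* Freeze the queue so no request observes a half-switched mode. */
        if (lo->lo_state == Lo_bound)
            blk_mq_freeze_queue(lo->lo_queue);
        lo->use_dio = use_dio;
        if (use_dio) {
            blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
            lo->lo_flags |= LO_FLAGS_DIRECT_IO;
        } else {
            blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
            lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
        }
        if (lo->lo_state == Lo_bound)
            blk_mq_unfreeze_queue(lo->lo_queue);
    }
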
227 * @lo: struct loop_device to set the size for
233 static void loop_set_size(struct loop_device *lo, loff_t size)
235 if (!set_capacity_and_notify(lo->lo_disk, size))
236 kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
261 static int lo_write_simple(struct loop_device *lo, struct request *rq,
269 ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
278 static int lo_read_simple(struct loop_device *lo, struct request *rq,
288 len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
307 static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
314 struct file *file = lo->lo_backing_file;
319 if (!bdev_max_discard_sectors(lo->lo_device))
328 static int lo_req_flush(struct loop_device *lo, struct request *rq)
330 int ret = vfs_fsync(lo->lo_backing_file, 0);
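
Lines 307-330 cover the two non-data request helpers. A sketch of both follows; the FALLOC_FL_KEEP_SIZE OR-in and the error mapping (anything other than -EINVAL/-EOPNOTSUPP collapses to -EIO) are assumptions consistent with mainline.

    static int lo_fallocate(struct loop_device *lo, struct request *rq,
                            loff_t pos, int mode)
    {
        struct file *file = lo->lo_backing_file;
        int ret;

        /* Keep the file size fixed; discard/zeroing only punch mappings. */
        mode |= FALLOC_FL_KEEP_SIZE;

        if (!bdev_max_discard_sectors(lo->lo_device))
            return -EOPNOTSUPP;

        ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
        if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
            return -EIO;
        return ret;
    }

    static int lo_req_flush(struct loop_device *lo, struct request *rq)
    {
        int ret = vfs_fsync(lo->lo_backing_file, 0);   /* matched line 330 */

        if (unlikely(ret && ret != -EINVAL))
            ret = -EIO;
        return ret;
    }
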
392 static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
400 struct file *file = lo->lo_backing_file;
461 static int do_req_filebacked(struct loop_device *lo, struct request *rq)
464 loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
477 return lo_req_flush(lo, rq);
483 return lo_fallocate(lo, rq, pos,
488 return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
491 return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
493 return lo_write_simple(lo, rq, pos);
496 return lo_rw_aio(lo, cmd, pos, ITER_DEST);
498 return lo_read_simple(lo, rq, pos);
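
Lines 461-498 show the per-request dispatch of do_req_filebacked(). Reassembled as a switch on req_op(); the case labels, the REQ_NOUNMAP test, and the cmd->use_aio checks are assumptions consistent with the matched call sites.

    static int do_req_filebacked(struct loop_device *lo, struct request *rq)
    {
        struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
        /* 512-byte sectors to bytes, plus the configured offset. */
        loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;

        switch (req_op(rq)) {
        case REQ_OP_FLUSH:
            return lo_req_flush(lo, rq);
        case REQ_OP_WRITE_ZEROES:
            /* REQ_NOUNMAP means zero in place rather than punch a hole. */
            return lo_fallocate(lo, rq, pos,
                (rq->cmd_flags & REQ_NOUNMAP) ?
                    FALLOC_FL_ZERO_RANGE : FALLOC_FL_PUNCH_HOLE);
        case REQ_OP_DISCARD:
            return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
        case REQ_OP_WRITE:
            if (cmd->use_aio)
                return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
            return lo_write_simple(lo, rq, pos);
        case REQ_OP_READ:
            if (cmd->use_aio)
                return lo_rw_aio(lo, cmd, pos, ITER_DEST);
            return lo_read_simple(lo, rq, pos);
        default:
            WARN_ON_ONCE(1);
            return -EIO;
        }
    }
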
505 static inline void loop_update_dio(struct loop_device *lo)
507 __loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
508 lo->use_dio);
511 static void loop_reread_partitions(struct loop_device *lo)
515 mutex_lock(&lo->lo_disk->open_mutex);
516 rc = bdev_disk_changed(lo->lo_disk, false);
517 mutex_unlock(&lo->lo_disk->open_mutex);
520 __func__, lo->lo_number, lo->lo_file_name, rc);
546 /* Order wrt setting lo->lo_backing_file in loop_configure(). */
563 static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
576 dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
579 error = loop_global_lock_killable(lo, is_loop);
583 if (lo->lo_state != Lo_bound)
588 if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
595 old_file = lo->lo_backing_file;
600 if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
604 disk_force_media_change(lo->lo_disk);
605 blk_mq_freeze_queue(lo->lo_queue);
606 mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
607 lo->lo_backing_file = file;
608 lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
610 lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
611 loop_update_dio(lo);
612 blk_mq_unfreeze_queue(lo->lo_queue);
613 partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
614 loop_global_unlock(lo, is_loop);
631 loop_reread_partitions(lo);
636 dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
640 loop_global_unlock(lo, is_loop);
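
Lines 604-614 are the heart of loop_change_fd(): the backing file is swapped with the queue frozen so no request sees a half-swapped device. A sketch of that window; old_file follows line 595, and the gfp-mask juggling (masking out __GFP_IO/__GFP_FS) keeps memory reclaim against the new mapping from recursing back into this I/O stack.

    disk_force_media_change(lo->lo_disk);
    blk_mq_freeze_queue(lo->lo_queue);          /* drain in-flight I/O */

    /* Restore the old mapping's reclaim behaviour ... */
    mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
    lo->lo_backing_file = file;
    lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
    /* ... and forbid FS/IO reclaim on the new one. */
    mapping_set_gfp_mask(file->f_mapping,
                         lo->old_gfp_mask & ~(__GFP_IO | __GFP_FS));

    loop_update_dio(lo);
    blk_mq_unfreeze_queue(lo->lo_queue);
    partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
    loop_global_unlock(lo, is_loop);
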
652 struct loop_device *lo = disk->private_data;
654 return callback(lo, page);
667 static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
672 spin_lock_irq(&lo->lo_lock);
673 if (lo->lo_backing_file)
674 p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
675 spin_unlock_irq(&lo->lo_lock);
689 static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
691 return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset);
694 static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
696 return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
699 static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
701 int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);
706 static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
708 int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);
713 static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
715 int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);
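
The dispatcher at lines 652-654 takes a per-attribute callback, which points at the usual macro wiring for read-only device attributes. A sketch of that wiring under that assumption; the LOOP_ATTR_RO name matches mainline, and the _show helpers it stamps out are the ones matched above.

    static ssize_t loop_attr_show(struct device *dev, char *page,
                    ssize_t (*callback)(struct loop_device *, char *))
    {
        struct gendisk *disk = dev_to_disk(dev);
        struct loop_device *lo = disk->private_data;    /* matched line 652 */

        return callback(lo, page);                      /* matched line 654 */
    }

    #define LOOP_ATTR_RO(_name)                                            \
    static ssize_t loop_attr_##_name##_show(struct loop_device *, char *); \
    static ssize_t loop_attr_do_show_##_name(struct device *d,             \
                    struct device_attribute *attr, char *b)                \
    {                                                                      \
        return loop_attr_show(d, b, loop_attr_##_name##_show);             \
    }                                                                      \
    static struct device_attribute loop_attr_##_name =                     \
        __ATTR(_name, 0444, loop_attr_do_show_##_name, NULL);
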
742 static void loop_sysfs_init(struct loop_device *lo)
744 lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
748 static void loop_sysfs_exit(struct loop_device *lo)
750 if (lo->sysfs_inited)
751 sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
755 static void loop_config_discard(struct loop_device *lo)
757 struct file *file = lo->lo_backing_file;
759 struct request_queue *q = lo->lo_queue;
810 struct loop_device *lo;
829 static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
836 spin_lock_irq(&lo->lo_work_lock);
841 node = &lo->worker_tree.rb_node;
876 worker->lo = lo;
878 rb_insert_color(&worker->rb_node, &lo->worker_tree);
891 work = &lo->rootcg_work;
892 cmd_list = &lo->rootcg_cmd_list;
895 queue_work(lo->workqueue, work);
896 spin_unlock_irq(&lo->lo_work_lock);
899 static void loop_set_timer(struct loop_device *lo)
901 timer_reduce(&lo->timer, jiffies + LOOP_IDLE_WORKER_TIMEOUT);
904 static void loop_free_idle_workers(struct loop_device *lo, bool delete_all)
908 spin_lock_irq(&lo->lo_work_lock);
909 list_for_each_entry_safe(worker, pos, &lo->idle_worker_list,
916 rb_erase(&worker->rb_node, &lo->worker_tree);
920 if (!list_empty(&lo->idle_worker_list))
921 loop_set_timer(lo);
922 spin_unlock_irq(&lo->lo_work_lock);
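
Lines 904-922 outline the idle-worker reaper. Reassembled below; the loop_worker fields (idle_list, last_ran_at, blkcg_css) are assumptions from mainline, while the lock, list walk, rb_erase(), and timer re-arm come from the matched lines.

    static void loop_free_idle_workers(struct loop_device *lo, bool delete_all)
    {
        struct loop_worker *pos, *worker;

        spin_lock_irq(&lo->lo_work_lock);
        list_for_each_entry_safe(worker, pos, &lo->idle_worker_list,
                                 idle_list) {
            /* Stop at the first worker still within the idle timeout,
             * unless tearing everything down. */
            if (!delete_all &&
                time_is_after_jiffies(worker->last_ran_at +
                                      LOOP_IDLE_WORKER_TIMEOUT))
                break;
            list_del(&worker->idle_list);
            rb_erase(&worker->rb_node, &lo->worker_tree);
            css_put(worker->blkcg_css);     /* assumed: drop cgroup ref */
            kfree(worker);
        }
        /* Workers remain: re-arm the idle timer (lines 899-901). */
        if (!list_empty(&lo->idle_worker_list))
            loop_set_timer(lo);
        spin_unlock_irq(&lo->lo_work_lock);
    }
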
927 struct loop_device *lo = container_of(timer, struct loop_device, timer);
929 return loop_free_idle_workers(lo, false);
932 static void loop_update_rotational(struct loop_device *lo)
934 struct file *file = lo->lo_backing_file;
937 struct request_queue *q = lo->lo_queue;
952 * @lo: struct loop_device to configure
959 loop_set_status_from_info(struct loop_device *lo,
982 lo->lo_offset = info->lo_offset;
983 lo->lo_sizelimit = info->lo_sizelimit;
985 memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
986 lo->lo_file_name[LO_NAME_SIZE-1] = 0;
987 lo->lo_flags = info->lo_flags;
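
Lines 982-987 are the assignment half of loop_set_status_from_info(). The overflow guard in front of the assignments is an assumption from mainline; lo_offset and lo_sizelimit arrive as unsigned 64-bit values and must fit in loff_t.

    /* Assumed guard: reject values that would overflow loff_t. */
    if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX)
        return -EOVERFLOW;

    lo->lo_offset = info->lo_offset;
    lo->lo_sizelimit = info->lo_sizelimit;

    memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
    lo->lo_file_name[LO_NAME_SIZE - 1] = 0;   /* force NUL termination */
    lo->lo_flags = info->lo_flags;
    return 0;
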
991 static int loop_configure(struct loop_device *lo, blk_mode_t mode,
1021 error = loop_global_lock_killable(lo, is_loop);
1026 if (lo->lo_state != Lo_unbound)
1047 error = loop_set_status_from_info(lo, &config->info);
1053 lo->lo_flags |= LO_FLAGS_READ_ONLY;
1055 if (!lo->workqueue) {
1056 lo->workqueue = alloc_workqueue("loop%d",
1058 0, lo->lo_number);
1059 if (!lo->workqueue) {
1066 dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
1068 disk_force_media_change(lo->lo_disk);
1069 set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
1071 lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
1072 lo->lo_device = bdev;
1073 lo->lo_backing_file = file;
1074 lo->old_gfp_mask = mapping_gfp_mask(mapping);
1075 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
1077 if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
1078 blk_queue_write_cache(lo->lo_queue, true, false);
1082 else if ((lo->lo_backing_file->f_flags & O_DIRECT) && inode->i_sb->s_bdev)
1088 blk_queue_logical_block_size(lo->lo_queue, bsize);
1089 blk_queue_physical_block_size(lo->lo_queue, bsize);
1090 blk_queue_io_min(lo->lo_queue, bsize);
1092 loop_config_discard(lo);
1093 loop_update_rotational(lo);
1094 loop_update_dio(lo);
1095 loop_sysfs_init(lo);
1097 size = get_loop_size(lo, file);
1098 loop_set_size(lo, size);
1103 lo->lo_state = Lo_bound;
1105 lo->lo_flags |= LO_FLAGS_PARTSCAN;
1106 partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
1108 clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
1111 dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
1113 loop_global_unlock(lo, is_loop);
1115 loop_reread_partitions(lo);
1123 loop_global_unlock(lo, is_loop);
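
Lines 1021-1115 trace the bind path of loop_configure(). A condensed happy-path sketch of the ordering they imply; argument validation, the workqueue allocation at lines 1055-1059, error unwinding, and the block-size computation feeding bsize (lines 1082-1088) are elided or summarized.

    error = loop_global_lock_killable(lo, is_loop);
    if (error)
        goto out;
    if (lo->lo_state != Lo_unbound)     /* only an unbound device binds */
        goto out_unlock;

    error = loop_set_status_from_info(lo, &config->info);
    if (error)
        goto out_unlock;

    /* Suppress uevents until the device is fully configured. */
    dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
    disk_force_media_change(lo->lo_disk);
    set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);

    lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
    lo->lo_device = bdev;
    lo->lo_backing_file = file;
    lo->old_gfp_mask = mapping_gfp_mask(mapping);
    mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO | __GFP_FS));

    blk_queue_logical_block_size(lo->lo_queue, bsize);
    blk_queue_physical_block_size(lo->lo_queue, bsize);
    blk_queue_io_min(lo->lo_queue, bsize);

    loop_config_discard(lo);
    loop_update_rotational(lo);
    loop_update_dio(lo);
    loop_sysfs_init(lo);

    loop_set_size(lo, get_loop_size(lo, file));
    lo->lo_state = Lo_bound;

    dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
    loop_global_unlock(lo, is_loop);
    if (partscan)
        loop_reread_partitions(lo);     /* outside the locks, line 1115 */
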
1134 static void __loop_clr_fd(struct loop_device *lo, bool release)
1137 gfp_t gfp = lo->old_gfp_mask;
1139 if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
1140 blk_queue_write_cache(lo->lo_queue, false, false);
1148 blk_mq_freeze_queue(lo->lo_queue);
1150 spin_lock_irq(&lo->lo_lock);
1151 filp = lo->lo_backing_file;
1152 lo->lo_backing_file = NULL;
1153 spin_unlock_irq(&lo->lo_lock);
1155 lo->lo_device = NULL;
1156 lo->lo_offset = 0;
1157 lo->lo_sizelimit = 0;
1158 memset(lo->lo_file_name, 0, LO_NAME_SIZE);
1159 blk_queue_logical_block_size(lo->lo_queue, 512);
1160 blk_queue_physical_block_size(lo->lo_queue, 512);
1161 blk_queue_io_min(lo->lo_queue, 512);
1162 invalidate_disk(lo->lo_disk);
1163 loop_sysfs_exit(lo);
1165 kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
1170 blk_mq_unfreeze_queue(lo->lo_queue);
1172 disk_force_media_change(lo->lo_disk);
1174 if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
1186 mutex_lock(&lo->lo_disk->open_mutex);
1187 err = bdev_disk_changed(lo->lo_disk, false);
1189 mutex_unlock(&lo->lo_disk->open_mutex);
1192 __func__, lo->lo_number, err);
1197 * lo->lo_state is set to Lo_unbound here after above partscan has
1200 * change the 'lo' device.
1202 lo->lo_flags = 0;
1204 set_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
1205 mutex_lock(&lo->lo_mutex);
1206 lo->lo_state = Lo_unbound;
1207 mutex_unlock(&lo->lo_mutex);
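
Lines 1137-1207 walk the teardown in __loop_clr_fd(). The ordering matters: the backing file pointer is cleared under lo_lock with the queue frozen, partitions are rescanned while the state is still Lo_rundown, and lo_state only becomes Lo_unbound at the very end so the device cannot be reconfigured mid-teardown. A condensed sketch; the final fput() is an assumption from mainline.

    blk_mq_freeze_queue(lo->lo_queue);

    spin_lock_irq(&lo->lo_lock);
    filp = lo->lo_backing_file;
    lo->lo_backing_file = NULL;     /* readers under lo_lock now see NULL */
    spin_unlock_irq(&lo->lo_lock);

    lo->lo_device = NULL;
    lo->lo_offset = 0;
    lo->lo_sizelimit = 0;
    memset(lo->lo_file_name, 0, LO_NAME_SIZE);

    /* Reset the queue limits to the 512-byte defaults. */
    blk_queue_logical_block_size(lo->lo_queue, 512);
    blk_queue_physical_block_size(lo->lo_queue, 512);
    blk_queue_io_min(lo->lo_queue, 512);

    invalidate_disk(lo->lo_disk);
    loop_sysfs_exit(lo);
    kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
    blk_mq_unfreeze_queue(lo->lo_queue);
    disk_force_media_change(lo->lo_disk);

    /* ... partition rescan while still Lo_rundown (lines 1186-1192) ... */

    lo->lo_flags = 0;
    mutex_lock(&lo->lo_mutex);
    lo->lo_state = Lo_unbound;      /* flip to unbound last, under lo_mutex */
    mutex_unlock(&lo->lo_mutex);
    fput(filp);                     /* assumed: release the file reference */
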
1217 static int loop_clr_fd(struct loop_device *lo)
1230 err = loop_global_lock_killable(lo, true);
1233 if (lo->lo_state != Lo_bound) {
1234 loop_global_unlock(lo, true);
1247 if (disk_openers(lo->lo_disk) > 1) {
1248 lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
1249 loop_global_unlock(lo, true);
1252 lo->lo_state = Lo_rundown;
1253 loop_global_unlock(lo, true);
1255 __loop_clr_fd(lo, false);
1260 loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1267 err = mutex_lock_killable(&lo->lo_mutex);
1270 if (lo->lo_state != Lo_bound) {
1275 if (lo->lo_offset != info->lo_offset ||
1276 lo->lo_sizelimit != info->lo_sizelimit) {
1278 sync_blockdev(lo->lo_device);
1279 invalidate_bdev(lo->lo_device);
1283 blk_mq_freeze_queue(lo->lo_queue);
1285 prev_lo_flags = lo->lo_flags;
1287 err = loop_set_status_from_info(lo, info);
1292 lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
1294 lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
1296 lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
1299 loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
1300 lo->lo_backing_file);
1301 loop_set_size(lo, new_size);
1304 loop_config_discard(lo);
1307 __loop_update_dio(lo, lo->use_dio);
1310 blk_mq_unfreeze_queue(lo->lo_queue);
1312 if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
1314 clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
1318 mutex_unlock(&lo->lo_mutex);
1320 loop_reread_partitions(lo);
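
Lines 1260-1320 cover loop_set_status(). The three flag operations at lines 1292-1296 implement a mask-and-restore scheme: only flags userspace may set survive from the new info, everything else is restored from the saved prev_lo_flags, and flags that may not be cleared are re-asserted. A condensed sketch of the update done under queue freeze; size_changed is an assumed local computed from the offset/sizelimit comparison at lines 1275-1276.

    blk_mq_freeze_queue(lo->lo_queue);

    prev_lo_flags = lo->lo_flags;
    err = loop_set_status_from_info(lo, info);
    if (err)
        goto out_unfreeze;

    /* Keep only flags settable via LOOP_SET_STATUS ... */
    lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
    /* ... restore the non-settable ones from the previous state ... */
    lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
    /* ... and re-assert flags that cannot be cleared once set. */
    lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;

    if (size_changed) {
        loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
                                   lo->lo_backing_file);
        loop_set_size(lo, new_size);
    }

    loop_config_discard(lo);
    __loop_update_dio(lo, lo->use_dio);     /* re-validate direct I/O */

    out_unfreeze:
    blk_mq_unfreeze_queue(lo->lo_queue);
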
1326 loop_get_status(struct loop_device *lo, struct loop_info64 *info)
1332 ret = mutex_lock_killable(&lo->lo_mutex);
1335 if (lo->lo_state != Lo_bound) {
1336 mutex_unlock(&lo->lo_mutex);
1341 info->lo_number = lo->lo_number;
1342 info->lo_offset = lo->lo_offset;
1343 info->lo_sizelimit = lo->lo_sizelimit;
1344 info->lo_flags = lo->lo_flags;
1345 memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
1348 path = lo->lo_backing_file->f_path;
1350 mutex_unlock(&lo->lo_mutex);
1398 loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
1406 return loop_set_status(lo, &info64);
1410 loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
1416 return loop_set_status(lo, &info64);
1420 loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
1427 err = loop_get_status(lo, &info64);
1437 loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
1443 err = loop_get_status(lo, &info64);
1450 static int loop_set_capacity(struct loop_device *lo)
1454 if (unlikely(lo->lo_state != Lo_bound))
1457 size = get_loop_size(lo, lo->lo_backing_file);
1458 loop_set_size(lo, size);
1463 static int loop_set_dio(struct loop_device *lo, unsigned long arg)
1466 if (lo->lo_state != Lo_bound)
1469 __loop_update_dio(lo, !!arg);
1470 if (lo->use_dio == !!arg)
1477 static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
1481 if (lo->lo_state != Lo_bound)
1488 if (lo->lo_queue->limits.logical_block_size == arg)
1491 sync_blockdev(lo->lo_device);
1492 invalidate_bdev(lo->lo_device);
1494 blk_mq_freeze_queue(lo->lo_queue);
1495 blk_queue_logical_block_size(lo->lo_queue, arg);
1496 blk_queue_physical_block_size(lo->lo_queue, arg);
1497 blk_queue_io_min(lo->lo_queue, arg);
1498 loop_update_dio(lo);
1499 blk_mq_unfreeze_queue(lo->lo_queue);
1504 static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
1509 err = mutex_lock_killable(&lo->lo_mutex);
1514 err = loop_set_capacity(lo);
1517 err = loop_set_dio(lo, arg);
1520 err = loop_set_block_size(lo, arg);
1525 mutex_unlock(&lo->lo_mutex);
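
Lines 1504-1525 form a near-complete dispatcher. Reassembled with the LOOP_* case labels filled in from the UAPI ioctl numbers, an assumption consistent with the three helpers being called.

    static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
                               unsigned long arg)
    {
        int err;

        err = mutex_lock_killable(&lo->lo_mutex);
        if (err)
            return err;
        switch (cmd) {
        case LOOP_SET_CAPACITY:
            err = loop_set_capacity(lo);
            break;
        case LOOP_SET_DIRECT_IO:
            err = loop_set_dio(lo, arg);
            break;
        case LOOP_SET_BLOCK_SIZE:
            err = loop_set_block_size(lo, arg);
            break;
        default:
            err = -EINVAL;
        }
        mutex_unlock(&lo->lo_mutex);
        return err;
    }
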
1532 struct loop_device *lo = bdev->bd_disk->private_data;
1548 return loop_configure(lo, mode, bdev, &config);
1556 return loop_configure(lo, mode, bdev, &config);
1559 return loop_change_fd(lo, bdev, arg);
1561 return loop_clr_fd(lo);
1565 err = loop_set_status_old(lo, argp);
1568 return loop_get_status_old(lo, argp);
1572 err = loop_set_status64(lo, argp);
1575 return loop_get_status64(lo, argp);
1583 err = lo_simple_ioctl(lo, cmd, arg);
1663 loop_set_status_compat(struct loop_device *lo,
1672 return loop_set_status(lo, &info64);
1676 loop_get_status_compat(struct loop_device *lo,
1684 err = loop_get_status(lo, &info64);
1693 struct loop_device *lo = bdev->bd_disk->private_data;
1698 err = loop_set_status_compat(lo,
1702 err = loop_get_status_compat(lo,
1728 struct loop_device *lo = disk->private_data;
1733 mutex_lock(&lo->lo_mutex);
1734 if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR)) {
1735 lo->lo_state = Lo_rundown;
1736 mutex_unlock(&lo->lo_mutex);
1741 __loop_clr_fd(lo, true);
1744 mutex_unlock(&lo->lo_mutex);
1749 struct loop_device *lo = disk->private_data;
1751 if (lo->workqueue)
1752 destroy_workqueue(lo->workqueue);
1753 loop_free_idle_workers(lo, true);
1754 timer_shutdown_sync(&lo->timer);
1755 mutex_destroy(&lo->lo_mutex);
1756 kfree(lo);
1847 struct loop_device *lo = rq->q->queuedata;
1851 if (lo->lo_state != Lo_bound)
1861 cmd->use_aio = lo->use_dio;
1880 loop_queue_work(lo, cmd);
1891 struct loop_device *lo = rq->q->queuedata;
1896 if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
1913 ret = do_req_filebacked(lo, rq);
1935 struct list_head *cmd_list, struct loop_device *lo)
1941 spin_lock_irq(&lo->lo_work_lock);
1946 spin_unlock_irq(&lo->lo_work_lock);
1951 spin_lock_irq(&lo->lo_work_lock);
1961 list_add_tail(&worker->idle_list, &lo->idle_worker_list);
1962 loop_set_timer(lo);
1964 spin_unlock_irq(&lo->lo_work_lock);
1972 loop_process_work(worker, &worker->cmd_list, worker->lo);
1977 struct loop_device *lo =
1979 loop_process_work(NULL, &lo->rootcg_cmd_list, lo);
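
Lines 1935-1979 outline the worker machinery: loop_process_work() drops the work lock around each command, and a drained worker parks itself on the idle list for the reaper above. A sketch; loop_handle_cmd() and the PF_MEMALLOC_NOIO flagging are assumptions from mainline, and a NULL worker denotes the rootcg work item (lines 1977-1979).

    static void loop_process_work(struct loop_worker *worker,
                    struct list_head *cmd_list, struct loop_device *lo)
    {
        int orig_flags = current->flags;
        struct loop_cmd *cmd;

        /* Assumed: keep reclaim from re-entering I/O while we run. */
        current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;

        spin_lock_irq(&lo->lo_work_lock);
        while (!list_empty(cmd_list)) {
            cmd = list_first_entry(cmd_list, struct loop_cmd, list_entry);
            list_del(&cmd->list_entry);
            spin_unlock_irq(&lo->lo_work_lock);

            loop_handle_cmd(cmd);   /* ends in do_req_filebacked() */
            cond_resched();

            spin_lock_irq(&lo->lo_work_lock);
        }

        /* Drained: park on the idle list (skip for the rootcg work). */
        if (worker && !work_pending(&worker->work)) {
            worker->last_ran_at = jiffies;
            list_add_tail(&worker->idle_list, &lo->idle_worker_list);
            loop_set_timer(lo);
        }
        spin_unlock_irq(&lo->lo_work_lock);
        current->flags = orig_flags;
    }
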
1989 struct loop_device *lo;
1994 lo = kzalloc(sizeof(*lo), GFP_KERNEL);
1995 if (!lo)
1997 lo->worker_tree = RB_ROOT;
1998 INIT_LIST_HEAD(&lo->idle_worker_list);
1999 timer_setup(&lo->timer, loop_free_idle_workers_timer, TIMER_DEFERRABLE);
2000 lo->lo_state = Lo_unbound;
2008 err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
2012 err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
2019 lo->tag_set.ops = &loop_mq_ops;
2020 lo->tag_set.nr_hw_queues = 1;
2021 lo->tag_set.queue_depth = hw_queue_depth;
2022 lo->tag_set.numa_node = NUMA_NO_NODE;
2023 lo->tag_set.cmd_size = sizeof(struct loop_cmd);
2024 lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
2026 lo->tag_set.driver_data = lo;
2028 err = blk_mq_alloc_tag_set(&lo->tag_set);
2032 disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, lo);
2037 lo->lo_queue = lo->lo_disk->queue;
2039 blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);
2047 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
2069 mutex_init(&lo->lo_mutex);
2070 lo->lo_number = i;
2071 spin_lock_init(&lo->lo_lock);
2072 spin_lock_init(&lo->lo_work_lock);
2073 INIT_WORK(&lo->rootcg_work, loop_rootcg_workfn);
2074 INIT_LIST_HEAD(&lo->rootcg_cmd_list);
2079 disk->private_data = lo;
2080 disk->queue = lo->lo_queue;
2091 lo->idr_visible = true;
2099 blk_mq_free_tag_set(&lo->tag_set);
2105 kfree(lo);
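
Lines 1994-2091 cover device allocation in loop_add(). The blk-mq setup in the middle reads almost contiguously; a sketch with the error labels summarized. The BLK_MQ_F_NO_SCHED_BY_DEFAULT continuation of line 2024 does not appear in the listing and is an assumption from mainline.

    lo->tag_set.ops = &loop_mq_ops;
    lo->tag_set.nr_hw_queues = 1;
    lo->tag_set.queue_depth = hw_queue_depth;   /* module parameter */
    lo->tag_set.numa_node = NUMA_NO_NODE;
    lo->tag_set.cmd_size = sizeof(struct loop_cmd);
    lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
        BLK_MQ_F_NO_SCHED_BY_DEFAULT;           /* assumed continuation */
    lo->tag_set.driver_data = lo;

    err = blk_mq_alloc_tag_set(&lo->tag_set);
    if (err)
        goto out_free_idr;

    disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, lo);
    if (IS_ERR(disk)) {
        err = PTR_ERR(disk);
        goto out_cleanup_tags;
    }
    lo->lo_queue = lo->lo_disk->queue;
    blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);  /* line 2039 */
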
2110 static void loop_remove(struct loop_device *lo)
2113 del_gendisk(lo->lo_disk);
2114 blk_mq_free_tag_set(&lo->tag_set);
2117 idr_remove(&loop_index_idr, lo->lo_number);
2120 put_disk(lo->lo_disk);
2138 struct loop_device *lo;
2150 lo = idr_find(&loop_index_idr, idx);
2151 if (!lo || !lo->idr_visible)
2154 lo->idr_visible = false;
2160 ret = mutex_lock_killable(&lo->lo_mutex);
2163 if (lo->lo_state != Lo_unbound || disk_openers(lo->lo_disk) > 0) {
2164 mutex_unlock(&lo->lo_mutex);
2169 lo->lo_state = Lo_deleting;
2170 mutex_unlock(&lo->lo_mutex);
2172 loop_remove(lo);
2178 lo->idr_visible = true;
2185 struct loop_device *lo;
2191 idr_for_each_entry(&loop_index_idr, lo, id) {
2193 if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound)
2290 struct loop_device *lo;
2302 idr_for_each_entry(&loop_index_idr, lo, id)
2303 loop_remove(lo);