Lines matching references to ub
201 static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub)
203 return ub->dev_info.flags & UBLK_F_USER_COPY;
206 static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
208 return ub->dev_info.flags & UBLK_F_ZONED;
218 static int ublk_get_nr_zones(const struct ublk_device *ub)
220 const struct ublk_param_basic *p = &ub->params.basic;
226 static int ublk_revalidate_disk_zones(struct ublk_device *ub)
228 return blk_revalidate_disk_zones(ub->ub_disk, NULL);
231 static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
233 const struct ublk_param_zoned *p = &ub->params.zoned;
236 if (!ublk_dev_is_zoned(ub))
242 nr_zones = ublk_get_nr_zones(ub);
253 static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
255 const struct ublk_param_zoned *p = &ub->params.zoned;
257 disk_set_zoned(ub->ub_disk, BLK_ZONED_HM);
258 blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
259 blk_queue_required_elevator_features(ub->ub_disk->queue,
261 disk_set_max_active_zones(ub->ub_disk, p->max_active_zones);
262 disk_set_max_open_zones(ub->ub_disk, p->max_open_zones);
263 blk_queue_max_zone_append_sectors(ub->ub_disk->queue, p->max_zone_append_sectors);
265 ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
301 struct ublk_device *ub = disk->private_data;
310 nr_zones = min_t(unsigned int, ub->ub_disk->nr_zones - first_zone,
313 buffer = ublk_alloc_report_buffer(ub, nr_zones, &buffer_length);
434 static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
439 static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
444 static int ublk_revalidate_disk_zones(struct ublk_device *ub)
499 static void ublk_dev_param_basic_apply(struct ublk_device *ub)
501 struct request_queue *q = ub->ub_disk->queue;
502 const struct ublk_param_basic *p = &ub->params.basic;
521 set_disk_ro(ub->ub_disk, true);
523 set_capacity(ub->ub_disk, p->dev_sectors);
526 static void ublk_dev_param_discard_apply(struct ublk_device *ub)
528 struct request_queue *q = ub->ub_disk->queue;
529 const struct ublk_param_discard *p = &ub->params.discard;
539 static int ublk_validate_params(const struct ublk_device *ub)
542 if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
543 const struct ublk_param_basic *p = &ub->params.basic;
551 if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
554 if (ublk_dev_is_zoned(ub) && !p->chunk_sectors)
559 if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
560 const struct ublk_param_discard *p = &ub->params.discard;
571 if (ub->params.types & UBLK_PARAM_TYPE_DEVT)
574 if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
575 return ublk_dev_param_zoned_validate(ub);
576 else if (ublk_dev_is_zoned(ub))
582 static int ublk_apply_params(struct ublk_device *ub)
584 if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
587 ublk_dev_param_basic_apply(ub);
589 if (ub->params.types & UBLK_PARAM_TYPE_DISCARD)
590 ublk_dev_param_discard_apply(ub);
592 if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
593 return ublk_dev_param_zoned_apply(ub);
651 static struct ublk_device *ublk_get_device(struct ublk_device *ub)
653 if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
654 return ub;
658 static void ublk_put_device(struct ublk_device *ub)
660 put_device(&ub->cdev_dev);
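These two helpers carry the reference-counting rule the rest of the listing relies on: ublk_get_device() succeeds only while the character device's kobject is still live, and every successful get must be paired with ublk_put_device(). Below is a minimal sketch of a lookup that follows this rule; the IDR and lock names are placeholders for illustration, not the driver's actual globals (the driver's own lookup appears to be ublk_get_device_from_id(), whose body is visible further down around lines 2132-2143).

#include <linux/idr.h>
#include <linux/spinlock.h>

/* Sketch only: placeholder IDR/lock stand in for the driver's real globals. */
static DEFINE_SPINLOCK(example_idr_lock);
static DEFINE_IDR(example_idr);

static struct ublk_device *example_get_by_index(int idx)
{
	struct ublk_device *ub;

	spin_lock(&example_idr_lock);
	ub = idr_find(&example_idr, idx);
	if (ub)
		ub = ublk_get_device(ub);	/* NULL if the refcount already hit zero */
	spin_unlock(&example_idr_lock);

	return ub;	/* a non-NULL result must be released with ublk_put_device(ub) */
}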
681 static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
683 return ublk_get_queue(ub, q_id)->io_cmd_buf;
686 static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
688 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
707 static inline bool ublk_can_use_recovery(struct ublk_device *ub)
709 return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
714 struct ublk_device *ub = disk->private_data;
716 clear_bit(UB_STATE_USED, &ub->state);
717 put_device(&ub->cdev_dev);
734 struct ublk_device *ub = disk->private_data;
748 if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) {
753 if (curr_uid != ub->dev_info.owner_uid || curr_gid !=
754 ub->dev_info.owner_gid)
1323 struct ublk_device *ub = driver_data;
1324 struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);
1338 struct ublk_device *ub = container_of(inode->i_cdev,
1341 if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
1343 filp->private_data = ub;
1349 struct ublk_device *ub = filp->private_data;
1351 clear_bit(UB_STATE_OPEN, &ub->state);
1358 struct ublk_device *ub = filp->private_data;
1364 spin_lock(&ub->mm_lock);
1365 if (!ub->mm)
1366 ub->mm = current->mm;
1367 if (current->mm != ub->mm)
1369 spin_unlock(&ub->mm_lock);
1377 end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz;
1386 if (sz != ublk_queue_cmd_buf_size(ub, q_id))
1389 pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
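The bounds check on line 1377 and the pfn calculation on line 1389 define the mmap protocol between the driver and the userspace server: each queue's I/O-descriptor command buffer is exposed at UBLKSRV_CMD_BUF_OFFSET plus a fixed per-queue stride. The following is a sketch of the userspace side, assuming the UAPI names from <linux/ublk_cmd.h> (UBLKSRV_CMD_BUF_OFFSET, UBLK_MAX_QUEUE_DEPTH, struct ublksrv_io_desc) and that the stride is UBLK_MAX_QUEUE_DEPTH descriptors per queue, as the bounds check implies; map_io_descs() itself is a made-up helper name.

#include <sys/mman.h>
#include <linux/ublk_cmd.h>	/* UBLKSRV_CMD_BUF_OFFSET, struct ublksrv_io_desc */

/* Map queue q_id's descriptor array from the /dev/ublkcN character device fd. */
static struct ublksrv_io_desc *map_io_descs(int cdev_fd, unsigned int q_id,
					    unsigned int queue_depth)
{
	off_t off = UBLKSRV_CMD_BUF_OFFSET +
		(off_t)q_id * UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
	size_t len = queue_depth * sizeof(struct ublksrv_io_desc);
	void *p;

	/* The driver fills the descriptors; the server only needs to read them. */
	p = mmap(NULL, len, PROT_READ, MAP_SHARED, cdev_fd, off);
	return p == MAP_FAILED ? NULL : p;
}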
1393 static void ublk_commit_completion(struct ublk_device *ub,
1397 struct ublk_queue *ubq = ublk_get_queue(ub, qid);
1406 req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
1422 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
1426 if (!ublk_get_device(ub))
1439 rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
1444 ublk_put_device(ub);
1449 struct ublk_device *ub =
1453 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
1454 struct ublk_queue *ubq = ublk_get_queue(ub, i);
1458 schedule_work(&ub->quiesce_work);
1460 schedule_work(&ub->stop_work);
1463 ublk_abort_queue(ub, ubq);
1468 * We can't schedule monitor work once ub's state is no longer UBLK_S_DEV_LIVE.
1471 * No need for ub->mutex: monitor work is canceled after the state is marked
1474 if (ub->dev_info.state == UBLK_S_DEV_LIVE)
1475 schedule_delayed_work(&ub->monitor_work,
1509 static void ublk_cancel_dev(struct ublk_device *ub)
1513 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1514 ublk_cancel_queue(ublk_get_queue(ub, i));
1528 static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
1532 WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
1535 blk_mq_tagset_busy_iter(&ub->tag_set,
1543 static void __ublk_quiesce_dev(struct ublk_device *ub)
1545 pr_devel("%s: quiesce ub: dev_id %d state %s\n",
1546 __func__, ub->dev_info.dev_id,
1547 ub->dev_info.state == UBLK_S_DEV_LIVE ?
1549 blk_mq_quiesce_queue(ub->ub_disk->queue);
1550 ublk_wait_tagset_rqs_idle(ub);
1551 ub->dev_info.state = UBLK_S_DEV_QUIESCED;
1560 cancel_delayed_work_sync(&ub->monitor_work);
1565 struct ublk_device *ub =
1568 mutex_lock(&ub->mutex);
1569 if (ub->dev_info.state != UBLK_S_DEV_LIVE)
1571 __ublk_quiesce_dev(ub);
1573 mutex_unlock(&ub->mutex);
1574 ublk_cancel_dev(ub);
1577 static void ublk_unquiesce_dev(struct ublk_device *ub)
1581 pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
1582 __func__, ub->dev_info.dev_id,
1583 ub->dev_info.state == UBLK_S_DEV_LIVE ?
1590 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1591 ublk_get_queue(ub, i)->force_abort = true;
1593 blk_mq_unquiesce_queue(ub->ub_disk->queue);
1595 blk_mq_kick_requeue_list(ub->ub_disk->queue);
1598 static void ublk_stop_dev(struct ublk_device *ub)
1600 mutex_lock(&ub->mutex);
1601 if (ub->dev_info.state == UBLK_S_DEV_DEAD)
1603 if (ublk_can_use_recovery(ub)) {
1604 if (ub->dev_info.state == UBLK_S_DEV_LIVE)
1605 __ublk_quiesce_dev(ub);
1606 ublk_unquiesce_dev(ub);
1608 del_gendisk(ub->ub_disk);
1609 ub->dev_info.state = UBLK_S_DEV_DEAD;
1610 ub->dev_info.ublksrv_pid = -1;
1611 put_disk(ub->ub_disk);
1612 ub->ub_disk = NULL;
1614 mutex_unlock(&ub->mutex);
1615 ublk_cancel_dev(ub);
1616 cancel_delayed_work_sync(&ub->monitor_work);
1620 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
1622 mutex_lock(&ub->mutex);
1627 ub->nr_queues_ready++;
1630 ub->nr_privileged_daemon++;
1632 if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
1633 complete_all(&ub->completion);
1634 mutex_unlock(&ub->mutex);
1637 static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
1640 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1641 struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
1671 struct ublk_device *ub = cmd->file->private_data;
1683 if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
1686 ubq = ublk_get_queue(ub, ub_cmd->q_id);
1745 ublk_mark_io_ready(ub, ubq);
1748 req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
1771 ublk_commit_completion(ub, ub_cmd);
1777 ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
1791 static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
1799 req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
1858 struct ublk_device *ub = iocb->ki_filp->private_data;
1864 if (!ub)
1870 if (ub->dev_info.state == UBLK_S_DEV_DEAD)
1877 if (q_id >= ub->dev_info.nr_hw_queues)
1880 ubq = ublk_get_queue(ub, q_id);
1887 req = __ublk_check_and_get_req(ub, ubq, tag, buf_off);
1951 static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
1953 int size = ublk_queue_cmd_buf_size(ub, q_id);
1954 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1962 static int ublk_init_queue(struct ublk_device *ub, int q_id)
1964 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1970 ubq->flags = ub->dev_info.flags;
1972 ubq->q_depth = ub->dev_info.queue_depth;
1973 size = ublk_queue_cmd_buf_size(ub, q_id);
1980 ubq->dev = ub;
1984 static void ublk_deinit_queues(struct ublk_device *ub)
1986 int nr_queues = ub->dev_info.nr_hw_queues;
1989 if (!ub->__queues)
1993 ublk_deinit_queue(ub, i);
1994 kfree(ub->__queues);
1997 static int ublk_init_queues(struct ublk_device *ub)
1999 int nr_queues = ub->dev_info.nr_hw_queues;
2000 int depth = ub->dev_info.queue_depth;
2004 ub->queue_size = ubq_size;
2005 ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
2006 if (!ub->__queues)
2010 if (ublk_init_queue(ub, i))
2014 init_completion(&ub->completion);
2018 ublk_deinit_queues(ub);
2022 static int ublk_alloc_dev_number(struct ublk_device *ub, int idx)
2030 err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT);
2034 err = idr_alloc(&ublk_index_idr, ub, 0, 0, GFP_NOWAIT);
2039 ub->ub_number = err;
2044 static void ublk_free_dev_number(struct ublk_device *ub)
2047 idr_remove(&ublk_index_idr, ub->ub_number);
2054 struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev);
2056 blk_mq_free_tag_set(&ub->tag_set);
2057 ublk_deinit_queues(ub);
2058 ublk_free_dev_number(ub);
2059 mutex_destroy(&ub->mutex);
2060 kfree(ub);
2063 static int ublk_add_chdev(struct ublk_device *ub)
2065 struct device *dev = &ub->cdev_dev;
2066 int minor = ub->ub_number;
2079 cdev_init(&ub->cdev, &ublk_ch_fops);
2080 ret = cdev_device_add(&ub->cdev, dev);
2093 struct ublk_device *ub =
2096 ublk_stop_dev(ub);
2100 static void ublk_align_max_io_size(struct ublk_device *ub)
2102 unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes;
2104 ub->dev_info.max_io_buf_bytes =
2108 static int ublk_add_tag_set(struct ublk_device *ub)
2110 ub->tag_set.ops = &ublk_mq_ops;
2111 ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
2112 ub->tag_set.queue_depth = ub->dev_info.queue_depth;
2113 ub->tag_set.numa_node = NUMA_NO_NODE;
2114 ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
2115 ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2116 ub->tag_set.driver_data = ub;
2117 return blk_mq_alloc_tag_set(&ub->tag_set);
2120 static void ublk_remove(struct ublk_device *ub)
2122 ublk_stop_dev(ub);
2123 cancel_work_sync(&ub->stop_work);
2124 cancel_work_sync(&ub->quiesce_work);
2125 cdev_device_del(&ub->cdev, &ub->cdev_dev);
2126 put_device(&ub->cdev_dev);
2132 struct ublk_device *ub = NULL;
2138 ub = idr_find(&ublk_index_idr, idx);
2139 if (ub)
2140 ub = ublk_get_device(ub);
2143 return ub;
2146 static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
2156 if (wait_for_completion_interruptible(&ub->completion) != 0)
2159 schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
2161 mutex_lock(&ub->mutex);
2162 if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
2163 test_bit(UB_STATE_USED, &ub->state)) {
2168 disk = blk_mq_alloc_disk(&ub->tag_set, NULL);
2173 sprintf(disk->disk_name, "ublkb%d", ub->ub_number);
2175 disk->private_data = ub;
2177 ub->dev_info.ublksrv_pid = ublksrv_pid;
2178 ub->ub_disk = disk;
2180 ret = ublk_apply_params(ub);
2185 if (ub->nr_privileged_daemon != ub->nr_queues_ready)
2188 get_device(&ub->cdev_dev);
2189 ub->dev_info.state = UBLK_S_DEV_LIVE;
2191 if (ublk_dev_is_zoned(ub)) {
2192 ret = ublk_revalidate_disk_zones(ub);
2201 set_bit(UB_STATE_USED, &ub->state);
2205 ub->dev_info.state = UBLK_S_DEV_DEAD;
2206 ublk_put_device(ub);
2212 mutex_unlock(&ub->mutex);
2216 static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
2235 if (queue >= ub->dev_info.nr_hw_queues)
2242 if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
2273 struct ublk_device *ub;
2324 ub = kzalloc(sizeof(*ub), GFP_KERNEL);
2325 if (!ub)
2327 mutex_init(&ub->mutex);
2328 spin_lock_init(&ub->mm_lock);
2329 INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
2330 INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
2331 INIT_DELAYED_WORK(&ub->monitor_work, ublk_daemon_monitor_work);
2333 ret = ublk_alloc_dev_number(ub, header->dev_id);
2337 memcpy(&ub->dev_info, &info, sizeof(info));
2340 ub->dev_info.dev_id = ub->ub_number;
2348 ub->dev_info.flags &= UBLK_F_ALL;
2350 ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
2354 if (ublk_dev_is_user_copy(ub))
2355 ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
2358 if (ublk_dev_is_zoned(ub) &&
2359 (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) {
2365 ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
2367 ub->dev_info.nr_hw_queues = min_t(unsigned int,
2368 ub->dev_info.nr_hw_queues, nr_cpu_ids);
2369 ublk_align_max_io_size(ub);
2371 ret = ublk_init_queues(ub);
2375 ret = ublk_add_tag_set(ub);
2380 if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
2387 ret = ublk_add_chdev(ub);
2391 blk_mq_free_tag_set(&ub->tag_set);
2393 ublk_deinit_queues(ub);
2395 ublk_free_dev_number(ub);
2397 mutex_destroy(&ub->mutex);
2398 kfree(ub);
2417 struct ublk_device *ub = *p_ub;
2418 int idx = ub->ub_number;
2425 if (!test_bit(UB_STATE_DELETED, &ub->state)) {
2426 ublk_remove(ub);
2427 set_bit(UB_STATE_DELETED, &ub->state);
2432 ublk_put_device(ub);
2463 static int ublk_ctrl_stop_dev(struct ublk_device *ub)
2465 ublk_stop_dev(ub);
2466 cancel_work_sync(&ub->stop_work);
2467 cancel_work_sync(&ub->quiesce_work);
2472 static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
2481 if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
2488 static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
2490 ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt);
2491 ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt);
2493 if (ub->ub_disk) {
2494 ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk));
2495 ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk));
2497 ub->params.devt.disk_major = 0;
2498 ub->params.devt.disk_minor = 0;
2500 ub->params.types |= UBLK_PARAM_TYPE_DEVT;
2503 static int ublk_ctrl_get_params(struct ublk_device *ub,
2523 mutex_lock(&ub->mutex);
2524 ublk_ctrl_fill_params_devt(ub);
2525 if (copy_to_user(argp, &ub->params, ph.len))
2529 mutex_unlock(&ub->mutex);
2534 static int ublk_ctrl_set_params(struct ublk_device *ub,
2555 mutex_lock(&ub->mutex);
2556 if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
2558 } else if (copy_from_user(&ub->params, argp, ph.len)) {
2562 ub->params.types &= UBLK_PARAM_TYPE_ALL;
2563 ret = ublk_validate_params(ub);
2565 ub->params.types = 0;
2567 mutex_unlock(&ub->mutex);
2572 static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
2582 /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
2596 static int ublk_ctrl_start_recovery(struct ublk_device *ub,
2603 mutex_lock(&ub->mutex);
2604 if (!ublk_can_use_recovery(ub))
2619 if (test_bit(UB_STATE_OPEN, &ub->state) ||
2620 ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
2625 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
2626 ublk_queue_reinit(ub, ublk_get_queue(ub, i));
2628 ub->mm = NULL;
2629 ub->nr_queues_ready = 0;
2630 ub->nr_privileged_daemon = 0;
2631 init_completion(&ub->completion);
2634 mutex_unlock(&ub->mutex);
2638 static int ublk_ctrl_end_recovery(struct ublk_device *ub,
2646 __func__, ub->dev_info.nr_hw_queues, header->dev_id);
2648 if (wait_for_completion_interruptible(&ub->completion))
2652 __func__, ub->dev_info.nr_hw_queues, header->dev_id);
2654 mutex_lock(&ub->mutex);
2655 if (!ublk_can_use_recovery(ub))
2658 if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
2662 ub->dev_info.ublksrv_pid = ublksrv_pid;
2665 blk_mq_unquiesce_queue(ub->ub_disk->queue);
2668 blk_mq_kick_requeue_list(ub->ub_disk->queue);
2669 ub->dev_info.state = UBLK_S_DEV_LIVE;
2670 schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
2673 mutex_unlock(&ub->mutex);
2696 static int ublk_char_dev_permission(struct ublk_device *ub,
2712 if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode))
2722 static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
2726 bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
2783 ret = ublk_char_dev_permission(ub, dev_path, mask);
2789 __func__, ub->ub_number, cmd->cmd_op,
2790 ub->dev_info.owner_uid, ub->dev_info.owner_gid,
2801 struct ublk_device *ub = NULL;
2824 ub = ublk_get_device_from_id(header->dev_id);
2825 if (!ub)
2828 ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
2835 ret = ublk_ctrl_start_dev(ub, cmd);
2838 ret = ublk_ctrl_stop_dev(ub);
2842 ret = ublk_ctrl_get_dev_info(ub, cmd);
2848 ret = ublk_ctrl_del_dev(&ub);
2851 ret = ublk_ctrl_get_queue_affinity(ub, cmd);
2854 ret = ublk_ctrl_get_params(ub, cmd);
2857 ret = ublk_ctrl_set_params(ub, cmd);
2860 ret = ublk_ctrl_start_recovery(ub, cmd);
2863 ret = ublk_ctrl_end_recovery(ub, cmd);
2871 if (ub)
2872 ublk_put_device(ub);
2925 struct ublk_device *ub;
2928 idr_for_each_entry(&ublk_index_idr, ub, id)
2929 ublk_remove(ub);
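The closing lines (2925-2929) appear to come from the module-exit path, where every device still registered in ublk_index_idr is torn down via ublk_remove(). Reassembled from the fragments above into a self-contained form (example_remove_all_devices() is a made-up wrapper name, and the rest of the exit path is deliberately omitted), the loop looks roughly like this:

static void example_remove_all_devices(void)
{
	struct ublk_device *ub;
	int id;

	/* Walk every device still present in the index IDR ... */
	idr_for_each_entry(&ublk_index_idr, ub, id)
		ublk_remove(ub);	/* ... stop it and drop its character device */

	/*
	 * A real exit path would also destroy the IDR and release the
	 * control interface and chardev region; none of that is shown
	 * in this listing, so it is left out of the sketch as well.
	 */
}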