Lines matching defs:ubq (references to struct ublk_queue *ubq in the Linux ublk driver, drivers/block/ublk_drv.c)

77 	struct ublk_queue *ubq;
118 /* atomic RW with ubq->cancel_lock */
183 * Our ubq->daemon may be killed without any notification, so
198 static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
211 static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
213 return ubq->flags & UBLK_F_ZONED;
377 static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
380 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
381 struct ublk_io *io = &ubq->ios[req->tag];
449 static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
598 static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
600 return ubq->flags & UBLK_F_USER_COPY;
603 static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
609 return ublk_support_user_copy(ubq);
612 static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
615 if (ublk_need_req_ref(ubq)) {
622 static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
625 if (ublk_need_req_ref(ubq)) {
634 static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
637 if (ublk_need_req_ref(ubq)) {
646 static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
648 return ubq->flags & UBLK_F_NEED_GET_DATA;
674 static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
678 &(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
688 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
690 return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
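Lines 674-690 show the descriptor buffer layout: each tag's struct ublksrv_io_desc sits at a fixed offset inside ubq->io_cmd_buf, and the per-queue buffer size is the queue depth times the descriptor size, rounded up. The small self-contained model below reproduces that arithmetic; the descriptor field layout and the page-size rounding target are assumptions (the authoritative definition is the ublk UAPI header), so treat the printed numbers as illustrative.

    #include <stdint.h>
    #include <stdio.h>

    /* Model of struct ublksrv_io_desc; the real layout lives in the ublk UAPI header. */
    struct model_io_desc {
        uint32_t op_flags;
        uint32_t nr_sectors;      /* cf. iod->nr_sectors at line 1200 */
        uint64_t start_sector;
        uint64_t addr;            /* cf. iod->addr at lines 1176/1179 */
    };

    #define MODEL_PAGE_SIZE 4096UL    /* assumption: 4K pages */

    static unsigned long round_up_ul(unsigned long v, unsigned long align)
    {
        return (v + align - 1) & ~(align - 1);
    }

    /* Byte offset of a tag's descriptor inside io_cmd_buf (cf. ublk_get_iod, line 678). */
    static unsigned long iod_offset(unsigned int tag)
    {
        return tag * sizeof(struct model_io_desc);
    }

    /* Per-queue descriptor buffer size (cf. ublk_queue_cmd_buf_size, line 690). */
    static unsigned long cmd_buf_size(unsigned int q_depth)
    {
        return round_up_ul(q_depth * sizeof(struct model_io_desc), MODEL_PAGE_SIZE);
    }

    int main(void)
    {
        printf("descriptor size: %zu bytes\n", sizeof(struct model_io_desc));
        printf("tag 3 offset:    %lu bytes\n", iod_offset(3));
        printf("buf size, depth 128: %lu bytes\n", cmd_buf_size(128));
        return 0;
    }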
695 struct ublk_queue *ubq)
697 return (ubq->flags & UBLK_F_USER_RECOVERY) &&
698 (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
702 struct ublk_queue *ubq)
704 return ubq->flags & UBLK_F_USER_RECOVERY;
884 static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
889 if (ublk_support_user_copy(ubq))
910 static int ublk_unmap_io(const struct ublk_queue *ubq,
916 if (ublk_support_user_copy(ubq))
961 static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
963 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
964 struct ublk_io *io = &ubq->ios[req->tag];
968 if (!ublk_queue_is_zoned(ubq) &&
989 if (ublk_queue_is_zoned(ubq))
990 return ublk_setup_iod_zoned(ubq, req);
1009 static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
1011 return ubq->ubq_daemon->flags & PF_EXITING;
1017 struct ublk_queue *ubq = req->mq_hctx->driver_data;
1018 struct ublk_io *io = &ubq->ios[req->tag];
1048 unmapped_bytes = ublk_unmap_io(ubq, req, io);
1085 static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
1092 if (ublk_queue_can_use_recovery_reissue(ubq))
1095 ublk_put_req_ref(ubq, req);
1117 static inline void __ublk_abort_rq(struct ublk_queue *ubq,
1121 if (ublk_queue_can_use_recovery(ubq))
1126 mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
1132 struct ublk_queue *ubq = req->mq_hctx->driver_data;
1134 struct ublk_io *io = &ubq->ios[tag];
1138 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
1139 ublk_get_iod(ubq, req->tag)->addr);
1150 if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
1151 __ublk_abort_rq(ubq, req);
1155 if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
1164 __func__, io->cmd->cmd_op, ubq->q_id,
1176 ublk_get_iod(ubq, req->tag)->addr = io->addr;
1178 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
1179 ublk_get_iod(ubq, req->tag)->addr);
1182 mapped_bytes = ublk_map_io(ubq, req, io);
1200 ublk_get_iod(ubq, req->tag)->nr_sectors =
1204 ublk_init_req_ref(ubq, req);
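The block of matches from line 1132 to 1204 is the per-request task work: check that the current task is still the queue's daemon, optionally bounce a NEED_GET_DATA result back to the server when the request needs a data buffer first, then map the I/O, publish the final nr_sectors through the iod, and take the initial request reference. The sketch below captures only that decision order; it is not the driver code, and everything outside the matched names (the model_* types, have_buf) is a placeholder.

    #include <stdbool.h>
    #include <stdint.h>

    enum model_res { RES_ABORT, RES_NEED_GET_DATA, RES_OK };

    struct model_io  { uint64_t addr; bool have_buf; };  /* have_buf: GET_DATA round trip done */
    struct model_req { bool needs_map; uint32_t bytes; };

    /*
     * Decision order modelled on lines 1150-1204: a dying daemon aborts the
     * request; a queue with UBLK_F_NEED_GET_DATA first asks the server for a
     * buffer; otherwise the data is mapped and the request handed over.
     */
    static enum model_res task_work_step(bool daemon_dying, bool need_get_data,
                                         struct model_io *io, struct model_req *req,
                                         uint32_t *nr_sectors_out)
    {
        if (daemon_dying)
            return RES_ABORT;                /* cf. __ublk_abort_rq() at line 1151 */

        if (need_get_data && req->needs_map && !io->have_buf)
            return RES_NEED_GET_DATA;        /* server fills io->addr, then we re-enter */

        /* mapping done: publish how many sectors the server should handle */
        *nr_sectors_out = req->bytes >> 9;   /* cf. iod->nr_sectors at line 1200 */
        return RES_OK;                       /* followed by ublk_init_req_ref() */
    }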
1208 static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
1211 struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
1219 static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
1221 struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
1225 __ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
1231 struct ublk_queue *ubq = pdu->ubq;
1233 ublk_forward_io_cmds(ubq, issue_flags);
1236 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
1241 if (!llist_add(&data->node, &ubq->io_cmds))
1244 io = &ubq->ios[rq->tag];
1258 ublk_abort_io_cmds(ubq);
1263 pdu->ubq = ubq;
1270 struct ublk_queue *ubq = rq->mq_hctx->driver_data;
1272 if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
1273 if (!ubq->timeout) {
1274 send_sig(SIGKILL, ubq->ubq_daemon, 0);
1275 ubq->timeout = true;
1287 struct ublk_queue *ubq = hctx->driver_data;
1292 res = ublk_setup_iod(ubq, rq);
1305 if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
1310 if (unlikely(ubq_daemon_is_dying(ubq))) {
1311 __ublk_abort_rq(ubq, rq);
1315 ublk_queue_cmd(ubq, rq);
1324 struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);
1326 hctx->driver_data = ubq;
1397 struct ublk_queue *ubq = ublk_get_queue(ub, qid);
1398 struct ublk_io *io = &ubq->ios[tag];
1414 ublk_put_req_ref(ubq, req);
1422 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
1429 for (i = 0; i < ubq->q_depth; i++) {
1430 struct ublk_io *io = &ubq->ios[i];
1439 rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
1441 __ublk_fail_req(ubq, io, rq);
1454 struct ublk_queue *ubq = ublk_get_queue(ub, i);
1456 if (ubq_daemon_is_dying(ubq)) {
1457 if (ublk_queue_can_use_recovery(ubq))
1463 ublk_abort_queue(ub, ubq);
1479 static inline bool ublk_queue_ready(struct ublk_queue *ubq)
1481 return ubq->nr_io_ready == ubq->q_depth;
1484 static void ublk_cancel_queue(struct ublk_queue *ubq)
1488 for (i = 0; i < ubq->q_depth; i++) {
1489 struct ublk_io *io = &ubq->ios[i];
1494 spin_lock(&ubq->cancel_lock);
1498 spin_unlock(&ubq->cancel_lock);
1620 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
1623 ubq->nr_io_ready++;
1624 if (ublk_queue_ready(ubq)) {
1625 ubq->ubq_daemon = current;
1626 get_task_struct(ubq->ubq_daemon);
1640 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1643 ublk_queue_cmd(ubq, req);
1672 struct ublk_queue *ubq;
1686 ubq = ublk_get_queue(ub, ub_cmd->q_id);
1687 if (!ubq || ub_cmd->q_id != ubq->q_id)
1690 if (ubq->ubq_daemon && ubq->ubq_daemon != current)
1693 if (tag >= ubq->q_depth)
1696 io = &ubq->ios[tag];
1720 if (ublk_queue_ready(ubq)) {
1731 if (!ublk_support_user_copy(ubq)) {
1736 if (!ub_cmd->addr && !ublk_need_get_data(ubq))
1745 ublk_mark_io_ready(ub, ubq);
1753 if (!ublk_support_user_copy(ubq)) {
1758 if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
1792 struct ublk_queue *ubq, int tag, size_t offset)
1796 if (!ublk_need_req_ref(ubq))
1799 req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
1803 if (!ublk_get_req_ref(ubq, req))
1817 ublk_put_req_ref(ubq, req);
1859 struct ublk_queue *ubq;
1880 ubq = ublk_get_queue(ub, q_id);
1881 if (!ubq)
1884 if (tag >= ubq->q_depth)
1887 req = __ublk_check_and_get_req(ub, ubq, tag, buf_off);
1900 ublk_put_req_ref(ubq, req);
1906 struct ublk_queue *ubq;
1916 ubq = req->mq_hctx->driver_data;
1917 ublk_put_req_ref(ubq, req);
1924 struct ublk_queue *ubq;
1934 ubq = req->mq_hctx->driver_data;
1935 ublk_put_req_ref(ubq, req);
1954 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1956 if (ubq->ubq_daemon)
1957 put_task_struct(ubq->ubq_daemon);
1958 if (ubq->io_cmd_buf)
1959 free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
1964 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1969 spin_lock_init(&ubq->cancel_lock);
1970 ubq->flags = ub->dev_info.flags;
1971 ubq->q_id = q_id;
1972 ubq->q_depth = ub->dev_info.queue_depth;
1979 ubq->io_cmd_buf = ptr;
1980 ubq->dev = ub;
2184 /* don't probe partitions if any one ubq daemon is un-trusted */
2572 static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
2576 WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
2579 ubq->nr_io_ready = 0;
2581 put_task_struct(ubq->ubq_daemon);
2583 ubq->ubq_daemon = NULL;
2584 ubq->timeout = false;
2586 for (i = 0; i < ubq->q_depth; i++) {
2587 struct ublk_io *io = &ubq->ios[i];
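The final group (lines 2572-2587) is the user-recovery path: ublk_queue_reinit() runs only once the old daemon is dying, drops the pinned task reference, clears ubq_daemon and the timeout flag, resets nr_io_ready, and walks ubq->ios[] so a new server process can fetch every tag again. A minimal sketch of that reset, with placeholder types standing in for task_struct and struct ublk_io:

    #include <stdbool.h>
    #include <stddef.h>

    struct model_io  { bool fetched; };      /* stand-in for per-tag ublk_io state */
    struct model_ubq {
        int   q_depth;
        int   nr_io_ready;                   /* cf. ubq->nr_io_ready */
        bool  timeout;                       /* cf. ubq->timeout */
        void *daemon;                        /* stand-in for the pinned ubq_daemon */
        struct model_io *ios;                /* q_depth entries, cf. ubq->ios[] */
    };

    /* Reset performed before a new daemon re-attaches (cf. ublk_queue_reinit). */
    static void queue_reinit(struct model_ubq *q)
    {
        q->nr_io_ready = 0;                  /* every tag must be fetched again */
        q->daemon = NULL;                    /* the kernel also put_task_struct()s it */
        q->timeout = false;

        for (int i = 0; i < q->q_depth; i++)
            q->ios[i].fetched = false;       /* rewind per-io state for re-fetch */
    }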