Lines matching refs:req. These are all references to req in the ublk block driver (drivers/block/ublk_drv.c); the number at the start of each match is its line number in that source file.

197 static inline unsigned int ublk_req_build_flags(struct request *req);
323 struct request *req;
329 req = blk_mq_alloc_request(disk->queue, REQ_OP_DRV_IN, 0);
330 if (IS_ERR(req)) {
331 ret = PTR_ERR(req);
335 pdu = blk_mq_rq_to_pdu(req);
340 ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
343 blk_mq_free_request(req);
347 status = blk_execute_rq(req, 0);
349 blk_mq_free_request(req);
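
Lines 323-349 are the stock pattern for a driver issuing a passthrough request against its own queue: allocate a REQ_OP_DRV_IN request, map a kernel buffer into it, execute it synchronously, and free it. A minimal sketch of that shape (the function name and GFP flag here are illustrative, not taken from the driver):

static int drv_in_sketch(struct gendisk *disk, void *buffer,
                         unsigned int buffer_length)
{
        struct request *req;
        int ret;

        req = blk_mq_alloc_request(disk->queue, REQ_OP_DRV_IN, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
                              GFP_KERNEL);
        if (ret) {
                blk_mq_free_request(req);
                return ret;
        }

        ret = blk_status_to_errno(blk_execute_rq(req, 0));
        blk_mq_free_request(req);
        return ret;
}
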
378 struct request *req)
380 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
381 struct ublk_io *io = &ubq->ios[req->tag];
382 struct ublk_rq_data *pdu = blk_mq_rq_to_pdu(req);
385 switch (req_op(req)) {
408 iod->op_flags = ublk_op | ublk_req_build_flags(req);
422 iod->op_flags = ublk_op | ublk_req_build_flags(req);
423 iod->nr_sectors = blk_rq_sectors(req);
424 iod->start_sector = blk_rq_pos(req);
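
The block at 378-424 (the zoned setup path) and its non-zoned twin at 961-997 both fill a struct ublksrv_io_desc, the fixed-size descriptor the ublk daemon reads from the mmap'ed descriptor area. For reference, its UAPI layout quoted from memory (check include/uapi/linux/ublk_cmd.h for the authoritative definition):

struct ublksrv_io_desc {
        /* op: bit 0-7, flags: bit 8-31 */
        __u32   op_flags;
        union {
                __u32   nr_sectors;
                __u32   nr_zones;       /* for report-zones requests */
        };
        /* start sector for this io */
        __u64   start_sector;
        /* buffer address in the daemon's address space */
        __u64   addr;
};
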
450 struct request *req)
457 static inline void __ublk_complete_rq(struct request *req);
613 struct request *req)
616 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
623 struct request *req)
626 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
635 struct request *req)
638 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
642 __ublk_complete_rq(req);
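
The blk_mq_rq_to_pdu() calls at 616, 626 and 638 fetch the per-request driver area that blk-mq allocates directly behind struct request whenever the tag set's cmd_size is non-zero; blk_mq_rq_from_pdu() (seen at 1072) is the inverse. The pairing, assuming the tag set is sized for struct ublk_rq_data as this driver does:

        /* at tag-set init time */
        ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);

        /* later, per request, the two views convert back and forth */
        struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
        struct request *same_req = blk_mq_rq_from_pdu(data);
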
818 static bool ublk_advance_io_iter(const struct request *req,
821 struct bio *bio = req->bio;
839 static size_t ublk_copy_user_pages(const struct request *req,
845 if (!ublk_advance_io_iter(req, &iter, offset))
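
ublk_advance_io_iter() (818) positions an iterator at a byte offset inside the request's bio chain, and ublk_copy_user_pages() (839) then copies between those bio pages and a user iov_iter. A simplified stand-in that copies a whole request using the generic rq_for_each_segment() helper (no offset handling; the function name is illustrative):

static size_t copy_req_sketch(struct request *req, struct iov_iter *uiter,
                              int dir)
{
        struct req_iterator iter;
        struct bio_vec bv;
        size_t done = 0;

        rq_for_each_segment(bv, req, iter) {
                size_t n;

                if (dir == ITER_DEST)
                        n = copy_page_to_iter(bv.bv_page, bv.bv_offset,
                                              bv.bv_len, uiter);
                else
                        n = copy_page_from_iter(bv.bv_page, bv.bv_offset,
                                                bv.bv_len, uiter);
                done += n;
                if (n < bv.bv_len)      /* user iterator exhausted */
                        break;
        }
        return done;
}
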
873 static inline bool ublk_need_map_req(const struct request *req)
875 return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
878 static inline bool ublk_need_unmap_req(const struct request *req)
880 return ublk_rq_has_data(req) &&
881 (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN);
884 static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
887 const unsigned int rq_bytes = blk_rq_bytes(req);
897 if (ublk_need_map_req(req)) {
905 return ublk_copy_user_pages(req, 0, &iter, dir);
911 const struct request *req,
914 const unsigned int rq_bytes = blk_rq_bytes(req);
919 if (ublk_need_unmap_req(req)) {
928 return ublk_copy_user_pages(req, 0, &iter, dir);
933 static inline unsigned int ublk_req_build_flags(struct request *req)
937 if (req->cmd_flags & REQ_FAILFAST_DEV)
940 if (req->cmd_flags & REQ_FAILFAST_TRANSPORT)
943 if (req->cmd_flags & REQ_FAILFAST_DRIVER)
946 if (req->cmd_flags & REQ_META)
949 if (req->cmd_flags & REQ_FUA)
952 if (req->cmd_flags & REQ_NOUNMAP)
955 if (req->cmd_flags & REQ_SWAP)
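
ublk_req_build_flags() (933-955) translates kernel-internal REQ_* bits into the stable UBLK_IO_F_* values from the UAPI header, so the daemon never depends on in-kernel bit positions. Its likely full shape, reconstructed from the matched lines with flag names from include/uapi/linux/ublk_cmd.h:

static inline unsigned int ublk_req_build_flags(struct request *req)
{
        unsigned int flags = 0;

        if (req->cmd_flags & REQ_FAILFAST_DEV)
                flags |= UBLK_IO_F_FAILFAST_DEV;
        if (req->cmd_flags & REQ_FAILFAST_TRANSPORT)
                flags |= UBLK_IO_F_FAILFAST_TRANSPORT;
        if (req->cmd_flags & REQ_FAILFAST_DRIVER)
                flags |= UBLK_IO_F_FAILFAST_DRIVER;
        if (req->cmd_flags & REQ_META)
                flags |= UBLK_IO_F_META;
        if (req->cmd_flags & REQ_FUA)
                flags |= UBLK_IO_F_FUA;
        if (req->cmd_flags & REQ_NOUNMAP)
                flags |= UBLK_IO_F_NOUNMAP;
        if (req->cmd_flags & REQ_SWAP)
                flags |= UBLK_IO_F_SWAP;

        return flags;
}
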
961 static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
963 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
964 struct ublk_io *io = &ubq->ios[req->tag];
965 enum req_op op = req_op(req);
972 switch (req_op(req)) {
990 return ublk_setup_iod_zoned(ubq, req);
995 iod->op_flags = ublk_op | ublk_req_build_flags(req);
996 iod->nr_sectors = blk_rq_sectors(req);
997 iod->start_sector = blk_rq_pos(req);
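
Putting the descriptor pieces together, ublk_setup_iod() (961-997) maps the request opcode to a UBLK_IO_OP_* value and fills the iod; opcodes it does not know fall through to the zoned handler (990) or fail. A sketch consistent with the matched lines (the iod->addr assignment is an assumption, inferred from the io variable declared at 964):

static blk_status_t setup_iod_sketch(struct ublk_queue *ubq,
                                     struct request *req)
{
        struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
        struct ublk_io *io = &ubq->ios[req->tag];
        u32 ublk_op;

        switch (req_op(req)) {
        case REQ_OP_READ:
                ublk_op = UBLK_IO_OP_READ;
                break;
        case REQ_OP_WRITE:
                ublk_op = UBLK_IO_OP_WRITE;
                break;
        case REQ_OP_FLUSH:
                ublk_op = UBLK_IO_OP_FLUSH;
                break;
        case REQ_OP_DISCARD:
                ublk_op = UBLK_IO_OP_DISCARD;
                break;
        case REQ_OP_WRITE_ZEROES:
                ublk_op = UBLK_IO_OP_WRITE_ZEROES;
                break;
        default:
                if (ublk_queue_is_zoned(ubq))
                        return ublk_setup_iod_zoned(ubq, req);
                return BLK_STS_IOERR;
        }

        iod->op_flags = ublk_op | ublk_req_build_flags(req);
        iod->nr_sectors = blk_rq_sectors(req);
        iod->start_sector = blk_rq_pos(req);
        iod->addr = io->addr;           /* assumed: daemon buffer address */

        return BLK_STS_OK;
}
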
1015 static inline void __ublk_complete_rq(struct request *req)
1017 struct ublk_queue *ubq = req->mq_hctx->driver_data;
1018 struct ublk_io *io = &ubq->ios[req->tag];
1029 if (!io->res && req_op(req) == REQ_OP_READ)
1043 if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE &&
1044 req_op(req) != REQ_OP_DRV_IN)
1048 unmapped_bytes = ublk_unmap_io(ubq, req, io);
1058 if (blk_update_request(req, BLK_STS_OK, io->res))
1059 blk_mq_requeue_request(req, true);
1061 __blk_mq_end_request(req, BLK_STS_OK);
1065 blk_mq_end_request(req, res);
1072 struct request *req = blk_mq_rq_from_pdu(data);
1074 __ublk_complete_rq(req);
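
__ublk_complete_rq() (1015-1065) turns the daemon's result (io->res) into a block-layer completion: a zero-byte READ is treated as failed (1029), only data-carrying opcodes are unmapped back into the request (1043-1048), and a short-but-positive result becomes a partial completion that requeues the remainder (1058-1059). Reconstructed flow, hedged where the matches elide detail:

static void complete_rq_sketch(struct request *req)
{
        struct ublk_queue *ubq = req->mq_hctx->driver_data;
        struct ublk_io *io = &ubq->ios[req->tag];
        unsigned int unmapped_bytes;

        /* a READ that returned no data is an error */
        if (!io->res && req_op(req) == REQ_OP_READ)
                io->res = -EIO;

        if (io->res < 0) {
                blk_mq_end_request(req, errno_to_blk_status(io->res));
                return;
        }

        /* only READ/WRITE/DRV_IN carry payload to copy back */
        if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE &&
            req_op(req) != REQ_OP_DRV_IN) {
                blk_mq_end_request(req, BLK_STS_OK);
                return;
        }

        unmapped_bytes = ublk_unmap_io(ubq, req, io);
        if (unlikely(unmapped_bytes < io->res))
                io->res = unmapped_bytes;

        if (blk_update_request(req, BLK_STS_OK, io->res))
                blk_mq_requeue_request(req, true);      /* partial */
        else
                __blk_mq_end_request(req, BLK_STS_OK);
}
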
1086 struct request *req)
1093 blk_mq_requeue_request(req, false);
1095 ublk_put_req_ref(ubq, req);
1129 static inline void __ublk_rq_task_work(struct request *req,
1132 struct ublk_queue *ubq = req->mq_hctx->driver_data;
1133 int tag = req->tag;
1138 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
1139 ublk_get_iod(ubq, req->tag)->addr);
1151 __ublk_abort_rq(ubq, req);
1155 if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
1165 req->tag, io->flags);
1176 ublk_get_iod(ubq, req->tag)->addr = io->addr;
1178 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
1179 ublk_get_iod(ubq, req->tag)->addr);
1182 mapped_bytes = ublk_map_io(ubq, req, io);
1185 if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
1194 blk_mq_requeue_request(req, false);
1195 blk_mq_delay_kick_requeue_list(req->q,
1200 ublk_get_iod(ubq, req->tag)->nr_sectors =
1204 ublk_init_req_ref(ubq, req);
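
Inside __ublk_rq_task_work() (1129-1204), which runs in the daemon's task context, the notable branch is the partial-map check at 1185: only a READ that mapped at least some bytes may proceed short (its nr_sectors is shrunk at 1200); everything else is requeued with a delayed kick (1194-1195). Approximate shape of that branch, with the requeue delay constant assumed from the driver:

        mapped_bytes = ublk_map_io(ubq, req, io);
        if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
                if (unlikely(!mapped_bytes ||
                             req_op(req) != REQ_OP_READ)) {
                        blk_mq_requeue_request(req, false);
                        blk_mq_delay_kick_requeue_list(req->q,
                                        UBLK_REQUEUE_DELAY_MS);
                        return;
                }
                /* short READ: tell the daemon the reduced size */
                ublk_get_iod(ubq, req->tag)->nr_sectors =
                        mapped_bytes >> 9;
        }
        ublk_init_req_ref(ubq, req);
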
1399 struct request *req;
1406 req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
1407 if (WARN_ON_ONCE(unlikely(!req)))
1410 if (req_op(req) == REQ_OP_ZONE_APPEND)
1411 req->__sector = ub_cmd->zone_append_lba;
1413 if (likely(!blk_should_fake_timeout(req->q)))
1414 ublk_put_req_ref(ubq, req);
1641 struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
1643 ublk_queue_cmd(ubq, req);
1677 struct request *req;
1748 req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
1759 req_op(req) == REQ_OP_READ))
1761 } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
1794 struct request *req;
1799 req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
1800 if (!req)
1803 if (!ublk_get_req_ref(ubq, req))
1806 if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
1809 if (!ublk_rq_has_data(req))
1812 if (offset > blk_rq_bytes(req))
1815 return req;
1817 ublk_put_req_ref(ubq, req);
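
__ublk_check_and_get_req() (1794-1817) is nearly complete in the matches above; assembled, it reads: look up the request by tag, take a reference, then check that it has started, still carries the expected tag, has data, and that the requested offset is in range, dropping the reference on any failure:

static struct request *check_and_get_sketch(struct ublk_device *ub,
                struct ublk_queue *ubq, int tag, size_t offset)
{
        struct request *req;

        req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
        if (!req)
                return NULL;

        if (!ublk_get_req_ref(ubq, req))
                return NULL;

        if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
                goto fail_put;
        if (!ublk_rq_has_data(req))
                goto fail_put;
        if (offset > blk_rq_bytes(req))
                goto fail_put;

        return req;
fail_put:
        ublk_put_req_ref(ubq, req);
        return NULL;
}
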
1838 static inline bool ublk_check_ubuf_dir(const struct request *req,
1842 if ((req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN) &&
1847 if ((req_op(req) == REQ_OP_WRITE ||
1848 req_op(req) == REQ_OP_ZONE_APPEND) &&
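
ublk_check_ubuf_dir() (1838-1848) ties the copy direction to the opcode: a device READ (or DRV_IN) needs the daemon to write payload into the request, so the user buffer must be an ITER_SOURCE; a device WRITE or ZONE_APPEND exposes payload for the daemon to read, so it must be an ITER_DEST. Reconstructed from the matches:

static inline bool ublk_check_ubuf_dir(const struct request *req,
                int ubuf_dir)
{
        /* copy ubuf to request pages */
        if ((req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN) &&
            ubuf_dir == ITER_SOURCE)
                return true;

        /* copy request pages to ubuf */
        if ((req_op(req) == REQ_OP_WRITE ||
             req_op(req) == REQ_OP_ZONE_APPEND) &&
            ubuf_dir == ITER_DEST)
                return true;

        return false;
}
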
1860 struct request *req;
1887 req = __ublk_check_and_get_req(ub, ubq, tag, buf_off);
1888 if (!req)
1891 if (!req->mq_hctx || !req->mq_hctx->driver_data)
1894 if (!ublk_check_ubuf_dir(req, dir))
1898 return req;
1900 ublk_put_req_ref(ubq, req);
1907 struct request *req;
1911 req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST);
1912 if (IS_ERR(req))
1913 return PTR_ERR(req);
1915 ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
1916 ubq = req->mq_hctx->driver_data;
1917 ublk_put_req_ref(ubq, req);
1925 struct request *req;
1929 req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE);
1930 if (IS_ERR(req))
1931 return PTR_ERR(req);
1933 ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE);
1934 ubq = req->mq_hctx->driver_data;
1935 ublk_put_req_ref(ubq, req);
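
The last two clusters (1907-1917 and 1925-1935) are the char device's ->read_iter/->write_iter pair built on the helpers above; both reduce to resolve-and-reference the request, copy, drop the reference. Sketch of the read side (the write side is symmetric with ITER_SOURCE):

static ssize_t ch_read_iter_sketch(struct kiocb *iocb, struct iov_iter *to)
{
        struct ublk_queue *ubq;
        struct request *req;
        size_t buf_off;
        size_t ret;

        req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST);
        if (IS_ERR(req))
                return PTR_ERR(req);

        ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
        ubq = req->mq_hctx->driver_data;
        ublk_put_req_ref(ubq, req);

        return ret;
}
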