Lines matching references to req in drivers/block/virtio_blk.c
161 static int virtblk_setup_discard_write_zeroes_erase(struct request *req, bool unmap)
163 unsigned short segments = blk_rq_nr_discard_segments(req);
182 if (queue_max_discard_segments(req->q) == 1) {
184 range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
185 range[0].sector = cpu_to_le64(blk_rq_pos(req));
188 __rq_for_each_bio(bio, req) {
201 bvec_set_virt(&req->special_vec, range, sizeof(*range) * segments);
202 req->rq_flags |= RQF_SPECIAL_PAYLOAD;
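Lines 161-202 build the payload for discard, write-zeroes, and secure-erase requests: one little-endian range descriptor per segment (or a single descriptor spanning the whole request when the queue allows only one discard segment), attached to the request as a special payload. A standalone sketch of that construction, with hypothetical toy_* names and userspace endian helpers standing in for the kernel's cpu_to_le*():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Models struct virtio_blk_discard_write_zeroes: all fields are
 * little-endian on the wire, hence the htole*() conversions below. */
struct toy_range {
    uint64_t sector;       /* le64: first sector */
    uint32_t num_sectors;  /* le32: run length in sectors */
    uint32_t flags;        /* le32: bit 0 = unmap, write-zeroes only */
};

struct toy_extent { uint64_t sector; uint32_t nr; };

static struct toy_range *toy_build_ranges(const struct toy_extent *ext,
                                          unsigned short n, int unmap)
{
    struct toy_range *r = calloc(n, sizeof(*r));

    if (!r)
        return NULL;
    for (unsigned short i = 0; i < n; i++) {
        r[i].sector = htole64(ext[i].sector);
        r[i].num_sectors = htole32(ext[i].nr);
        r[i].flags = htole32(unmap ? 1 : 0);
    }
    return r;
}

int main(void)
{
    struct toy_extent ext[] = { { 0, 8 }, { 1024, 16 } };
    struct toy_range *r = toy_build_ranges(ext, 2, 1);

    if (r)
        printf("range 1: sector=%llu nr=%u\n",
               (unsigned long long)le64toh(r[1].sector),
               le32toh(r[1].num_sectors));
    free(r);
    return 0;
}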
207 static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr)
209 if (blk_rq_nr_phys_segments(req))
214 static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
219 if (!blk_rq_nr_phys_segments(req))
224 blk_rq_nr_phys_segments(req),
230 return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
233 static void virtblk_cleanup_cmd(struct request *req)
235 if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
236 kfree(bvec_virt(&req->special_vec));
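Lines 207-236 are the teardown half: virtblk_unmap_data() only touches the scatterlist when the request actually had mapped segments, and virtblk_cleanup_cmd() frees the range array built above, which RQF_SPECIAL_PAYLOAD marks as driver-owned. A minimal model of that flag-guarded ownership, again with hypothetical toy_* names:

#include <stdbool.h>
#include <stdlib.h>

struct toy_req {
    void *special;     /* models bvec_virt(&req->special_vec) */
    bool has_special;  /* models the RQF_SPECIAL_PAYLOAD flag */
};

/* Mirrors virtblk_cleanup_cmd(): only requests that attached a
 * driver-allocated payload have anything to free. */
static void toy_cleanup_cmd(struct toy_req *req)
{
    if (req->has_special) {
        free(req->special);
        req->special = NULL;
        req->has_special = false;
    }
}

int main(void)
{
    struct toy_req req = { malloc(32), true };

    toy_cleanup_cmd(&req);  /* frees the payload */
    toy_cleanup_cmd(&req);  /* second call is a safe no-op */
    return 0;
}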
240 struct request *req,
248 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && op_is_zone_mgmt(req_op(req)))
252 vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
254 switch (req_op(req)) {
257 sector = blk_rq_pos(req);
261 sector = blk_rq_pos(req);
271 unmap = !(req->cmd_flags & REQ_NOUNMAP);
278 sector = blk_rq_pos(req);
282 sector = blk_rq_pos(req);
286 sector = blk_rq_pos(req);
290 sector = blk_rq_pos(req);
295 sector = blk_rq_pos(req);
318 if (virtblk_setup_discard_write_zeroes_erase(req, unmap))
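The switch in virtblk_setup_cmd() (lines 254-318) translates each block layer op into a virtio command type and records the starting sector where one applies, then builds the range payload for the discard-family ops. The command values below are the ones fixed by the virtio block spec; internal REQ_OP_DRV_IN requests and the zoned ops are omitted from this compilable sketch, and the toy_op enum is hypothetical:

#include <stdio.h>

/* Command types fixed by the virtio block spec. */
#define VIRTIO_BLK_T_IN            0
#define VIRTIO_BLK_T_OUT           1
#define VIRTIO_BLK_T_FLUSH         4
#define VIRTIO_BLK_T_GET_ID        8
#define VIRTIO_BLK_T_DISCARD      11
#define VIRTIO_BLK_T_WRITE_ZEROES 13
#define VIRTIO_BLK_T_SECURE_ERASE 14

enum toy_op {
    TOY_READ, TOY_WRITE, TOY_FLUSH,
    TOY_DISCARD, TOY_WRITE_ZEROES, TOY_SECURE_ERASE,
};

static int toy_cmd_type(enum toy_op op)
{
    switch (op) {
    case TOY_READ:         return VIRTIO_BLK_T_IN;
    case TOY_WRITE:        return VIRTIO_BLK_T_OUT;
    case TOY_FLUSH:        return VIRTIO_BLK_T_FLUSH;
    case TOY_DISCARD:      return VIRTIO_BLK_T_DISCARD;
    case TOY_WRITE_ZEROES: return VIRTIO_BLK_T_WRITE_ZEROES;
    case TOY_SECURE_ERASE: return VIRTIO_BLK_T_SECURE_ERASE;
    }
    return -1;
}

int main(void)
{
    printf("write-zeroes -> type %d\n", toy_cmd_type(TOY_WRITE_ZEROES));
    return 0;
}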
335 static inline void virtblk_request_done(struct request *req)
337 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
339 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
341 virtblk_unmap_data(req, vbr);
342 virtblk_cleanup_cmd(req);
344 if (req_op(req) == REQ_OP_ZONE_APPEND)
345 req->__sector = virtio64_to_cpu(vblk->vdev,
348 blk_mq_end_request(req, status);
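virtblk_request_done() (lines 335-348) unmaps and cleans up, folds the device's one-byte status into a block layer status, and, for zone append, copies the device-chosen write position back into the request before ending it. A sketch of the status fold, using the spec-defined VIRTIO_BLK_S_* values with a hypothetical enum standing in for blk_status_t:

#include <stdio.h>

/* Status bytes fixed by the virtio block spec. */
#define VIRTIO_BLK_S_OK     0
#define VIRTIO_BLK_S_IOERR  1
#define VIRTIO_BLK_S_UNSUPP 2

enum toy_blk_status { TOY_STS_OK, TOY_STS_NOTSUPP, TOY_STS_IOERR };

static enum toy_blk_status toy_result(unsigned char status)
{
    switch (status) {
    case VIRTIO_BLK_S_OK:     return TOY_STS_OK;
    case VIRTIO_BLK_S_UNSUPP: return TOY_STS_NOTSUPP;
    default:                  return TOY_STS_IOERR; /* anything else is an I/O error */
    }
}

int main(void)
{
    printf("device says UNSUPP -> %d\n", toy_result(VIRTIO_BLK_S_UNSUPP));
    return 0;
}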
364 struct request *req = blk_mq_rq_from_pdu(vbr);
366 if (likely(!blk_should_fake_timeout(req->q)))
367 blk_mq_complete_request(req);
394 static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
396 virtblk_cleanup_cmd(req);
409 struct request *req,
415 status = virtblk_setup_cmd(vblk->vdev, req, vbr);
419 num = virtblk_map_data(hctx, req, vbr);
421 return virtblk_fail_to_queue(req, -ENOMEM);
424 blk_mq_start_request(req);
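virtblk_prep_rq() (lines 409-424) is a three-step pipeline: build the command header, map the data into the scatterlist, then hand the request to blk-mq via blk_mq_start_request(); a mapping failure is funneled through virtblk_fail_to_queue(), which undoes the command setup. A sketch of that unwind ordering with stubbed toy_* steps (the stub bodies are placeholders, not driver behavior):

#include <errno.h>
#include <stdio.h>

static int toy_setup_cmd(int rq)    { (void)rq; return 0; }       /* stub */
static int toy_map_data(int rq)     { (void)rq; return -ENOMEM; } /* stub: fails */
static void toy_cleanup_cmd(int rq) { printf("cleanup rq %d\n", rq); }

static int toy_prep_rq(int rq)
{
    int err = toy_setup_cmd(rq);

    if (err)
        return err;

    err = toy_map_data(rq);
    if (err) {
        /* models virtblk_fail_to_queue(): undo the cmd setup */
        toy_cleanup_cmd(rq);
        return err;
    }

    printf("start rq %d\n", rq);  /* models blk_mq_start_request() */
    return 0;
}

int main(void)
{
    return toy_prep_rq(7) ? 1 : 0;
}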
433 struct request *req = bd->rq;
434 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
441 status = virtblk_prep_rq(hctx, vblk, req, vbr);
455 virtblk_unmap_data(req, vbr);
456 return virtblk_fail_to_queue(req, err);
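In virtblk_queue_rq() (lines 433-456), a failure to add the prepared request to the virtqueue is split by cause: a full ring stops the hardware queue so blk-mq retries once descriptors free up, and the failure path unmaps the data before virtblk_fail_to_queue() finishes the unwind. A toy model of that decision; treat the -ENOSPC-stops-the-queue detail as an assumption about the driver, though the unmap-then-fail order is visible in the lines above:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool hw_queue_stopped;

static int toy_queue_rq(int add_err)
{
    if (!add_err)
        return 0;                 /* queued; notify the device if needed */

    if (add_err == -ENOSPC)
        hw_queue_stopped = true;  /* models blk_mq_stop_hw_queue() */

    /* models virtblk_unmap_data() + virtblk_fail_to_queue(req, err) */
    printf("unmap + fail rq (err=%d)\n", add_err);
    return add_err;
}

int main(void)
{
    toy_queue_rq(-ENOSPC);
    printf("stopped=%d\n", hw_queue_stopped);
    return 0;
}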
468 static bool virtblk_prep_rq_batch(struct request *req)
470 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
471 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
473 req->mq_hctx->tags->rqs[req->tag] = req;
475 return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
488 struct request *req = rq_list_pop(rqlist);
489 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
493 virtblk_unmap_data(req, vbr);
494 virtblk_cleanup_cmd(req);
495 blk_mq_requeue_request(req, true);
507 struct request *req, *next, *prev = NULL;
510 rq_list_for_each_safe(rqlist, req, next) {
511 struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
514 if (!virtblk_prep_rq_batch(req)) {
515 rq_list_move(rqlist, &requeue_list, req, prev);
516 req = prev;
517 if (!req)
521 if (!next || req->mq_hctx != next->mq_hctx) {
522 req->rq_next = NULL;
530 prev = req;
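The loop at lines 507-530 walks the plugged request list, peeling failed preparations onto a requeue list (the helper at lines 488-495 later unmaps, cleans, and requeues them) and cutting the remainder into per-hctx runs: a run is flushed whenever the next entry is missing or belongs to a different hardware queue. A self-contained model of that list surgery, with toy_* types standing in for the rq_list machinery:

#include <stdio.h>

struct toy_rq {
    int hctx;             /* models req->mq_hctx */
    int prep_ok;          /* models virtblk_prep_rq_batch() */
    struct toy_rq *next;  /* models req->rq_next */
};

static void toy_submit_run(struct toy_rq *head)
{
    for (struct toy_rq *r = head; r; r = r->next)
        printf("submit rq on hctx %d\n", r->hctx);
}

static void toy_queue_rqs(struct toy_rq **rqlist, struct toy_rq **requeue)
{
    struct toy_rq *req = *rqlist, *prev = NULL;

    while (req) {
        struct toy_rq *next = req->next;

        if (!req->prep_ok) {
            /* models rq_list_move(): unlink req into the requeue list */
            if (prev)
                prev->next = next;
            else
                *rqlist = next;
            req->next = *requeue;
            *requeue = req;
            req = prev;
            if (!req) {           /* the failed entry was the run's head */
                req = next;
                continue;
            }
        }

        if (!next || req->hctx != next->hctx) {
            req->next = NULL;     /* terminate the current run */
            toy_submit_run(*rqlist);
            *rqlist = next;       /* the next run starts here */
            prev = NULL;
        } else {
            prev = req;
        }
        req = next;
    }
}

int main(void)
{
    struct toy_rq c = { 1, 1, NULL }, b = { 0, 0, &c }, a = { 0, 1, &b };
    struct toy_rq *rqlist = &a, *requeue = NULL;

    toy_queue_rqs(&rqlist, &requeue);  /* submits a and c, requeues b */
    printf("requeued: %d\n", requeue ? 1 : 0);
    return 0;
}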
571 struct request *req;
575 req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
576 if (IS_ERR(req))
577 return PTR_ERR(req);
579 vbr = blk_mq_rq_to_pdu(req);
584 err = blk_rq_map_kern(q, req, report_buf, report_len, GFP_KERNEL);
588 blk_execute_rq(req, false);
591 blk_mq_free_request(req);
870 struct request *req;
874 req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
875 if (IS_ERR(req))
876 return PTR_ERR(req);
878 vbr = blk_mq_rq_to_pdu(req);
883 err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
887 blk_execute_rq(req, false);
890 blk_mq_free_request(req);
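virtblk_submit_zone_report() (lines 571-591) and virtblk_get_id() (lines 870-890) share one shape: allocate an internal REQ_OP_DRV_IN request, map a kernel buffer into it, execute it synchronously, free it. The sketch below is assembled from exactly the calls visible above; the function name is hypothetical, and the command-header setup on the vbr pdu (done between allocation and mapping in the real driver) is elided:

/* Hypothetical condensation of the two helpers; not the driver's code. */
static int virtblk_drv_in_sketch(struct request_queue *q, void *buf, size_t len)
{
        struct request *req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        int err;

        if (IS_ERR(req))
                return PTR_ERR(req);

        /* ... fill the out_hdr via blk_mq_rq_to_pdu(req) here ... */

        err = blk_rq_map_kern(q, req, buf, len, GFP_KERNEL);
        if (!err)
                blk_execute_rq(req, false);  /* synchronous, no completion cb */

        blk_mq_free_request(req);
        return err;
}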
1073 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%u", i);
1254 struct request *req;
1256 rq_list_for_each(&iob->req_list, req) {
1257 virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
1258 virtblk_cleanup_cmd(req);
1275 struct request *req = blk_mq_rq_from_pdu(vbr);
1278 if (!blk_mq_complete_request_remote(req) &&
1279 !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
1281 virtblk_request_done(req);
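The poll path at lines 1275-1281 tries the cheapest completion first: hand the request back to the issuing CPU via blk_mq_complete_request_remote(), else add it to the iob batch so virtblk_complete_batch() (lines 1254-1258) can finish a whole set at once, and only fall back to completing inline. A short model of that short-circuit chain with stubbed predicates (the stub return values are arbitrary):

#include <stdbool.h>
#include <stdio.h>

static bool toy_complete_remote(int rq) { (void)rq; return false; } /* stub */
static bool toy_add_to_batch(int rq)    { (void)rq; return false; } /* stub */

static void toy_complete(int rq)
{
    /* remote first, then batching; inline completion is the fallback */
    if (!toy_complete_remote(rq) && !toy_add_to_batch(rq))
        printf("inline completion of rq %d\n", rq);
}

int main(void)
{
    toy_complete(3);
    return 0;
}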
1361 /* ... but without indirect descs, we use 2 descs per req */
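The comment at line 1361 belongs to the probe path's default queue-depth calculation. The surrounding logic, reconstructed from memory of virtblk_probe() (treat the exact shape as an assumption): with VIRTIO_RING_F_INDIRECT_DESC each request costs a single ring descriptor, so the default depth can fill the ring; without it a request needs at least two descriptors, so the default is halved:

/* Assumed shape, not verbatim driver code. */
if (!virtblk_queue_depth) {
        queue_depth = vblk->vqs[0].vq->num_free;
        /* ... but without indirect descs, we use 2 descs per req */
        if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
                queue_depth /= 2;
}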