Lines Matching refs:req

207 struct request *req;
217 req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags);
218 if (IS_ERR(req))
219 return PTR_ERR(req);
222 ret = blk_rq_map_kern(sdev->request_queue, req,
227 scmd = blk_mq_rq_to_pdu(req);
232 req->timeout = timeout;
233 req->rq_flags |= RQF_QUIET;
238 blk_execute_rq(req, true);
259 blk_mq_free_request(req);
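
The fragments at 217-259 follow the usual allocate/map/execute/free sequence for a passthrough request. A minimal sketch of that pattern, assuming a hypothetical caller-supplied 6-byte CDB, buffer and length (error handling abbreviated, opf, flags and timeout chosen for illustration only):

    #include <linux/blk-mq.h>
    #include <linux/blkdev.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_device.h>

    /* Sketch only: issue one CDB to @sdev and wait for it to finish.
     * "cdb", "buf" and "len" are hypothetical caller-supplied values. */
    static int example_issue_cdb(struct scsi_device *sdev, const u8 *cdb,
                                 void *buf, unsigned int len)
    {
            struct scsi_cmnd *scmd;
            struct request *req;
            int ret = 0;

            req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
            if (IS_ERR(req))
                    return PTR_ERR(req);

            if (len) {
                    ret = blk_rq_map_kern(sdev->request_queue, req, buf, len,
                                          GFP_NOIO);
                    if (ret)
                            goto out_free;
            }

            scmd = blk_mq_rq_to_pdu(req);
            scmd->cmd_len = 6;
            memcpy(scmd->cmnd, cdb, scmd->cmd_len);

            req->timeout = 30 * HZ;          /* illustrative timeout */
            req->rq_flags |= RQF_QUIET;      /* submitter handles errors itself */

            blk_execute_rq(req, true);       /* blocks until completion */
            ret = scmd->result;
    out_free:
            blk_mq_free_request(req);
            return ret;
    }

The execute path in the listing also threads caller-supplied request flags through (args->req_flags at 217); the sketch hardcodes 0 instead.
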
532 static bool scsi_end_request(struct request *req, blk_status_t error,
535 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
539 if (blk_update_request(req, error, bytes))
544 add_disk_randomness(req->q->disk);
546 if (!blk_rq_is_passthrough(req)) {
574 __blk_mq_end_request(req, error);
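
scsi_end_request() (532-574 above) hinges on blk_update_request(): a true return means bytes are still outstanding and the request must not be ended yet. A reduced sketch of that control flow, with the entropy and filesystem-request bookkeeping from 544-546 elided:

    /* Returns true while the request still has unfinished bytes. */
    static bool example_end_request(struct request *req, blk_status_t error,
                                    unsigned int bytes)
    {
            /* Partial completion: the request was advanced by @bytes but
             * is not finished, so the caller must keep driving it. */
            if (blk_update_request(req, error, bytes))
                    return true;

            /* disk entropy / fs-request bookkeeping elided (544-546) */

            __blk_mq_end_request(req, error);
            return false;
    }
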
664 struct request *req = scsi_cmd_to_rq(cmd);
670 wait_for = (cmd->allowed + 1) * req->timeout;
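
The fragment at 670 computes the total time budget for a command as one full timeout per allowed retry plus the initial attempt; with illustrative values of cmd->allowed = 5 and a 30-second req->timeout, wait_for comes to (5 + 1) * 30 s = 180 s.
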
690 struct request *req = scsi_cmd_to_rq(cmd);
819 if (!(req->rq_flags & RQF_QUIET)) {
840 if (!scsi_end_request(req, blk_stat, scsi_rq_err_bytes(req)))
870 struct request *req = scsi_cmd_to_rq(cmd);
877 if (blk_rq_is_passthrough(req)) {
887 } else if (blk_rq_bytes(req) == 0 && sense_current) {
890 * good_bytes != blk_rq_bytes(req) as the signal for an error.
910 else if (req->rq_flags & RQF_QUIET)
957 struct request *req = scsi_cmd_to_rq(cmd);
969 blk_rq_sectors(req), good_bytes));
975 if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
976 if (likely(!scsi_end_request(req, blk_stat, good_bytes)))
982 if (scsi_end_request(req, blk_stat, blk_rq_bytes(req)))
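
The fragments at 975-982 show the two-step completion decision: first finish the bytes that succeeded, then deal with whatever is left over. A sketch reusing the example_end_request() helper above (the real leftover handling retries or escalates to error handling; here it is only hinted at):

    static void example_io_completion(struct scsi_cmnd *cmd,
                                      blk_status_t blk_stat,
                                      unsigned int good_bytes)
    {
            struct request *req = scsi_cmd_to_rq(cmd);

            if (blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK) {
                    /* Complete the successful part; a false return means
                     * nothing is left and we are done. */
                    if (!example_end_request(req, blk_stat, good_bytes))
                            return;
            }

            /* Remaining bytes: fail them outright here; the real path
             * would retry or hand them to error handling instead. */
            example_end_request(req, blk_stat, blk_rq_bytes(req));
    }
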
1172 struct request *req)
1174 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1182 if (req->bio) {
1187 BUG_ON(blk_rq_bytes(req));
1192 cmd->transfersize = blk_rq_bytes(req);
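
Lines 1172-1192 are the passthrough prep path: map the attached bio into a scatterlist when there is one, otherwise insist the request carries no data, then record the transfer size. A minimal sketch (scsi_alloc_sgtables() is the exported kernel helper; everything else follows the fragments above):

    static blk_status_t example_setup_passthrough(struct request *req)
    {
            struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

            if (req->bio) {
                    blk_status_t ret = scsi_alloc_sgtables(cmd);

                    if (ret != BLK_STS_OK)
                            return ret;
            } else {
                    /* No bio attached, so there must be no payload. */
                    BUG_ON(blk_rq_bytes(req));
            }

            cmd->transfersize = blk_rq_bytes(req);
            return BLK_STS_OK;
    }
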
1197 scsi_device_state_check(struct scsi_device *sdev, struct request *req)
1231 if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))
1239 if (req && !(req->rq_flags & RQF_PM))
1544 static blk_status_t scsi_prepare_cmd(struct request *req)
1546 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1547 struct scsi_device *sdev = req->q->queuedata;
1576 if (blk_rq_bytes(req))
1577 cmd->sc_data_direction = rq_dma_dir(req);
1595 if (blk_rq_is_passthrough(req))
1596 return scsi_setup_scsi_cmnd(sdev, req);
1599 blk_status_t ret = sdev->handler->prep_fn(sdev, req);
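
scsi_prepare_cmd() (1544-1599) derives the DMA direction from the request and then branches: passthrough requests take the setup path sketched above, while filesystem requests may first pass through the device handler's prep_fn. A sketch of that dispatch; example_init_fs_command() is a hypothetical stand-in for the upper-level driver's own command initialisation:

    static blk_status_t example_prepare_cmd(struct request *req)
    {
            struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
            struct scsi_device *sdev = req->q->queuedata;

            /* Data direction comes from the request itself. */
            if (blk_rq_bytes(req))
                    cmd->sc_data_direction = rq_dma_dir(req);
            else
                    cmd->sc_data_direction = DMA_NONE;

            if (blk_rq_is_passthrough(req))
                    return example_setup_passthrough(req);

            /* Device handlers (e.g. multipath) get a look first. */
            if (sdev->handler && sdev->handler->prep_fn) {
                    blk_status_t ret = sdev->handler->prep_fn(sdev, req);

                    if (ret != BLK_STS_OK)
                            return ret;
            }

            return example_init_fs_command(cmd);    /* hypothetical */
    }
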
1613 struct request *req = scsi_cmd_to_rq(cmd);
1631 blk_mq_complete_request_direct(req, scsi_complete);
1633 blk_mq_complete_request(req);
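
The fragments at 1613-1633 are the completion hand-off: depending on context the finished command is completed directly on the local CPU or pushed through the regular blk-mq completion machinery. A sketch, with example_complete() as a hypothetical stand-in for the completion handler registered in the host's blk_mq_ops:

    /* Hypothetical completion handler standing in for the real one. */
    static void example_complete(struct request *req)
    {
            /* result decoding and I/O completion would run here */
    }

    static void example_done(struct scsi_cmnd *cmd, bool complete_directly)
    {
            struct request *req = scsi_cmd_to_rq(cmd);

            if (complete_directly)
                    blk_mq_complete_request_direct(req, example_complete);
            else
                    blk_mq_complete_request(req);
    }
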
1693 static void scsi_mq_set_rq_budget_token(struct request *req, int token)
1695 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1700 static int scsi_mq_get_rq_budget_token(struct request *req)
1702 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
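
The two accessors at 1693-1702 simply park the dispatch budget token in the per-request PDU so blk-mq can ask for it back later. A sketch of the accessors and how such callbacks would be wired into a blk_mq_ops table (all other callbacks elided):

    static void example_set_budget_token(struct request *req, int token)
    {
            struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

            cmd->budget_token = token;
    }

    static int example_get_budget_token(struct request *req)
    {
            struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

            return cmd->budget_token;
    }

    static const struct blk_mq_ops example_mq_ops = {
            /* queue_rq, complete, timeout, ... elided */
            .set_rq_budget_token    = example_set_budget_token,
            .get_rq_budget_token    = example_get_budget_token,
    };
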
1710 struct request *req = bd->rq;
1711 struct request_queue *q = req->q;
1714 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1725 ret = scsi_device_state_check(sdev, req);
1741 if (!(req->rq_flags & RQF_DONTPREP)) {
1742 ret = scsi_prepare_cmd(req);
1745 req->rq_flags |= RQF_DONTPREP;
1760 blk_mq_start_request(req);
1788 if (req->rq_flags & RQF_DONTPREP)
1801 if (req->rq_flags & RQF_DONTPREP)
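
scsi_queue_rq() (1710 onward) checks the device state, prepares the command at most once (RQF_DONTPREP marks an already-prepared request so a requeue skips the prep, and the unprep paths at 1788/1801 know to undo it), starts the request, and dispatches it. A compressed sketch; example_check_state() and example_dispatch() are hypothetical stand-ins for the state check and the low-level dispatch, and all budget and host-busy accounting is omitted:

    static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                         const struct blk_mq_queue_data *bd)
    {
            struct request *req = bd->rq;
            struct scsi_device *sdev = req->q->queuedata;
            struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
            blk_status_t ret;

            ret = example_check_state(sdev, req);   /* hypothetical */
            if (ret != BLK_STS_OK)
                    return ret;

            if (!(req->rq_flags & RQF_DONTPREP)) {
                    ret = example_prepare_cmd(req); /* sketched above */
                    if (ret != BLK_STS_OK)
                            return ret;
                    /* a requeued request must not be prepared twice */
                    req->rq_flags |= RQF_DONTPREP;
            }

            blk_mq_start_request(req);
            return example_dispatch(cmd);           /* hypothetical */
    }
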