Lines matching refs: scmd (cross-reference listing; the identifiers are consistent with the smartpqi SCSI driver, apparently drivers/scsi/smartpqi/smartpqi_init.c)

81 	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
85 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
89 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
221 static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
223 pqi_prep_for_scsi_done(scmd);
224 scsi_done(scmd);
540 static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
549 IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
552 switch (scmd->cmnd[0]) {
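The fragments at 540–552 outline how a command is classified as high priority: the I/O priority class is read from the block layer request, and only a qualifying class combined with a plain read/write opcode is boosted. A minimal sketch of that shape; the IOPRIO_CLASS_RT test, the opcode set, and any per-device gating through the (here unused) device argument are assumptions, not taken from the listing:

    static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device,
            struct scsi_cmnd *scmd)
    {
            int priority_class;

            priority_class =
                    IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
            if (priority_class != IOPRIO_CLASS_RT)
                    return false;

            /* Assumed opcode set: boost only plain reads and writes. */
            switch (scmd->cmnd[0]) {
            case READ_6: case READ_10: case READ_12: case READ_16:
            case WRITE_6: case WRITE_10: case WRITE_12: case WRITE_16:
                    return true;
            default:
                    return false;
            }
    }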
719 io_request->scmd = NULL;
725 static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
730 if (scmd) { /* SML I/O request */
731 u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
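Lines 719–731 suggest that each midlayer (SML) command maps to a fixed slot in a per-controller io_request pool via its block layer tag, so allocation on the hot path never searches. A sketch of that branch; the pool field name, the tag-to-index mapping, and the refcount claim are assumptions:

    if (scmd) { /* SML I/O request: slot chosen by the block layer tag */
            u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

            /* Assumed: one pool entry per tag, claimed with a refcount. */
            io_request =
                    &ctrl_info->io_request_pool[blk_mq_unique_tag_to_tag(blk_tag)];
            if (atomic_inc_return(&io_request->refcount) > 1) {
                    atomic_dec(&io_request->refcount);
                    return NULL;
            }
    } else {
            /* Driver-internal request: scan the pool for a free slot (elided). */
    }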
2684 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
2688 switch (scmd->cmnd[0]) {
2693 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2694 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2695 rmd->block_cnt = (u32)scmd->cmnd[4];
2703 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2704 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2710 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2711 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2717 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2718 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2725 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
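Lines 2684–2725 contain the complete LBA/transfer-length decode for the four read/write CDB sizes; only the case labels and the error path are missing from the listing. Reassembled below; the opcode labels, the failure return value, and the elided rmd->is_write bookkeeping for the WRITE_* cases are assumptions:

    static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
            struct pqi_scsi_dev_raid_map_data *rmd)
    {
            switch (scmd->cmnd[0]) {
            case READ_6:
            case WRITE_6:
                    /* 6-byte CDB: 21-bit LBA, 8-bit transfer length */
                    rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
                            (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
                    rmd->block_cnt = (u32)scmd->cmnd[4];
                    break;
            case READ_10:
            case WRITE_10:
                    /* 10-byte CDB: 32-bit LBA at byte 2, 16-bit length at byte 7 */
                    rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
                    rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
                    break;
            case READ_12:
            case WRITE_12:
                    /* 12-byte CDB: 32-bit LBA at byte 2, 32-bit length at byte 6 */
                    rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
                    rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
                    break;
            case READ_16:
            case WRITE_16:
                    /* 16-byte CDB: 64-bit LBA at byte 2, 32-bit length at byte 10 */
                    rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
                    rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
                    break;
            default:
                    return PQI_RAID_BYPASS_INELIGIBLE; /* assumed error value */
            }

            put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);

            return 0;
    }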
2965 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2976 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
3047 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
3051 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
3056 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
3129 device = io_request->scmd->device->hostdata;
3155 struct scsi_cmnd *scmd;
3162 scmd = io_request->scmd;
3163 if (!scmd)
3176 residual_count = scsi_bufflen(scmd) - xfer_count;
3177 scsi_set_resid(scmd, residual_count);
3178 if (xfer_count < scmd->underflow)
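Lines 3155–3178 show the residual bookkeeping on a RAID-path completion: the residual is derived from the controller's reported transfer count, recorded for the midlayer, and a transfer shorter than scmd->underflow is escalated. A sketch; the surrounding error-info parsing and the exact host-byte reaction are assumptions:

    residual_count = scsi_bufflen(scmd) - xfer_count;
    scsi_set_resid(scmd, residual_count);
    if (xfer_count < scmd->underflow)
            host_byte = DID_SOFT_ERROR;     /* assumed underflow reaction */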
3222 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
3223 struct pqi_scsi_dev *device = scmd->device->hostdata;
3228 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
3230 pqi_take_device_offline(scmd->device, "RAID");
3236 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
3244 memcpy(scmd->sense_buffer, error_info->data,
3248 scmd->result = scsi_status;
3249 set_host_byte(scmd, host_byte);
3256 struct scsi_cmnd *scmd;
3264 scmd = io_request->scmd;
3269 device = scmd->device->hostdata;
3284 scsi_set_resid(scmd, residual_count);
3285 xfer_count = scsi_bufflen(scmd) - residual_count;
3286 if (xfer_count < scmd->underflow)
3307 pqi_take_device_offline(scmd->device, "AIO");
3337 memcpy(scmd->sense_buffer, error_info->data,
3343 scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
3345 scmd->result = scsi_status;
3346 set_host_byte(scmd, host_byte);
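Lines 3256–3346 mirror that bookkeeping for the AIO path and add the failure handling: the device is taken offline on a path error (3307), real sense data is copied out, and a synthetic sense of "logical unit failure" (HARDWARE_ERROR, ASC 0x3e, ASCQ 0x1) is built when none is available. A sketch of the tail; the length variable and the branch conditions are assumptions:

    if (sense_data_length) {
            /* Never overrun the midlayer sense buffer. */
            if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
                    sense_data_length = SCSI_SENSE_BUFFERSIZE;
            memcpy(scmd->sense_buffer, error_info->data, sense_data_length);
    } else if (device_offline) {    /* condition name assumed */
            /* Fabricate "logical unit failure" sense data. */
            scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
    }

    scmd->result = scsi_status;
    set_host_byte(scmd, host_byte);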
3445 if (io_request->scmd)
3446 io_request->scmd->result = 0;
5371 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
5381 sg_count = scsi_dma_map(scmd);
5391 sg = scsi_sglist(scmd);
5407 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5417 sg_count = scsi_dma_map(scmd);
5428 sg = scsi_sglist(scmd);
5445 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5455 sg_count = scsi_dma_map(scmd);
5464 sg = scsi_sglist(scmd);
5481 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5491 sg_count = scsi_dma_map(scmd);
5502 sg = scsi_sglist(scmd);
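Lines 5371–5502 show four SG-list builders (RAID, RAID-1 write, RAID-5/6 write, AIO) with an identical skeleton: DMA-map the command, bail out on failure, then walk scsi_sglist() filling controller descriptors. The shared shape; the descriptor layout and the chaining of overflow descriptors into host memory are assumptions:

    sg_count = scsi_dma_map(scmd);
    if (sg_count < 0)
            return sg_count;        /* DMA mapping failed */

    sg = scsi_sglist(scmd);
    for (i = 0; i < sg_count; i++) {
            /*
             * Hypothetical descriptor layout; spilling extra descriptors
             * into a chained host-memory list is elided.
             */
            put_unaligned_le64(sg_dma_address(sg), &sg_descriptor[i].address);
            put_unaligned_le32(sg_dma_len(sg), &sg_descriptor[i].length);
            sg = sg_next(sg);
    }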
5521 struct scsi_cmnd *scmd;
5523 scmd = io_request->scmd;
5525 scsi_dma_unmap(scmd);
5526 pqi_scsi_done(scmd);
5530 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5538 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5543 io_request->scmd = scmd;
5549 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5555 request->ml_device_lun_number = (u8)scmd->device->lun;
5557 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5558 memcpy(request->cdb, scmd->cmnd, cdb_length);
5582 switch (scmd->sc_data_direction) {
5598 scmd->sc_data_direction);
5602 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5614 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5619 io_high_prio = pqi_is_io_high_priority(device, scmd);
5621 return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio);
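Lines 5530–5621 sketch RAID submission: allocate an io_request keyed to the command, record the buffer length and LUN, copy the CDB, translate scmd->sc_data_direction, then build the SG list. The detail worth flagging is the CDB copy at 5557–5558, which clamps to the size of the firmware field and so silently truncates oversized CDBs; these lines are taken directly from the listing:

    put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
    request->ml_device_lun_number = (u8)scmd->device->lun;

    /* Clamp to the firmware CDB field; longer CDBs are truncated. */
    cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
    memcpy(request->cdb, scmd->cmnd, cdb_length);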
5626 struct scsi_cmnd *scmd;
5633 scmd = io_request->scmd;
5634 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5636 if (host_byte(scmd->result) == DID_NO_CONNECT)
5639 device = scmd->device->hostdata;
5643 ctrl_info = shost_to_hba(scmd->device->host);
5653 struct scsi_cmnd *scmd;
5655 scmd = io_request->scmd;
5656 scsi_dma_unmap(scmd);
5658 set_host_byte(scmd, DID_IMM_RETRY);
5659 pqi_cmd_priv(scmd)->this_residual++;
5663 pqi_scsi_done(scmd);
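Lines 5653–5663 are the AIO bypass retry path: the command is unmapped, flagged DID_IMM_RETRY so the midlayer resubmits it immediately, and a per-command counter is bumped; pqi_is_bypass_eligible_request() (line 5897) then rejects the retried command so it travels the ordinary RAID path instead. A sketch; the retry condition and the io_request release are assumptions:

    static void pqi_aio_io_complete(struct pqi_io_request *io_request,
            void *context)
    {
            struct scsi_cmnd *scmd;

            scmd = io_request->scmd;
            scsi_dma_unmap(scmd);
            if (io_request->status == -EAGAIN) {    /* assumed retry condition */
                    set_host_byte(scmd, DID_IMM_RETRY);
                    pqi_cmd_priv(scmd)->this_residual++;
            }
            pqi_scsi_done(scmd);
    }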
5667 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5672 io_high_prio = pqi_is_io_high_priority(device, scmd);
5674 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5675 scmd->cmnd, scmd->cmd_len, queue_group, NULL,
5680 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5689 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5694 io_request->scmd = scmd;
5702 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5708 put_unaligned_le64(scmd->device->lun << 8, &request->lun_number);
5714 switch (scmd->sc_data_direction) {
5730 scmd->sc_data_direction);
5744 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5756 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5764 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5769 io_request->scmd = scmd;
5783 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5805 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5817 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5825 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5829 io_request->scmd = scmd;
5847 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5872 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5884 struct scsi_cmnd *scmd)
5889 return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
5892 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5894 if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
5897 return pqi_cmd_priv(scmd)->this_residual == 0;
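Lines 5892–5897 define bypass eligibility completely: passthrough requests are excluded, and a command whose this_residual counter was bumped by the retry path above is no longer eligible. Reassembled, verbatim apart from layout:

    static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
    {
            if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
                    return false;

            return pqi_cmd_priv(scmd)->this_residual == 0;
    }

This is what converts a failed bypass attempt into a one-way fallback: once the counter is non-zero, the command can never re-enter the bypass path.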
5905 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5910 if (!scmd->device) {
5911 set_host_byte(scmd, DID_NO_CONNECT);
5915 device = scmd->device->hostdata;
5917 set_host_byte(scmd, DID_NO_CONNECT);
5921 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
5923 wait = (struct completion *)xchg(&scmd->host_scribble, NULL);
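Lines 5905–5923 show the completion side of the abort handshake: the per-LUN outstanding-command counter is dropped, and host_scribble is atomically swapped with NULL; if an aborter had installed a completion pointer there, it is woken. Reassembled; only the guard on the final complete() is an assumption:

    void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
    {
            struct pqi_scsi_dev *device;
            struct completion *wait;

            if (!scmd->device) {
                    set_host_byte(scmd, DID_NO_CONNECT);
                    return;
            }

            device = scmd->device->hostdata;
            if (!device) {
                    set_host_byte(scmd, DID_NO_CONNECT);
                    return;
            }

            atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);

            /* Wake an aborter waiting in pqi_eh_abort_handler(), if any. */
            wait = (struct completion *)xchg(&scmd->host_scribble, NULL);
            if (wait && wait != PQI_NO_COMPLETION)  /* guard is assumed */
                    complete(wait);
    }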
5929 struct scsi_cmnd *scmd)
5942 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5950 device = scmd->device->hostdata;
6003 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
6013 scmd->host_scribble = PQI_NO_COMPLETION;
6015 device = scmd->device->hostdata;
6018 set_host_byte(scmd, DID_NO_CONNECT);
6019 pqi_scsi_done(scmd);
6023 lun = (u8)scmd->device->lun;
6030 set_host_byte(scmd, DID_NO_CONNECT);
6031 pqi_scsi_done(scmd);
6044 scmd->result = 0;
6046 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
6052 pqi_is_bypass_eligible_request(scmd) &&
6053 !pqi_is_parity_write_stream(ctrl_info, scmd)) {
6054 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6061 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6064 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6066 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6071 scmd->host_scribble = NULL;
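Lines 6003–6071 give the queuecommand skeleton: mark the command with the PQI_NO_COMPLETION sentinel, validate the device, derive the hardware queue from the block layer tag, try the RAID bypass for eligible commands on logical volumes, and otherwise submit on the RAID or AIO path; on failure the sentinel is cleared again (6071). The dispatch core, with the device checks elided and the flag and predicate names assumed:

    bool raid_bypassed = false;

    hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
    queue_group = &ctrl_info->queue_groups[hw_queue];       /* field assumed */

    if (pqi_is_logical_device(device)) {    /* predicate assumed */
            if (device->raid_bypass_enabled &&      /* flag assumed */
                            pqi_is_bypass_eligible_request(scmd) &&
                            !pqi_is_parity_write_stream(ctrl_info, scmd)) {
                    rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
                            scmd, queue_group);
                    if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
                            raid_bypassed = true;
            }
            if (!raid_bypassed)
                    rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
                            queue_group);
    } else {
            if (device->aio_enabled)        /* flag assumed */
                    rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
                            queue_group);
            else
                    rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
                            queue_group);
    }

    if (rc)
            scmd->host_scribble = NULL;     /* line 6071: drop the sentinel */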
6175 struct scsi_cmnd *scmd;
6189 scmd = io_request->scmd;
6190 if (!scmd)
6193 scsi_device = scmd->device->hostdata;
6197 if ((u8)scmd->device->lun != lun)
6201 set_host_byte(scmd, DID_RESET);
6203 scsi_dma_unmap(scmd);
6204 pqi_scsi_done(scmd);
6383 static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
6391 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode);
6409 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6416 shost = scmd->device->host;
6418 device = scmd->device->hostdata;
6419 scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
6421 return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode);
6427 struct scsi_cmnd *scmd;
6430 scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL);
6432 pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode);
6435 static int pqi_eh_abort_handler(struct scsi_cmnd *scmd)
6443 shost = scmd->device->host;
6445 device = scmd->device->hostdata;
6449 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6451 if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) {
6454 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6455 scmd->result = DID_RESET << 16;
6459 tmf_work = &device->tmf_work[scmd->device->lun];
6461 if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) {
6464 tmf_work->lun = (u8)scmd->device->lun;
6465 tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
6473 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
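Lines 6435–6473 show the lock-free abort protocol built on two cmpxchg() sites. The aborter first tries to swap its on-stack completion into scmd->host_scribble; if the PQI_NO_COMPLETION sentinel is already gone, the command has completed (or was failed by a reset) and the abort trivially succeeds. It then races to claim the single per-LUN tmf_work slot, so only one task-management request is in flight per LUN; every aborter, winner or not, waits for the completion signaled in pqi_prep_for_scsi_done(). A sketch of the two claims; the scheduling call and return values are assumptions:

    DECLARE_COMPLETION_ONSTACK(wait);

    /* 1. Install our completion; NULL back means the command already finished. */
    if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION,
                    (void *)&wait) == NULL) {
            scmd->result = DID_RESET << 16;
            return SUCCESS;         /* assumed: nothing left to abort */
    }

    /* 2. Claim the per-LUN TMF slot; the first aborter wins and files the work. */
    tmf_work = &device->tmf_work[scmd->device->lun];
    if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) {
            tmf_work->ctrl_info = ctrl_info;
            tmf_work->device = device;
            tmf_work->lun = (u8)scmd->device->lun;
            tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
            schedule_work(&tmf_work->work_struct);  /* scheduling call assumed */
    }

    wait_for_completion(&wait);     /* woken by pqi_prep_for_scsi_done() */
    return SUCCESS;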
8991 struct scsi_cmnd *scmd;
8999 scmd = io_request->scmd;
9000 if (scmd) {
9001 sdev = scmd->device;
9006 set_host_byte(scmd, DID_NO_CONNECT);
9201 struct scsi_cmnd *scmd;
9207 scmd = io_request->scmd;
9208 WARN_ON(scmd != NULL); /* IO command from SML */
9209 WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */