Lines Matching defs:sdev

142 struct scsi_device *sdev = scmd->device;
145 if (scsi_host_eh_past_deadline(sdev->host)) {
153 rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
156 if (scsi_host_eh_past_deadline(sdev->host)) {
196 struct scsi_device *sdev = scmd->device;
197 struct Scsi_Host *shost = sdev->host;
326 * @sdev: Device on which we are performing recovery.
335 int scsi_block_when_processing_errors(struct scsi_device *sdev)
339 wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));
341 online = scsi_device_online(sdev);
357 struct scsi_device *sdev;
363 shost_for_each_device(sdev, shost) {
365 if (scmd->device == sdev) {
396 * @sdev: Device reporting the UNIT ATTENTION
398 static void scsi_report_lun_change(struct scsi_device *sdev)
400 sdev->sdev_target->expecting_lun_change = 1;
406 * @sdev: Device reporting the sense code
409 static void scsi_report_sense(struct scsi_device *sdev,
417 sdev_printk(KERN_WARNING, sdev,
421 scsi_report_lun_change(sdev);
422 sdev_printk(KERN_WARNING, sdev,
428 sdev_printk(KERN_WARNING, sdev,
436 sdev_printk(KERN_WARNING, sdev,
444 sdev_printk(KERN_WARNING, sdev,
450 sdev_printk(KERN_WARNING, sdev,
454 sdev_printk(KERN_WARNING, sdev,
458 sdev_printk(KERN_WARNING, sdev,
461 sdev_printk(KERN_WARNING, sdev,
466 set_bit(evt_type, sdev->pending_events);
467 schedule_work(&sdev->event_work);
484 struct scsi_device *sdev = scmd->device;
490 scsi_report_sense(sdev, &sshdr);
495 if (sdev->handler && sdev->handler->check_sense) {
498 rc = sdev->handler->check_sense(sdev, &sshdr);
542 if (sshdr.asc == 0x44 && sdev->sdev_bflags & BLIST_RETRY_ITF)
545 sdev->sdev_bflags & BLIST_RETRY_ASC_C1)
644 static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
646 struct scsi_host_template *sht = sdev->host->hostt;
650 sdev->queue_depth >= sdev->max_queue_depth)
654 sdev->last_queue_ramp_up + sdev->queue_ramp_up_period))
658 sdev->last_queue_full_time + sdev->queue_ramp_up_period))
665 shost_for_each_device(tmp_sdev, sdev->host) {
666 if (tmp_sdev->channel != sdev->channel ||
667 tmp_sdev->id != sdev->id ||
668 tmp_sdev->queue_depth == sdev->max_queue_depth)
672 sdev->last_queue_ramp_up = jiffies;
676 static void scsi_handle_queue_full(struct scsi_device *sdev)
678 struct scsi_host_template *sht = sdev->host->hostt;
684 shost_for_each_device(tmp_sdev, sdev->host) {
685 if (tmp_sdev->channel != sdev->channel ||
686 tmp_sdev->id != sdev->id)
844 static void __scsi_report_device_reset(struct scsi_device *sdev, void *data)
846 sdev->was_reset = 1;
847 sdev->expecting_cc_ua = 1;
957 struct scsi_device *sdev = scmd->device;
1006 if (sdev->scsi_level <= SCSI_2 && sdev->scsi_level != SCSI_UNKNOWN)
1008 (sdev->lun << 5 & 0xe0);
1059 struct scsi_device *sdev = scmd->device;
1060 struct Scsi_Host *shost = sdev->host;
1076 * Lock sdev->state_mutex to avoid that scsi_device_quiesce() can
1080 mutex_lock(&sdev->state_mutex);
1081 while (sdev->sdev_state == SDEV_BLOCK && timeleft > 0) {
1082 mutex_unlock(&sdev->state_mutex);
1083 SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_DEBUG, sdev,
1084 "%s: state %d <> %d\n", __func__, sdev->sdev_state,
1089 mutex_lock(&sdev->state_mutex);
1091 if (sdev->sdev_state != SDEV_BLOCK)
1095 mutex_unlock(&sdev->state_mutex);
1342 struct scsi_device *sdev;
1347 sdev = scmd->device;
1350 if (scsi_host_eh_past_deadline(sdev->host)) {
1354 sdev_printk(KERN_INFO, sdev,
1367 if (scmd->device == sdev) {
1419 struct scsi_device *sdev;
1421 shost_for_each_device(sdev, shost) {
1424 sdev_printk(KERN_INFO, sdev,
1427 scsi_device_put(sdev);
1432 if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
1442 sdev_printk(KERN_INFO, sdev,
1447 if (!scsi_device_online(sdev) ||
1451 if (scmd->device == sdev &&
1458 sdev_printk(KERN_INFO, sdev,
1485 struct scsi_device *sdev;
1488 shost_for_each_device(sdev, shost) {
1491 sdev_printk(KERN_INFO, sdev,
1494 scsi_device_put(sdev);
1499 if (scmd->device == sdev) {
1508 sdev_printk(KERN_INFO, sdev,
1512 if (!scsi_device_online(sdev) ||
1517 if (scmd->device == sdev &&
1525 sdev_printk(KERN_INFO, sdev,
1719 struct scsi_device *sdev;
1724 sdev = scmd->device;
1726 mutex_lock(&sdev->state_mutex);
1727 scsi_device_set_state(sdev, SDEV_OFFLINE);
1728 mutex_unlock(&sdev->state_mutex);
1976 * @sdev: SCSI device to prevent medium removal
1985 static void scsi_eh_lock_door(struct scsi_device *sdev)
1990 req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, 0);
2020 struct scsi_device *sdev;
2028 shost_for_each_device(sdev, shost) {
2029 if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
2030 scsi_eh_lock_door(sdev);
2031 sdev->was_reset = 0;
2286 struct scsi_device *sdev;
2288 __shost_for_each_device(sdev, shost) {
2289 if (channel == sdev_channel(sdev))
2290 __scsi_report_device_reset(sdev, NULL);
2319 struct scsi_device *sdev;
2321 __shost_for_each_device(sdev, shost) {
2322 if (channel == sdev_channel(sdev) &&
2323 target == sdev_id(sdev))
2324 __scsi_report_device_reset(sdev, NULL);
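
The matches above are all from the SCSI midlayer's error-handling code (they line up with drivers/scsi/scsi_error.c in the Linux kernel), and two patterns account for most of them: walking a host's devices with shost_for_each_device(), and changing a device's state only while holding sdev->state_mutex. The sketch below is a minimal illustration of those two patterns, not code from the file; the helper names eh_walk_devices() and eh_offline_device() are made up, and the deadline check simply mirrors the scsi_host_eh_past_deadline() calls seen at 145, 156 and 1350 (that helper is static to scsi_error.c, so the sketch assumes it lives in the same file).

    #include <linux/mutex.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsi_host.h>

    /*
     * Hypothetical helper showing the iteration pattern behind the matches
     * at 1421-1427 and 1488-1494: shost_for_each_device() takes a reference
     * on each scsi_device as it visits it and drops that reference when it
     * advances, so a loop that leaves early with break still holds one
     * reference and must release it with scsi_device_put().
     */
    static void eh_walk_devices(struct Scsi_Host *shost)
    {
    	struct scsi_device *sdev;

    	shost_for_each_device(sdev, shost) {
    		if (scsi_host_eh_past_deadline(shost)) {
    			sdev_printk(KERN_INFO, sdev,
    				    "skipping remaining devices, past eh deadline\n");
    			scsi_device_put(sdev);	/* balance the reference still held */
    			break;
    		}

    		if (!scsi_device_online(sdev))
    			continue;	/* continue is safe: the macro does the put */

    		/* per-device recovery work would go here */
    	}
    }

    /*
     * Hypothetical helper mirroring the matches at 1726-1728: device state
     * transitions go through scsi_device_set_state() with sdev->state_mutex
     * held, the same convention the SDEV_BLOCK wait at 1080-1095 relies on.
     */
    static void eh_offline_device(struct scsi_device *sdev)
    {
    	mutex_lock(&sdev->state_mutex);
    	scsi_device_set_state(sdev, SDEV_OFFLINE);
    	mutex_unlock(&sdev->state_mutex);
    }

The __shost_for_each_device() variant seen at 2288 and 2321 walks the same device list without taking references, so it is only safe where the caller already guarantees the devices cannot go away.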