Lines Matching defs:sdev
233 /* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */
439 struct scsi_device *sdev;
441 sdev = class_to_sdev(class_dev);
442 put_device(&sdev->sdev_gendev);
447 struct scsi_device *sdev;
455 sdev = container_of(work, struct scsi_device, ew.work);
457 mod = sdev->host->hostt->module;
459 scsi_dh_release_device(sdev);
461 parent = sdev->sdev_gendev.parent;
463 spin_lock_irqsave(sdev->host->host_lock, flags);
464 list_del(&sdev->siblings);
465 list_del(&sdev->same_target_siblings);
466 list_del(&sdev->starved_entry);
467 spin_unlock_irqrestore(sdev->host->host_lock, flags);
469 cancel_work_sync(&sdev->event_work);
471 list_for_each_safe(this, tmp, &sdev->event_list) {
479 blk_put_queue(sdev->request_queue);
481 sdev->request_queue = NULL;
483 mutex_lock(&sdev->inquiry_mutex);
484 vpd_pg0 = rcu_replace_pointer(sdev->vpd_pg0, vpd_pg0,
485 lockdep_is_held(&sdev->inquiry_mutex));
486 vpd_pg80 = rcu_replace_pointer(sdev->vpd_pg80, vpd_pg80,
487 lockdep_is_held(&sdev->inquiry_mutex));
488 vpd_pg83 = rcu_replace_pointer(sdev->vpd_pg83, vpd_pg83,
489 lockdep_is_held(&sdev->inquiry_mutex));
490 vpd_pg89 = rcu_replace_pointer(sdev->vpd_pg89, vpd_pg89,
491 lockdep_is_held(&sdev->inquiry_mutex));
492 mutex_unlock(&sdev->inquiry_mutex);
502 kfree(sdev->inquiry);
503 kfree(sdev);
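The mutex_lock()/rcu_replace_pointer() sequence at lines 483-492 detaches each RCU-protected VPD page before the device is freed; the old copies can then be released once readers are done. A minimal sketch of that detach-then-free pattern, with illustrative struct and field names rather than the kernel's own:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_vpd {
	struct rcu_head rcu;
	int len;
	unsigned char data[];
};

struct demo_dev {
	struct mutex update_mutex;		/* serializes writers */
	struct demo_vpd __rcu *vpd;
};

static void demo_detach_vpd(struct demo_dev *d)
{
	struct demo_vpd *old;

	mutex_lock(&d->update_mutex);
	/* Swap NULL in; readers still holding the old pointer keep using it. */
	old = rcu_replace_pointer(d->vpd, NULL,
				  lockdep_is_held(&d->update_mutex));
	mutex_unlock(&d->update_mutex);

	if (old)
		kfree_rcu(old, rcu);		/* freed after a grace period */
}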
543 struct scsi_device *sdev;
548 sdev = to_scsi_device(dev);
550 add_uevent_var(env, "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type);
593 struct scsi_device *sdev; \
594 sdev = to_scsi_device(dev); \
595 return snprintf (buf, 20, format_string, sdev->field); \
618 struct scsi_device *sdev; \
619 sdev = to_scsi_device(dev); \
620 sscanf (buf, format_string, &sdev->field); \
640 struct scsi_device *sdev; \
643 sdev = to_scsi_device(dev); \
644 sdev->field = ret; \
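The macro bodies at lines 593-595, 618-620 and 640-644 stamp out one show or store routine per scsi_device field. Roughly what a single read-only expansion looks like; the attribute name and the field printed here are only examples:

#include <linux/device.h>
#include <scsi/scsi_device.h>

static ssize_t
sdev_show_example(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	/* Print one field of the scsi_device backing this sysfs file. */
	return snprintf(buf, 20, "%d\n", sdev->queue_depth);
}
static DEVICE_ATTR(example, S_IRUGO, sdev_show_example, NULL);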
681 struct scsi_device *sdev = to_scsi_device(dev);
682 return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_busy));
690 struct scsi_device *sdev = to_scsi_device(dev);
691 return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_blocked));
701 struct scsi_device *sdev;
702 sdev = to_scsi_device(dev);
703 return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
710 struct scsi_device *sdev;
712 sdev = to_scsi_device(dev);
714 blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
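sdev_show_timeout()/sdev_store_timeout() at lines 701-714 expose the block-layer request timeout in seconds. A sketch of a store routine in the same style; parsing with kstrtouint() is an illustrative choice here, not necessarily what the original does:

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

static ssize_t
demo_store_timeout(struct device *dev, struct device_attribute *attr,
		   const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	unsigned int timeout;
	int ret;

	ret = kstrtouint(buf, 10, &timeout);
	if (ret)
		return ret;

	/* Seconds from userspace, jiffies toward the block layer. */
	blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
	return count;
}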
722 struct scsi_device *sdev;
723 sdev = to_scsi_device(dev);
724 return snprintf(buf, 20, "%u\n", sdev->eh_timeout / HZ);
731 struct scsi_device *sdev;
738 sdev = to_scsi_device(dev);
742 sdev->eh_timeout = eh_timeout * HZ;
762 struct scsi_device *sdev = to_scsi_device(dev);
768 if (scsi_device_get(sdev))
784 scsi_remove_device(sdev);
787 scsi_device_put(sdev);
797 struct scsi_device *sdev = to_scsi_device(dev);
817 mutex_lock(&sdev->state_mutex);
818 switch (sdev->sdev_state) {
823 mutex_unlock(&sdev->state_mutex);
826 if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
829 ret = scsi_device_set_state(sdev, state);
833 mutex_unlock(&sdev->state_mutex);
844 blk_mq_run_hw_queues(sdev->request_queue, true);
854 struct scsi_device *sdev = to_scsi_device(dev);
855 const char *name = scsi_device_state_name(sdev->sdev_state);
869 struct scsi_device *sdev = to_scsi_device(dev);
872 if (sdev->simple_tags)
882 struct scsi_device *sdev = to_scsi_device(dev);
884 if (!sdev->tagged_supported)
887 sdev_printk(KERN_INFO, sdev,
902 struct scsi_device *sdev = to_scsi_device(dev); \
907 vpd_page = rcu_dereference(sdev->vpd_##_page); \
930 struct scsi_device *sdev = to_scsi_device(dev);
932 if (!sdev->inquiry)
935 return memory_read_from_buffer(buf, count, &off, sdev->inquiry,
936 sdev->inquiry_len);
962 struct scsi_device *sdev = to_scsi_device(dev); \
963 unsigned long long count = atomic_read(&sdev->field); \
975 struct scsi_device *sdev;
976 sdev = to_scsi_device(dev);
977 return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type);
986 struct scsi_device *sdev = to_scsi_device(dev); \
987 int val = test_bit(SDEV_EVT_##Cap_name, sdev->supported_events);\
996 struct scsi_device *sdev = to_scsi_device(dev); \
999 clear_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \
1001 set_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \
1026 struct scsi_device *sdev = to_scsi_device(dev);
1027 struct scsi_host_template *sht = sdev->host->hostt;
1034 if (depth < 1 || depth > sdev->host->can_queue)
1037 retval = sht->change_queue_depth(sdev, depth);
1041 sdev->max_queue_depth = sdev->queue_depth;
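sdev_store_queue_depth() at lines 1026-1041 validates the requested depth against shost->can_queue and then calls the host template's ->change_queue_depth() hook. A sketch of what such a hook commonly does on the driver side, assuming the driver has no extra per-device limits (names are illustrative):

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static int demo_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	/* Clamp to what the host can take, then let the midlayer apply it. */
	if (qdepth > sdev->host->can_queue)
		qdepth = sdev->host->can_queue;

	return scsi_change_queue_depth(sdev, qdepth);
}

/* Wired up in the driver's struct scsi_host_template:
 *	.change_queue_depth = demo_change_queue_depth,
 */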
1054 struct scsi_device *sdev = to_scsi_device(dev);
1057 count = scsi_vpd_lun_id(sdev, buf, PAGE_SIZE);
1077 struct scsi_device *sdev = to_scsi_device(dev);
1081 for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) {
1084 if (!(sdev->sdev_bflags & (__force blist_flags_t)BIT(i)))
1107 struct scsi_device *sdev = to_scsi_device(dev);
1109 if (!sdev->handler)
1112 return snprintf(buf, 20, "%s\n", sdev->handler->name);
1119 struct scsi_device *sdev = to_scsi_device(dev);
1122 if (sdev->sdev_state == SDEV_CANCEL ||
1123 sdev->sdev_state == SDEV_DEL)
1126 if (!sdev->handler) {
1130 err = scsi_dh_attach(sdev->request_queue, buf);
1135 if (sdev->handler->activate)
1136 err = sdev->handler->activate(sdev, NULL, NULL);
1143 sdev_printk(KERN_WARNING, sdev,
1145 sdev->handler->name);
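The dh_state store routine at lines 1119-1145 attaches a device handler by name via scsi_dh_attach(), or activates the one already bound. A small sketch of the attach call on its own; the handler name "alua" is just an example:

#include <scsi/scsi_device.h>
#include <scsi/scsi_dh.h>

static int demo_attach_alua(struct scsi_device *sdev)
{
	int err = scsi_dh_attach(sdev->request_queue, "alua");

	if (err)
		sdev_printk(KERN_WARNING, sdev,
			    "failed to attach device handler: %d\n", err);
	return err;
}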
1160 struct scsi_device *sdev = to_scsi_device(dev);
1164 if (!sdev->handler)
1167 access_state = (sdev->access_state & SCSI_ACCESS_STATE_MASK);
1180 struct scsi_device *sdev = to_scsi_device(dev);
1182 if (!sdev->handler)
1185 if (sdev->access_state & SCSI_ACCESS_STATE_PREFERRED)
1198 struct scsi_device *sdev;
1199 sdev = to_scsi_device(dev);
1201 jiffies_to_msecs(sdev->queue_ramp_up_period));
1209 struct scsi_device *sdev = to_scsi_device(dev);
1215 sdev->queue_ramp_up_period = msecs_to_jiffies(period);
1227 struct scsi_device *sdev = to_scsi_device(dev);
1231 !sdev->host->hostt->change_queue_depth)
1235 !sdev->host->hostt->change_queue_depth)
1240 !sdev->handler)
1243 !sdev->handler)
1253 struct scsi_device *sdev = to_scsi_device(dev);
1256 if (attr == &dev_attr_vpd_pg0 && !sdev->vpd_pg0)
1259 if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80)
1262 if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83)
1265 if (attr == &dev_attr_vpd_pg89 && !sdev->vpd_pg89)
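scsi_sdev_attr_is_visible() and scsi_sdev_bin_attr_is_visible() (lines 1227-1265) decide per device which attributes show up in sysfs. A sketch of how such callbacks plug into an attribute group; the arrays and names below are placeholders, not the ones this file defines:

#include <linux/sysfs.h>

static struct attribute *demo_attrs[] = {
	/* &dev_attr_foo.attr entries would go here */
	NULL,
};

static struct bin_attribute *demo_bin_attrs[] = {
	/* &bin_attr_bar entries would go here */
	NULL,
};

static umode_t demo_attr_is_visible(struct kobject *kobj,
				    struct attribute *attr, int i)
{
	return attr->mode;		/* expose everything in this sketch */
}

static umode_t demo_bin_attr_is_visible(struct kobject *kobj,
					struct bin_attribute *attr, int i)
{
	return attr->attr.mode;
}

static const struct attribute_group demo_attr_group = {
	.attrs		= demo_attrs,
	.bin_attrs	= demo_bin_attrs,
	.is_visible	= demo_attr_is_visible,
	.is_bin_visible	= demo_bin_attr_is_visible,
};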
1353 * @sdev: scsi_device to add
1358 int scsi_sysfs_add_sdev(struct scsi_device *sdev)
1361 struct request_queue *rq = sdev->request_queue;
1362 struct scsi_target *starget = sdev->sdev_target;
1370 device_enable_async_suspend(&sdev->sdev_gendev);
1372 pm_runtime_set_active(&sdev->sdev_gendev);
1373 if (!sdev->rpm_autosuspend)
1374 pm_runtime_forbid(&sdev->sdev_gendev);
1375 pm_runtime_enable(&sdev->sdev_gendev);
1378 scsi_autopm_get_device(sdev);
1380 scsi_dh_add_device(sdev);
1382 error = device_add(&sdev->sdev_gendev);
1384 sdev_printk(KERN_INFO, sdev,
1389 device_enable_async_suspend(&sdev->sdev_dev);
1390 error = device_add(&sdev->sdev_dev);
1392 sdev_printk(KERN_INFO, sdev,
1394 device_del(&sdev->sdev_gendev);
1397 transport_add_device(&sdev->sdev_gendev);
1398 sdev->is_visible = 1;
1400 error = bsg_scsi_register_queue(rq, &sdev->sdev_gendev);
1404 sdev_printk(KERN_INFO, sdev,
1408 if (sdev->host->hostt->sdev_attrs) {
1409 for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
1410 error = device_create_file(&sdev->sdev_gendev,
1411 sdev->host->hostt->sdev_attrs[i]);
1417 if (sdev->host->hostt->sdev_groups) {
1418 error = sysfs_create_groups(&sdev->sdev_gendev.kobj,
1419 sdev->host->hostt->sdev_groups);
1424 scsi_autopm_put_device(sdev);
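scsi_sysfs_add_sdev() (lines 1358-1424) brings runtime PM up before device_add(): mark the device active, forbid autosuspend unless the driver opted in, then enable. The same ordering for a generic struct device, as a sketch:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static void demo_enable_runtime_pm(struct device *dev, bool allow_autosuspend)
{
	pm_runtime_set_active(dev);	/* device is already powered up */
	if (!allow_autosuspend)
		pm_runtime_forbid(dev);	/* stay active until userspace allows */
	pm_runtime_enable(dev);
}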
1428 void __scsi_remove_device(struct scsi_device *sdev)
1430 struct device *dev = &sdev->sdev_gendev;
1438 if (sdev->sdev_state == SDEV_DEL)
1441 if (sdev->is_visible) {
1446 mutex_lock(&sdev->state_mutex);
1452 res = scsi_device_set_state(sdev, SDEV_CANCEL);
1454 res = scsi_device_set_state(sdev, SDEV_DEL);
1456 scsi_start_queue(sdev);
1458 mutex_unlock(&sdev->state_mutex);
1463 if (sdev->host->hostt->sdev_groups)
1464 sysfs_remove_groups(&sdev->sdev_gendev.kobj,
1465 sdev->host->hostt->sdev_groups);
1467 bsg_unregister_queue(sdev->request_queue);
1468 device_unregister(&sdev->sdev_dev);
1472 put_device(&sdev->sdev_dev);
1479 mutex_lock(&sdev->state_mutex);
1480 scsi_device_set_state(sdev, SDEV_DEL);
1481 mutex_unlock(&sdev->state_mutex);
1483 blk_cleanup_queue(sdev->request_queue);
1484 cancel_work_sync(&sdev->requeue_work);
1486 if (sdev->host->hostt->slave_destroy)
1487 sdev->host->hostt->slave_destroy(sdev);
1495 scsi_target_reap(scsi_target(sdev));
1502 * @sdev: scsi_device to unregister
1504 void scsi_remove_device(struct scsi_device *sdev)
1506 struct Scsi_Host *shost = sdev->host;
1509 __scsi_remove_device(sdev);
1518 struct scsi_device *sdev;
1522 list_for_each_entry(sdev, &shost->__devices, siblings) {
1529 if (sdev->channel != starget->channel ||
1530 sdev->id != starget->id)
1532 if (sdev->sdev_state == SDEV_DEL ||
1533 sdev->sdev_state == SDEV_CANCEL ||
1534 !get_device(&sdev->sdev_gendev))
1537 scsi_remove_device(sdev);
1538 put_device(&sdev->sdev_gendev);
1627 void scsi_sysfs_device_initialize(struct scsi_device *sdev)
1630 struct Scsi_Host *shost = sdev->host;
1631 struct scsi_target *starget = sdev->sdev_target;
1633 device_initialize(&sdev->sdev_gendev);
1634 sdev->sdev_gendev.bus = &scsi_bus_type;
1635 sdev->sdev_gendev.type = &scsi_dev_type;
1636 dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu",
1637 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
1639 device_initialize(&sdev->sdev_dev);
1640 sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev);
1641 sdev->sdev_dev.class = &sdev_class;
1642 dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu",
1643 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
1647 * sdev->lun_in_cdb for the initial INQUIRY command. For LUN 0 the
1651 sdev->scsi_level = starget->scsi_level;
1652 if (sdev->scsi_level <= SCSI_2 &&
1653 sdev->scsi_level != SCSI_UNKNOWN &&
1655 sdev->lun_in_cdb = 1;
1657 transport_setup_device(&sdev->sdev_gendev);
1659 list_add_tail(&sdev->same_target_siblings, &starget->devices);
1660 list_add_tail(&sdev->siblings, &shost->__devices);
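scsi_sysfs_device_initialize() (lines 1627-1660) only initializes and names the embedded struct devices; device_add() happens later in scsi_sysfs_add_sdev(). A sketch of that two-stage driver-core pattern with illustrative names; note that once device_initialize() has run, the object must be dropped with put_device() rather than kfree():

#include <linux/device.h>
#include <linux/slab.h>

struct demo_unit {
	struct device dev;
	int id;
};

static void demo_unit_release(struct device *dev)
{
	kfree(container_of(dev, struct demo_unit, dev));
}

static struct demo_unit *demo_unit_alloc(struct device *parent, int id)
{
	struct demo_unit *u = kzalloc(sizeof(*u), GFP_KERNEL);

	if (!u)
		return NULL;

	device_initialize(&u->dev);	/* refcounted from here on */
	u->dev.parent = parent;
	u->dev.release = demo_unit_release;
	u->id = id;
	dev_set_name(&u->dev, "demo%d", id);

	/* The caller registers it later with device_add(&u->dev); any
	 * failure after this point is cleaned up with put_device(). */
	return u;
}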