Lines Matching defs:sdev
189 * @sdev: scsi_device
201 int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
217 req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags);
222 ret = blk_rq_map_kern(sdev->request_queue, req,
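The scsi_execute_cmd() hits above (its kernel-doc fragment, the prototype, and the scsi_alloc_request()/blk_rq_map_kern() calls it makes internally) describe the midlayer's synchronous command interface; the symbols and line numbers are consistent with drivers/scsi/scsi_lib.c. A minimal caller might look like the sketch below. example_inquiry() is a hypothetical helper, and the 6-byte INQUIRY CDB, timeout and retry counts are illustrative assumptions, not taken from the listing.

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_dbg.h>

/* Hypothetical helper: read a standard INQUIRY response into @buf. */
static int example_inquiry(struct scsi_device *sdev, void *buf,
                           unsigned char len)
{
        unsigned char cdb[6] = { INQUIRY, 0, 0, 0, len, 0 };
        struct scsi_sense_hdr sshdr;
        const struct scsi_exec_args args = {
                .sshdr = &sshdr,        /* have the midlayer decode sense data */
        };
        int ret;

        /* REQ_OP_DRV_IN: the data buffer is filled by the device. */
        ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf, len,
                               30 * HZ, 3, &args);
        if (ret > 0 && scsi_sense_valid(&sshdr))
                scsi_print_sense_hdr(sdev, NULL, &sshdr);
        return ret;
}

The scsi_exec_args structure gathers the optional arguments (sense buffer, request flags, residual) that older scsi_execute() variants carried as a long parameter tail, so call sites only spell out the fields they need.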
291 void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
293 struct Scsi_Host *shost = sdev->host;
294 struct scsi_target *starget = scsi_target(sdev);
301 sbitmap_put(&sdev->budget_map, cmd->budget_token);
306 * Kick the queue of SCSI device @sdev if @sdev != current_sdev. Called with
309 static void scsi_kick_sdev_queue(struct scsi_device *sdev, void *data)
313 if (sdev != current_sdev)
314 blk_mq_run_hw_queues(sdev->request_queue, true);
350 static inline bool scsi_device_is_busy(struct scsi_device *sdev)
352 if (scsi_device_busy(sdev) >= sdev->queue_depth)
354 if (atomic_read(&sdev->device_blocked) > 0)
382 struct scsi_device *sdev;
404 sdev = list_entry(starved_list.next,
406 list_del_init(&sdev->starved_entry);
407 if (scsi_target_is_busy(scsi_target(sdev))) {
408 list_move_tail(&sdev->starved_entry,
415 * call may remove the sdev from the starved list and destroy
417 * queue and never touching the sdev again after we drop the
423 slq = sdev->request_queue;
446 struct scsi_device *sdev = q->queuedata;
448 if (scsi_target(sdev)->single_lun)
449 scsi_single_lun_run(sdev);
450 if (!list_empty(&sdev->host->starved_list))
451 scsi_starved_list_run(sdev->host);
459 struct scsi_device *sdev;
462 sdev = container_of(work, struct scsi_device, requeue_work);
463 q = sdev->request_queue;
469 struct scsi_device *sdev;
471 shost_for_each_device(sdev, shost)
472 scsi_run_queue(sdev->request_queue);
502 static void scsi_run_queue_async(struct scsi_device *sdev)
504 if (scsi_host_in_recovery(sdev->host))
507 if (scsi_target(sdev)->single_lun ||
508 !list_empty(&sdev->host->starved_list)) {
509 kblockd_schedule_work(&sdev->requeue_work);
514 * scsi_device_unbusy() and reading sdev->restarts.
516 int old = atomic_read(&sdev->restarts);
526 if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old)
527 blk_mq_run_hw_queues(sdev->request_queue, true);
536 struct scsi_device *sdev = cmd->device;
537 struct request_queue *q = sdev->request_queue;
576 scsi_run_queue_async(sdev);
998 static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
1001 return sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
1003 sdev->host->hostt->dma_need_drain(rq);
1020 struct scsi_device *sdev = cmd->device;
1025 bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq);
1062 sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
1065 cmd->extra_len += sdev->dma_drain_len;
1171 static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
1197 scsi_device_state_check(struct scsi_device *sdev, struct request *req)
1199 switch (sdev->sdev_state) {
1209 if (!sdev->offline_already) {
1210 sdev->offline_already = true;
1211 sdev_printk(KERN_ERR, sdev,
1220 sdev_printk(KERN_ERR, sdev,
1246 * scsi_dev_queue_ready: if we can send requests to sdev, assign one token
1250 struct scsi_device *sdev)
1254 token = sbitmap_get(&sdev->budget_map);
1255 if (atomic_read(&sdev->device_blocked)) {
1259 if (scsi_device_busy(sdev) > 1)
1265 if (atomic_dec_return(&sdev->device_blocked) > 0)
1267 SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
1274 sbitmap_put(&sdev->budget_map, token);
1281 * @sdev: scsi device on starget to check.
1284 struct scsi_device *sdev)
1286 struct scsi_target *starget = scsi_target(sdev);
1292 starget->starget_sdev_user != sdev) {
1296 starget->starget_sdev_user = sdev;
1325 list_move_tail(&sdev->starved_entry, &shost->starved_list);
1340 struct scsi_device *sdev,
1362 if (!list_empty(&sdev->starved_entry)) {
1364 if (!list_empty(&sdev->starved_entry))
1365 list_del_init(&sdev->starved_entry);
1375 if (list_empty(&sdev->starved_entry))
1376 list_add_tail(&sdev->starved_entry, &shost->starved_list);
1387 * shost/starget/sdev, since the returned value is not guaranteed and
1397 struct scsi_device *sdev = q->queuedata;
1403 shost = sdev->host;
1411 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1547 struct scsi_device *sdev = req->q->queuedata;
1548 struct Scsi_Host *shost = sdev->host;
1552 scsi_init_command(sdev, cmd);
1596 return scsi_setup_scsi_cmnd(sdev, req);
1598 if (sdev->handler && sdev->handler->prep_fn) {
1599 blk_status_t ret = sdev->handler->prep_fn(sdev, req);
1650 struct scsi_device *sdev = q->queuedata;
1652 sbitmap_put(&sdev->budget_map, budget_token);
1664 struct scsi_device *sdev = q->queuedata;
1665 int token = scsi_dev_queue_ready(q, sdev);
1670 atomic_inc(&sdev->restarts);
1673 * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy).
1681 * before reading .device_busy, sdev->device_busy will be observed as
1687 if (unlikely(scsi_device_busy(sdev) == 0 &&
1688 !scsi_device_blocked(sdev)))
1689 blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
1712 struct scsi_device *sdev = q->queuedata;
1713 struct Scsi_Host *shost = sdev->host;
1724 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1725 ret = scsi_device_state_check(sdev, req);
1731 if (!scsi_target_queue_ready(shost, sdev))
1738 if (!scsi_host_queue_ready(q, shost, sdev, cmd))
1751 if (sdev->simple_tags)
1773 if (scsi_target(sdev)->can_queue > 0)
1774 atomic_dec(&scsi_target(sdev)->target_busy);
1783 if (scsi_device_blocked(sdev))
1792 if (unlikely(!scsi_device_online(sdev)))
1803 scsi_run_queue_async(sdev);
2007 * scsi_device_from_queue - return sdev associated with a request_queue
2008 * @q: The request queue to return the sdev from
2010 * Return the sdev associated with a request queue or NULL if the
2015 struct scsi_device *sdev = NULL;
2019 sdev = q->queuedata;
2020 if (!sdev || !get_device(&sdev->sdev_gendev))
2021 sdev = NULL;
2023 return sdev;
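The scsi_device_from_queue() hits above return the sdev behind a request_queue with a reference taken on sdev_gendev, so the caller owes a put_device(). A hedged sketch, with example_peek_sdev() as a hypothetical name:

#include <linux/device.h>
#include <scsi/scsi_device.h>

static void example_peek_sdev(struct request_queue *q)
{
        struct scsi_device *sdev = scsi_device_from_queue(q);

        if (!sdev)
                return;         /* not a SCSI queue, or the device is going away */

        sdev_printk(KERN_INFO, sdev, "queue depth %d\n", sdev->queue_depth);
        put_device(&sdev->sdev_gendev);
}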
2072 * @sdev: SCSI device to be queried
2087 int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
2106 if (sdev->use_10_for_ms ||
2145 ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, real_buffer, len,
2154 * @sdev: SCSI device to be queried
2169 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage,
2186 dbd = sdev->set_dbd_for_ms ? 8 : dbd;
2194 use_10_for_ms = sdev->use_10_for_ms || len > 255;
2214 result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len,
2238 sdev->use_10_for_ms = 0;
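The scsi_mode_sense() lines above show the MODE SENSE helper and its fallback from the 10-byte to the 6-byte CDB (clearing use_10_for_ms). A sketch of fetching the Caching mode page follows; example_read_caching_page() is a hypothetical name, and the assumption that a zero return means success, with the decoded header lengths left in struct scsi_mode_data, should be re-checked against the kernel version at hand.

#include <scsi/scsi_common.h>
#include <scsi/scsi_device.h>

static int example_read_caching_page(struct scsi_device *sdev,
                                     unsigned char *buf, int len)
{
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
        int ret;

        ret = scsi_mode_sense(sdev, 0 /* dbd */, 0x08 /* caching page */,
                              0 /* subpage */, buf, len,
                              30 * HZ, 3, &data, &sshdr);
        if (ret)
                return ret;

        /* The page payload starts after the header and block descriptors. */
        return data.header_length + data.block_descriptor_length;
}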
2280 * @sdev: scsi device to change the state of.
2289 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2302 result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
2304 if (sdev->removable && scsi_sense_valid(sshdr) &&
2306 sdev->changed = 1;
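scsi_test_unit_ready() above wraps TEST UNIT READY, retrying internally through UNIT ATTENTION conditions and, for removable devices, flagging sdev->changed on a medium change. A small polling sketch, with example_wait_ready() as a hypothetical name:

#include <scsi/scsi_common.h>
#include <scsi/scsi_device.h>

static bool example_wait_ready(struct scsi_device *sdev)
{
        struct scsi_sense_hdr sshdr;
        int ret;

        ret = scsi_test_unit_ready(sdev, 30 * HZ, 5, &sshdr);
        if (ret && scsi_sense_valid(&sshdr))
                sdev_printk(KERN_INFO, sdev,
                            "not ready: sense key 0x%x asc 0x%x ascq 0x%x\n",
                            sshdr.sense_key, sshdr.asc, sshdr.ascq);
        return ret == 0;
}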
2316 * @sdev: scsi device to change the state of.
2323 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2325 enum scsi_device_state oldstate = sdev->sdev_state;
2427 sdev->offline_already = false;
2428 sdev->sdev_state = state;
2433 sdev_printk(KERN_ERR, sdev,
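scsi_device_set_state() validates the requested transition against the old state and rejects illegal ones (the sdev_printk above logs the refusal). Elsewhere in this listing its callers take sdev->state_mutex around the call, so here is a sketch that mirrors that pattern, with example_offline() as a hypothetical name:

#include <linux/mutex.h>
#include <scsi/scsi_device.h>

static int example_offline(struct scsi_device *sdev)
{
        int ret;

        mutex_lock(&sdev->state_mutex);
        ret = scsi_device_set_state(sdev, SDEV_OFFLINE);
        mutex_unlock(&sdev->state_mutex);

        return ret;     /* -EINVAL if the transition is not allowed */
}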
2444 * @sdev: associated SCSI device
2449 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2459 scsi_rescan_device(sdev);
2487 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2499 struct scsi_device *sdev;
2503 sdev = container_of(work, struct scsi_device, event_work);
2506 if (test_and_clear_bit(evt_type, sdev->pending_events))
2507 sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
2514 spin_lock_irqsave(&sdev->list_lock, flags);
2515 list_splice_init(&sdev->event_list, &event_list);
2516 spin_unlock_irqrestore(&sdev->list_lock, flags);
2524 scsi_evt_emit(sdev, evt);
2532 * @sdev: scsi_device event occurred on
2537 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2545 if (!test_bit(evt->evt_type, sdev->supported_events)) {
2551 spin_lock_irqsave(&sdev->list_lock, flags);
2552 list_add_tail(&evt->node, &sdev->event_list);
2553 schedule_work(&sdev->event_work);
2554 spin_unlock_irqrestore(&sdev->list_lock, flags);
2596 * @sdev: scsi_device event occurred on
2602 void sdev_evt_send_simple(struct scsi_device *sdev,
2607 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2612 sdev_evt_send(sdev, evt);
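sdev_evt_send() and sdev_evt_send_simple() above queue a scsi_event onto sdev->event_list and kick sdev->event_work, but only for event types set in sdev->supported_events; anything else is dropped. A sketch of reporting a media change, with example_report_media_change() as a hypothetical name and the assumption that the LLD enabled the event when the device was set up:

#include <linux/gfp.h>
#include <scsi/scsi_device.h>

static void example_report_media_change(struct scsi_device *sdev)
{
        /* Dropped unless SDEV_EVT_MEDIA_CHANGE is in sdev->supported_events. */
        sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
}

The work item then emits a KOBJ_CHANGE uevent on sdev_gendev, which is how udev and user space learn about the change.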
2618 * @sdev: scsi device to quiesce.
2630 scsi_device_quiesce(struct scsi_device *sdev)
2632 struct request_queue *q = sdev->request_queue;
2640 WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
2642 if (sdev->quiesced_by == current)
2657 mutex_lock(&sdev->state_mutex);
2658 err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2660 sdev->quiesced_by = current;
2663 mutex_unlock(&sdev->state_mutex);
2671 * @sdev: scsi device to resume.
2678 void scsi_device_resume(struct scsi_device *sdev)
2684 mutex_lock(&sdev->state_mutex);
2685 if (sdev->sdev_state == SDEV_QUIESCE)
2686 scsi_device_set_state(sdev, SDEV_RUNNING);
2687 if (sdev->quiesced_by) {
2688 sdev->quiesced_by = NULL;
2689 blk_clear_pm_only(sdev->request_queue);
2691 mutex_unlock(&sdev->state_mutex);
2696 device_quiesce_fn(struct scsi_device *sdev, void *data)
2698 scsi_device_quiesce(sdev);
2709 device_resume_fn(struct scsi_device *sdev, void *data)
2711 scsi_device_resume(sdev);
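scsi_device_quiesce() above moves the device to SDEV_QUIESCE and uses pm-only gating on the queue so that only power management requests are processed, recording quiesced_by so that the same task must later call scsi_device_resume(). A pairing sketch, with example_with_device_quiesced() as a hypothetical name:

#include <scsi/scsi_device.h>

static int example_with_device_quiesced(struct scsi_device *sdev,
                                        int (*fn)(struct scsi_device *))
{
        int ret;

        ret = scsi_device_quiesce(sdev);
        if (ret)
                return ret;

        ret = fn(sdev);

        scsi_device_resume(sdev);
        return ret;
}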
2721 static int __scsi_internal_device_block_nowait(struct scsi_device *sdev)
2723 if (scsi_device_set_state(sdev, SDEV_BLOCK))
2724 return scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2729 void scsi_start_queue(struct scsi_device *sdev)
2731 if (cmpxchg(&sdev->queue_stopped, 1, 0))
2732 blk_mq_unquiesce_queue(sdev->request_queue);
2735 static void scsi_stop_queue(struct scsi_device *sdev)
2743 if (!cmpxchg(&sdev->queue_stopped, 0, 1))
2744 blk_mq_quiesce_queue_nowait(sdev->request_queue);
2749 * @sdev: device to block
2761 int scsi_internal_device_block_nowait(struct scsi_device *sdev)
2763 int ret = __scsi_internal_device_block_nowait(sdev);
2771 scsi_stop_queue(sdev);
2778 * @sdev: device to block
2791 static void scsi_device_block(struct scsi_device *sdev, void *data)
2796 mutex_lock(&sdev->state_mutex);
2797 err = __scsi_internal_device_block_nowait(sdev);
2798 state = sdev->sdev_state;
2805 scsi_stop_queue(sdev);
2807 mutex_unlock(&sdev->state_mutex);
2810 __func__, dev_name(&sdev->sdev_gendev), state);
2815 * @sdev: device to resume
2828 int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
2843 switch (sdev->sdev_state) {
2846 sdev->sdev_state = new_state;
2851 sdev->sdev_state = new_state;
2853 sdev->sdev_state = SDEV_CREATED;
2861 scsi_start_queue(sdev);
2869 * @sdev: device to resume
2881 static int scsi_internal_device_unblock(struct scsi_device *sdev,
2886 mutex_lock(&sdev->state_mutex);
2887 ret = scsi_internal_device_unblock_nowait(sdev, new_state);
2888 mutex_unlock(&sdev->state_mutex);
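scsi_internal_device_block_nowait() and scsi_internal_device_unblock_nowait() above fence a device by moving it to SDEV_BLOCK and quiescing its queue, then later restore it; the callers shown in this listing hold sdev->state_mutex around each call. A hedged sketch for a transport-style user, with example_fence_device() as a hypothetical name and the recovery step elided:

#include <linux/mutex.h>
#include <scsi/scsi_device.h>

static int example_fence_device(struct scsi_device *sdev)
{
        int ret;

        mutex_lock(&sdev->state_mutex);
        ret = scsi_internal_device_block_nowait(sdev);
        mutex_unlock(&sdev->state_mutex);
        if (ret)
                return ret;

        /* ... transport-level recovery would run here ... */

        mutex_lock(&sdev->state_mutex);
        ret = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
        mutex_unlock(&sdev->state_mutex);
        return ret;
}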
2924 device_unblock(struct scsi_device *sdev, void *data)
2926 scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
2961 struct scsi_device *sdev;
2968 shost_for_each_device(sdev, shost) {
2969 mutex_lock(&sdev->state_mutex);
2970 ret = scsi_internal_device_block_nowait(sdev);
2971 mutex_unlock(&sdev->state_mutex);
2973 scsi_device_put(sdev);
2988 struct scsi_device *sdev;
2991 shost_for_each_device(sdev, shost) {
2992 ret = scsi_internal_device_unblock(sdev, new_state);
2994 scsi_device_put(sdev);
3062 void sdev_disable_disk_events(struct scsi_device *sdev)
3064 atomic_inc(&sdev->disk_events_disable_depth);
3068 void sdev_enable_disk_events(struct scsi_device *sdev)
3070 if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
3072 atomic_dec(&sdev->disk_events_disable_depth);
3150 * @sdev: SCSI device
3162 int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
3171 vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
3279 * @sdev: SCSI device
3286 int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
3293 vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
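scsi_vpd_lun_id() and scsi_vpd_tpg_id() above both parse the cached Device Identification VPD page (vpd_pg83) under RCU: the first copies the best designator into a caller-supplied buffer, the second reports the target port group. A logging sketch, with example_log_identity() as a hypothetical name and the 64-byte buffer an arbitrary choice:

#include <scsi/scsi_device.h>

static void example_log_identity(struct scsi_device *sdev)
{
        char id[64];
        int tpg, rel_id;

        if (scsi_vpd_lun_id(sdev, id, sizeof(id)) > 0)
                sdev_printk(KERN_INFO, sdev, "designator: %s\n", id);

        tpg = scsi_vpd_tpg_id(sdev, &rel_id);
        if (tpg >= 0)
                sdev_printk(KERN_INFO, sdev,
                            "target port group %d (relative port %d)\n",
                            tpg, rel_id);
}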