Lines Matching defs:sdev

224  * @sdev:	scsi device
240 int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
250 req = blk_get_request(sdev->request_queue,
258 if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
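The fragments above are from __scsi_execute(), which builds a passthrough request and maps the caller's buffer. A hypothetical caller sketch going through the scsi_execute_req() wrapper that appears further down in this listing; the INQUIRY opcode, timeout, and retry count are illustrative assumptions:

    /* Hypothetical caller, not part of the file being searched. */
    static int example_inquiry(struct scsi_device *sdev, unsigned char *buf,
                               unsigned char len)
    {
            unsigned char cmd[6] = { INQUIRY, 0, 0, 0, len, 0 };
            struct scsi_sense_hdr sshdr;

            /* Assumed limits: 10 second timeout, 3 retries, no residual count. */
            return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, len,
                                    &sshdr, 10 * HZ, 3, NULL);
    }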
323 void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
325 struct Scsi_Host *shost = sdev->host;
326 struct scsi_target *starget = scsi_target(sdev);
333 atomic_dec(&sdev->device_busy);
351 struct scsi_device *sdev, *tmp;
370 list_for_each_entry_safe(sdev, tmp, &starget->devices,
372 if (sdev == current_sdev)
374 if (scsi_device_get(sdev))
378 scsi_kick_queue(sdev->request_queue);
381 scsi_device_put(sdev);
387 static inline bool scsi_device_is_busy(struct scsi_device *sdev)
389 if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
391 if (atomic_read(&sdev->device_blocked) > 0)
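Taken together, the two reads above form the whole per-device busy test; a minimal sketch of how they plausibly fit together (the return statements are inferred from the call sites):

    static inline bool scsi_device_is_busy(struct scsi_device *sdev)
    {
            /* At or over the configured queue depth: no room for another command. */
            if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
                    return true;
            /* Temporarily blocked after a BUSY/QUEUE FULL style response. */
            if (atomic_read(&sdev->device_blocked) > 0)
                    return true;
            return false;
    }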
419 struct scsi_device *sdev;
441 sdev = list_entry(starved_list.next,
443 list_del_init(&sdev->starved_entry);
444 if (scsi_target_is_busy(scsi_target(sdev))) {
445 list_move_tail(&sdev->starved_entry,
452 * call may remove the sdev from the starved list and destroy
454 * queue and never touching the sdev again after we drop the
460 slq = sdev->request_queue;
483 struct scsi_device *sdev = q->queuedata;
485 if (scsi_target(sdev)->single_lun)
486 scsi_single_lun_run(sdev);
487 if (!list_empty(&sdev->host->starved_list))
488 scsi_starved_list_run(sdev->host);
495 struct scsi_device *sdev;
498 sdev = container_of(work, struct scsi_device, requeue_work);
499 q = sdev->request_queue;
505 struct scsi_device *sdev;
507 shost_for_each_device(sdev, shost)
508 scsi_run_queue(sdev->request_queue);
538 static void scsi_run_queue_async(struct scsi_device *sdev)
540 if (scsi_target(sdev)->single_lun ||
541 !list_empty(&sdev->host->starved_list)) {
542 kblockd_schedule_work(&sdev->requeue_work);
547 * scsi_device_unbusy() and reading sdev->restarts.
549 int old = atomic_read(&sdev->restarts);
559 if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old)
560 blk_mq_run_hw_queues(sdev->request_queue, true);
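A sketch of how the scsi_run_queue_async() fragments above plausibly combine: either punt to the requeue work (single-LUN target or non-empty starved list), or clear ->restarts and rerun the hardware queues. The else branch and its structure are inferred from the matched lines:

    static void scsi_run_queue_async(struct scsi_device *sdev)
    {
            if (scsi_target(sdev)->single_lun ||
                !list_empty(&sdev->host->starved_list)) {
                    kblockd_schedule_work(&sdev->requeue_work);
            } else {
                    /*
                     * A barrier in the completion path orders the write to
                     * ->device_busy in scsi_device_unbusy() and reading
                     * sdev->restarts here.
                     */
                    int old = atomic_read(&sdev->restarts);

                    /* Only the winner of the cmpxchg reruns the queues. */
                    if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old)
                            blk_mq_run_hw_queues(sdev->request_queue, true);
            }
    }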
569 struct scsi_device *sdev = cmd->device;
570 struct request_queue *q = sdev->request_queue;
608 scsi_run_queue_async(sdev);
991 static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
994 return sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
996 sdev->host->hostt->dma_need_drain(rq);
1010 struct scsi_device *sdev = cmd->device;
1015 bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq);
1052 sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
1055 cmd->extra_len += sdev->dma_drain_len;
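The drain predicate is nearly complete in the matched lines; a sketch with the missing condition filled in (restricting the drain to non-write passthrough requests is an assumption about how drain buffers are normally used):

    static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
                                                struct request *rq)
    {
            return sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
                   !op_is_write(req_op(rq)) &&      /* assumed: reads only */
                   sdev->host->hostt->dma_need_drain(rq);
    }

When the predicate holds, the matched sg_set_buf() call appends sdev->dma_drain_buf as one extra scatterlist entry and sdev->dma_drain_len is added to cmd->extra_len.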
1176 static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
1205 scsi_device_state_check(struct scsi_device *sdev, struct request *req)
1207 switch (sdev->sdev_state) {
1217 if (!sdev->offline_already) {
1218 sdev->offline_already = true;
1219 sdev_printk(KERN_ERR, sdev,
1228 sdev_printk(KERN_ERR, sdev,
1254 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1260 struct scsi_device *sdev)
1264 busy = atomic_inc_return(&sdev->device_busy) - 1;
1265 if (atomic_read(&sdev->device_blocked)) {
1272 if (atomic_dec_return(&sdev->device_blocked) > 0)
1274 SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
1278 if (busy >= sdev->queue_depth)
1283 atomic_dec(&sdev->device_busy);
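A sketch of scsi_dev_queue_ready() assembled from the fragments above; the goto label and return values are inferred from the increment/decrement pairing:

    static inline int scsi_dev_queue_ready(struct request_queue *q,
                                           struct scsi_device *sdev)
    {
            unsigned int busy;

            busy = atomic_inc_return(&sdev->device_busy) - 1;
            if (atomic_read(&sdev->device_blocked)) {
                    if (busy)
                            goto out_dec;
                    /* Only unblock once the blocked count drains to zero. */
                    if (atomic_dec_return(&sdev->device_blocked) > 0)
                            goto out_dec;
                    SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
                                     "unblocking device at zero depth\n"));
            }

            if (busy >= sdev->queue_depth)
                    goto out_dec;

            return 1;
    out_dec:
            atomic_dec(&sdev->device_busy);
            return 0;
    }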
1289 * @sdev: scsi device on starget to check.
1292 struct scsi_device *sdev)
1294 struct scsi_target *starget = scsi_target(sdev);
1300 starget->starget_sdev_user != sdev) {
1304 starget->starget_sdev_user = sdev;
1333 list_move_tail(&sdev->starved_entry, &shost->starved_list);
1348 struct scsi_device *sdev,
1373 if (!list_empty(&sdev->starved_entry)) {
1375 if (!list_empty(&sdev->starved_entry))
1376 list_del_init(&sdev->starved_entry);
1386 if (list_empty(&sdev->starved_entry))
1387 list_add_tail(&sdev->starved_entry, &shost->starved_list);
1398 * shost/starget/sdev, since the returned value is not guaranteed and
1408 struct scsi_device *sdev = q->queuedata;
1414 shost = sdev->host;
1422 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1554 struct scsi_device *sdev = req->q->queuedata;
1555 struct Scsi_Host *shost = sdev->host;
1558 scsi_init_command(sdev, cmd);
1583 return scsi_setup_scsi_cmnd(sdev, req);
1585 if (sdev->handler && sdev->handler->prep_fn) {
1586 blk_status_t ret = sdev->handler->prep_fn(sdev, req);
1609 struct scsi_device *sdev = q->queuedata;
1611 atomic_dec(&sdev->device_busy);
1616 struct scsi_device *sdev = q->queuedata;
1618 if (scsi_dev_queue_ready(q, sdev))
1621 atomic_inc(&sdev->restarts);
1624 * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy).
1632 * before reading .device_busy, sdev->device_busy will be observed as
1638 if (unlikely(atomic_read(&sdev->device_busy) == 0 &&
1639 !scsi_device_blocked(sdev)))
1640 blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
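The restarts/device_busy comments above describe the budget path; a sketch of scsi_mq_get_budget() reconstructed from the matched lines, with the ordering shown as smp_mb__after_atomic() (an assumption about which primitive provides the barrier the comment describes):

    static bool scsi_mq_get_budget(struct request_queue *q)
    {
            struct scsi_device *sdev = q->queuedata;

            if (scsi_dev_queue_ready(q, sdev))
                    return true;

            atomic_inc(&sdev->restarts);

            /*
             * Orders atomic_inc(&sdev->restarts) and
             * atomic_read(&sdev->device_busy): ->restarts must be visible
             * before ->device_busy is read, so scsi_run_queue_async()
             * cannot miss the contention.
             */
            smp_mb__after_atomic();

            /*
             * If every in-flight request for this LUN completed before
             * .device_busy was read, sdev->device_busy is observed as zero
             * and nobody is left to rerun the queue, so schedule it here.
             */
            if (unlikely(atomic_read(&sdev->device_busy) == 0 &&
                         !scsi_device_blocked(sdev)))
                    blk_mq_delay_run_hw_queues(sdev->request_queue,
                                               SCSI_QUEUE_DELAY);
            return false;
    }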
1649 struct scsi_device *sdev = q->queuedata;
1650 struct Scsi_Host *shost = sdev->host;
1659 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1660 ret = scsi_device_state_check(sdev, req);
1666 if (!scsi_target_queue_ready(shost, sdev))
1668 if (!scsi_host_queue_ready(q, shost, sdev, cmd))
1681 if (sdev->simple_tags)
1703 if (scsi_target(sdev)->can_queue > 0)
1704 atomic_dec(&scsi_target(sdev)->target_busy);
1712 if (scsi_device_blocked(sdev))
1716 if (unlikely(!scsi_device_online(sdev)))
1727 scsi_run_queue_async(sdev);
1859 struct scsi_device *sdev = q->queuedata;
1860 struct Scsi_Host *shost = sdev->host;
1883 struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
1885 sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
1886 if (IS_ERR(sdev->request_queue))
1889 sdev->request_queue->queuedata = sdev;
1890 __scsi_init_queue(sdev->host, sdev->request_queue);
1891 blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, sdev->request_queue);
1892 return sdev->request_queue;
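The matched lines cover scsi_mq_alloc_queue() almost entirely; a sketch with the error path filled in (returning NULL when blk_mq_init_queue() fails is an assumption):

    struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
    {
            sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
            if (IS_ERR(sdev->request_queue))
                    return NULL;                    /* assumed error return */

            sdev->request_queue->queuedata = sdev;
            __scsi_init_queue(sdev->host, sdev->request_queue);
            blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, sdev->request_queue);
            return sdev->request_queue;
    }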
1932 * scsi_device_from_queue - return sdev associated with a request_queue
1933 * @q: The request queue to return the sdev from
1935 * Return the sdev associated with a request queue or NULL if the
1940 struct scsi_device *sdev = NULL;
1944 sdev = q->queuedata;
1945 if (!sdev || !get_device(&sdev->sdev_gendev))
1946 sdev = NULL;
1948 return sdev;
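Because scsi_device_from_queue() takes a reference via get_device() before returning, the caller owns a reference it must drop. A hypothetical caller sketch (the function name and printed message are illustrative):

    /* Hypothetical caller, not part of the file being searched. */
    static void example_report_lun(struct request_queue *q)
    {
            struct scsi_device *sdev = scsi_device_from_queue(q);

            if (!sdev)
                    return;         /* not a SCSI queue, or device going away */

            sdev_printk(KERN_INFO, sdev, "queue belongs to LUN %llu\n", sdev->lun);

            /* Drop the reference taken by scsi_device_from_queue(). */
            put_device(&sdev->sdev_gendev);
    }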
1990 * @sdev: SCSI device to be queried
2007 scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
2018 if (sdev->use_10_for_ms) {
2057 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
2066 * @sdev: SCSI device to be queried
2080 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
2093 dbd = sdev->set_dbd_for_ms ? 8 : dbd;
2102 use_10_for_ms = sdev->use_10_for_ms || len > 255;
2122 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
2144 sdev->use_10_for_ms = 0;
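A hypothetical caller sketch for the scsi_mode_sense() fragments above, reading the caching mode page; the page code, timeout, and retry values are illustrative assumptions:

    /* Hypothetical caller, not part of the file being searched. */
    static int example_read_caching_page(struct scsi_device *sdev,
                                         unsigned char *buf, int len)
    {
            struct scsi_mode_data data;
            struct scsi_sense_hdr sshdr;

            /* dbd=0, mode page 0x08 (caching), 30s timeout, 3 retries. */
            return scsi_mode_sense(sdev, 0, 0x08, buf, len, 30 * HZ, 3,
                                   &data, &sshdr);
    }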
2188 * @sdev: scsi device to change the state of.
2197 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2207 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2209 if (sdev->removable && scsi_sense_valid(sshdr) &&
2211 sdev->changed = 1;
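A hypothetical caller sketch for scsi_test_unit_ready(); note the side effect matched above, where a removable device that reports a media change gets sdev->changed set. Timeout and retry values are assumptions:

    /* Hypothetical caller, not part of the file being searched. */
    static int example_check_ready(struct scsi_device *sdev)
    {
            struct scsi_sense_hdr sshdr;
            int ret;

            ret = scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr);
            if (ret)
                    sdev_printk(KERN_WARNING, sdev,
                                "TEST UNIT READY failed: 0x%x\n", ret);
            return ret;
    }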
2221 * @sdev: scsi device to change the state of.
2228 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2230 enum scsi_device_state oldstate = sdev->sdev_state;
2332 sdev->offline_already = false;
2333 sdev->sdev_state = state;
2338 sdev_printk(KERN_ERR, sdev,
2349 * @sdev: associated SCSI device
2354 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2364 scsi_rescan_device(&sdev->sdev_gendev);
2392 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2404 struct scsi_device *sdev;
2408 sdev = container_of(work, struct scsi_device, event_work);
2411 if (test_and_clear_bit(evt_type, sdev->pending_events))
2412 sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
2419 spin_lock_irqsave(&sdev->list_lock, flags);
2420 list_splice_init(&sdev->event_list, &event_list);
2421 spin_unlock_irqrestore(&sdev->list_lock, flags);
2429 scsi_evt_emit(sdev, evt);
2437 * @sdev: scsi_device event occurred on
2442 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2450 if (!test_bit(evt->evt_type, sdev->supported_events)) {
2456 spin_lock_irqsave(&sdev->list_lock, flags);
2457 list_add_tail(&evt->node, &sdev->event_list);
2458 schedule_work(&sdev->event_work);
2459 spin_unlock_irqrestore(&sdev->list_lock, flags);
2501 * @sdev: scsi_device event occurred on
2507 void sdev_evt_send_simple(struct scsi_device *sdev,
2512 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2517 sdev_evt_send(sdev, evt);
2523 * @sdev: scsi device to quiesce.
2535 scsi_device_quiesce(struct scsi_device *sdev)
2537 struct request_queue *q = sdev->request_queue;
2545 WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
2547 if (sdev->quiesced_by == current)
2562 mutex_lock(&sdev->state_mutex);
2563 err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2565 sdev->quiesced_by = current;
2568 mutex_unlock(&sdev->state_mutex);
2576 * @sdev: scsi device to resume.
2583 void scsi_device_resume(struct scsi_device *sdev)
2589 mutex_lock(&sdev->state_mutex);
2590 if (sdev->sdev_state == SDEV_QUIESCE)
2591 scsi_device_set_state(sdev, SDEV_RUNNING);
2592 if (sdev->quiesced_by) {
2593 sdev->quiesced_by = NULL;
2594 blk_clear_pm_only(sdev->request_queue);
2596 mutex_unlock(&sdev->state_mutex);
2601 device_quiesce_fn(struct scsi_device *sdev, void *data)
2603 scsi_device_quiesce(sdev);
2614 device_resume_fn(struct scsi_device *sdev, void *data)
2616 scsi_device_resume(sdev);
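The quiesce/resume fragments above are meant to be used as a pair: scsi_device_quiesce() moves the device to SDEV_QUIESCE and holds off normal I/O, and scsi_device_resume() restores SDEV_RUNNING and clears the pm-only queue flag (the blk_clear_pm_only() call matched above is the undo of the quiesce side). A hypothetical caller sketch:

    /* Hypothetical caller, not part of the file being searched. */
    static int example_with_quiesced_device(struct scsi_device *sdev)
    {
            int err;

            err = scsi_device_quiesce(sdev);        /* normal I/O is now held off */
            if (err)
                    return err;

            /* ... issue management commands while the device is quiesced ... */

            scsi_device_resume(sdev);               /* back to SDEV_RUNNING */
            return 0;
    }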
2626 static int __scsi_internal_device_block_nowait(struct scsi_device *sdev)
2628 if (scsi_device_set_state(sdev, SDEV_BLOCK))
2629 return scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2636 void scsi_start_queue(struct scsi_device *sdev)
2642 need_start = sdev->queue_stopped;
2643 sdev->queue_stopped = 0;
2647 blk_mq_unquiesce_queue(sdev->request_queue);
2650 static void scsi_stop_queue(struct scsi_device *sdev, bool nowait)
2656 need_stop = !sdev->queue_stopped;
2657 sdev->queue_stopped = 1;
2662 blk_mq_quiesce_queue_nowait(sdev->request_queue);
2664 blk_mq_quiesce_queue(sdev->request_queue);
2670 * @sdev: device to block
2682 int scsi_internal_device_block_nowait(struct scsi_device *sdev)
2684 int ret = __scsi_internal_device_block_nowait(sdev);
2692 scsi_stop_queue(sdev, true);
2699 * @sdev: device to block
2712 static int scsi_internal_device_block(struct scsi_device *sdev)
2716 mutex_lock(&sdev->state_mutex);
2717 err = __scsi_internal_device_block_nowait(sdev);
2719 scsi_stop_queue(sdev, false);
2720 mutex_unlock(&sdev->state_mutex);
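A sketch of scsi_internal_device_block() assembled from the matched lines; the error check around scsi_stop_queue() is inferred:

    static int scsi_internal_device_block(struct scsi_device *sdev)
    {
            int err;

            mutex_lock(&sdev->state_mutex);
            err = __scsi_internal_device_block_nowait(sdev);
            if (err == 0)
                    scsi_stop_queue(sdev, false);   /* waits for the queue to quiesce */
            mutex_unlock(&sdev->state_mutex);

            return err;
    }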
2727 * @sdev: device to resume
2740 int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
2755 switch (sdev->sdev_state) {
2758 sdev->sdev_state = new_state;
2763 sdev->sdev_state = new_state;
2765 sdev->sdev_state = SDEV_CREATED;
2773 scsi_start_queue(sdev);
2781 * @sdev: device to resume
2793 static int scsi_internal_device_unblock(struct scsi_device *sdev,
2798 mutex_lock(&sdev->state_mutex);
2799 ret = scsi_internal_device_unblock_nowait(sdev, new_state);
2800 mutex_unlock(&sdev->state_mutex);
2806 device_block(struct scsi_device *sdev, void *data)
2810 ret = scsi_internal_device_block(sdev);
2813 dev_name(&sdev->sdev_gendev), ret);
2837 device_unblock(struct scsi_device *sdev, void *data)
2839 scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
2865 struct scsi_device *sdev;
2872 shost_for_each_device(sdev, shost) {
2873 mutex_lock(&sdev->state_mutex);
2874 ret = scsi_internal_device_block_nowait(sdev);
2875 mutex_unlock(&sdev->state_mutex);
2877 scsi_device_put(sdev);
2898 struct scsi_device *sdev;
2901 shost_for_each_device(sdev, shost) {
2902 ret = scsi_internal_device_unblock(sdev, new_state);
2904 scsi_device_put(sdev);
2972 void sdev_disable_disk_events(struct scsi_device *sdev)
2974 atomic_inc(&sdev->disk_events_disable_depth);
2978 void sdev_enable_disk_events(struct scsi_device *sdev)
2980 if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
2982 atomic_dec(&sdev->disk_events_disable_depth);
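The two helpers above maintain a disable depth and are intended to nest; a hypothetical caller sketch:

    /* Hypothetical caller, not part of the file being searched. */
    static void example_without_disk_events(struct scsi_device *sdev)
    {
            sdev_disable_disk_events(sdev);         /* bump disk_events_disable_depth */

            /* ... work that must not race with media change polling ... */

            sdev_enable_disk_events(sdev);          /* drop it again; WARNs on underflow */
    }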
3060 * @sdev: SCSI device
3072 int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
3081 vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
3190 * @sdev: SCSI device
3197 int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
3204 vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
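Both VPD helpers above read the cached device identification page (0x83) under RCU. A hypothetical caller sketch for scsi_vpd_lun_id(), which returns a positive length when an identifier is available; the buffer size and log message are illustrative:

    /* Hypothetical caller, not part of the file being searched. */
    static void example_log_lun_id(struct scsi_device *sdev)
    {
            char id[64];

            if (scsi_vpd_lun_id(sdev, id, sizeof(id)) > 0)
                    sdev_printk(KERN_INFO, sdev, "VPD page 0x83 id: %s\n", id);
    }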