Lines matching defs:shost
Each entry below is a source line number followed by the matching line; the numbers and fragments correspond to the SCSI midlayer's drivers/scsi/scsi_lib.c.
61 int scsi_init_sense_cache(struct Scsi_Host *shost)
268 * shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination
271 * host_failed counter or that it notices the shost state change made by
274 static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
280 if (unlikely(scsi_host_in_recovery(shost))) {
281 unsigned int busy = scsi_host_busy(shost);
283 spin_lock_irqsave(shost->host_lock, flags);
284 if (shost->host_failed || shost->host_eh_scheduled)
285 scsi_eh_wakeup(shost, busy);
286 spin_unlock_irqrestore(shost->host_lock, flags);
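Pieced together, the scsi_dec_host_busy() fragments above read roughly as follows (a reconstruction assuming the surrounding scsi_lib.c context; the RCU read lock pairs with the call_rcu() in scsi_eh_scmd_add() noted in the 268-271 comment, so the error-handler wakeup at 285 cannot be missed):

    static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
    {
        unsigned long flags;

        rcu_read_lock();
        __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
        if (unlikely(scsi_host_in_recovery(shost))) {
            unsigned int busy = scsi_host_busy(shost);

            spin_lock_irqsave(shost->host_lock, flags);
            if (shost->host_failed || shost->host_eh_scheduled)
                scsi_eh_wakeup(shost, busy);
            spin_unlock_irqrestore(shost->host_lock, flags);
        }
        rcu_read_unlock();
    }

The fragments at 293/296 are its caller, scsi_device_unbusy(), which fetches shost from sdev->host and runs this helper on every command completion.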
293 struct Scsi_Host *shost = sdev->host;
296 scsi_dec_host_busy(shost, cmd);
326 struct Scsi_Host *shost = current_sdev->host;
330 spin_lock_irqsave(shost->host_lock, flags);
332 spin_unlock_irqrestore(shost->host_lock, flags);
341 shost->queuecommand_may_block);
343 spin_lock_irqsave(shost->host_lock, flags);
347 spin_unlock_irqrestore(shost->host_lock, flags);
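The 326-347 fragments belong to scsi_single_lun_run(). The pattern worth noting is that queues are run with host_lock dropped; only the starget_sdev_user handoff and the sibling walk hold the lock. A simplified sketch (details approximate):

    static void scsi_single_lun_run(struct scsi_device *current_sdev)
    {
        struct Scsi_Host *shost = current_sdev->host;
        struct scsi_target *starget = scsi_target(current_sdev);
        struct scsi_device *sdev, *tmp;
        unsigned long flags;

        /* Give up single-LUN ownership of the target under host_lock. */
        spin_lock_irqsave(shost->host_lock, flags);
        starget->starget_sdev_user = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        /* Run the current LUN's queue with the lock dropped. */
        blk_mq_run_hw_queues(current_sdev->request_queue,
                             shost->queuecommand_may_block);

        /* Kick sibling LUNs too, dropping the lock around each run. */
        spin_lock_irqsave(shost->host_lock, flags);
        if (!starget->starget_sdev_user) {
            list_for_each_entry_safe(sdev, tmp, &starget->devices,
                                     same_target_siblings) {
                if (sdev == current_sdev || scsi_device_get(sdev))
                    continue;
                spin_unlock_irqrestore(shost->host_lock, flags);
                blk_mq_run_hw_queues(sdev->request_queue, false);
                spin_lock_irqsave(shost->host_lock, flags);
                scsi_device_put(sdev);
            }
        }
        spin_unlock_irqrestore(shost->host_lock, flags);
    }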
370 static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
372 if (atomic_read(&shost->host_blocked) > 0)
374 if (shost->host_self_blocked)
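scsi_host_is_busy() is short enough to fill in completely; the elided lines are just the returns (reconstruction):

    static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
    {
        if (atomic_read(&shost->host_blocked) > 0)
            return true;
        if (shost->host_self_blocked)
            return true;
        return false;
    }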
379 static void scsi_starved_list_run(struct Scsi_Host *shost)
385 spin_lock_irqsave(shost->host_lock, flags);
386 list_splice_init(&shost->starved_list, &starved_list);
392 * As long as shost is accepting commands and we have
401 if (scsi_host_is_busy(shost))
409 &shost->starved_list);
426 spin_unlock_irqrestore(shost->host_lock, flags);
431 spin_lock_irqsave(shost->host_lock, flags);
434 list_splice(&starved_list, &shost->starved_list);
435 spin_unlock_irqrestore(shost->host_lock, flags);
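The 385-435 fragments are the splice-and-requeue idiom: grab the whole starved list under host_lock, drain it while the host keeps accepting commands, and splice the remainder back. A simplified sketch (the real loop also re-parks devices whose target is still busy and takes a queue reference before dropping the lock):

    static void scsi_starved_list_run(struct Scsi_Host *shost)
    {
        LIST_HEAD(starved_list);
        struct scsi_device *sdev;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        list_splice_init(&shost->starved_list, &starved_list);

        while (!list_empty(&starved_list)) {
            /* Stop early once the host stops accepting commands. */
            if (scsi_host_is_busy(shost))
                break;

            sdev = list_first_entry(&starved_list, struct scsi_device,
                                    starved_entry);
            list_del_init(&sdev->starved_entry);

            /* Run this device's queue with the lock dropped. */
            spin_unlock_irqrestore(shost->host_lock, flags);
            blk_mq_run_hw_queues(sdev->request_queue, false);
            spin_lock_irqsave(shost->host_lock, flags);
        }
        /* Put any unprocessed entries back on the host's list. */
        list_splice(&starved_list, &shost->starved_list);
        spin_unlock_irqrestore(shost->host_lock, flags);
    }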
467 void scsi_run_host_queues(struct Scsi_Host *shost)
471 shost_for_each_device(sdev, shost)
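scsi_run_host_queues() itself is a two-liner; filling in the body (reconstruction):

    void scsi_run_host_queues(struct Scsi_Host *shost)
    {
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
            scsi_run_queue(sdev->request_queue);
    }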
1283 static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1290 spin_lock_irq(shost->host_lock);
1293 spin_unlock_irq(shost->host_lock);
1297 spin_unlock_irq(shost->host_lock);
1324 spin_lock_irq(shost->host_lock);
1325 list_move_tail(&sdev->starved_entry, &shost->starved_list);
1326 spin_unlock_irq(shost->host_lock);
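The 1290-1326 fragments implement scsi_target_queue_ready(): the host_lock section gates single-LUN targets on starget_sdev_user, and the starved path at 1325 parks the device for a later retry by scsi_starved_list_run(). A simplified sketch (target_blocked handling elided):

    static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
                                              struct scsi_device *sdev)
    {
        struct scsi_target *starget = scsi_target(sdev);
        unsigned int busy;

        /* Single-LUN targets only admit one sdev at a time. */
        if (starget->single_lun) {
            spin_lock_irq(shost->host_lock);
            if (starget->starget_sdev_user &&
                starget->starget_sdev_user != sdev) {
                spin_unlock_irq(shost->host_lock);
                return 0;
            }
            starget->starget_sdev_user = sdev;
            spin_unlock_irq(shost->host_lock);
        }

        if (starget->can_queue <= 0)
            return 1;

        busy = atomic_inc_return(&starget->target_busy) - 1;
        if (busy >= starget->can_queue)
            goto starved;

        return 1;

    starved:
        /* Park the device on the host's starved list for a retry. */
        spin_lock_irq(shost->host_lock);
        list_move_tail(&sdev->starved_entry, &shost->starved_list);
        spin_unlock_irq(shost->host_lock);
        atomic_dec(&starget->target_busy);
        return 0;
    }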
1334 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1339 struct Scsi_Host *shost,
1343 if (atomic_read(&shost->host_blocked) > 0) {
1344 if (scsi_host_busy(shost) > 0)
1350 if (atomic_dec_return(&shost->host_blocked) > 0)
1354 shost_printk(KERN_INFO, shost,
1358 if (shost->host_self_blocked)
1363 spin_lock_irq(shost->host_lock);
1366 spin_unlock_irq(shost->host_lock);
1374 spin_lock_irq(shost->host_lock);
1376 list_add_tail(&sdev->starved_entry, &shost->starved_list);
1377 spin_unlock_irq(shost->host_lock);
1379 scsi_dec_host_busy(shost, cmd);
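The fragments at 1343-1379 assemble into scsi_host_queue_ready(); the host_blocked counter is decremented once per dispatch attempt after the host has drained, so a blocked host absorbs that many no-op passes before unblocking. Reconstructed roughly (the log-level wrapper around the printk omitted):

    static inline int scsi_host_queue_ready(struct request_queue *q,
                                            struct Scsi_Host *shost,
                                            struct scsi_device *sdev,
                                            struct scsi_cmnd *cmd)
    {
        if (atomic_read(&shost->host_blocked) > 0) {
            /* Only start counting down once in-flight commands drain. */
            if (scsi_host_busy(shost) > 0)
                goto starved;
            /* Unblock after host_blocked iterates to zero. */
            if (atomic_dec_return(&shost->host_blocked) > 0)
                goto out_dec;
            shost_printk(KERN_INFO, shost,
                         "unblocking host at zero depth\n");
        }

        if (shost->host_self_blocked)
            goto starved;

        /* We're OK to process the command, so we can't be starved. */
        if (!list_empty(&sdev->starved_entry)) {
            spin_lock_irq(shost->host_lock);
            if (!list_empty(&sdev->starved_entry))
                list_del_init(&sdev->starved_entry);
            spin_unlock_irq(shost->host_lock);
        }

        __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
        return 1;

    starved:
        spin_lock_irq(shost->host_lock);
        if (list_empty(&sdev->starved_entry))
            list_add_tail(&sdev->starved_entry, &shost->starved_list);
        spin_unlock_irq(shost->host_lock);
    out_dec:
        scsi_dec_host_busy(shost, cmd);
        return 0;
    }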
1387 * shost/starget/sdev, since the returned value is not guaranteed and
1398 struct Scsi_Host *shost;
1403 shost = sdev->host;
1411 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
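Fragments 1398-1411 form scsi_mq_lld_busy(), the busy-state export the 1387 comment is hedging about; reconstructed:

    static bool scsi_mq_lld_busy(struct request_queue *q)
    {
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;

        if (blk_queue_dying(q))
            return false;

        shost = sdev->host;

        /* Lockless by design: the result is advisory and may already
         * be stale by the time the caller acts on it. */
        if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
            return true;

        return false;
    }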
1538 static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
1540 return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
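The truncated multiplicand at 1540 is sizeof(struct scatterlist); the whole helper reads (reconstruction):

    static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
    {
        return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
            sizeof(struct scatterlist);
    }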
1548 struct Scsi_Host *shost = sdev->host;
1572 if (!shost->hostt->init_cmd_priv)
1573 memset(cmd + 1, 0, shost->hostt->cmd_size);
1581 sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
1584 if (scsi_host_get_prot(shost)) {
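Fragments 1572-1584 imply the per-request payload layout that the rest of scsi_lib.c relies on; sketched as a comment (boundaries approximate, the real code rounds for alignment):

    /*
     * Per-request payload layout implied by the fragments above:
     *
     *   +----------------------+  <- blk_mq_rq_to_pdu(rq)
     *   | struct scsi_cmnd     |
     *   +----------------------+  <- cmd + 1: LLD private area of
     *   | hostt->cmd_size bytes|     (zeroed here unless the template
     *   |                      |      provides init_cmd_priv)
     *   +----------------------+  <- (void *)cmd + sizeof(*cmd) + cmd_size
     *   | inline scatterlist   |     scsi_mq_inline_sgl_size(shost) bytes
     *   +----------------------+
     *   | prot_sdb (+ sgl)     |     only if scsi_host_get_prot(shost)
     *   +----------------------+
     */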
1713 struct Scsi_Host *shost = sdev->host;
1731 if (!scsi_target_queue_ready(shost, sdev))
1733 if (unlikely(scsi_host_in_recovery(shost))) {
1738 if (!scsi_host_queue_ready(q, shost, sdev, cmd))
1771 scsi_dec_host_busy(shost, cmd);
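The 1713-1771 fragments are scsi_queue_rq()'s admission ladder. Order matters: target first, then host, and any failure after scsi_host_queue_ready() succeeds must release the host slot via scsi_dec_host_busy() (fragment 1771). A condensed, hypothetical helper showing just that ladder (scsi_queue_rq_admission_sketch is an illustrative name; the real code also unwinds the target_busy count and the device budget):

    static blk_status_t scsi_queue_rq_admission_sketch(struct request_queue *q,
                                                       struct scsi_cmnd *cmd)
    {
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost = sdev->host;

        /* 1. Per-target admission (single-LUN ownership, target_busy). */
        if (!scsi_target_queue_ready(shost, sdev))
            return BLK_STS_RESOURCE;

        /* 2. A host in error recovery takes no new regular commands. */
        if (unlikely(scsi_host_in_recovery(shost)))
            return BLK_STS_RESOURCE;

        /* 3. Per-host admission; on success the command holds a host
         * "busy" slot, which any later failure path must give back
         * with scsi_dec_host_busy(shost, cmd). */
        if (!scsi_host_queue_ready(q, shost, sdev, cmd))
            return BLK_STS_RESOURCE;

        return BLK_STS_OK;
    }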
1812 struct Scsi_Host *shost = set->driver_data;
1822 if (scsi_host_get_prot(shost)) {
1824 shost->hostt->cmd_size;
1825 cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
1828 if (shost->hostt->init_cmd_priv) {
1829 ret = shost->hostt->init_cmd_priv(shost, cmd);
1840 struct Scsi_Host *shost = set->driver_data;
1843 if (shost->hostt->exit_cmd_priv)
1844 shost->hostt->exit_cmd_priv(shost, cmd);
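Fragments 1812-1844 pair up as the tag-set init/exit hooks: init wires prot_sdb into the per-request payload and then defers to the LLD's init_cmd_priv, and exit mirrors it. Reconstructed (sense-buffer management elided):

    static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
                                    unsigned int hctx_idx, unsigned int numa_node)
    {
        struct Scsi_Host *shost = set->driver_data;
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
        struct scatterlist *sg;
        int ret = 0;

        /* Protection metadata lives right behind the inline sgl. */
        if (scsi_host_get_prot(shost)) {
            sg = (void *)cmd + sizeof(struct scsi_cmnd) +
                shost->hostt->cmd_size;
            cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
        }

        /* Let the LLD set up its private area behind the scsi_cmnd. */
        if (shost->hostt->init_cmd_priv)
            ret = shost->hostt->init_cmd_priv(shost, cmd);

        return ret;
    }

    static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
                                     unsigned int hctx_idx)
    {
        struct Scsi_Host *shost = set->driver_data;
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

        if (shost->hostt->exit_cmd_priv)
            shost->hostt->exit_cmd_priv(shost, cmd);
    }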
1851 struct Scsi_Host *shost = hctx->driver_data;
1853 if (shost->hostt->mq_poll)
1854 return shost->hostt->mq_poll(shost, hctx->queue_num);
1862 struct Scsi_Host *shost = data;
1864 hctx->driver_data = shost;
1870 struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
1872 if (shost->hostt->map_queues)
1873 return shost->hostt->map_queues(shost);
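Fragments 1851-1873 are thin trampolines from blk-mq into the host template; scsi_init_hctx() stashes shost in hctx->driver_data so the per-hctx hooks can reach it. Reconstructed:

    static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                              unsigned int hctx_idx)
    {
        struct Scsi_Host *shost = data;

        /* Per-hctx hooks find the host (and hostt) through here. */
        hctx->driver_data = shost;
        return 0;
    }

    static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
    {
        struct Scsi_Host *shost = hctx->driver_data;

        if (shost->hostt->mq_poll)
            return shost->hostt->mq_poll(shost, hctx->queue_num);
        return 0;
    }

    static void scsi_map_queues(struct blk_mq_tag_set *set)
    {
        struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);

        if (shost->hostt->map_queues)
            return shost->hostt->map_queues(shost);
        blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
    }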
1877 void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
1879 struct device *dev = shost->dma_dev;
1884 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1887 if (scsi_host_prot_dma(shost)) {
1888 shost->sg_prot_tablesize =
1889 min_not_zero(shost->sg_prot_tablesize,
1891 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
1892 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1895 blk_queue_max_hw_sectors(q, shost->max_sectors);
1896 blk_queue_segment_boundary(q, shost->dma_boundary);
1897 dma_set_seg_boundary(dev, shost->dma_boundary);
1899 blk_queue_max_segment_size(q, shost->max_segment_size);
1900 blk_queue_virt_boundary(q, shost->virt_boundary_mask);
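Every limit __scsi_init_queue() applies comes from fields the LLD fills in (directly or via its scsi_host_template) before scsi_add_host(). A hypothetical template illustrating which field feeds which limit (names and values illustrative only):

    static struct scsi_host_template example_sht = {    /* hypothetical */
        .name               = "example",
        .sg_tablesize       = 128,          /* -> blk_queue_max_segments()     */
        .max_sectors        = 1024,         /* -> blk_queue_max_hw_sectors()   */
        .dma_boundary       = 0xffffffff,   /* -> blk_queue_segment_boundary() */
        .max_segment_size   = 65536,        /* -> blk_queue_max_segment_size() */
        .virt_boundary_mask = 0,            /* -> blk_queue_virt_boundary()    */
    };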
1937 struct Scsi_Host *shost = hctx->driver_data;
1939 shost->hostt->commit_rqs(shost, hctx->queue_num);
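scsi_commit_rqs() is complete apart from its first line; note it is only installed in the ops table when the template actually provides commit_rqs (see 1976 below). Reconstructed:

    static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
    {
        struct Scsi_Host *shost = hctx->driver_data;

        shost->hostt->commit_rqs(shost, hctx->queue_num);
    }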
1963 int scsi_mq_setup_tags(struct Scsi_Host *shost)
1966 struct blk_mq_tag_set *tag_set = &shost->tag_set;
1969 scsi_mq_inline_sgl_size(shost));
1970 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
1971 if (scsi_host_get_prot(shost))
1976 if (shost->hostt->commit_rqs)
1980 tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
1981 tag_set->nr_maps = shost->nr_maps ? : 1;
1982 tag_set->queue_depth = shost->can_queue;
1984 tag_set->numa_node = dev_to_node(shost->dma_dev);
1987 BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
1988 if (shost->queuecommand_may_block)
1990 tag_set->driver_data = shost;
1991 if (shost->host_tagset)
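The 1963-1991 fragments assemble into scsi_mq_setup_tags(), which sizes the per-request payload and populates the tag set before handing it to blk-mq. Reconstructed approximately (ops-table and flag names as in recent kernels):

    int scsi_mq_setup_tags(struct Scsi_Host *shost)
    {
        unsigned int cmd_size, sgl_size;
        struct blk_mq_tag_set *tag_set = &shost->tag_set;

        /* Payload: scsi_cmnd + LLD private area + inline sgl (+ prot). */
        sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
                         scsi_mq_inline_sgl_size(shost));
        cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
        if (scsi_host_get_prot(shost))
            cmd_size += sizeof(struct scsi_data_buffer) +
                sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;

        memset(tag_set, 0, sizeof(*tag_set));
        if (shost->hostt->commit_rqs)
            tag_set->ops = &scsi_mq_ops;
        else
            tag_set->ops = &scsi_mq_ops_no_commit;
        tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
        tag_set->nr_maps = shost->nr_maps ? : 1;
        tag_set->queue_depth = shost->can_queue;
        tag_set->cmd_size = cmd_size;
        tag_set->numa_node = dev_to_node(shost->dma_dev);
        tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
        tag_set->flags |=
            BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
        if (shost->queuecommand_may_block)
            tag_set->flags |= BLK_MQ_F_BLOCKING;
        tag_set->driver_data = shost;
        if (shost->host_tagset)
            tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;

        return blk_mq_alloc_tag_set(tag_set);
    }

The matching teardown is the kref release at 1999-2003, scsi_mq_free_tags(), which frees the tag set and completes shost->tagset_freed.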
1999 struct Scsi_Host *shost = container_of(kref, typeof(*shost),
2002 blk_mq_free_tag_set(&shost->tag_set);
2003 complete(&shost->tagset_freed);
2037 * @shost: host in question
2042 void scsi_block_requests(struct Scsi_Host *shost)
2044 shost->host_self_blocked = 1;
2051 * @shost: host in question
2058 void scsi_unblock_requests(struct Scsi_Host *shost)
2060 shost->host_self_blocked = 0;
2061 scsi_run_host_queues(shost);
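These two exports are the driver-facing self-block mechanism: scsi_block_requests() just sets host_self_blocked, and scsi_unblock_requests() clears it and reruns every queue so nothing stays stalled. A hypothetical LLD usage sketch (the example_* names are illustrative):

    /* Hypothetical LLD snippet: fence the midlayer off while the HBA resets. */
    static void example_reset_begin(struct Scsi_Host *shost)
    {
        scsi_block_requests(shost);     /* sets host_self_blocked */
        /* ... kick off the hardware reset ... */
    }

    static void example_reset_done(struct Scsi_Host *shost)
    {
        /* ... hardware back up ... */
        scsi_unblock_requests(shost);   /* clears the flag, reruns queues */
    }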
2905 * @shost: the Scsi_Host to which this device belongs
2915 scsi_block_targets(struct Scsi_Host *shost, struct device *dev)
2919 blk_mq_wait_quiesce_done(&shost->tag_set);
2951 * @shost: device to block
2959 scsi_host_block(struct Scsi_Host *shost)
2968 shost_for_each_device(sdev, shost) {
2979 blk_mq_wait_quiesce_done(&shost->tag_set);
2986 scsi_host_unblock(struct Scsi_Host *shost, int new_state)
2991 shost_for_each_device(sdev, shost) {
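The 2959-2991 fragments give the heavier, per-device variants: scsi_host_block() moves every child device to SDEV_BLOCK and then waits once for in-flight dispatches, and scsi_host_unblock() walks the devices back to new_state. (scsi_block_targets() at 2915-2919 does the same block-nowait walk for one target's devices, also finishing with the tag-set quiesce wait.) Reconstructed approximately:

    int scsi_host_block(struct Scsi_Host *shost)
    {
        struct scsi_device *sdev;
        int ret;

        /* Block each LUN without waiting per device... */
        shost_for_each_device(sdev, shost) {
            mutex_lock(&sdev->state_mutex);
            ret = scsi_internal_device_block_nowait(sdev);
            mutex_unlock(&sdev->state_mutex);
            if (ret) {
                scsi_device_put(sdev);
                return ret;
            }
        }

        /* ...then wait once for all ongoing scsi_queue_rq() calls. */
        blk_mq_wait_quiesce_done(&shost->tag_set);

        return 0;
    }

    int scsi_host_unblock(struct Scsi_Host *shost, int new_state)
    {
        struct scsi_device *sdev;
        int ret = 0;

        shost_for_each_device(sdev, shost) {
            ret = scsi_internal_device_unblock(sdev, new_state);
            if (ret) {
                scsi_device_put(sdev);
                break;
            }
        }
        return ret;
    }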