Lines matching defs:shost (drivers/scsi/scsi_lib.c, a pre-v5.13 tree: shost->unchecked_isa_dma is still present)
81 int scsi_init_sense_cache(struct Scsi_Host *shost)
87 cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
91 if (shost->unchecked_isa_dma) {
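Lines 81-91 are scsi_init_sense_cache(): the sense-buffer cache is picked by scsi_select_sense_cache() based on shost->unchecked_isa_dma. A plausible shape of that selector on a pre-v5.13 tree (the two cache variable names are assumptions):

static inline struct kmem_cache *
scsi_select_sense_cache(bool unchecked_isa_dma)
{
        /* ISA-DMA hosts need sense buffers from a 24-bit-addressable slab */
        return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache;
}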
300 * shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination
303 * host_failed counter or that it notices the shost state change made by
306 static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
312 if (unlikely(scsi_host_in_recovery(shost))) {
313 unsigned int busy = scsi_host_busy(shost);
315 spin_lock_irqsave(shost->host_lock, flags);
316 if (shost->host_failed || shost->host_eh_scheduled)
317 scsi_eh_wakeup(shost, busy);
318 spin_unlock_irqrestore(shost->host_lock, flags);
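Lines 300-318 are scsi_dec_host_busy(): drop the host's busy accounting and, only when error handling is pending, take host_lock to wake the EH thread. A hedged reconstruction of the body these fragments imply (the rcu_read_lock() pairing and the SCMD_STATE_INFLIGHT clear follow from the call_rcu() comment at line 300 but are assumptions):

static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
        unsigned long flags;

        rcu_read_lock();
        __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);  /* assumed */
        if (unlikely(scsi_host_in_recovery(shost))) {
                unsigned int busy = scsi_host_busy(shost);

                spin_lock_irqsave(shost->host_lock, flags);
                if (shost->host_failed || shost->host_eh_scheduled)
                        scsi_eh_wakeup(shost, busy);
                spin_unlock_irqrestore(shost->host_lock, flags);
        }
        rcu_read_unlock();
}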
325 struct Scsi_Host *shost = sdev->host;
328 scsi_dec_host_busy(shost, cmd);
350 struct Scsi_Host *shost = current_sdev->host;
355 spin_lock_irqsave(shost->host_lock, flags);
357 spin_unlock_irqrestore(shost->host_lock, flags);
367 spin_lock_irqsave(shost->host_lock, flags);
377 spin_unlock_irqrestore(shost->host_lock, flags);
379 spin_lock_irqsave(shost->host_lock, flags);
384 spin_unlock_irqrestore(shost->host_lock, flags);
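Lines 350-384 come from the single-LUN runner: host_lock is held only for target bookkeeping and is dropped around every queue run. A hedged sketch of that drop-and-retake pattern (blk_mq_run_hw_queues(), the starget_sdev_user field, and the sibling walk are assumptions about the elided lines):

struct scsi_target *starget = scsi_target(current_sdev);
struct scsi_device *sdev, *tmp;
unsigned long flags;

spin_lock_irqsave(shost->host_lock, flags);
starget->starget_sdev_user = NULL;
spin_unlock_irqrestore(shost->host_lock, flags);

blk_mq_run_hw_queues(current_sdev->request_queue, true);

spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry_safe(sdev, tmp, &starget->devices, same_target_siblings) {
        if (sdev == current_sdev)
                continue;
        if (scsi_device_get(sdev))      /* pin before dropping the lock */
                continue;
        spin_unlock_irqrestore(shost->host_lock, flags);
        blk_mq_run_hw_queues(sdev->request_queue, false);
        spin_lock_irqsave(shost->host_lock, flags);
        scsi_device_put(sdev);
}
spin_unlock_irqrestore(shost->host_lock, flags);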
407 static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
409 if (atomic_read(&shost->host_blocked) > 0)
411 if (shost->host_self_blocked)
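Lines 407-411 give the whole predicate except the return statements; filling in the obvious ones:

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
        if (atomic_read(&shost->host_blocked) > 0)
                return true;
        if (shost->host_self_blocked)
                return true;
        return false;
}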
416 static void scsi_starved_list_run(struct Scsi_Host *shost)
422 spin_lock_irqsave(shost->host_lock, flags);
423 list_splice_init(&shost->starved_list, &starved_list);
429 * As long as shost is accepting commands and we have
438 if (scsi_host_is_busy(shost))
446 &shost->starved_list);
463 spin_unlock_irqrestore(shost->host_lock, flags);
468 spin_lock_irqsave(shost->host_lock, flags);
471 list_splice(&starved_list, &shost->starved_list);
472 spin_unlock_irqrestore(shost->host_lock, flags);
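Lines 416-472 show the splice-drain idiom: detach the whole starved list under host_lock, service entries one device at a time with the lock dropped, then re-splice whatever is left. A condensed sketch (the per-device queue run and its reference counting are elided in the listing and only hinted at here):

static void scsi_starved_list_run(struct Scsi_Host *shost)
{
        LIST_HEAD(starved_list);
        struct scsi_device *sdev;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        list_splice_init(&shost->starved_list, &starved_list);

        while (!list_empty(&starved_list)) {
                /* as long as shost is accepting commands, keep draining */
                if (scsi_host_is_busy(shost))
                        break;

                sdev = list_entry(starved_list.next,
                                  struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);

                spin_unlock_irqrestore(shost->host_lock, flags);
                /* run sdev's queue here with host_lock dropped (elided) */
                spin_lock_irqsave(shost->host_lock, flags);
        }
        /* anything not serviced goes back on the host's list */
        list_splice(&starved_list, &shost->starved_list);
        spin_unlock_irqrestore(shost->host_lock, flags);
}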
503 void scsi_run_host_queues(struct Scsi_Host *shost)
507 shost_for_each_device(sdev, shost)
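Line 507's shost_for_each_device() loop is essentially the whole function; a plausible reconstruction (scsi_run_queue() as the loop body is an assumption):

void scsi_run_host_queues(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
                scsi_run_queue(sdev->request_queue);
}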
1291 static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1298 spin_lock_irq(shost->host_lock);
1301 spin_unlock_irq(shost->host_lock);
1305 spin_unlock_irq(shost->host_lock);
1332 spin_lock_irq(shost->host_lock);
1333 list_move_tail(&sdev->starved_entry, &shost->starved_list);
1334 spin_unlock_irq(shost->host_lock);
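Lines 1291-1334 are scsi_target_queue_ready(). The lock/unlock pairs at 1298-1305 gate single-LUN targets, and 1332-1334 park the device when the target is saturated. A hedged fragment for the single-LUN gate (the starget_sdev_user handshake is an assumption about the elided lines):

if (starget->single_lun) {
        spin_lock_irq(shost->host_lock);
        if (starget->starget_sdev_user &&
            starget->starget_sdev_user != sdev) {
                spin_unlock_irq(shost->host_lock);
                return 0;       /* another LUN owns the target right now */
        }
        starget->starget_sdev_user = sdev;
        spin_unlock_irq(shost->host_lock);
}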
1342 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1347 struct Scsi_Host *shost,
1351 if (scsi_host_in_recovery(shost))
1354 if (atomic_read(&shost->host_blocked) > 0) {
1355 if (scsi_host_busy(shost) > 0)
1361 if (atomic_dec_return(&shost->host_blocked) > 0)
1365 shost_printk(KERN_INFO, shost,
1369 if (shost->host_self_blocked)
1374 spin_lock_irq(shost->host_lock);
1377 spin_unlock_irq(shost->host_lock);
1385 spin_lock_irq(shost->host_lock);
1387 list_add_tail(&sdev->starved_entry, &shost->starved_list);
1388 spin_unlock_irq(shost->host_lock);
1390 scsi_dec_host_busy(shost, cmd);
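Lines 1342-1390 are the host admission check: refuse during recovery, let host_blocked decay to zero only once the host has drained, honor host_self_blocked, and on any refusal park the device on the starved list and give back the busy count. A hedged condensation (the starved_entry del/add details and the exact log text are assumptions):

static inline int scsi_host_queue_ready(struct request_queue *q,
                                        struct Scsi_Host *shost,
                                        struct scsi_device *sdev,
                                        struct scsi_cmnd *cmd)
{
        if (scsi_host_in_recovery(shost))
                return 0;

        if (atomic_read(&shost->host_blocked) > 0) {
                if (scsi_host_busy(shost) > 0)
                        goto starved;
                /* unblock only after host_blocked counts down to zero */
                if (atomic_dec_return(&shost->host_blocked) > 0)
                        goto starved;
                SCSI_LOG_MLQUEUE(3, shost_printk(KERN_INFO, shost,
                                 "unblocking host at zero depth\n"));
        }

        if (shost->host_self_blocked)
                goto starved;

        /* we may queue, so we cannot be starved: drop off the list */
        if (!list_empty(&sdev->starved_entry)) {
                spin_lock_irq(shost->host_lock);
                if (!list_empty(&sdev->starved_entry))
                        list_del_init(&sdev->starved_entry);
                spin_unlock_irq(shost->host_lock);
        }
        return 1;

starved:
        spin_lock_irq(shost->host_lock);
        if (list_empty(&sdev->starved_entry))
                list_add_tail(&sdev->starved_entry, &shost->starved_list);
        spin_unlock_irq(shost->host_lock);
        scsi_dec_host_busy(shost, cmd);
        return 0;
}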
1398 * shost/starget/sdev, since the returned value is not guaranteed and
1409 struct Scsi_Host *shost;
1414 shost = sdev->host;
1422 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1545 static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
1547 return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
1555 struct Scsi_Host *shost = sdev->host;
1568 sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
1571 if (scsi_host_get_prot(shost)) {
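Lines 1545-1571 imply the per-command memory layout: the inline scatterlist sits right after struct scsi_cmnd plus the LLD's private area, and the protection SDB follows it when the host does DIF/DIX (compare lines 1758-1761 below). Completing the size helper from line 1547:

static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
{
        return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
                sizeof(struct scatterlist);
}

/*
 * Layout implied by lines 1568 and 1758-1761:
 *   | struct scsi_cmnd | hostt->cmd_size | inline SGL | inline prot SDB |
 */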
1650 struct Scsi_Host *shost = sdev->host;
1666 if (!scsi_target_queue_ready(shost, sdev))
1668 if (!scsi_host_queue_ready(q, shost, sdev, cmd))
1701 scsi_dec_host_busy(shost, cmd);
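Lines 1650-1701 are scsi_queue_rq()'s admission ladder plus its unwind: target readiness first, then host readiness, and scsi_dec_host_busy() to return the host count if a later step fails. A fragment of the assumed control flow (the label names are guesses):

if (!scsi_target_queue_ready(shost, sdev))
        goto out_put_budget;
if (!scsi_host_queue_ready(q, shost, sdev, cmd))
        goto out_dec_target_busy;

/* ... if the LLD later refuses the command, the count taken by
 * scsi_host_queue_ready() is dropped again (line 1701): */
scsi_dec_host_busy(shost, cmd);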
1744 struct Scsi_Host *shost = set->driver_data;
1745 const bool unchecked_isa_dma = shost->unchecked_isa_dma;
1758 if (scsi_host_get_prot(shost)) {
1760 shost->hostt->cmd_size;
1761 cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
1764 if (shost->hostt->init_cmd_priv) {
1765 ret = shost->hostt->init_cmd_priv(shost, cmd);
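Lines 1744-1765 come from the tag set's init_request hook: allocate a sense buffer from the DMA-appropriate cache, point prot_sdb past the inline SGL, and let the LLD initialize its private area. A hedged sketch (scsi_alloc_sense_buffer()/scsi_free_sense_buffer() and the error unwind are assumptions):

static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
                                unsigned int hctx_idx, unsigned int numa_node)
{
        struct Scsi_Host *shost = set->driver_data;
        const bool unchecked_isa_dma = shost->unchecked_isa_dma;
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
        struct scatterlist *sg;
        int ret = 0;

        cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma,
                                                    GFP_KERNEL, numa_node);
        if (!cmd->sense_buffer)
                return -ENOMEM;

        if (scsi_host_get_prot(shost)) {
                sg = (void *)cmd + sizeof(struct scsi_cmnd) +
                        shost->hostt->cmd_size;
                cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
        }

        if (shost->hostt->init_cmd_priv) {
                ret = shost->hostt->init_cmd_priv(shost, cmd);
                if (ret < 0)
                        scsi_free_sense_buffer(unchecked_isa_dma,
                                               cmd->sense_buffer);
        }
        return ret;
}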
1777 struct Scsi_Host *shost = set->driver_data;
1780 if (shost->hostt->exit_cmd_priv)
1781 shost->hostt->exit_cmd_priv(shost, cmd);
1788 struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
1790 if (shost->hostt->map_queues)
1791 return shost->hostt->map_queues(shost);
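Lines 1788-1791: map_queues defers to the LLD when it provides a mapping; the natural fallback is the block layer's default (the blk_mq_map_queues() call is an assumption about the elided line):

static int scsi_map_queues(struct blk_mq_tag_set *set)
{
        struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);

        if (shost->hostt->map_queues)
                return shost->hostt->map_queues(shost);
        return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}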
1795 void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
1797 struct device *dev = shost->dma_dev;
1802 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1805 if (scsi_host_prot_dma(shost)) {
1806 shost->sg_prot_tablesize =
1807 min_not_zero(shost->sg_prot_tablesize,
1809 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
1810 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1814 shost->max_sectors = min_t(unsigned int, shost->max_sectors,
1817 blk_queue_max_hw_sectors(q, shost->max_sectors);
1818 if (shost->unchecked_isa_dma)
1820 blk_queue_segment_boundary(q, shost->dma_boundary);
1821 dma_set_seg_boundary(dev, shost->dma_boundary);
1823 blk_queue_max_segment_size(q, shost->max_segment_size);
1824 blk_queue_virt_boundary(q, shost->virt_boundary_mask);
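Lines 1795-1824 propagate host capabilities into the request_queue limits. A condensed sketch, with the clamp behind lines 1814-1817 spelled out (dma_max_mapping_size() as the clamp source and the BLK_BOUNCE_ISA bounce limit are assumptions about the elided lines):

blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
                                SG_MAX_SEGMENTS));

if (scsi_host_prot_dma(shost)) {
        shost->sg_prot_tablesize =
                min_not_zero(shost->sg_prot_tablesize,
                             (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
        BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
        blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
}

if (dev->dma_mask)
        shost->max_sectors = min_t(unsigned int, shost->max_sectors,
                        dma_max_mapping_size(dev) >> SECTOR_SHIFT);
blk_queue_max_hw_sectors(q, shost->max_sectors);
if (shost->unchecked_isa_dma)
        blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
blk_queue_segment_boundary(q, shost->dma_boundary);
dma_set_seg_boundary(dev, shost->dma_boundary);

blk_queue_max_segment_size(q, shost->max_segment_size);
blk_queue_virt_boundary(q, shost->virt_boundary_mask);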
1860 struct Scsi_Host *shost = sdev->host;
1862 shost->hostt->commit_rqs(shost, hctx->queue_num);
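Lines 1860-1862: commit_rqs is a thin trampoline into the LLD. A plausible full body (deriving sdev from hctx->queue is an assumption):

static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
        struct scsi_device *sdev = hctx->queue->queuedata;
        struct Scsi_Host *shost = sdev->host;

        shost->hostt->commit_rqs(shost, hctx->queue_num);
}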
1895 int scsi_mq_setup_tags(struct Scsi_Host *shost)
1898 struct blk_mq_tag_set *tag_set = &shost->tag_set;
1901 scsi_mq_inline_sgl_size(shost));
1902 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
1903 if (scsi_host_get_prot(shost))
1908 if (shost->hostt->commit_rqs)
1912 tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
1913 tag_set->queue_depth = shost->can_queue;
1918 BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
1919 tag_set->driver_data = shost;
1920 if (shost->host_tagset)
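Lines 1895-1920 size the blk-mq PDU so that the command, the LLD's private data, the inline SGL, and (for protection-capable hosts) the prot buffer all come out of one allocation, then fill in the tag set. A hedged condensation (the ops selection, the NUMA node, flag names such as BLK_MQ_F_TAG_HSHARED, and the closing blk_mq_alloc_tag_set() call are assumptions about the elided lines):

int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
        unsigned int cmd_size, sgl_size;
        struct blk_mq_tag_set *tag_set = &shost->tag_set;

        sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
                         scsi_mq_inline_sgl_size(shost));
        cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
        if (scsi_host_get_prot(shost))
                cmd_size += sizeof(struct scsi_data_buffer) +
                        sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;

        memset(tag_set, 0, sizeof(*tag_set));
        tag_set->ops = shost->hostt->commit_rqs ? &scsi_mq_ops
                                                : &scsi_mq_ops_no_commit;
        tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
        tag_set->queue_depth = shost->can_queue;
        tag_set->cmd_size = cmd_size;
        tag_set->numa_node = NUMA_NO_NODE;
        tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
                BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
        tag_set->driver_data = shost;
        if (shost->host_tagset)
                tag_set->flags |= BLK_MQ_F_TAG_HSHARED;

        return blk_mq_alloc_tag_set(tag_set);
}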
1926 void scsi_mq_destroy_tags(struct Scsi_Host *shost)
1928 blk_mq_free_tag_set(&shost->tag_set);
1954 * @shost: host in question
1959 void scsi_block_requests(struct Scsi_Host *shost)
1961 shost->host_self_blocked = 1;
1968 * @shost: host in question
1975 void scsi_unblock_requests(struct Scsi_Host *shost)
1977 shost->host_self_blocked = 0;
1978 scsi_run_host_queues(shost);
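Lines 1954-1978: the pair is symmetric, and the unblock side must kick the queues so commands parked while self-blocked get another chance. Likely full bodies:

void scsi_block_requests(struct Scsi_Host *shost)
{
        shost->host_self_blocked = 1;
}

void scsi_unblock_requests(struct Scsi_Host *shost)
{
        shost->host_self_blocked = 0;
        scsi_run_host_queues(shost);    /* re-run everything we parked */
}

host_self_blocked is exactly the flag consulted at lines 411 and 1369 above, which is why the setters need no further locking.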
2863 scsi_host_block(struct Scsi_Host *shost)
2872 shost_for_each_device(sdev, shost) {
2886 WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING);
2896 scsi_host_unblock(struct Scsi_Host *shost, int new_state)
2901 shost_for_each_device(sdev, shost) {
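Lines 2863-2901 block or unblock every device on the host; the WARN_ON_ONCE at 2886 records why one RCU grace period is enough (SCSI never sets BLK_MQ_F_BLOCKING on its tag set). A hedged sketch (the per-device helpers, state_mutex, and the synchronize_rcu() placement are assumptions about the elided lines):

int scsi_host_block(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;
        int ret = 0;

        shost_for_each_device(sdev, shost) {
                mutex_lock(&sdev->state_mutex);
                ret = scsi_internal_device_block_nowait(sdev);
                mutex_unlock(&sdev->state_mutex);
                if (ret) {
                        scsi_device_put(sdev);
                        break;
                }
        }

        /* one grace period covers all LUNs: queuecommand never blocks */
        WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING);
        if (!ret)
                synchronize_rcu();
        return ret;
}

int scsi_host_unblock(struct Scsi_Host *shost, int new_state)
{
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
                scsi_internal_device_unblock(sdev, new_state);
        return 0;
}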