Lines Matching refs:hba
(Identifier cross-reference: every source line referencing "hba". The stex_* function names identify the file as the Promise SuperTrak EX SCSI driver, drivers/scsi/stex.c; the numbers on the left are that file's own line numbers. Annotations between the match groups below are editorial sketches, not driver code.)
392 static struct status_msg *stex_get_status(struct st_hba *hba)
394 struct status_msg *status = hba->status_buffer + hba->status_tail;
396 ++hba->status_tail;
397 hba->status_tail %= hba->sts_count+1;
413 static struct req_msg *stex_alloc_req(struct st_hba *hba)
415 struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size;
417 ++hba->req_head;
418 hba->req_head %= hba->rq_count+1;
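Both stex_get_status() and stex_alloc_req() above advance a ring index with a modulo of count+1, which suggests the status and request rings are sized one slot larger than the stored count. A minimal sketch of that index arithmetic, with hypothetical names (the driver keeps these fields directly in struct st_hba):

    struct ring_idx {                   /* hypothetical ring descriptor */
            unsigned int head;          /* producer index (req_head) */
            unsigned int tail;          /* consumer index (status_tail) */
            unsigned int count;         /* ring holds count + 1 slots */
    };

    static unsigned int ring_advance(unsigned int idx, unsigned int count)
    {
            return (idx + 1) % (count + 1); /* wrap after slot 'count' */
    }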
423 static struct req_msg *stex_ss_alloc_req(struct st_hba *hba)
425 return (struct req_msg *)(hba->dma_mem +
426 hba->req_head * hba->rq_size + sizeof(struct st_msg_header));
429 static int stex_map_sg(struct st_hba *hba,
446 dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
461 static int stex_ss_map_sg(struct st_hba *hba,
478 dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
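Both map_sg variants cap the firmware's max_sg_count at the host's sg_tablesize and, in the full source, walk the midlayer's scatterlist. A sketch of that standard scsi_dma_map()/scsi_for_each_sg() pattern, with a hypothetical firmware entry layout standing in for the driver's struct st_sgitem:

    #include <linux/types.h>
    #include <scsi/scsi_cmnd.h>

    struct fw_sg_entry {                /* hypothetical firmware SG entry */
            __le64 addr;
            __le32 len;
    };

    static int sketch_map_sg(struct scsi_cmnd *cmd, struct fw_sg_entry *table)
    {
            struct scatterlist *sg;
            int i, nseg;

            nseg = scsi_dma_map(cmd);   /* DMA-map the midlayer scatterlist */
            if (nseg <= 0)
                    return nseg;        /* 0: no data; <0: mapping error */

            scsi_for_each_sg(cmd, sg, nseg, i) {
                    table[i].addr = cpu_to_le64(sg_dma_address(sg));
                    table[i].len  = cpu_to_le32(sg_dma_len(sg));
            }
            return nseg;
    }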
494 static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
499 p = hba->copy_buffer;
502 *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
510 p->bus = hba->pdev->bus->number;
511 p->slot = hba->pdev->devfn;
513 p->irq_vec = hba->pdev->irq;
514 p->id = hba->pdev->vendor << 16 | hba->pdev->device;
516 hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;
522 stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
526 hba->ccb[tag].req = req;
527 hba->out_req_cnt++;
529 writel(hba->req_head, hba->mmio_base + IMR0);
530 writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
531 readl(hba->mmio_base + IDBL); /* flush */
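stex_send_cmd() publishes the new head index, rings the inbound doorbell, then reads the doorbell back: MMIO writes are posted, so the read forces them out to the device before the CPU continues. The same write-then-read-back idiom recurs throughout the driver. A sketch, with REG_HEAD/REG_DOORBELL/RING_BIT as stand-ins for the driver's IMR0, IDBL and MU_INBOUND_DOORBELL_REQHEADCHANGED:

    #include <linux/io.h>

    #define REG_HEAD        0x20        /* hypothetical register offsets */
    #define REG_DOORBELL    0x24
    #define RING_BIT        0x1

    static void ring_doorbell(void __iomem *base, u32 head)
    {
            writel(head, base + REG_HEAD);          /* publish producer index */
            writel(RING_BIT, base + REG_DOORBELL);  /* notify the firmware */
            readl(base + REG_DOORBELL);             /* flush posted writes */
    }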
535 stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
543 hba->ccb[tag].req = req;
544 hba->out_req_cnt++;
546 cmd = hba->ccb[tag].cmd;
552 addr = hba->dma_handle + hba->req_head * hba->rq_size;
553 addr += (hba->ccb[tag].sg_count+4)/11;
556 ++hba->req_head;
557 hba->req_head %= hba->rq_count+1;
558 if (hba->cardtype == st_P3) {
559 writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
560 writel(addr, hba->mmio_base + YH2I_REQ);
562 writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
563 readl(hba->mmio_base + YH2I_REQ_HI); /* flush */
564 writel(addr, hba->mmio_base + YH2I_REQ);
565 readl(hba->mmio_base + YH2I_REQ); /* flush */
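The `(addr >> 16) >> 16` in stex_ss_send_cmd() extracts the upper 32 bits of a dma_addr_t without ever shifting by 32: a shift by the type's full width would be undefined behavior on configurations where dma_addr_t is only 32 bits, while the double 16-bit shift simply yields 0 there. A sketch of the split, with hypothetical REG_HI/REG_LO offsets standing in for YH2I_REQ_HI/YH2I_REQ:

    #include <linux/io.h>
    #include <linux/types.h>

    #define REG_HI  0x30                /* hypothetical register offsets */
    #define REG_LO  0x34

    static void write_dma_addr(void __iomem *base, dma_addr_t addr)
    {
            writel((addr >> 16) >> 16, base + REG_HI);  /* upper 32 bits */
            writel(addr, base + REG_LO);                /* lower 32 bits */
    }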
569 static void return_abnormal_state(struct st_hba *hba, int status)
575 spin_lock_irqsave(hba->host->host_lock, flags);
576 for (tag = 0; tag < hba->host->can_queue; tag++) {
577 ccb = &hba->ccb[tag];
588 spin_unlock_irqrestore(hba->host->host_lock, flags);
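return_abnormal_state() walks every possible tag under the host lock; the lines this listing elides presumably fail each outstanding command with the given host status (DID_RESET or DID_NO_CONNECT at the call sites below). A sketch of that skeleton, assuming the pre-5.16 cmd->scsi_done() completion style of this driver's vintage; struct names are hypothetical:

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    struct sketch_ccb {                 /* hypothetical per-tag state */
            void *req;
            struct scsi_cmnd *cmd;
    };

    struct sketch_hba {                 /* hypothetical HBA state */
            struct Scsi_Host *host;
            struct sketch_ccb *ccb;
    };

    static void sketch_fail_all(struct sketch_hba *hba, int status)
    {
            unsigned long flags;
            u16 tag;

            spin_lock_irqsave(hba->host->host_lock, flags);
            for (tag = 0; tag < hba->host->can_queue; tag++) {
                    struct sketch_ccb *ccb = &hba->ccb[tag];

                    if (!ccb->req)              /* slot not outstanding */
                            continue;
                    ccb->req = NULL;
                    if (ccb->cmd) {
                            scsi_dma_unmap(ccb->cmd);
                            ccb->cmd->result = status << 16; /* host byte */
                            ccb->cmd->scsi_done(ccb->cmd);
                            ccb->cmd = NULL;
                    }
            }
            spin_unlock_irqrestore(hba->host->host_lock, flags);
    }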
603 struct st_hba *hba;
612 hba = (struct st_hba *) &host->hostdata[0];
613 if (hba->mu_status == MU_STATE_NOCONNECT) {
618 if (unlikely(hba->mu_status != MU_STATE_STARTED))
644 if (hba->cardtype == st_shasta || id == host->max_id - 1) {
682 .host_no = hba->host->host_no,
704 req = hba->alloc_rq(hba);
719 hba->ccb[tag].cmd = cmd;
720 hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
721 hba->ccb[tag].sense_buffer = cmd->sense_buffer;
723 if (!hba->map_sg(hba, req, &hba->ccb[tag])) {
724 hba->ccb[tag].sg_count = 0;
728 hba->send(hba, req, tag);
791 static void stex_check_cmd(struct st_hba *hba,
800 static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
802 void __iomem *base = hba->mmio_base;
812 hba->status_head = readl(base + OMR1);
813 if (unlikely(hba->status_head > hba->sts_count)) {
815 pci_name(hba->pdev));
827 if (unlikely(hba->out_req_cnt <= 0 ||
828 (hba->mu_status == MU_STATE_RESETTING &&
829 hba->cardtype != st_yosemite))) {
830 hba->status_tail = hba->status_head;
834 while (hba->status_tail != hba->status_head) {
835 resp = stex_get_status(hba);
837 if (unlikely(tag >= hba->host->can_queue)) {
839 "(%s): invalid tag\n", pci_name(hba->pdev));
843 hba->out_req_cnt--;
844 ccb = &hba->ccb[tag];
845 if (unlikely(hba->wait_ccb == ccb))
846 hba->wait_ccb = NULL;
849 "(%s): lagging req\n", pci_name(hba->pdev));
857 pci_name(hba->pdev));
869 if (hba->cardtype == st_yosemite)
870 stex_check_cmd(hba, ccb, resp);
874 stex_controller_info(hba, ccb);
883 writel(hba->status_head, base + IMR1);
889 struct st_hba *hba = __hba;
890 void __iomem *base = hba->mmio_base;
894 spin_lock_irqsave(hba->host->host_lock, flags);
902 stex_mu_intr(hba, data);
903 spin_unlock_irqrestore(hba->host->host_lock, flags);
905 hba->cardtype == st_shasta))
906 queue_work(hba->work_q, &hba->reset_work);
910 spin_unlock_irqrestore(hba->host->host_lock, flags);
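stex_intr() is registered with IRQF_SHARED, so it must recognize interrupts that aren't its own: it reads the outbound doorbell and returns IRQ_NONE when nothing is pending, or when the read comes back all ones (the device has fallen off the bus). A sketch of that claim-or-decline pattern, with a hypothetical register offset and with the BAR mapping passed as dev_id for brevity:

    #include <linux/interrupt.h>
    #include <linux/io.h>

    #define REG_ODBL 0x2c               /* hypothetical doorbell offset */

    static irqreturn_t sketch_intr(int irq, void *dev_id)
    {
            void __iomem *base = dev_id;
            u32 pending = readl(base + REG_ODBL);

            if (!pending || pending == 0xffffffff)
                    return IRQ_NONE;            /* not ours, or device gone */

            writel(pending, base + REG_ODBL);   /* ack the doorbell */
            readl(base + REG_ODBL);             /* flush posted write */
            /* ... drain the status ring, complete commands ... */
            return IRQ_HANDLED;
    }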
915 static void stex_ss_mu_intr(struct st_hba *hba)
925 if (unlikely(hba->out_req_cnt <= 0 ||
926 hba->mu_status == MU_STATE_RESETTING))
929 while (count < hba->sts_count) {
930 scratch = hba->scratch + hba->status_tail;
935 resp = hba->status_buffer + hba->status_tail;
938 ++hba->status_tail;
939 hba->status_tail %= hba->sts_count+1;
942 if (unlikely(tag >= hba->host->can_queue)) {
944 "(%s): invalid tag\n", pci_name(hba->pdev));
948 hba->out_req_cnt--;
949 ccb = &hba->ccb[tag];
950 if (unlikely(hba->wait_ccb == ccb))
951 hba->wait_ccb = NULL;
954 "(%s): lagging req\n", pci_name(hba->pdev));
970 pci_name(hba->pdev));
977 stex_check_cmd(hba, ccb, resp);
990 struct st_hba *hba = __hba;
991 void __iomem *base = hba->mmio_base;
995 spin_lock_irqsave(hba->host->host_lock, flags);
997 if (hba->cardtype == st_yel) {
1002 stex_ss_mu_intr(hba);
1003 spin_unlock_irqrestore(hba->host->host_lock, flags);
1005 queue_work(hba->work_q, &hba->reset_work);
1016 stex_ss_mu_intr(hba);
1017 spin_unlock_irqrestore(hba->host->host_lock, flags);
1019 queue_work(hba->work_q, &hba->reset_work);
1024 spin_unlock_irqrestore(hba->host->host_lock, flags);
1029 static int stex_common_handshake(struct st_hba *hba)
1031 void __iomem *base = hba->mmio_base;
1045 pci_name(hba->pdev));
1058 if (hba->host->can_queue > data) {
1059 hba->host->can_queue = data;
1060 hba->host->cmd_per_lun = data;
1064 h = (struct handshake_frame *)hba->status_buffer;
1065 h->rb_phy = cpu_to_le64(hba->dma_handle);
1066 h->req_sz = cpu_to_le16(hba->rq_size);
1067 h->req_cnt = cpu_to_le16(hba->rq_count+1);
1069 h->status_cnt = cpu_to_le16(hba->sts_count+1);
1072 if (hba->extra_offset) {
1073 h->extra_offset = cpu_to_le32(hba->extra_offset);
1074 h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset);
1078 status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size;
1095 pci_name(hba->pdev));
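stex_common_handshake() hands the firmware the physical layout of the rings: the coherent DMA base, the per-slot size, and the slot counts (again count+1), all converted with cpu_to_le16()/cpu_to_le64() because the message unit is little-endian regardless of host byte order. A sketch of the conversion, with a hypothetical frame layout standing in for the driver's struct handshake_frame:

    #include <linux/kernel.h>
    #include <linux/types.h>

    struct fw_handshake {               /* hypothetical frame layout */
            __le64 ring_phys;           /* bus address of the request ring */
            __le16 req_size;            /* bytes per request slot */
            __le16 req_slots;           /* number of slots (count + 1) */
    };

    static void fill_handshake(struct fw_handshake *h, dma_addr_t phys,
                               u16 size, u16 count)
    {
            h->ring_phys = cpu_to_le64(phys);
            h->req_size  = cpu_to_le16(size);
            h->req_slots = cpu_to_le16(count + 1);
    }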
1113 static int stex_ss_handshake(struct st_hba *hba)
1115 void __iomem *base = hba->mmio_base;
1125 if (hba->cardtype == st_yel) {
1131 pci_name(hba->pdev));
1143 pci_name(hba->pdev));
1151 msg_h = (struct st_msg_header *)hba->dma_mem;
1152 msg_h->handle = cpu_to_le64(hba->dma_handle);
1156 h->rb_phy = cpu_to_le64(hba->dma_handle);
1157 h->req_sz = cpu_to_le16(hba->rq_size);
1158 h->req_cnt = cpu_to_le16(hba->rq_count+1);
1160 h->status_cnt = cpu_to_le16(hba->sts_count+1);
1164 scratch_size = (hba->sts_count+1)*sizeof(u32);
1167 if (hba->cardtype == st_yel) {
1171 writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
1173 writel(hba->dma_handle, base + YH2I_REQ);
1180 if (hba->msi_lock == 0) {
1183 hba->msi_lock = 1;
1185 writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
1186 writel(hba->dma_handle, base + YH2I_REQ);
1190 scratch = hba->scratch;
1191 if (hba->cardtype == st_yel) {
1196 pci_name(hba->pdev));
1209 pci_name(hba->pdev));
1224 static int stex_handshake(struct st_hba *hba)
1230 if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1231 err = stex_ss_handshake(hba);
1233 err = stex_common_handshake(hba);
1234 spin_lock_irqsave(hba->host->host_lock, flags);
1235 mu_status = hba->mu_status;
1237 hba->req_head = 0;
1238 hba->req_tail = 0;
1239 hba->status_head = 0;
1240 hba->status_tail = 0;
1241 hba->out_req_cnt = 0;
1242 hba->mu_status = MU_STATE_STARTED;
1244 hba->mu_status = MU_STATE_FAILED;
1246 wake_up_all(&hba->reset_waitq);
1247 spin_unlock_irqrestore(hba->host->host_lock, flags);
1254 struct st_hba *hba = (struct st_hba *)host->hostdata;
1263 base = hba->mmio_base;
1266 hba->ccb[tag].req && hba->ccb[tag].cmd == cmd)
1267 hba->wait_ccb = &hba->ccb[tag];
1271 if (hba->cardtype == st_yel) {
1277 stex_ss_mu_intr(hba);
1278 } else if (hba->cardtype == st_P3) {
1286 stex_ss_mu_intr(hba);
1294 stex_mu_intr(hba, data);
1296 if (hba->wait_ccb == NULL) {
1298 "(%s): lost interrupt\n", pci_name(hba->pdev));
1304 hba->wait_ccb->req = NULL; /* nullify the req's future return */
1305 hba->wait_ccb = NULL;
1312 static void stex_hard_reset(struct st_hba *hba)
1320 pci_read_config_dword(hba->pdev, i * 4,
1321 &hba->pdev->saved_config_space[i]);
1325 bus = hba->pdev->bus;
1339 pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
1347 pci_write_config_dword(hba->pdev, i * 4,
1348 hba->pdev->saved_config_space[i]);
1351 static int stex_yos_reset(struct st_hba *hba)
1357 base = hba->mmio_base;
1361 while (hba->out_req_cnt > 0) {
1364 "(%s): reset timeout\n", pci_name(hba->pdev));
1371 spin_lock_irqsave(hba->host->host_lock, flags);
1373 hba->mu_status = MU_STATE_FAILED;
1375 hba->mu_status = MU_STATE_STARTED;
1376 wake_up_all(&hba->reset_waitq);
1377 spin_unlock_irqrestore(hba->host->host_lock, flags);
1382 static void stex_ss_reset(struct st_hba *hba)
1384 writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
1385 readl(hba->mmio_base + YH2I_INT);
1389 static void stex_p3_reset(struct st_hba *hba)
1391 writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
1395 static int stex_do_reset(struct st_hba *hba)
1400 spin_lock_irqsave(hba->host->host_lock, flags);
1401 if (hba->mu_status == MU_STATE_STARTING) {
1402 spin_unlock_irqrestore(hba->host->host_lock, flags);
1404 pci_name(hba->pdev));
1407 while (hba->mu_status == MU_STATE_RESETTING) {
1408 spin_unlock_irqrestore(hba->host->host_lock, flags);
1409 wait_event_timeout(hba->reset_waitq,
1410 hba->mu_status != MU_STATE_RESETTING,
1412 spin_lock_irqsave(hba->host->host_lock, flags);
1413 mu_status = hba->mu_status;
1417 spin_unlock_irqrestore(hba->host->host_lock, flags);
1421 hba->mu_status = MU_STATE_RESETTING;
1422 spin_unlock_irqrestore(hba->host->host_lock, flags);
1424 if (hba->cardtype == st_yosemite)
1425 return stex_yos_reset(hba);
1427 if (hba->cardtype == st_shasta)
1428 stex_hard_reset(hba);
1429 else if (hba->cardtype == st_yel)
1430 stex_ss_reset(hba);
1431 else if (hba->cardtype == st_P3)
1432 stex_p3_reset(hba);
1434 return_abnormal_state(hba, DID_RESET);
1436 if (stex_handshake(hba) == 0)
1440 pci_name(hba->pdev));
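stex_do_reset() serializes concurrent resets through mu_status and reset_waitq: a caller who finds a reset already in flight drops the host lock, sleeps in wait_event_timeout(), then re-takes the lock and re-checks, while stex_handshake() (above) flips the state and calls wake_up_all() when it finishes. A sketch of the waiter side, with hypothetical names and a 30-second timeout as a stand-in for the driver's own constant:

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    enum { STATE_STARTED, STATE_RESETTING, STATE_FAILED };

    struct reset_ctx {                  /* hypothetical HBA state */
            spinlock_t lock;
            wait_queue_head_t reset_waitq;
            int state;
    };

    static int wait_out_reset(struct reset_ctx *hba)
    {
            unsigned long flags;
            int state;

            spin_lock_irqsave(&hba->lock, flags);
            while (hba->state == STATE_RESETTING) {
                    spin_unlock_irqrestore(&hba->lock, flags);
                    /* woken by wake_up_all() from the reset path */
                    wait_event_timeout(hba->reset_waitq,
                                       hba->state != STATE_RESETTING,
                                       30 * HZ);
                    spin_lock_irqsave(&hba->lock, flags);
            }
            state = hba->state;
            spin_unlock_irqrestore(&hba->lock, flags);
            return state == STATE_STARTED ? 0 : -EIO;
    }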
1446 struct st_hba *hba;
1448 hba = (struct st_hba *) &cmd->device->host->hostdata[0];
1453 return stex_do_reset(hba) ? FAILED : SUCCESS;
1458 struct st_hba *hba = container_of(work, struct st_hba, reset_work);
1460 stex_do_reset(hba);
1621 static int stex_request_irq(struct st_hba *hba)
1623 struct pci_dev *pdev = hba->pdev;
1626 if (msi || hba->cardtype == st_P3) {
1633 hba->msi_enabled = 1;
1635 hba->msi_enabled = 0;
1638 (hba->cardtype == st_yel || hba->cardtype == st_P3) ?
1639 stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba);
1642 if (hba->msi_enabled)
1648 static void stex_free_irq(struct st_hba *hba)
1650 struct pci_dev *pdev = hba->pdev;
1652 free_irq(pdev->irq, hba);
1653 if (hba->msi_enabled)
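stex_request_irq()/stex_free_irq() pair MSI setup with teardown: MSI is attempted for st_P3 or when the msi module parameter is set, the handler is registered IRQF_SHARED either way, and a failed request_irq() rolls back pci_enable_msi(). A sketch of that enable-with-fallback shape (pci_enable_msi() is the legacy API of this driver's vintage; newer code would use pci_alloc_irq_vectors()):

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static int sketch_request_irq(struct pci_dev *pdev, bool want_msi,
                                  irq_handler_t handler, void *dev_id)
    {
            bool msi_on = want_msi && pci_enable_msi(pdev) == 0;
            int err;

            err = request_irq(pdev->irq, handler, IRQF_SHARED,
                              "sketch", dev_id);
            if (err && msi_on)
                    pci_disable_msi(pdev);  /* keep enable/disable balanced */
            return err;
    }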
1659 struct st_hba *hba;
1683 hba = (struct st_hba *)host->hostdata;
1684 memset(hba, 0, sizeof(struct st_hba));
1693 hba->mmio_base = pci_ioremap_bar(pdev, 0);
1694 if ( !hba->mmio_base) {
1710 hba->cardtype = (unsigned int) id->driver_data;
1711 ci = &stex_card_info[hba->cardtype];
1727 if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1728 hba->supports_pm = 1;
1732 if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1735 hba->dma_size = cp_offset + sizeof(struct st_frame);
1736 if (hba->cardtype == st_seq ||
1737 (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
1738 hba->extra_offset = hba->dma_size;
1739 hba->dma_size += ST_ADDITIONAL_MEM;
1741 hba->dma_mem = dma_alloc_coherent(&pdev->dev,
1742 hba->dma_size, &hba->dma_handle, GFP_KERNEL);
1743 if (!hba->dma_mem) {
1745 if (hba->cardtype == st_seq ||
1746 (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
1750 hba->dma_size = hba->extra_offset
1752 hba->dma_mem = dma_alloc_coherent(&pdev->dev,
1753 hba->dma_size, &hba->dma_handle, GFP_KERNEL);
1756 if (!hba->dma_mem) {
1764 hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL);
1765 if (!hba->ccb) {
1772 if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1773 hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset);
1774 hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset);
1775 hba->copy_buffer = hba->dma_mem + cp_offset;
1776 hba->rq_count = ci->rq_count;
1777 hba->rq_size = ci->rq_size;
1778 hba->sts_count = ci->sts_count;
1779 hba->alloc_rq = ci->alloc_rq;
1780 hba->map_sg = ci->map_sg;
1781 hba->send = ci->send;
1782 hba->mu_status = MU_STATE_STARTING;
1783 hba->msi_lock = 0;
1785 if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1797 hba->host = host;
1798 hba->pdev = pdev;
1799 init_waitqueue_head(&hba->reset_waitq);
1801 snprintf(hba->work_q_name, sizeof(hba->work_q_name),
1803 hba->work_q = create_singlethread_workqueue(hba->work_q_name);
1804 if (!hba->work_q) {
1810 INIT_WORK(&hba->reset_work, stex_reset_work);
1812 err = stex_request_irq(hba);
1819 err = stex_handshake(hba);
1823 pci_set_drvdata(pdev, hba);
1837 stex_free_irq(hba);
1839 destroy_workqueue(hba->work_q);
1841 kfree(hba->ccb);
1843 dma_free_coherent(&pdev->dev, hba->dma_size,
1844 hba->dma_mem, hba->dma_handle);
1846 iounmap(hba->mmio_base);
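The probe error path above (stex_free_irq, destroy_workqueue, kfree, dma_free_coherent, iounmap) releases resources in the reverse order of their acquisition, i.e. the usual cascading-goto unwind. A condensed sketch of that shape, trimmed to two acquisitions:

    #include <linux/pci.h>

    static int sketch_probe(struct pci_dev *pdev)
    {
            int err;

            err = pci_enable_device(pdev);
            if (err)
                    return err;
            err = pci_request_regions(pdev, "sketch");
            if (err)
                    goto out_disable;
            /* ... ioremap the BAR, dma_alloc_coherent, request the IRQ;
             * each later failure jumps to a label that frees everything
             * acquired so far, in reverse order ... */
            return 0;

    out_disable:
            pci_disable_device(pdev);
            return err;
    }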
1857 static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic)
1865 spin_lock_irqsave(hba->host->host_lock, flags);
1867 if ((hba->cardtype == st_yel || hba->cardtype == st_P3) &&
1868 hba->supports_pm == 1) {
1870 spin_unlock_irqrestore(hba->host->host_lock, flags);
1874 req = hba->alloc_rq(hba);
1875 if (hba->cardtype == st_yel || hba->cardtype == st_P3) {
1877 memset(msg_h, 0, hba->rq_size);
1879 memset(req, 0, hba->rq_size);
1881 if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel
1882 || hba->cardtype == st_P3)
1888 } else if ((hba->cardtype == st_yel || hba->cardtype == st_P3)
1900 hba->ccb[tag].cmd = NULL;
1901 hba->ccb[tag].sg_count = 0;
1902 hba->ccb[tag].sense_bufflen = 0;
1903 hba->ccb[tag].sense_buffer = NULL;
1904 hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE;
1905 hba->send(hba, req, tag);
1906 spin_unlock_irqrestore(hba->host->host_lock, flags);
1908 while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
1910 hba->ccb[tag].req_type = 0;
1911 hba->mu_status = MU_STATE_STOP;
1916 hba->mu_status = MU_STATE_STOP;
1919 static void stex_hba_free(struct st_hba *hba)
1921 stex_free_irq(hba);
1923 destroy_workqueue(hba->work_q);
1925 iounmap(hba->mmio_base);
1927 pci_release_regions(hba->pdev);
1929 kfree(hba->ccb);
1931 dma_free_coherent(&hba->pdev->dev, hba->dma_size,
1932 hba->dma_mem, hba->dma_handle);
1937 struct st_hba *hba = pci_get_drvdata(pdev);
1939 hba->mu_status = MU_STATE_NOCONNECT;
1940 return_abnormal_state(hba, DID_NO_CONNECT);
1941 scsi_remove_host(hba->host);
1943 scsi_block_requests(hba->host);
1945 stex_hba_free(hba);
1947 scsi_host_put(hba->host);
1956 struct st_hba *hba = pci_get_drvdata(pdev);
1958 if (hba->supports_pm == 0) {
1959 stex_hba_stop(hba, ST_IGNORED);
1960 } else if (hba->supports_pm == 1 && S6flag) {
1962 stex_hba_stop(hba, ST_S6);
1964 stex_hba_stop(hba, ST_S5);
1967 static int stex_choice_sleep_mic(struct st_hba *hba, pm_message_t state)
1973 hba->msi_lock = 0;
1982 struct st_hba *hba = pci_get_drvdata(pdev);
1984 if ((hba->cardtype == st_yel || hba->cardtype == st_P3)
1985 && hba->supports_pm == 1)
1986 stex_hba_stop(hba, stex_choice_sleep_mic(hba, state));
1988 stex_hba_stop(hba, ST_IGNORED);
1994 struct st_hba *hba = pci_get_drvdata(pdev);
1996 hba->mu_status = MU_STATE_STARTING;
1997 stex_handshake(hba);