Lines matching refs: hba

(Cross-reference listing: every line of the Linux stex SCSI host driver, drivers/scsi/stex.c, that references the adapter state struct st_hba. The leading number on each entry is its line number in the source file.)

392 static struct status_msg *stex_get_status(struct st_hba *hba)
394 struct status_msg *status = hba->status_buffer + hba->status_tail;
396 ++hba->status_tail;
397 hba->status_tail %= hba->sts_count+1;
410 static struct req_msg *stex_alloc_req(struct st_hba *hba)
412 struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size;
414 ++hba->req_head;
415 hba->req_head %= hba->rq_count+1;
420 static struct req_msg *stex_ss_alloc_req(struct st_hba *hba)
422 return (struct req_msg *)(hba->dma_mem +
423 hba->req_head * hba->rq_size + sizeof(struct st_msg_header));
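
The three allocators above share one ring convention: each queue holds count+1 slots (rq_count+1 requests, sts_count+1 statuses), and an index advances by increment-then-modulo. A minimal standalone sketch of that arithmetic (the names here are ours, not the driver's):

	#include <stddef.h>

	/* Advance a ring index the way stex_get_status()/stex_alloc_req()
	 * do: the ring holds count + 1 slots, so wrap at count + 1. */
	static size_t ring_advance(size_t idx, size_t count)
	{
		return (idx + 1) % (count + 1);
	}

	struct status_ring {
		char *base;		/* status_buffer */
		size_t entry_size;	/* sizeof(struct status_msg) */
		size_t tail;		/* status_tail */
		size_t count;		/* sts_count */
	};

	/* Consumer side: return the slot at tail, then advance tail. */
	static void *ring_consume(struct status_ring *r)
	{
		void *slot = r->base + r->tail * r->entry_size;
		r->tail = ring_advance(r->tail, r->count);
		return slot;
	}

Note that stex_ss_alloc_req() (line 422) differs from stex_alloc_req() only by skipping a struct st_msg_header at the front of each slot.
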
426 static int stex_map_sg(struct st_hba *hba,
443 dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
458 static int stex_ss_map_sg(struct st_hba *hba,
475 dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
491 static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
496 p = hba->copy_buffer;
499 *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
507 p->bus = hba->pdev->bus->number;
508 p->slot = hba->pdev->devfn;
510 p->irq_vec = hba->pdev->irq;
511 p->id = hba->pdev->vendor << 16 | hba->pdev->device;
513 hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;
519 stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
523 hba->ccb[tag].req = req;
524 hba->out_req_cnt++;
526 writel(hba->req_head, hba->mmio_base + IMR0);
527 writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
528 readl(hba->mmio_base + IDBL); /* flush */
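
stex_send_cmd() ends with the standard posted-write flush: MMIO writes can linger in bridge posting buffers, and a read from the same device forces them out before the CPU moves on. The idiom, isolated (register offsets as in the fragment; the helper name is ours):

	#include <linux/io.h>

	static void stex_ring_doorbell(void __iomem *mmio, u32 req_head)
	{
		writel(req_head, mmio + IMR0);	/* publish the new head index */
		writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, mmio + IDBL);
		readl(mmio + IDBL);		/* flush posted writes */
	}
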
532 stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
540 hba->ccb[tag].req = req;
541 hba->out_req_cnt++;
543 cmd = hba->ccb[tag].cmd;
549 addr = hba->dma_handle + hba->req_head * hba->rq_size;
550 addr += (hba->ccb[tag].sg_count+4)/11;
553 ++hba->req_head;
554 hba->req_head %= hba->rq_count+1;
555 if (hba->cardtype == st_P3) {
556 writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
557 writel(addr, hba->mmio_base + YH2I_REQ);
559 writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
560 readl(hba->mmio_base + YH2I_REQ_HI); /* flush */
561 writel(addr, hba->mmio_base + YH2I_REQ);
562 readl(hba->mmio_base + YH2I_REQ); /* flush */
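
Both branches split the 64-bit DMA address across two 32-bit registers with (addr >> 16) >> 16 rather than addr >> 32. The double shift is deliberate: when dma_addr_t is a 32-bit type, a single shift by 32 is undefined behaviour (and a compiler warning), while two 16-bit shifts are well defined and yield zero. The kernel's stock helpers encode exactly this trick, so the pair could equally be written as:

	#include <linux/kernel.h>  /* upper_32_bits() is ((u32)(((n) >> 16) >> 16)) */

	writel(upper_32_bits(addr), hba->mmio_base + YH2I_REQ_HI);
	writel(lower_32_bits(addr), hba->mmio_base + YH2I_REQ);

Also visible in the fragment: the st_P3 branch (lines 556-557) posts both writes back to back, while the older st_yel path (lines 559-562) flushes each write with an intermediate readl().
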
566 static void return_abnormal_state(struct st_hba *hba, int status)
572 spin_lock_irqsave(hba->host->host_lock, flags);
573 for (tag = 0; tag < hba->host->can_queue; tag++) {
574 ccb = &hba->ccb[tag];
585 spin_unlock_irqrestore(hba->host->host_lock, flags);
600 struct st_hba *hba;
609 hba = (struct st_hba *) &host->hostdata[0];
610 if (hba->mu_status == MU_STATE_NOCONNECT) {
615 if (unlikely(hba->mu_status != MU_STATE_STARTED))
641 if (hba->cardtype == st_shasta || id == host->max_id - 1) {
679 .host_no = hba->host->host_no,
701 req = hba->alloc_rq(hba);
716 hba->ccb[tag].cmd = cmd;
717 hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
718 hba->ccb[tag].sense_buffer = cmd->sense_buffer;
720 if (!hba->map_sg(hba, req, &hba->ccb[tag])) {
721 hba->ccb[tag].sg_count = 0;
725 hba->send(hba, req, tag);
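
The queuecommand path never calls a request function directly; it goes through hba->alloc_rq, hba->map_sg and hba->send, which the probe fragment further down (lines 1776-1778) copies out of the per-card-type stex_card_info entry. The shape of that indirection, sketched from the call sites (signatures inferred from lines 701, 720 and 725):

	struct st_card_info {
		struct req_msg *(*alloc_rq)(struct st_hba *);
		int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
		void (*send)(struct st_hba *, struct req_msg *, u16 tag);
		/* plus per-card ring geometry: rq_count, rq_size, sts_count */
	};

So st_shasta, st_yosemite, st_yel and st_P3 hardware share one submission path, differing only in the three operations installed at probe time.
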
788 static void stex_check_cmd(struct st_hba *hba,
797 static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
799 void __iomem *base = hba->mmio_base;
809 hba->status_head = readl(base + OMR1);
810 if (unlikely(hba->status_head > hba->sts_count)) {
812 pci_name(hba->pdev));
824 if (unlikely(hba->out_req_cnt <= 0 ||
825 (hba->mu_status == MU_STATE_RESETTING &&
826 hba->cardtype != st_yosemite))) {
827 hba->status_tail = hba->status_head;
831 while (hba->status_tail != hba->status_head) {
832 resp = stex_get_status(hba);
834 if (unlikely(tag >= hba->host->can_queue)) {
836 "(%s): invalid tag\n", pci_name(hba->pdev));
840 hba->out_req_cnt--;
841 ccb = &hba->ccb[tag];
842 if (unlikely(hba->wait_ccb == ccb))
843 hba->wait_ccb = NULL;
846 "(%s): lagging req\n", pci_name(hba->pdev));
854 pci_name(hba->pdev));
866 if (hba->cardtype == st_yosemite)
867 stex_check_cmd(hba, ccb, resp);
871 stex_controller_info(hba, ccb);
880 writel(hba->status_head, base + IMR1);
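
The interrupt bottom half snapshots the producer index from OMR1 (line 809), validates it against sts_count, drains the ring until the local tail catches up, and only then acknowledges by writing the consumed head to IMR1 (line 880). Stripped to the ring logic, a standalone model (reusing ring_consume() from the sketch above; handle_tag() stands in for the per-ccb completion work):

	static void drain_statuses(struct status_ring *r, size_t head,
				   void (*handle_tag)(void *slot))
	{
		while (r->tail != head)
			handle_tag(ring_consume(r));
		/* the driver then acks: writel(hba->status_head, base + IMR1); */
	}

The early-out at lines 824-827 covers spurious interrupts: with no outstanding requests, or mid-reset on non-yosemite cards, the tail is simply fast-forwarded to the head and the pending statuses discarded.
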
886 struct st_hba *hba = __hba;
887 void __iomem *base = hba->mmio_base;
891 spin_lock_irqsave(hba->host->host_lock, flags);
899 stex_mu_intr(hba, data);
900 spin_unlock_irqrestore(hba->host->host_lock, flags);
902 hba->cardtype == st_shasta))
903 queue_work(hba->work_q, &hba->reset_work);
907 spin_unlock_irqrestore(hba->host->host_lock, flags);
912 static void stex_ss_mu_intr(struct st_hba *hba)
922 if (unlikely(hba->out_req_cnt <= 0 ||
923 hba->mu_status == MU_STATE_RESETTING))
926 while (count < hba->sts_count) {
927 scratch = hba->scratch + hba->status_tail;
932 resp = hba->status_buffer + hba->status_tail;
935 ++hba->status_tail;
936 hba->status_tail %= hba->sts_count+1;
939 if (unlikely(tag >= hba->host->can_queue)) {
941 "(%s): invalid tag\n", pci_name(hba->pdev));
945 hba->out_req_cnt--;
946 ccb = &hba->ccb[tag];
947 if (unlikely(hba->wait_ccb == ccb))
948 hba->wait_ccb = NULL;
951 "(%s): lagging req\n", pci_name(hba->pdev));
967 pci_name(hba->pdev));
974 stex_check_cmd(hba, ccb, resp);
987 struct st_hba *hba = __hba;
988 void __iomem *base = hba->mmio_base;
992 spin_lock_irqsave(hba->host->host_lock, flags);
994 if (hba->cardtype == st_yel) {
999 stex_ss_mu_intr(hba);
1000 spin_unlock_irqrestore(hba->host->host_lock, flags);
1002 queue_work(hba->work_q, &hba->reset_work);
1013 stex_ss_mu_intr(hba);
1014 spin_unlock_irqrestore(hba->host->host_lock, flags);
1016 queue_work(hba->work_q, &hba->reset_work);
1021 spin_unlock_irqrestore(hba->host->host_lock, flags);
1026 static int stex_common_handshake(struct st_hba *hba)
1028 void __iomem *base = hba->mmio_base;
1042 pci_name(hba->pdev));
1055 if (hba->host->can_queue > data) {
1056 hba->host->can_queue = data;
1057 hba->host->cmd_per_lun = data;
1061 h = (struct handshake_frame *)hba->status_buffer;
1062 h->rb_phy = cpu_to_le64(hba->dma_handle);
1063 h->req_sz = cpu_to_le16(hba->rq_size);
1064 h->req_cnt = cpu_to_le16(hba->rq_count+1);
1066 h->status_cnt = cpu_to_le16(hba->sts_count+1);
1069 if (hba->extra_offset) {
1070 h->extra_offset = cpu_to_le32(hba->extra_offset);
1071 h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset);
1075 status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size;
1092 pci_name(hba->pdev));
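
The handshake frame is how the host advertises its coherent DMA block to the firmware: ring base (rb_phy), slot size and counts, and optionally an extra region. Line 1075 pins down the layout: the status ring starts immediately after the (rq_count+1) request slots. Reconstructed from the fragments (the cp_offset and extra_offset positions come from the probe lines below, so treat this as a sketch; st_yel/st_P3 additionally carve out a scratch array of (sts_count+1) u32s, lines 1161 and 1770):

	/*
	 * hba->dma_mem + 0
	 *	request ring: (rq_count + 1) slots of rq_size bytes each
	 * hba->dma_mem + (rq_count + 1) * rq_size	<- status_phys (line 1075)
	 *	status ring: (sts_count + 1) struct status_msg entries
	 * hba->dma_mem + cp_offset			<- copy_buffer (line 1772)
	 * hba->dma_mem + extra_offset			<- ST_ADDITIONAL_MEM, st_seq/st_vsc only
	 */
	status_phys = hba->dma_handle + (hba->rq_count + 1) * hba->rq_size;
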
1110 static int stex_ss_handshake(struct st_hba *hba)
1112 void __iomem *base = hba->mmio_base;
1122 if (hba->cardtype == st_yel) {
1128 pci_name(hba->pdev));
1140 pci_name(hba->pdev));
1148 msg_h = (struct st_msg_header *)hba->dma_mem;
1149 msg_h->handle = cpu_to_le64(hba->dma_handle);
1153 h->rb_phy = cpu_to_le64(hba->dma_handle);
1154 h->req_sz = cpu_to_le16(hba->rq_size);
1155 h->req_cnt = cpu_to_le16(hba->rq_count+1);
1157 h->status_cnt = cpu_to_le16(hba->sts_count+1);
1161 scratch_size = (hba->sts_count+1)*sizeof(u32);
1164 if (hba->cardtype == st_yel) {
1168 writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
1170 writel(hba->dma_handle, base + YH2I_REQ);
1177 if (hba->msi_lock == 0) {
1180 hba->msi_lock = 1;
1182 writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
1183 writel(hba->dma_handle, base + YH2I_REQ);
1187 scratch = hba->scratch;
1188 if (hba->cardtype == st_yel) {
1193 pci_name(hba->pdev));
1206 pci_name(hba->pdev));
1221 static int stex_handshake(struct st_hba *hba)
1227 if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1228 err = stex_ss_handshake(hba);
1230 err = stex_common_handshake(hba);
1231 spin_lock_irqsave(hba->host->host_lock, flags);
1232 mu_status = hba->mu_status;
1234 hba->req_head = 0;
1235 hba->req_tail = 0;
1236 hba->status_head = 0;
1237 hba->status_tail = 0;
1238 hba->out_req_cnt = 0;
1239 hba->mu_status = MU_STATE_STARTED;
1241 hba->mu_status = MU_STATE_FAILED;
1243 wake_up_all(&hba->reset_waitq);
1244 spin_unlock_irqrestore(hba->host->host_lock, flags);
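
stex_handshake() doubles as the synchronization point for resets: with host_lock held it zeroes all ring indices, publishes MU_STATE_STARTED or MU_STATE_FAILED, and wakes every sleeper on reset_waitq. The matching waiter sits in stex_do_reset() (lines 1404-1410), dropping the lock around each sleep and re-checking the state on wakeup. Both halves, reduced to a sketch (the wait timeout constant is an assumption):

	/* waker: tail of stex_handshake() */
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->mu_status = err ? MU_STATE_FAILED : MU_STATE_STARTED;
	wake_up_all(&hba->reset_waitq);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* waiter: stex_do_reset(), lock dropped while sleeping */
	while (hba->mu_status == MU_STATE_RESETTING) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		wait_event_timeout(hba->reset_waitq,
				   hba->mu_status != MU_STATE_RESETTING,
				   MU_MAX_DELAY * HZ);	/* timeout: assumed constant */
		spin_lock_irqsave(hba->host->host_lock, flags);
	}
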
1251 struct st_hba *hba = (struct st_hba *)host->hostdata;
1260 base = hba->mmio_base;
1263 hba->ccb[tag].req && hba->ccb[tag].cmd == cmd)
1264 hba->wait_ccb = &hba->ccb[tag];
1268 if (hba->cardtype == st_yel) {
1274 stex_ss_mu_intr(hba);
1275 } else if (hba->cardtype == st_P3) {
1283 stex_ss_mu_intr(hba);
1291 stex_mu_intr(hba, data);
1293 if (hba->wait_ccb == NULL) {
1295 "(%s): lost interrupt\n", pci_name(hba->pdev));
1301 hba->wait_ccb->req = NULL; /* nullify the req's future return */
1302 hba->wait_ccb = NULL;
1309 static void stex_hard_reset(struct st_hba *hba)
1317 pci_read_config_dword(hba->pdev, i * 4,
1318 &hba->pdev->saved_config_space[i]);
1322 bus = hba->pdev->bus;
1336 pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
1344 pci_write_config_dword(hba->pdev, i * 4,
1345 hba->pdev->saved_config_space[i]);
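
stex_hard_reset() open-codes a config-space save/restore around the bus reset, looping over pdev->saved_config_space with pci_read_config_dword()/pci_write_config_dword(). The PCI core's stock helpers do the same bookkeeping; a driver written today would more likely use (real APIs, shown for comparison only):

	pci_save_state(hba->pdev);
	/* ... assert and release the secondary-bus reset ... */
	pci_restore_state(hba->pdev);
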
1348 static int stex_yos_reset(struct st_hba *hba)
1354 base = hba->mmio_base;
1358 while (hba->out_req_cnt > 0) {
1361 "(%s): reset timeout\n", pci_name(hba->pdev));
1368 spin_lock_irqsave(hba->host->host_lock, flags);
1370 hba->mu_status = MU_STATE_FAILED;
1372 hba->mu_status = MU_STATE_STARTED;
1373 wake_up_all(&hba->reset_waitq);
1374 spin_unlock_irqrestore(hba->host->host_lock, flags);
1379 static void stex_ss_reset(struct st_hba *hba)
1381 writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
1382 readl(hba->mmio_base + YH2I_INT);
1386 static void stex_p3_reset(struct st_hba *hba)
1388 writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
1392 static int stex_do_reset(struct st_hba *hba)
1397 spin_lock_irqsave(hba->host->host_lock, flags);
1398 if (hba->mu_status == MU_STATE_STARTING) {
1399 spin_unlock_irqrestore(hba->host->host_lock, flags);
1401 pci_name(hba->pdev));
1404 while (hba->mu_status == MU_STATE_RESETTING) {
1405 spin_unlock_irqrestore(hba->host->host_lock, flags);
1406 wait_event_timeout(hba->reset_waitq,
1407 hba->mu_status != MU_STATE_RESETTING,
1409 spin_lock_irqsave(hba->host->host_lock, flags);
1410 mu_status = hba->mu_status;
1414 spin_unlock_irqrestore(hba->host->host_lock, flags);
1418 hba->mu_status = MU_STATE_RESETTING;
1419 spin_unlock_irqrestore(hba->host->host_lock, flags);
1421 if (hba->cardtype == st_yosemite)
1422 return stex_yos_reset(hba);
1424 if (hba->cardtype == st_shasta)
1425 stex_hard_reset(hba);
1426 else if (hba->cardtype == st_yel)
1427 stex_ss_reset(hba);
1428 else if (hba->cardtype == st_P3)
1429 stex_p3_reset(hba);
1431 return_abnormal_state(hba, DID_RESET);
1433 if (stex_handshake(hba) == 0)
1437 pci_name(hba->pdev));
1443 struct st_hba *hba;
1445 hba = (struct st_hba *) &cmd->device->host->hostdata[0];
1450 return stex_do_reset(hba) ? FAILED : SUCCESS;
1455 struct st_hba *hba = container_of(work, struct st_hba, reset_work);
1457 stex_do_reset(hba);
1618 static int stex_request_irq(struct st_hba *hba)
1620 struct pci_dev *pdev = hba->pdev;
1623 if (msi || hba->cardtype == st_P3) {
1630 hba->msi_enabled = 1;
1632 hba->msi_enabled = 0;
1635 (hba->cardtype == st_yel || hba->cardtype == st_P3) ?
1636 stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba);
1639 if (hba->msi_enabled)
1645 static void stex_free_irq(struct st_hba *hba)
1647 struct pci_dev *pdev = hba->pdev;
1649 free_irq(pdev->irq, hba);
1650 if (hba->msi_enabled)
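
Setup and teardown are symmetric: free_irq() must receive the same dev_id cookie (hba) that request_irq() registered, which is also what lets IRQF_SHARED handlers tell their instances apart, and pci_disable_msi() undoes the pci_enable_msi() taken when MSI is requested (or implied, for st_P3). Condensed from the two functions (error logging elided):

	/* stex_request_irq(): MSI if asked for, else legacy INTx */
	if ((msi || hba->cardtype == st_P3) && !pci_enable_msi(pdev))
		hba->msi_enabled = 1;
	else
		hba->msi_enabled = 0;
	err = request_irq(pdev->irq,
			  (hba->cardtype == st_yel || hba->cardtype == st_P3) ?
			  stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba);

	/* stex_free_irq(): same irq number, same cookie */
	free_irq(pdev->irq, hba);
	if (hba->msi_enabled)
		pci_disable_msi(pdev);
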
1656 struct st_hba *hba;
1680 hba = (struct st_hba *)host->hostdata;
1681 memset(hba, 0, sizeof(struct st_hba));
1690 hba->mmio_base = pci_ioremap_bar(pdev, 0);
1691 if (!hba->mmio_base) {
1707 hba->cardtype = (unsigned int) id->driver_data;
1708 ci = &stex_card_info[hba->cardtype];
1724 if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1725 hba->supports_pm = 1;
1729 if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1732 hba->dma_size = cp_offset + sizeof(struct st_frame);
1733 if (hba->cardtype == st_seq ||
1734 (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
1735 hba->extra_offset = hba->dma_size;
1736 hba->dma_size += ST_ADDITIONAL_MEM;
1738 hba->dma_mem = dma_alloc_coherent(&pdev->dev,
1739 hba->dma_size, &hba->dma_handle, GFP_KERNEL);
1740 if (!hba->dma_mem) {
1742 if (hba->cardtype == st_seq ||
1743 (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
1747 hba->dma_size = hba->extra_offset
1749 hba->dma_mem = dma_alloc_coherent(&pdev->dev,
1750 hba->dma_size, &hba->dma_handle, GFP_KERNEL);
1753 if (!hba->dma_mem) {
1761 hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL);
1762 if (!hba->ccb) {
1769 if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1770 hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset);
1771 hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset);
1772 hba->copy_buffer = hba->dma_mem + cp_offset;
1773 hba->rq_count = ci->rq_count;
1774 hba->rq_size = ci->rq_size;
1775 hba->sts_count = ci->sts_count;
1776 hba->alloc_rq = ci->alloc_rq;
1777 hba->map_sg = ci->map_sg;
1778 hba->send = ci->send;
1779 hba->mu_status = MU_STATE_STARTING;
1780 hba->msi_lock = 0;
1782 if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1794 hba->host = host;
1795 hba->pdev = pdev;
1796 init_waitqueue_head(&hba->reset_waitq);
1798 snprintf(hba->work_q_name, sizeof(hba->work_q_name),
1800 hba->work_q = create_singlethread_workqueue(hba->work_q_name);
1801 if (!hba->work_q) {
1807 INIT_WORK(&hba->reset_work, stex_reset_work);
1809 err = stex_request_irq(hba);
1816 err = stex_handshake(hba);
1820 pci_set_drvdata(pdev, hba);
1834 stex_free_irq(hba);
1836 destroy_workqueue(hba->work_q);
1838 kfree(hba->ccb);
1840 dma_free_coherent(&pdev->dev, hba->dma_size,
1841 hba->dma_mem, hba->dma_handle);
1843 iounmap(hba->mmio_base);
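
Lines 1834-1843 are the probe unwind, releasing in exactly the reverse of setup order: irq, workqueue, ccb array, coherent DMA block, ioremap. The goto ladder those lines imply (label names are ours; each failure site jumps to the first resource that needs releasing and falls through the rest):

	err = stex_handshake(hba);
	if (err)
		goto out_free_irq;
	return 0;

	out_free_irq:
		stex_free_irq(hba);
	out_free_wq:
		destroy_workqueue(hba->work_q);
	out_free_ccb:
		kfree(hba->ccb);
	out_free_dma:
		dma_free_coherent(&pdev->dev, hba->dma_size,
				  hba->dma_mem, hba->dma_handle);
	out_unmap:
		iounmap(hba->mmio_base);
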
1854 static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic)
1862 spin_lock_irqsave(hba->host->host_lock, flags);
1864 if ((hba->cardtype == st_yel || hba->cardtype == st_P3) &&
1865 hba->supports_pm == 1) {
1867 spin_unlock_irqrestore(hba->host->host_lock, flags);
1871 req = hba->alloc_rq(hba);
1872 if (hba->cardtype == st_yel || hba->cardtype == st_P3) {
1874 memset(msg_h, 0, hba->rq_size);
1876 memset(req, 0, hba->rq_size);
1878 if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel
1879 || hba->cardtype == st_P3)
1885 } else if ((hba->cardtype == st_yel || hba->cardtype == st_P3)
1897 hba->ccb[tag].cmd = NULL;
1898 hba->ccb[tag].sg_count = 0;
1899 hba->ccb[tag].sense_bufflen = 0;
1900 hba->ccb[tag].sense_buffer = NULL;
1901 hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE;
1902 hba->send(hba, req, tag);
1903 spin_unlock_irqrestore(hba->host->host_lock, flags);
1905 while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
1907 hba->ccb[tag].req_type = 0;
1908 hba->mu_status = MU_STATE_STOP;
1913 hba->mu_status = MU_STATE_STOP;
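
Lines 1905-1913 are a bounded poll on the internal stop/sleep request: the interrupt path clears PASSTHRU_REQ_TYPE from the ccb when the firmware completes it, and the stop routine spins until that happens or a deadline passes. Reconstructed from the matches (the deadline constant and the msleep() interval are assumptions):

	before = jiffies;
	while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
			hba->ccb[tag].req_type = 0;	/* give up on the request */
			hba->mu_status = MU_STATE_STOP;
			return;
		}
		msleep(1);
	}
	hba->mu_status = MU_STATE_STOP;
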
1916 static void stex_hba_free(struct st_hba *hba)
1918 stex_free_irq(hba);
1920 destroy_workqueue(hba->work_q);
1922 iounmap(hba->mmio_base);
1924 pci_release_regions(hba->pdev);
1926 kfree(hba->ccb);
1928 dma_free_coherent(&hba->pdev->dev, hba->dma_size,
1929 hba->dma_mem, hba->dma_handle);
1934 struct st_hba *hba = pci_get_drvdata(pdev);
1936 hba->mu_status = MU_STATE_NOCONNECT;
1937 return_abnormal_state(hba, DID_NO_CONNECT);
1938 scsi_remove_host(hba->host);
1940 scsi_block_requests(hba->host);
1942 stex_hba_free(hba);
1944 scsi_host_put(hba->host);
1953 struct st_hba *hba = pci_get_drvdata(pdev);
1955 if (hba->supports_pm == 0) {
1956 stex_hba_stop(hba, ST_IGNORED);
1957 } else if (hba->supports_pm == 1 && S6flag) {
1959 stex_hba_stop(hba, ST_S6);
1961 stex_hba_stop(hba, ST_S5);
1964 static int stex_choice_sleep_mic(struct st_hba *hba, pm_message_t state)
1970 hba->msi_lock = 0;
1979 struct st_hba *hba = pci_get_drvdata(pdev);
1981 if ((hba->cardtype == st_yel || hba->cardtype == st_P3)
1982 && hba->supports_pm == 1)
1983 stex_hba_stop(hba, stex_choice_sleep_mic(hba, state));
1985 stex_hba_stop(hba, ST_IGNORED);
1991 struct st_hba *hba = pci_get_drvdata(pdev);
1993 hba->mu_status = MU_STATE_STARTING;
1994 stex_handshake(hba);