Lines Matching defs:skdev (drivers/block/skd_main.c, the Linux STEC s1120 PCIe SSD block driver)

282 static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
284 u32 val = readl(skdev->mem_map[1] + offset);
286 if (unlikely(skdev->dbg_level >= 2))
287 dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
291 static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
294 writel(val, skdev->mem_map[1] + offset);
295 if (unlikely(skdev->dbg_level >= 2))
296 dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
299 static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
302 writeq(val, skdev->mem_map[1] + offset);
303 if (unlikely(skdev->dbg_level >= 2))
304 dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
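
The three accessors above differ only in width. Reconstructed in full from the fragments (only the braces and the return are filled in), the 32-bit reader looks like this; the write variants call writel()/writeq() against the same BAR 1 mapping and log after the store:

        static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
        {
                u32 val = readl(skdev->mem_map[1] + offset);

                if (unlikely(skdev->dbg_level >= 2))
                        dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
                return val;
        }
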
353 static void skd_destruct(struct skd_device *skdev);
355 static void skd_send_fitmsg(struct skd_device *skdev,
357 static void skd_send_special_fitmsg(struct skd_device *skdev,
359 static bool skd_preop_sg_list(struct skd_device *skdev,
361 static void skd_postop_sg_list(struct skd_device *skdev,
364 static void skd_restart_device(struct skd_device *skdev);
365 static int skd_quiesce_dev(struct skd_device *skdev);
366 static int skd_unquiesce_dev(struct skd_device *skdev);
367 static void skd_disable_interrupts(struct skd_device *skdev);
368 static void skd_isr_fwstate(struct skd_device *skdev);
369 static void skd_recover_requests(struct skd_device *skdev);
370 static void skd_soft_reset(struct skd_device *skdev);
374 static void skd_log_skdev(struct skd_device *skdev, const char *event);
375 static void skd_log_skreq(struct skd_device *skdev,
391 static int skd_in_flight(struct skd_device *skdev)
395 blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);
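
skd_in_flight() counts busy tags by walking the blk-mq tag set. A minimal sketch of that pattern; the callback signature shown is the bool-returning variant used by v5.x kernels (older kernels returned void), and the counting callback body is an assumption:

        static bool skd_inc_in_flight(struct request *rq, void *data, bool reserved)
        {
                int *count = data;

                (*count)++;             /* one busy request found */
                return true;            /* keep iterating */
        }

        static int skd_in_flight(struct skd_device *skdev)
        {
                int count = 0;

                blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);
                return count;
        }
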
444 struct skd_device *skdev = q->queuedata;
446 SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
448 skd_log_skdev(skdev, "req_not_online");
449 switch (skdev->state) {
482 struct skd_device *skdev = q->queuedata;
493 if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
508 dev_dbg(&skdev->pdev->dev,
521 if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
522 dev_dbg(&skdev->pdev->dev, "error Out\n");
528 dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
537 spin_lock_irqsave(&skdev->lock, flags);
538 skmsg = skdev->skmsg;
541 skmsg = &skdev->skmsg_table[tag];
542 skdev->skmsg = skmsg;
580 dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
581 skd_in_flight(skdev));
587 skd_send_fitmsg(skdev, skmsg);
591 skd_send_fitmsg(skdev, skmsg);
592 skdev->skmsg = NULL;
594 spin_unlock_irqrestore(&skdev->lock, flags);
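
The queue_rq fragments above show FIT message batching under skdev->lock: an open message (skdev->skmsg) collects per-request sub-commands and is flushed with skd_send_fitmsg(). A minimal sketch of that control flow, with the per-command FIT encoding elided, the flush condition reduced to the end-of-batch case, and the helper name purely illustrative:

        static blk_status_t skd_queue_rq_sketch(struct blk_mq_hw_ctx *hctx,
                                                const struct blk_mq_queue_data *bd)
        {
                struct request *req = bd->rq;
                struct skd_device *skdev = req->q->queuedata;
                struct skd_fitmsg_context *skmsg;
                unsigned long flags;

                if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
                        /* the not-online helper above decides: fail or retry */
                        return BLK_STS_IOERR;

                blk_mq_start_request(req);

                spin_lock_irqsave(&skdev->lock, flags);
                skmsg = skdev->skmsg;
                if (!skmsg) {
                        u32 tag = blk_mq_unique_tag_to_tag(blk_mq_unique_tag(req));

                        /* Open a new FIT message, indexed by this request's tag. */
                        skmsg = &skdev->skmsg_table[tag];
                        skdev->skmsg = skmsg;
                }

                /* ... append this request's FIT sub-command to skmsg->msg_buf ... */

                if (bd->last) {
                        /* End of the batch (the real code also flushes when full). */
                        skd_send_fitmsg(skdev, skmsg);
                        skdev->skmsg = NULL;
                }
                spin_unlock_irqrestore(&skdev->lock, flags);

                return BLK_STS_OK;
        }
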
603 struct skd_device *skdev = req->q->queuedata;
605 dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n",
618 static bool skd_preop_sg_list(struct skd_device *skdev,
631 n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
639 n_sg = dma_map_sg(&skdev->pdev->dev, sgl, n_sg, skreq->data_dir);
643 SKD_ASSERT(n_sg <= skdev->sgs_per_request);
662 if (unlikely(skdev->dbg_level > 1)) {
663 dev_dbg(&skdev->pdev->dev,
669 dev_dbg(&skdev->pdev->dev,
679 static void skd_postop_sg_list(struct skd_device *skdev,
689 dma_unmap_sg(&skdev->pdev->dev, &skreq->sg[0], skreq->n_sg,
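
skd_preop_sg_list() and skd_postop_sg_list() bracket each request with streaming DMA mappings. A sketch of the submit side, assuming the skreq fields visible in the fragments (sg, n_sg, data_dir, sksg_list); the teardown side is the matching dma_unmap_sg() shown just above:

        static bool skd_preop_sg_list_sketch(struct skd_device *skdev,
                                             struct skd_request_context *skreq,
                                             struct request *req)
        {
                struct scatterlist *sgl = &skreq->sg[0];
                int n_sg;

                /* Collapse the request's bios into a scatterlist... */
                n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
                if (n_sg <= 0)
                        return false;

                /* ...and map it for DMA in the direction of the transfer. */
                n_sg = dma_map_sg(&skdev->pdev->dev, sgl, n_sg, skreq->data_dir);
                if (n_sg <= 0)
                        return false;

                SKD_ASSERT(n_sg <= skdev->sgs_per_request);
                skreq->n_sg = n_sg;

                /* ... copy each mapped segment into the FIT sg descriptor
                 * list (skreq->sksg_list) that the device DMA engine walks ... */
                return true;
        }
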
699 static void skd_timer_tick_not_online(struct skd_device *skdev);
703 struct skd_device *skdev = container_of(work, typeof(*skdev),
711 blk_mq_start_hw_queues(skdev->queue);
716 struct skd_device *skdev = from_timer(skdev, t, timer);
720 if (skdev->state == SKD_DRVR_STATE_FAULT)
726 spin_lock_irqsave(&skdev->lock, reqflags);
728 state = SKD_READL(skdev, FIT_STATUS);
730 if (state != skdev->drive_state)
731 skd_isr_fwstate(skdev);
733 if (skdev->state != SKD_DRVR_STATE_ONLINE)
734 skd_timer_tick_not_online(skdev);
736 mod_timer(&skdev->timer, (jiffies + HZ));
738 spin_unlock_irqrestore(&skdev->lock, reqflags);
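
Pieced together, the housekeeping tick armed by skd_start_timer() re-reads the FIT status once per second, kicks the firmware-state handler on any change, runs the not-online countdowns, and re-arms itself. The masking of the raw FIT_STATUS value with FIT_SR_DRIVE_STATE_MASK is an assumption filled in from the rest of the driver:

        static void skd_timer_tick(struct timer_list *t)
        {
                struct skd_device *skdev = from_timer(skdev, t, timer);
                unsigned long reqflags;
                u32 state;

                if (skdev->state == SKD_DRVR_STATE_FAULT)
                        return;                 /* faulted: stop ticking */

                spin_lock_irqsave(&skdev->lock, reqflags);

                state = SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
                if (state != skdev->drive_state)
                        skd_isr_fwstate(skdev);

                if (skdev->state != SKD_DRVR_STATE_ONLINE)
                        skd_timer_tick_not_online(skdev);

                mod_timer(&skdev->timer, jiffies + HZ); /* next tick in ~1s */

                spin_unlock_irqrestore(&skdev->lock, reqflags);
        }
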
741 static void skd_timer_tick_not_online(struct skd_device *skdev)
743 switch (skdev->state) {
748 dev_dbg(&skdev->pdev->dev,
750 skdev->drive_state, skdev->state);
754 if (skdev->timer_countdown > 0) {
755 skdev->timer_countdown--;
758 skd_recover_requests(skdev);
764 dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
765 skdev->state, skdev->timer_countdown);
766 if (skdev->timer_countdown > 0) {
767 skdev->timer_countdown--;
770 dev_dbg(&skdev->pdev->dev,
772 skdev->state, skdev->timer_countdown);
773 skd_restart_device(skdev);
778 if (skdev->timer_countdown > 0) {
779 skdev->timer_countdown--;
784 skdev->state = SKD_DRVR_STATE_FAULT;
786 dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
787 skdev->drive_state);
791 schedule_work(&skdev->start_queue);
792 skdev->gendisk_on = -1;
793 wake_up_interruptible(&skdev->waitq);
805 if (skdev->timer_countdown > 0) {
806 skdev->timer_countdown--;
811 skdev->state = SKD_DRVR_STATE_FAULT;
812 dev_err(&skdev->pdev->dev,
814 skdev->drive_state);
828 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
829 (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
830 (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
834 skd_recover_requests(skdev);
836 dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
837 skdev->drive_state);
838 pci_disable_device(skdev->pdev);
839 skd_disable_interrupts(skdev);
840 skd_recover_requests(skdev);
845 schedule_work(&skdev->start_queue);
846 skdev->gendisk_on = -1;
847 wake_up_interruptible(&skdev->waitq);
860 static int skd_start_timer(struct skd_device *skdev)
864 timer_setup(&skdev->timer, skd_timer_tick, 0);
866 rc = mod_timer(&skdev->timer, (jiffies + HZ));
868 dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
872 static void skd_kill_timer(struct skd_device *skdev)
874 del_timer_sync(&skdev->timer);
883 static int skd_format_internal_skspcl(struct skd_device *skdev)
885 struct skd_special_context *skspcl = &skdev->internal_skspcl;
911 static void skd_send_internal_skspcl(struct skd_device *skdev,
989 skd_send_special_fitmsg(skdev, skspcl);
992 static void skd_refresh_device_data(struct skd_device *skdev)
994 struct skd_special_context *skspcl = &skdev->internal_skspcl;
996 skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
999 static int skd_chk_read_buf(struct skd_device *skdev,
1013 static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
1019 dev_err(&skdev->pdev->dev,
1025 static void skd_complete_internal(struct skd_device *skdev,
1035 lockdep_assert_held(&skdev->lock);
1037 SKD_ASSERT(skspcl == &skdev->internal_skspcl);
1039 dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);
1041 dma_sync_single_for_cpu(&skdev->pdev->dev,
1051 skd_log_check_status(skdev, status, skerr->key, skerr->code,
1057 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1060 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1062 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1063 dev_dbg(&skdev->pdev->dev,
1065 skdev->state);
1068 dev_dbg(&skdev->pdev->dev,
1070 skd_send_internal_skspcl(skdev, skspcl,
1077 skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
1079 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1080 dev_dbg(&skdev->pdev->dev,
1082 skdev->state);
1085 dev_dbg(&skdev->pdev->dev,
1087 skd_send_internal_skspcl(skdev, skspcl,
1094 if (skd_chk_read_buf(skdev, skspcl) == 0)
1095 skd_send_internal_skspcl(skdev, skspcl,
1098 dev_err(&skdev->pdev->dev,
1100 skdev->connect_retries);
1101 if (skdev->connect_retries <
1103 skdev->connect_retries++;
1104 skd_soft_reset(skdev);
1106 dev_err(&skdev->pdev->dev,
1113 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1114 dev_dbg(&skdev->pdev->dev,
1116 skdev->state);
1119 dev_dbg(&skdev->pdev->dev,
1121 skd_send_internal_skspcl(skdev, skspcl,
1127 skdev->read_cap_is_valid = 0;
1129 skdev->read_cap_last_lba =
1132 skdev->read_cap_blocksize =
1136 dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
1137 skdev->read_cap_last_lba,
1138 skdev->read_cap_blocksize);
1140 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
1142 skdev->read_cap_is_valid = 1;
1144 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
1147 skdev->read_cap_last_lba = ~0;
1148 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
1149 dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
1150 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
1152 dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
1153 skd_send_internal_skspcl(skdev, skspcl,
1159 skdev->inquiry_is_valid = 0;
1161 skdev->inquiry_is_valid = 1;
1164 skdev->inq_serial_num[i] = buf[i + 4];
1165 skdev->inq_serial_num[12] = 0;
1168 if (skd_unquiesce_dev(skdev) < 0)
1169 dev_dbg(&skdev->pdev->dev, "**** failed, to ONLINE device\n");
1171 skdev->connect_retries = 0;
1176 skdev->sync_done = 1;
1178 skdev->sync_done = -1;
1179 wake_up_interruptible(&skdev->waitq);
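
skd_complete_internal() drives device bring-up as a chain of internal commands, each completion launching the next until the driver can go online. Stripped of the retry, error and STOPPING handling visible above, the successful path of the chain is:

        switch (scsi->cdb[0]) {
        case TEST_UNIT_READY:
                skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
                break;
        case WRITE_BUFFER:
                skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
                break;
        case READ_BUFFER:
                if (skd_chk_read_buf(skdev, skspcl) == 0)       /* pattern verified */
                        skd_send_internal_skspcl(skdev, skspcl, READ_CAPACITY);
                break;
        case READ_CAPACITY:
                set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
                skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
                break;
        case INQUIRY:
                skd_unquiesce_dev(skdev);       /* driver goes ONLINE */
                break;
        case SYNCHRONIZE_CACHE:
                skdev->sync_done = 1;           /* or -1 on error */
                wake_up_interruptible(&skdev->waitq);
                break;
        }
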
1193 static void skd_send_fitmsg(struct skd_device *skdev,
1198 dev_dbg(&skdev->pdev->dev, "dma address %pad, busy=%d\n",
1199 &skmsg->mb_dma_address, skd_in_flight(skdev));
1200 dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);
1205 if (unlikely(skdev->dbg_level > 1)) {
1209 dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
1230 dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address,
1236 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
1239 static void skd_send_special_fitmsg(struct skd_device *skdev,
1246 if (unlikely(skdev->dbg_level > 1)) {
1251 dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
1257 dev_dbg(&skdev->pdev->dev,
1265 dev_dbg(&skdev->pdev->dev,
1279 dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address,
1281 dma_sync_single_for_device(&skdev->pdev->dev,
1285 dma_sync_single_for_device(&skdev->pdev->dev,
1293 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
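
Both submit paths end the same way: sync the CPU-written FIT message (and, for special requests, its data and sg buffers) toward the device, then ring the FIT command doorbell with the message's bus address. A sketch of the tail of skd_send_fitmsg(); the message length field and the FIT_QCMD_QID_NORMAL flag in the doorbell word are assumptions taken from the driver's FIT definitions:

        u64 qcmd;

        /* Make the FIT message visible to the device before ringing. */
        dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address,
                                   skmsg->length, DMA_TO_DEVICE);

        /* Doorbell word = bus address of the message plus queue/size flags. */
        qcmd = skmsg->mb_dma_address | FIT_QCMD_QID_NORMAL;
        SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
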
1302 static void skd_complete_other(struct skd_device *skdev,
1352 skd_check_status(struct skd_device *skdev,
1357 dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
1360 dev_dbg(&skdev->pdev->dev,
1390 dev_err(&skdev->pdev->dev,
1401 dev_dbg(&skdev->pdev->dev, "status check: error\n");
1405 dev_dbg(&skdev->pdev->dev, "status check good default\n");
1409 static void skd_resolve_req_exception(struct skd_device *skdev,
1415 switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
1424 skd_log_skreq(skdev, skreq, "retry(busy)");
1426 dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
1427 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
1428 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
1429 skd_quiesce_dev(skdev);
1434 skd_log_skreq(skdev, skreq, "retry");
1449 static void skd_release_skreq(struct skd_device *skdev,
1458 static int skd_isr_completion_posted(struct skd_device *skdev,
1475 lockdep_assert_held(&skdev->lock);
1478 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
1480 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
1486 skerr = &skdev->skerr_table[skdev->skcomp_ix];
1488 dev_dbg(&skdev->pdev->dev,
1490 skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
1491 cmp_cntxt, cmp_status, skd_in_flight(skdev),
1492 cmp_bytes, skdev->proto_ver);
1494 if (cmp_cycle != skdev->skcomp_cycle) {
1495 dev_dbg(&skdev->pdev->dev, "end of completions\n");
1502 skdev->skcomp_ix++;
1503 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
1504 skdev->skcomp_ix = 0;
1505 skdev->skcomp_cycle++;
1517 if (tag >= skdev->num_req_context) {
1521 WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq],
1523 skd_complete_other(skdev, skcmp, skerr);
1527 rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);
1537 dev_err(&skdev->pdev->dev,
1549 skd_log_check_status(skdev, cmp_status, skerr->key,
1555 skd_postop_sg_list(skdev, skreq);
1557 skd_release_skreq(skdev, skreq);
1567 skd_resolve_req_exception(skdev, skreq, rq);
1579 if (skdev->state == SKD_DRVR_STATE_PAUSING &&
1580 skd_in_flight(skdev) == 0) {
1581 skdev->state = SKD_DRVR_STATE_PAUSED;
1582 wake_up_interruptible(&skdev->waitq);
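
The completion ISR consumes a producer/consumer ring using a cycle tag rather than a hardware index: an entry belongs to the host only while its cycle field matches skdev->skcomp_cycle, and the expected cycle advances each time the consumer index wraps. A sketch of the loop, with the entry field names assumed from the driver's FIT structures:

        while (1) {
                struct fit_completion_entry_v1 *skcmp =
                        &skdev->skcomp_table[skdev->skcomp_ix];

                if (skcmp->cycle != skdev->skcomp_cycle)
                        break;          /* ring is drained for now */

                /* ... map skcmp->tag back to a request and complete it ... */

                if (++skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
                        skdev->skcomp_ix = 0;
                        skdev->skcomp_cycle++;  /* expected cycle changes per lap */
                }
        }
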
1588 static void skd_complete_other(struct skd_device *skdev,
1597 lockdep_assert_held(&skdev->lock);
1603 dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
1622 skspcl = &skdev->internal_skspcl;
1625 skd_complete_internal(skdev,
1650 static void skd_reset_skcomp(struct skd_device *skdev)
1652 memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);
1654 skdev->skcomp_ix = 0;
1655 skdev->skcomp_cycle = 1;
1665 struct skd_device *skdev =
1670 spin_lock_irqsave(&skdev->lock, flags);
1676 skd_isr_completion_posted(skdev, 0, &flush_enqueued);
1677 schedule_work(&skdev->start_queue);
1679 spin_unlock_irqrestore(&skdev->lock, flags);
1682 static void skd_isr_msg_from_dev(struct skd_device *skdev);
1687 struct skd_device *skdev = ptr;
1694 spin_lock(&skdev->lock);
1697 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
1702 dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
1714 if (likely (skdev->state
1722 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
1724 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
1725 (skdev->state != SKD_DRVR_STATE_STOPPING))) {
1733 skd_isr_completion_posted(skdev,
1738 skd_isr_fwstate(skdev);
1739 if (skdev->state == SKD_DRVR_STATE_FAULT ||
1740 skdev->state ==
1742 spin_unlock(&skdev->lock);
1748 skd_isr_msg_from_dev(skdev);
1753 schedule_work(&skdev->start_queue);
1756 schedule_work(&skdev->completion_worker);
1758 schedule_work(&skdev->start_queue);
1760 spin_unlock(&skdev->lock);
1765 static void skd_drive_fault(struct skd_device *skdev)
1767 skdev->state = SKD_DRVR_STATE_FAULT;
1768 dev_err(&skdev->pdev->dev, "Drive FAULT\n");
1771 static void skd_drive_disappeared(struct skd_device *skdev)
1773 skdev->state = SKD_DRVR_STATE_DISAPPEARED;
1774 dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
1777 static void skd_isr_fwstate(struct skd_device *skdev)
1782 int prev_driver_state = skdev->state;
1784 sense = SKD_READL(skdev, FIT_STATUS);
1787 dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
1788 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
1791 skdev->drive_state = state;
1793 switch (skdev->drive_state) {
1795 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
1796 skd_disable_interrupts(skdev);
1799 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
1800 skd_recover_requests(skdev);
1801 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
1802 skdev->timer_countdown = SKD_STARTING_TIMO;
1803 skdev->state = SKD_DRVR_STATE_STARTING;
1804 skd_soft_reset(skdev);
1808 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1809 skdev->last_mtd = mtd;
1813 skdev->cur_max_queue_depth = skd_max_queue_depth;
1814 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
1815 skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
1817 skdev->queue_low_water_mark =
1818 skdev->cur_max_queue_depth * 2 / 3 + 1;
1819 if (skdev->queue_low_water_mark < 1)
1820 skdev->queue_low_water_mark = 1;
1821 dev_info(&skdev->pdev->dev,
1823 skdev->cur_max_queue_depth,
1824 skdev->dev_max_queue_depth,
1825 skdev->queue_low_water_mark);
1827 skd_refresh_device_data(skdev);
1831 skdev->state = SKD_DRVR_STATE_BUSY;
1832 skdev->timer_countdown = SKD_BUSY_TIMO;
1833 skd_quiesce_dev(skdev);
1839 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
1840 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
1841 schedule_work(&skdev->start_queue);
1844 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
1845 skdev->timer_countdown = SKD_BUSY_TIMO;
1848 skdev->state = SKD_DRVR_STATE_IDLE;
1851 switch (skdev->state) {
1857 skdev->state = SKD_DRVR_STATE_RESTARTING;
1862 dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
1863 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
1864 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
1873 skd_drive_fault(skdev);
1874 skd_recover_requests(skdev);
1875 schedule_work(&skdev->start_queue);
1880 dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
1882 skd_drive_disappeared(skdev);
1883 skd_recover_requests(skdev);
1884 schedule_work(&skdev->start_queue);
1892 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
1894 skd_skdev_state_to_str(skdev->state), skdev->state);
1899 struct skd_device *const skdev = data;
1905 skd_log_skreq(skdev, skreq, "recover");
1909 skd_postop_sg_list(skdev, skreq);
1917 static void skd_recover_requests(struct skd_device *skdev)
1919 blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);
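
skd_recover_requests() reuses the same tag-set walk to fail everything still in flight after a reset or fault. A sketch of the per-request callback, assuming the skreq state/status fields used elsewhere in the driver and, as before, the bool-returning iterator signature:

        static bool skd_recover_request(struct request *req, void *data, bool reserved)
        {
                struct skd_device *const skdev = data;
                struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);

                if (skreq->state != SKD_REQ_STATE_BUSY)
                        return true;

                skd_log_skreq(skdev, skreq, "recover");

                /* Drop the DMA mappings taken at submit time. */
                if (skreq->n_sg > 0)
                        skd_postop_sg_list(skdev, skreq);

                skreq->state = SKD_REQ_STATE_IDLE;
                skreq->status = BLK_STS_IOERR;
                blk_mq_complete_request(req);
                return true;
        }
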
1922 static void skd_isr_msg_from_dev(struct skd_device *skdev)
1928 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
1930 dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
1931 skdev->last_mtd);
1934 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
1939 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
1941 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
1942 dev_err(&skdev->pdev->dev, "protocol mismatch\n");
1943 dev_err(&skdev->pdev->dev, " got=%d support=%d\n",
1944 skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
1945 dev_err(&skdev->pdev->dev, " please upgrade driver\n");
1946 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
1947 skd_soft_reset(skdev);
1951 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1952 skdev->last_mtd = mtd;
1956 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
1959 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1960 skdev->last_mtd = mtd;
1964 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
1966 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1967 skdev->last_mtd = mtd;
1971 skd_reset_skcomp(skdev);
1972 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
1973 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1974 skdev->last_mtd = mtd;
1979 skdev->connect_time_stamp = (u32)ktime_get_real_seconds();
1980 data = skdev->connect_time_stamp & 0xFFFF;
1982 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1983 skdev->last_mtd = mtd;
1987 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
1988 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
1990 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1991 skdev->last_mtd = mtd;
1995 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
1997 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1998 skdev->last_mtd = mtd;
2000 dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
2001 skdev->connect_time_stamp, skdev->drive_jiffies);
2005 skdev->last_mtd = 0;
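
skd_isr_msg_from_dev() implements a one-outstanding-message handshake: every reply read from FIT_MSG_FROM_DEVICE must match the type of the last message written (skdev->last_mtd), and each handled reply immediately posts the next request. A sketch of the first step; FIT_MTD_FITFW_INIT and FIT_MTD_GET_CMDQ_DEPTH are taken from the driver's FIT header and should be treated as assumptions:

        u32 mfd, mtd;

        mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);

        /* Ignore replies that do not answer the message we last sent. */
        if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
                return;

        switch (FIT_MXD_TYPE(mfd)) {
        case FIT_MTD_FITFW_INIT:
                skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
                if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
                        skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
                        skd_soft_reset(skdev);
                        break;
                }
                mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
                SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
                skdev->last_mtd = mtd;
                break;
        /* ... further cases: queue depth, completion queue address,
         *     host id, time-stamp sync, arm queue ... */
        }
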
2016 static void skd_disable_interrupts(struct skd_device *skdev)
2020 sense = SKD_READL(skdev, FIT_CONTROL);
2022 SKD_WRITEL(skdev, sense, FIT_CONTROL);
2023 dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
2028 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
2031 static void skd_enable_interrupts(struct skd_device *skdev)
2041 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
2042 dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
2044 val = SKD_READL(skdev, FIT_CONTROL);
2046 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
2047 SKD_WRITEL(skdev, val, FIT_CONTROL);
2056 static void skd_soft_reset(struct skd_device *skdev)
2060 val = SKD_READL(skdev, FIT_CONTROL);
2062 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
2063 SKD_WRITEL(skdev, val, FIT_CONTROL);
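
Interrupt control and reset are plain read-modify-write operations on FIT_CONTROL plus the host interrupt mask: disable clears the enable bit and masks everything (the ~0 write above), enable unmasks the interesting sources, and skd_soft_reset() sets FIT_CR_SOFT_RESET. A sketch of the reset; the OR of the bit is inferred, not shown in the fragments:

        static void skd_soft_reset(struct skd_device *skdev)
        {
                u32 val;

                val = SKD_READL(skdev, FIT_CONTROL);
                val |= FIT_CR_SOFT_RESET;       /* bit name from the FIT header */
                dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
                SKD_WRITEL(skdev, val, FIT_CONTROL);
        }
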
2066 static void skd_start_device(struct skd_device *skdev)
2072 spin_lock_irqsave(&skdev->lock, flags);
2075 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2077 sense = SKD_READL(skdev, FIT_STATUS);
2079 dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
2082 skdev->drive_state = state;
2083 skdev->last_mtd = 0;
2085 skdev->state = SKD_DRVR_STATE_STARTING;
2086 skdev->timer_countdown = SKD_STARTING_TIMO;
2088 skd_enable_interrupts(skdev);
2090 switch (skdev->drive_state) {
2092 dev_err(&skdev->pdev->dev, "Drive offline...\n");
2096 dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
2097 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
2098 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
2102 dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
2103 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
2104 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
2108 dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
2109 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
2110 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
2115 skd_soft_reset(skdev);
2119 dev_err(&skdev->pdev->dev, "Drive Busy...\n");
2120 skdev->state = SKD_DRVR_STATE_BUSY;
2121 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
2125 dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
2133 skd_drive_fault(skdev);
2135 dev_dbg(&skdev->pdev->dev, "starting queue\n");
2136 schedule_work(&skdev->start_queue);
2137 skdev->gendisk_on = -1;
2138 wake_up_interruptible(&skdev->waitq);
2144 skd_drive_disappeared(skdev);
2146 dev_dbg(&skdev->pdev->dev,
2148 schedule_work(&skdev->start_queue);
2149 skdev->gendisk_on = -1;
2150 wake_up_interruptible(&skdev->waitq);
2154 dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
2155 skdev->drive_state);
2159 state = SKD_READL(skdev, FIT_CONTROL);
2160 dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
2162 state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2163 dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
2165 state = SKD_READL(skdev, FIT_INT_MASK_HOST);
2166 dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
2168 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
2169 dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
2171 state = SKD_READL(skdev, FIT_HW_VERSION);
2172 dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
2174 spin_unlock_irqrestore(&skdev->lock, flags);
2177 static void skd_stop_device(struct skd_device *skdev)
2180 struct skd_special_context *skspcl = &skdev->internal_skspcl;
2184 spin_lock_irqsave(&skdev->lock, flags);
2186 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
2187 dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
2192 dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
2196 skdev->state = SKD_DRVR_STATE_SYNCING;
2197 skdev->sync_done = 0;
2199 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
2201 spin_unlock_irqrestore(&skdev->lock, flags);
2203 wait_event_interruptible_timeout(skdev->waitq,
2204 (skdev->sync_done), (10 * HZ));
2206 spin_lock_irqsave(&skdev->lock, flags);
2208 switch (skdev->sync_done) {
2210 dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
2213 dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
2216 dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
2220 skdev->state = SKD_DRVR_STATE_STOPPING;
2221 spin_unlock_irqrestore(&skdev->lock, flags);
2223 skd_kill_timer(skdev);
2225 spin_lock_irqsave(&skdev->lock, flags);
2226 skd_disable_interrupts(skdev);
2230 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2231 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
2233 spin_unlock_irqrestore(&skdev->lock, flags);
2238 SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
2246 dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
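
skd_stop_device() flushes the drive cache synchronously before the final reset: it fires the internal SYNCHRONIZE_CACHE command under the lock and then sleeps on skdev->waitq until skd_complete_internal() reports the result through sync_done, or ten seconds pass. The core of that wait, assembled from the fragments above:

        spin_lock_irqsave(&skdev->lock, flags);
        skdev->state = SKD_DRVR_STATE_SYNCING;
        skdev->sync_done = 0;
        skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
        spin_unlock_irqrestore(&skdev->lock, flags);

        /* sync_done: 0 = timed out, 1 = cache flushed, -1 = flush failed */
        wait_event_interruptible_timeout(skdev->waitq, skdev->sync_done, 10 * HZ);
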
2251 static void skd_restart_device(struct skd_device *skdev)
2256 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2258 state = SKD_READL(skdev, FIT_STATUS);
2260 dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
2263 skdev->drive_state = state;
2264 skdev->last_mtd = 0;
2266 skdev->state = SKD_DRVR_STATE_RESTARTING;
2267 skdev->timer_countdown = SKD_RESTARTING_TIMO;
2269 skd_soft_reset(skdev);
2273 static int skd_quiesce_dev(struct skd_device *skdev)
2277 switch (skdev->state) {
2280 dev_dbg(&skdev->pdev->dev, "stopping queue\n");
2281 blk_mq_stop_hw_queues(skdev->queue);
2293 dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
2294 skdev->state);
2300 static int skd_unquiesce_dev(struct skd_device *skdev)
2302 int prev_driver_state = skdev->state;
2304 skd_log_skdev(skdev, "unquiesce");
2305 if (skdev->state == SKD_DRVR_STATE_ONLINE) {
2306 dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
2309 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
2318 skdev->state = SKD_DRVR_STATE_BUSY;
2319 dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
2327 switch (skdev->state) {
2337 skdev->state = SKD_DRVR_STATE_ONLINE;
2338 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
2340 prev_driver_state, skd_skdev_state_to_str(skdev->state),
2341 skdev->state);
2342 dev_dbg(&skdev->pdev->dev,
2344 dev_dbg(&skdev->pdev->dev, "starting queue\n");
2345 dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
2346 schedule_work(&skdev->start_queue);
2347 skdev->gendisk_on = 1;
2348 wake_up_interruptible(&skdev->waitq);
2353 dev_dbg(&skdev->pdev->dev,
2355 skdev->state);
2369 struct skd_device *skdev = skd_host_data;
2372 spin_lock_irqsave(&skdev->lock, flags);
2373 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2374 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2375 dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
2376 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2377 SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
2378 spin_unlock_irqrestore(&skdev->lock, flags);
2384 struct skd_device *skdev = skd_host_data;
2387 spin_lock_irqsave(&skdev->lock, flags);
2388 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2389 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2390 SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
2391 skd_isr_fwstate(skdev);
2392 spin_unlock_irqrestore(&skdev->lock, flags);
2398 struct skd_device *skdev = skd_host_data;
2403 spin_lock_irqsave(&skdev->lock, flags);
2404 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2405 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2406 SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
2407 deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
2410 schedule_work(&skdev->start_queue);
2413 schedule_work(&skdev->completion_worker);
2415 schedule_work(&skdev->start_queue);
2417 spin_unlock_irqrestore(&skdev->lock, flags);
2424 struct skd_device *skdev = skd_host_data;
2427 spin_lock_irqsave(&skdev->lock, flags);
2428 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2429 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2430 SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
2431 skd_isr_msg_from_dev(skdev);
2432 spin_unlock_irqrestore(&skdev->lock, flags);
2438 struct skd_device *skdev = skd_host_data;
2441 spin_lock_irqsave(&skdev->lock, flags);
2442 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2443 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2444 SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
2445 spin_unlock_irqrestore(&skdev->lock, flags);
2484 static int skd_acquire_msix(struct skd_device *skdev)
2487 struct pci_dev *pdev = skdev->pdev;
2492 dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
2496 skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
2498 if (!skdev->msix_entries) {
2500 dev_err(&skdev->pdev->dev, "msix table allocation error\n");
2506 struct skd_msix_entry *qentry = &skdev->msix_entries[i];
2509 "%s%d-msix %s", DRV_NAME, skdev->devno,
2512 rc = devm_request_irq(&skdev->pdev->dev,
2513 pci_irq_vector(skdev->pdev, i),
2515 qentry->isr_name, skdev);
2517 dev_err(&skdev->pdev->dev,
2524 dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
2530 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
2532 kfree(skdev->msix_entries);
2533 skdev->msix_entries = NULL;
2537 static int skd_acquire_irq(struct skd_device *skdev)
2539 struct pci_dev *pdev = skdev->pdev;
2544 rc = skd_acquire_msix(skdev);
2548 dev_err(&skdev->pdev->dev,
2552 snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
2553 skdev->devno);
2559 dev_err(&skdev->pdev->dev,
2566 skdev->isr_name, skdev);
2569 dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
2577 static void skd_release_irq(struct skd_device *skdev)
2579 struct pci_dev *pdev = skdev->pdev;
2581 if (skdev->msix_entries) {
2586 skdev);
2589 kfree(skdev->msix_entries);
2590 skdev->msix_entries = NULL;
2592 devm_free_irq(&pdev->dev, pdev->irq, skdev);
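
skd_acquire_msix() allocates a fixed vector count and binds one handler per vector (reserved, FW-state, completion, msg-from-device, queue-full), falling back to MSI/INTx in skd_acquire_irq() if that fails. A minimal sketch of the MSI-X path; the single placeholder handler and the literal interrupt name stand in for the per-vector table (skdev->msix_entries) kept by the real driver:

        static irqreturn_t skd_vec_isr_sketch(int irq, void *skd_host_data)
        {
                return IRQ_HANDLED;     /* stand-in for the per-vector ISRs */
        }

        static int skd_acquire_msix_sketch(struct skd_device *skdev)
        {
                struct pci_dev *pdev = skdev->pdev;
                int i, rc;

                /* Ask for the full, fixed vector count or nothing. */
                rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT,
                                           SKD_MAX_MSIX_COUNT, PCI_IRQ_MSIX);
                if (rc < 0)
                        return rc;

                for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
                        rc = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
                                              skd_vec_isr_sketch, 0, "skd-msix",
                                              skdev);
                        if (rc)
                                goto undo;
                }
                return 0;

        undo:
                while (--i >= 0)
                        devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
                pci_free_irq_vectors(pdev);
                return rc;
        }
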
2604 static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
2608 struct device *dev = &skdev->pdev->dev;
2623 static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s,
2630 dma_unmap_single(&skdev->pdev->dev, dma_handle,
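
skd_alloc_dma() and skd_free_dma() pair a kmem_cache allocation with a streaming DMA mapping sized to the cache object. A sketch of the allocator, with the gfp and direction parameters assumed from its callers:

        static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
                                   dma_addr_t *dma_handle, gfp_t gfp,
                                   enum dma_data_direction dir)
        {
                struct device *dev = &skdev->pdev->dev;
                void *buf;

                buf = kmem_cache_alloc(s, gfp);
                if (!buf)
                        return NULL;

                *dma_handle = dma_map_single(dev, buf, kmem_cache_size(s), dir);
                if (dma_mapping_error(dev, *dma_handle)) {
                        kmem_cache_free(s, buf);
                        return NULL;
                }
                return buf;
        }
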
2635 static int skd_cons_skcomp(struct skd_device *skdev)
2640 dev_dbg(&skdev->pdev->dev,
2644 skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
2645 &skdev->cq_dma_address, GFP_KERNEL);
2652 skdev->skcomp_table = skcomp;
2653 skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
2661 static int skd_cons_skmsg(struct skd_device *skdev)
2666 dev_dbg(&skdev->pdev->dev,
2668 sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
2669 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
2671 skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
2674 if (skdev->skmsg_table == NULL) {
2679 for (i = 0; i < skdev->num_fitmsg_context; i++) {
2682 skmsg = &skdev->skmsg_table[i];
2686 skmsg->msg_buf = dma_alloc_coherent(&skdev->pdev->dev,
2705 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
2711 sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr,
2730 static void skd_free_sg_list(struct skd_device *skdev,
2737 skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr,
2744 struct skd_device *skdev = set->driver_data;
2750 skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
2759 struct skd_device *skdev = set->driver_data;
2762 skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address);
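
The per-request FIT sg descriptor lists are owned by blk-mq: .init_request allocates one when a tag is created and .exit_request frees it, so queue_rq never allocates. A sketch of both callbacks, assuming the scatterlist sits directly behind the driver pdu (which is what the cmd_size computation in the skd_cons_disk fragments below sizes for):

        static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq,
                                    unsigned int hctx_idx, unsigned int numa_node)
        {
                struct skd_device *skdev = set->driver_data;
                struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);

                /* The scatterlist lives right behind the pdu (see cmd_size). */
                skreq->sg = (struct scatterlist *)(skreq + 1);
                sg_init_table(skreq->sg, skd_sgs_per_request);

                skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
                                                    &skreq->sksg_dma_address);
                return skreq->sksg_list ? 0 : -ENOMEM;
        }

        static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq,
                                     unsigned int hctx_idx)
        {
                struct skd_device *skdev = set->driver_data;
                struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);

                skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address);
        }
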
2765 static int skd_cons_sksb(struct skd_device *skdev)
2770 skspcl = &skdev->internal_skspcl;
2775 skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache,
2784 skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache,
2792 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
2799 if (!skd_format_internal_skspcl(skdev)) {
2816 static int skd_cons_disk(struct skd_device *skdev)
2829 skdev->disk = disk;
2830 sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
2832 disk->major = skdev->major;
2833 disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
2835 disk->private_data = skdev;
2837 memset(&skdev->tag_set, 0, sizeof(skdev->tag_set));
2838 skdev->tag_set.ops = &skd_mq_ops;
2839 skdev->tag_set.nr_hw_queues = 1;
2840 skdev->tag_set.queue_depth = skd_max_queue_depth;
2841 skdev->tag_set.cmd_size = sizeof(struct skd_request_context) +
2842 skdev->sgs_per_request * sizeof(struct scatterlist);
2843 skdev->tag_set.numa_node = NUMA_NO_NODE;
2844 skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
2846 skdev->tag_set.driver_data = skdev;
2847 rc = blk_mq_alloc_tag_set(&skdev->tag_set);
2850 q = blk_mq_init_queue(&skdev->tag_set);
2852 blk_mq_free_tag_set(&skdev->tag_set);
2856 q->queuedata = skdev;
2858 skdev->queue = q;
2862 blk_queue_max_segments(q, skdev->sgs_per_request);
2873 spin_lock_irqsave(&skdev->lock, flags);
2874 dev_dbg(&skdev->pdev->dev, "stopping queue\n");
2875 blk_mq_stop_hw_queues(skdev->queue);
2876 spin_unlock_irqrestore(&skdev->lock, flags);
2887 struct skd_device *skdev;
2892 skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
2894 if (!skdev) {
2899 skdev->state = SKD_DRVR_STATE_LOAD;
2900 skdev->pdev = pdev;
2901 skdev->devno = skd_next_devno++;
2902 skdev->major = blk_major;
2903 skdev->dev_max_queue_depth = 0;
2905 skdev->num_req_context = skd_max_queue_depth;
2906 skdev->num_fitmsg_context = skd_max_queue_depth;
2907 skdev->cur_max_queue_depth = 1;
2908 skdev->queue_low_water_mark = 1;
2909 skdev->proto_ver = 99;
2910 skdev->sgs_per_request = skd_sgs_per_request;
2911 skdev->dbg_level = skd_dbg_level;
2913 spin_lock_init(&skdev->lock);
2915 INIT_WORK(&skdev->start_queue, skd_start_queue);
2916 INIT_WORK(&skdev->completion_worker, skd_completion_worker);
2919 skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0,
2921 if (!skdev->msgbuf_cache)
2923 WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size,
2925 kmem_cache_size(skdev->msgbuf_cache), size);
2927 skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0,
2929 if (!skdev->sglist_cache)
2931 WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size,
2933 kmem_cache_size(skdev->sglist_cache), size);
2935 skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0,
2937 if (!skdev->databuf_cache)
2939 WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size,
2941 kmem_cache_size(skdev->databuf_cache), size);
2943 dev_dbg(&skdev->pdev->dev, "skcomp\n");
2944 rc = skd_cons_skcomp(skdev);
2948 dev_dbg(&skdev->pdev->dev, "skmsg\n");
2949 rc = skd_cons_skmsg(skdev);
2953 dev_dbg(&skdev->pdev->dev, "sksb\n");
2954 rc = skd_cons_sksb(skdev);
2958 dev_dbg(&skdev->pdev->dev, "disk\n");
2959 rc = skd_cons_disk(skdev);
2963 dev_dbg(&skdev->pdev->dev, "VICTORY\n");
2964 return skdev;
2967 dev_dbg(&skdev->pdev->dev, "construct failed\n");
2968 skd_destruct(skdev);
2978 static void skd_free_skcomp(struct skd_device *skdev)
2980 if (skdev->skcomp_table)
2981 dma_free_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
2982 skdev->skcomp_table, skdev->cq_dma_address);
2984 skdev->skcomp_table = NULL;
2985 skdev->cq_dma_address = 0;
2988 static void skd_free_skmsg(struct skd_device *skdev)
2992 if (skdev->skmsg_table == NULL)
2995 for (i = 0; i < skdev->num_fitmsg_context; i++) {
2998 skmsg = &skdev->skmsg_table[i];
3001 dma_free_coherent(&skdev->pdev->dev, SKD_N_FITMSG_BYTES,
3009 kfree(skdev->skmsg_table);
3010 skdev->skmsg_table = NULL;
3013 static void skd_free_sksb(struct skd_device *skdev)
3015 struct skd_special_context *skspcl = &skdev->internal_skspcl;
3017 skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf,
3023 skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf,
3029 skd_free_sg_list(skdev, skspcl->req.sksg_list,
3036 static void skd_free_disk(struct skd_device *skdev)
3038 struct gendisk *disk = skdev->disk;
3043 if (skdev->queue) {
3044 blk_cleanup_queue(skdev->queue);
3045 skdev->queue = NULL;
3050 if (skdev->tag_set.tags)
3051 blk_mq_free_tag_set(&skdev->tag_set);
3054 skdev->disk = NULL;
3057 static void skd_destruct(struct skd_device *skdev)
3059 if (skdev == NULL)
3062 cancel_work_sync(&skdev->start_queue);
3064 dev_dbg(&skdev->pdev->dev, "disk\n");
3065 skd_free_disk(skdev);
3067 dev_dbg(&skdev->pdev->dev, "sksb\n");
3068 skd_free_sksb(skdev);
3070 dev_dbg(&skdev->pdev->dev, "skmsg\n");
3071 skd_free_skmsg(skdev);
3073 dev_dbg(&skdev->pdev->dev, "skcomp\n");
3074 skd_free_skcomp(skdev);
3076 kmem_cache_destroy(skdev->databuf_cache);
3077 kmem_cache_destroy(skdev->sglist_cache);
3078 kmem_cache_destroy(skdev->msgbuf_cache);
3080 dev_dbg(&skdev->pdev->dev, "skdev\n");
3081 kfree(skdev);
3092 struct skd_device *skdev;
3095 skdev = bdev->bd_disk->private_data;
3097 dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
3100 if (skdev->read_cap_is_valid) {
3101 capacity = get_capacity(skdev->disk);
3111 static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
3113 dev_dbg(&skdev->pdev->dev, "add_disk\n");
3114 device_add_disk(parent, skdev->disk, NULL);
3137 static char *skd_pci_info(struct skd_device *skdev, char *str)
3142 pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
3150 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
3171 struct skd_device *skdev;
3198 skdev = skd_construct(pdev);
3199 if (skdev == NULL) {
3204 skd_pci_info(skdev, pci_str);
3212 skdev->pcie_error_reporting_is_enabled = 0;
3214 skdev->pcie_error_reporting_is_enabled = 1;
3216 pci_set_drvdata(pdev, skdev);
3219 skdev->mem_phys[i] = pci_resource_start(pdev, i);
3220 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
3221 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
3222 skdev->mem_size[i]);
3223 if (!skdev->mem_map[i]) {
3230 skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
3231 skdev->mem_size[i]);
3234 rc = skd_acquire_irq(skdev);
3240 rc = skd_start_timer(skdev);
3244 init_waitqueue_head(&skdev->waitq);
3246 skd_start_device(skdev);
3248 rc = wait_event_interruptible_timeout(skdev->waitq,
3249 (skdev->gendisk_on),
3251 if (skdev->gendisk_on > 0) {
3253 skd_bdev_attach(&pdev->dev, skdev);
3269 skd_stop_device(skdev);
3270 skd_release_irq(skdev);
3274 if (skdev->mem_map[i])
3275 iounmap(skdev->mem_map[i]);
3277 if (skdev->pcie_error_reporting_is_enabled)
3280 skd_destruct(skdev);
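
Probe and resume map the device BARs the same way before requesting interrupts, starting the timer, and waiting on skdev->waitq for gendisk_on before attaching the disk. A sketch of the mapping loop; the loop bound SKD_MAX_BARS and the error label are assumptions:

        for (i = 0; i < SKD_MAX_BARS; i++) {
                skdev->mem_phys[i] = pci_resource_start(pdev, i);
                skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
                skdev->mem_map[i] = ioremap(skdev->mem_phys[i], skdev->mem_size[i]);
                if (!skdev->mem_map[i]) {
                        rc = -ENODEV;
                        goto err_out_iounmap;   /* unmap whatever was mapped */
                }
        }
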
3294 struct skd_device *skdev;
3296 skdev = pci_get_drvdata(pdev);
3297 if (!skdev) {
3301 skd_stop_device(skdev);
3302 skd_release_irq(skdev);
3305 if (skdev->mem_map[i])
3306 iounmap(skdev->mem_map[i]);
3308 if (skdev->pcie_error_reporting_is_enabled)
3311 skd_destruct(skdev);
3323 struct skd_device *skdev;
3325 skdev = pci_get_drvdata(pdev);
3326 if (!skdev) {
3331 skd_stop_device(skdev);
3333 skd_release_irq(skdev);
3336 if (skdev->mem_map[i])
3337 iounmap(skdev->mem_map[i]);
3339 if (skdev->pcie_error_reporting_is_enabled)
3353 struct skd_device *skdev;
3355 skdev = pci_get_drvdata(pdev);
3356 if (!skdev) {
3384 skdev->pcie_error_reporting_is_enabled = 0;
3386 skdev->pcie_error_reporting_is_enabled = 1;
3390 skdev->mem_phys[i] = pci_resource_start(pdev, i);
3391 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
3392 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
3393 skdev->mem_size[i]);
3394 if (!skdev->mem_map[i]) {
3400 skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
3401 skdev->mem_size[i]);
3403 rc = skd_acquire_irq(skdev);
3409 rc = skd_start_timer(skdev);
3413 init_waitqueue_head(&skdev->waitq);
3415 skd_start_device(skdev);
3420 skd_stop_device(skdev);
3421 skd_release_irq(skdev);
3425 if (skdev->mem_map[i])
3426 iounmap(skdev->mem_map[i]);
3428 if (skdev->pcie_error_reporting_is_enabled)
3441 struct skd_device *skdev;
3445 skdev = pci_get_drvdata(pdev);
3446 if (!skdev) {
3452 skd_stop_device(skdev);
3566 static void skd_log_skdev(struct skd_device *skdev, const char *event)
3568 dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
3569 dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n",
3570 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3571 skd_skdev_state_to_str(skdev->state), skdev->state);
3572 dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n",
3573 skd_in_flight(skdev), skdev->cur_max_queue_depth,
3574 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
3575 dev_dbg(&skdev->pdev->dev, " cycle=%d cycle_ix=%d\n",
3576 skdev->skcomp_cycle, skdev->skcomp_ix);
3579 static void skd_log_skreq(struct skd_device *skdev,
3586 dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
3587 dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
3590 dev_dbg(&skdev->pdev->dev, " sg_dir=%d n_sg=%d\n",
3593 dev_dbg(&skdev->pdev->dev,