Lines Matching refs: cb
90 static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
95 elem_size = cb->host->sg_tablesize * elem_align;
96 cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
98 if (cb->sg_pool == NULL) {
99 shost_printk(KERN_ERR, cb->host,
104 cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
107 if (!cb->dcdb_pool) {
108 dma_pool_destroy(cb->sg_pool);
109 cb->sg_pool = NULL;
110 shost_printk(KERN_ERR, cb->host,
115 snprintf(cb->work_q_name, sizeof(cb->work_q_name),
116 "myrb_wq_%d", cb->host->host_no);
117 cb->work_q = create_singlethread_workqueue(cb->work_q_name);
118 if (!cb->work_q) {
119 dma_pool_destroy(cb->dcdb_pool);
120 cb->dcdb_pool = NULL;
121 dma_pool_destroy(cb->sg_pool);
122 cb->sg_pool = NULL;
123 shost_printk(KERN_ERR, cb->host,
131 INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
132 queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
140 static void myrb_destroy_mempools(struct myrb_hba *cb)
142 cancel_delayed_work_sync(&cb->monitor_work);
143 destroy_workqueue(cb->work_q);
145 dma_pool_destroy(cb->sg_pool);
146 dma_pool_destroy(cb->dcdb_pool);
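A condensed sketch of the create/teardown pairing shown by lines 90-146: each resource is allocated in order and, on any failure, everything allocated so far is unwound in reverse before returning false. The demo_* names and fixed element sizes are illustrative assumptions; the dma_pool_* and workqueue calls are the real APIs the listing shows.

    #include <linux/dmapool.h>
    #include <linux/pci.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct demo_hba {
            struct dma_pool *sg_pool;
            struct dma_pool *dcdb_pool;
            struct workqueue_struct *work_q;
            struct delayed_work monitor_work;
    };

    static void demo_monitor(struct work_struct *work) { /* stub */ }

    static bool demo_create_mempools(struct pci_dev *pdev, struct demo_hba *h)
    {
            h->sg_pool = dma_pool_create("demo_sg", &pdev->dev, 512, 64, 0);
            if (!h->sg_pool)
                    return false;
            h->dcdb_pool = dma_pool_create("demo_dcdb", &pdev->dev, 64,
                                           sizeof(unsigned int), 0);
            if (!h->dcdb_pool)
                    goto fail_sg;               /* unwind in reverse order */
            h->work_q = create_singlethread_workqueue("demo_wq");
            if (!h->work_q)
                    goto fail_dcdb;
            INIT_DELAYED_WORK(&h->monitor_work, demo_monitor);
            queue_delayed_work(h->work_q, &h->monitor_work, 1); /* lines 131-132 */
            return true;

    fail_dcdb:
            dma_pool_destroy(h->dcdb_pool);
    fail_sg:
            dma_pool_destroy(h->sg_pool);
            return false;
    }

    static void demo_destroy_mempools(struct demo_hba *h)
    {
            cancel_delayed_work_sync(&h->monitor_work); /* stop the monitor first */
            destroy_workqueue(h->work_q);
            dma_pool_destroy(h->sg_pool);  /* dma_pool_destroy(NULL) is a no-op */
            dma_pool_destroy(h->dcdb_pool);
    }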
163 static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
165 void __iomem *base = cb->io_base;
167 union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;
169 cb->write_cmd_mbox(next_mbox, mbox);
170 if (cb->prev_cmd_mbox1->words[0] == 0 ||
171 cb->prev_cmd_mbox2->words[0] == 0)
172 cb->get_cmd_mbox(base);
173 cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
174 cb->prev_cmd_mbox1 = next_mbox;
175 if (++next_mbox > cb->last_cmd_mbox)
176 next_mbox = cb->first_cmd_mbox;
177 cb->next_cmd_mbox = next_mbox;
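myrb_qcmd() walks a circular array of command mailboxes: the command is copied into next_cmd_mbox, the hardware doorbell (get_cmd_mbox) is rung only when one of the two previously posted slots already reads as consumed (words[0] == 0), then the two history pointers rotate and next_cmd_mbox wraps from last back to first. A self-contained user-space model of just that pointer arithmetic; slot consumption by the controller is not modeled, so the doorbell goes quiet once the ring fills:

    #include <stdio.h>

    #define NSLOTS 4                /* stand-in for MYRB_CMD_MBOX_COUNT */

    struct mbox { unsigned int words[1]; };

    static struct mbox ring[NSLOTS];
    static struct mbox *first = &ring[0], *last = &ring[NSLOTS - 1];
    static struct mbox *next_mbox, *prev1, *prev2;

    static void post_cmd(unsigned int cmd)
    {
            next_mbox->words[0] = cmd;      /* cb->write_cmd_mbox(), line 169 */
            if (prev1->words[0] == 0 || prev2->words[0] == 0)
                    printf("doorbell for slot %ld\n", (long)(next_mbox - ring));
            prev2 = prev1;                  /* rotate history, lines 173-174 */
            prev1 = next_mbox;
            if (++next_mbox > last)         /* wrap, lines 175-176 */
                    next_mbox = first;
    }

    int main(void)
    {
            /* same initial state myrb_enable_mmio() sets up (lines 861-863) */
            next_mbox = first;
            prev1 = last;
            prev2 = last - 1;
            for (unsigned int i = 1; i <= 6; i++)
                    post_cmd(i);
            return 0;
    }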
185 static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
193 spin_lock_irqsave(&cb->queue_lock, flags);
194 cb->qcmd(cb, cmd_blk);
195 spin_unlock_irqrestore(&cb->queue_lock, flags);
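Lines 193-195 are the submission half of a synchronous wrapper: the mailbox queue is only ever touched under queue_lock, and the caller then sleeps until the interrupt path completes the command. A sketch using the stock on-stack completion pattern; the completion and status field names on myrb_cmdblk are assumptions, not taken from the listing:

    #include <linux/completion.h>
    #include <linux/spinlock.h>

    static unsigned short demo_exec_cmd(struct myrb_hba *cb,
                                        struct myrb_cmdblk *cmd_blk)
    {
            DECLARE_COMPLETION_ONSTACK(cmpl);
            unsigned long flags;

            cmd_blk->completion = &cmpl;    /* assumed handshake field */
            spin_lock_irqsave(&cb->queue_lock, flags);
            cb->qcmd(cb, cmd_blk);          /* lines 193-195 */
            spin_unlock_irqrestore(&cb->queue_lock, flags);

            wait_for_completion(&cmpl);     /* woken from the IRQ handler */
            return cmd_blk->status;         /* assumed result field */
    }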
206 static unsigned short myrb_exec_type3(struct myrb_hba *cb,
209 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
213 mutex_lock(&cb->dcmd_mutex);
218 status = myrb_exec_cmd(cb, cmd_blk);
219 mutex_unlock(&cb->dcmd_mutex);
228 static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
232 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
237 pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
240 if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
243 mutex_lock(&cb->dcmd_mutex);
250 status = myrb_exec_cmd(cb, cmd_blk);
251 mutex_unlock(&cb->dcmd_mutex);
252 dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
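myrb_exec_type3D() brackets the command with streaming DMA: map the caller's buffer, bail out if the mapping failed, execute under dcmd_mutex so only one driver-initiated command is in flight, then unmap. Condensed sketch; the mailbox setup is elided and the failure status code is an assumption:

    static unsigned short demo_exec_type3D(struct myrb_hba *cb,
                                           struct myrb_pdev_state *pdev_info)
    {
            dma_addr_t pdev_info_addr;
            unsigned short status;

            pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
                                            sizeof(*pdev_info), DMA_FROM_DEVICE);
            if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
                    return MYRB_STATUS_SUBSYS_FAILED;   /* assumed error code */

            mutex_lock(&cb->dcmd_mutex);
            /* ... point the type3D mailbox at pdev_info_addr ... */
            status = myrb_exec_cmd(cb, &cb->dcmd_blk);
            mutex_unlock(&cb->dcmd_mutex);

            dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
                             sizeof(*pdev_info), DMA_FROM_DEVICE);
            return status;
    }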
279 * @cb: pointer to the hba structure
284 static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
286 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
292 ev_buf = dma_alloc_coherent(&cb->pdev->dev,
305 status = myrb_exec_cmd(cb, cmd_blk);
307 shost_printk(KERN_INFO, cb->host,
320 shost_printk(KERN_CRIT, cb->host,
325 shost_printk(KERN_CRIT, cb->host,
331 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
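myrb_get_event() uses the throwaway coherent-buffer shape that recurs in several helpers here (also myrb_get_rbld_progress() and myrb_bgi_control()): allocate a DMA-coherent scratch buffer, aim the mailbox at it, execute, report, free. Compressed sketch with the mailbox construction elided:

    static void demo_get_event(struct myrb_hba *cb, unsigned int event)
    {
            struct myrb_log_entry *ev_buf;
            dma_addr_t ev_addr;
            unsigned short status;

            ev_buf = dma_alloc_coherent(&cb->pdev->dev, sizeof(*ev_buf),
                                        &ev_addr, GFP_KERNEL);
            if (!ev_buf)
                    return;
            /* ... build a GET_EVENT mailbox carrying ev_addr ... */
            status = myrb_exec_cmd(cb, &cb->mcmd_blk);
            if (status != MYRB_STATUS_SUCCESS)
                    shost_printk(KERN_INFO, cb->host,
                                 "Failed to get event log %d, status %04x\n",
                                 event, status);
            dma_free_coherent(&cb->pdev->dev, sizeof(*ev_buf), ev_buf, ev_addr);
    }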
340 static void myrb_get_errtable(struct myrb_hba *cb)
342 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
347 memcpy(&old_table, cb->err_table, sizeof(old_table));
352 mbox->type3.addr = cb->err_table_addr;
353 status = myrb_exec_cmd(cb, cmd_blk);
355 struct myrb_error_entry *table = cb->err_table;
360 shost_for_each_device(sdev, cb->host) {
361 if (sdev->channel >= myrb_logical_channel(cb->host))
387 static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
390 int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
391 struct Scsi_Host *shost = cb->host;
393 status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
394 cb->ldev_info_addr);
400 struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
438 static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
441 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
447 rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
457 status = myrb_exec_cmd(cb, cmd_blk);
460 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
470 static void myrb_update_rbld_progress(struct myrb_hba *cb)
475 status = myrb_get_rbld_progress(cb, &rbld_buf);
477 cb->last_rbld_status == MYRB_STATUS_SUCCESS)
484 sdev = scsi_device_lookup(cb->host,
485 myrb_logical_channel(cb->host),
522 cb->last_rbld_status = status;
531 static void myrb_get_cc_progress(struct myrb_hba *cb)
533 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
539 rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
543 cb->need_cc_status = true;
550 status = myrb_exec_cmd(cb, cmd_blk);
558 sdev = scsi_device_lookup(cb->host,
559 myrb_logical_channel(cb->host),
569 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
578 static void myrb_bgi_control(struct myrb_hba *cb)
580 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
587 bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
590 shost_printk(KERN_ERR, cb->host,
599 status = myrb_exec_cmd(cb, cmd_blk);
600 last_bgi = &cb->bgi_status;
601 sdev = scsi_device_lookup(cb->host,
602 myrb_logical_channel(cb->host),
639 memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
642 if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
645 cb->bgi_status.status = MYRB_BGI_INVALID;
648 if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
653 cb->bgi_status.status = MYRB_BGI_INVALID;
658 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
669 static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
674 memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));
676 status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
680 new = cb->enquiry;
685 shost_printk(KERN_CRIT, cb->host,
693 shost_printk(KERN_CRIT, cb->host,
698 shost_printk(KERN_CRIT, cb->host,
702 cb->new_ev_seq = new->ev_seq;
703 cb->need_err_info = true;
704 shost_printk(KERN_INFO, cb->host,
706 cb->old_ev_seq, cb->new_ev_seq,
714 shost_printk(KERN_INFO, cb->host,
719 cb->need_ldev_info = true;
723 time_after_eq(jiffies, cb->secondary_monitor_time
725 cb->need_bgi_status = cb->bgi_status_supported;
726 cb->secondary_monitor_time = jiffies;
732 cb->need_rbld = true;
733 cb->rbld_first = (new->ldev_critical < old.ldev_critical);
738 shost_printk(KERN_INFO, cb->host,
745 cb->need_cc_status = true;
748 shost_printk(KERN_INFO, cb->host,
752 shost_printk(KERN_INFO, cb->host,
756 shost_printk(KERN_INFO, cb->host,
760 shost_printk(KERN_INFO, cb->host,
764 shost_printk(KERN_INFO, cb->host,
769 cb->need_cc_status = true;
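The enquiry lines above implement a snapshot-and-diff: copy the previous enquiry aside, refresh it in place through enquiry_addr, then turn each change into a need_* flag for the monitor to act on later. A condensed paraphrase; the exact trigger conditions are simplified, and only the ev_seq and ldev_critical handling is taken directly from the listing (lines 702-703 and 733):

    static unsigned short demo_hba_enquiry(struct myrb_hba *cb)
    {
            struct myrb_enquiry old, *new;
            unsigned short status;

            memcpy(&old, cb->enquiry, sizeof(old));     /* line 674 */
            status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
            if (status != MYRB_STATUS_SUCCESS)
                    return status;
            new = cb->enquiry;          /* refreshed in place via DMA */

            if (new->ev_seq != old.ev_seq) {    /* firmware logged new events */
                    cb->new_ev_seq = new->ev_seq;
                    cb->need_err_info = true;   /* lines 702-703 */
            }
            if (new->ldev_critical != old.ldev_critical) {
                    cb->need_rbld = true;       /* simplified trigger */
                    cb->rbld_first =
                            (new->ldev_critical < old.ldev_critical); /* 733 */
            }
            return MYRB_STATUS_SUCCESS;
    }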
779 static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
782 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
786 mutex_lock(&cb->dcmd_mutex);
792 status = myrb_exec_cmd(cb, cmd_blk);
793 mutex_unlock(&cb->dcmd_mutex);
806 static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
808 void __iomem *base = cb->io_base;
809 struct pci_dev *pdev = cb->pdev;
824 cb->enquiry = dma_alloc_coherent(&pdev->dev,
826 &cb->enquiry_addr, GFP_KERNEL);
827 if (!cb->enquiry)
832 cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
833 &cb->err_table_addr, GFP_KERNEL);
834 if (!cb->err_table)
838 cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
839 &cb->ldev_info_addr, GFP_KERNEL);
840 if (!cb->ldev_info_buf)
850 cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
851 cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
852 cb->cmd_mbox_size,
853 &cb->cmd_mbox_addr,
855 if (!cb->first_cmd_mbox)
858 cmd_mbox_mem = cb->first_cmd_mbox;
860 cb->last_cmd_mbox = cmd_mbox_mem;
861 cb->next_cmd_mbox = cb->first_cmd_mbox;
862 cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
863 cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
866 cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
868 cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
869 cb->stat_mbox_size,
870 &cb->stat_mbox_addr,
872 if (!cb->first_stat_mbox)
875 stat_mbox_mem = cb->first_stat_mbox;
877 cb->last_stat_mbox = stat_mbox_mem;
878 cb->next_stat_mbox = cb->first_stat_mbox;
881 cb->dual_mode_interface = true;
885 mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
886 mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
890 cb->dual_mode_interface = false;
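The mailbox setup in myrb_enable_mmio() allocates each ring as a single coherent block and seeds the pointers so that myrb_qcmd()'s first doorbell test (lines 170-172) sees "free" history: next starts at the first slot while prev1/prev2 start at the tail. Sketch of the command-ring half, following lines 850-863:

    static bool demo_init_cmd_ring(struct myrb_hba *cb, struct pci_dev *pdev)
    {
            cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT *
                                sizeof(union myrb_cmd_mbox);
            cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
                                                    cb->cmd_mbox_size,
                                                    &cb->cmd_mbox_addr,
                                                    GFP_KERNEL);
            if (!cb->first_cmd_mbox)
                    return false;
            cb->last_cmd_mbox = cb->first_cmd_mbox + MYRB_CMD_MBOX_COUNT - 1;
            cb->next_cmd_mbox = cb->first_cmd_mbox;
            cb->prev_cmd_mbox1 = cb->last_cmd_mbox;         /* line 862 */
            cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;     /* line 863 */
            return true;
    }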
911 static int myrb_get_hba_config(struct myrb_hba *cb)
917 struct Scsi_Host *shost = cb->host;
918 struct pci_dev *pdev = cb->pdev;
926 shost_printk(KERN_ERR, cb->host,
933 shost_printk(KERN_ERR, cb->host,
939 mutex_lock(&cb->dma_mutex);
940 status = myrb_hba_enquiry(cb);
941 mutex_unlock(&cb->dma_mutex);
943 shost_printk(KERN_WARNING, cb->host,
948 status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
950 shost_printk(KERN_WARNING, cb->host,
955 status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
957 shost_printk(KERN_WARNING, cb->host,
962 status = myrb_get_ldev_info(cb);
964 shost_printk(KERN_WARNING, cb->host,
975 strcpy(cb->model_name, "DAC960PU");
977 strcpy(cb->model_name, "DAC960PD");
980 strcpy(cb->model_name, "DAC960PL");
983 strcpy(cb->model_name, "DAC960PG");
986 strcpy(cb->model_name, "DAC960PJ");
989 strcpy(cb->model_name, "DAC960PR");
992 strcpy(cb->model_name, "DAC960PT");
995 strcpy(cb->model_name, "DAC960PTL0");
998 strcpy(cb->model_name, "DAC960PRL");
1001 strcpy(cb->model_name, "DAC960PTL1");
1004 strcpy(cb->model_name, "eXtremeRAID 1100");
1007 shost_printk(KERN_WARNING, cb->host,
1045 enquiry2->fw.major_version = cb->enquiry->fw_major_version;
1046 enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
1050 snprintf(cb->fw_version, sizeof(cb->fw_version),
1063 strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
1064 shost_printk(KERN_WARNING, cb->host,
1066 cb->fw_version);
1090 cb->bus_width = 32;
1092 cb->bus_width = 16;
1094 cb->bus_width = 8;
1095 cb->ldev_block_size = enquiry2->ldev_block_size;
1099 cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
1108 shost->can_queue = cb->enquiry->max_tcq;
1124 cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
1126 cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
1129 cb->ldev_geom_heads = 255;
1130 cb->ldev_geom_sectors = 63;
1132 cb->ldev_geom_heads = 128;
1133 cb->ldev_geom_sectors = 32;
1139 if ((cb->fw_version[0] == '4' &&
1140 strcmp(cb->fw_version, "4.08") >= 0) ||
1141 (cb->fw_version[0] == '5' &&
1142 strcmp(cb->fw_version, "5.08") >= 0)) {
1143 cb->bgi_status_supported = true;
1144 myrb_bgi_control(cb);
1146 cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
1150 shost_printk(KERN_INFO, cb->host,
1151 "Configuring %s PCI RAID Controller\n", cb->model_name);
1152 shost_printk(KERN_INFO, cb->host,
1154 cb->fw_version, memsize);
1155 if (cb->io_addr == 0)
1156 shost_printk(KERN_INFO, cb->host,
1158 (unsigned long)cb->pci_addr, cb->irq);
1160 shost_printk(KERN_INFO, cb->host,
1162 (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
1163 cb->irq);
1164 shost_printk(KERN_INFO, cb->host,
1166 cb->host->can_queue, cb->host->max_sectors);
1167 shost_printk(KERN_INFO, cb->host,
1169 cb->host->can_queue, cb->host->sg_tablesize,
1171 shost_printk(KERN_INFO, cb->host,
1173 cb->stripe_size, cb->segment_size,
1174 cb->ldev_geom_heads, cb->ldev_geom_sectors,
1175 cb->safte_enabled ?
1177 shost_printk(KERN_INFO, cb->host,
1179 pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
1180 cb->host->max_id);
1182 shost_printk(KERN_INFO, cb->host,
1184 cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
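Lines 1139-1144 gate background-initialization status polling on the firmware revision with a plain strcmp(), which works because the version strings share a fixed "major.minor" layout. A self-contained equivalent of that check:

    #include <stdbool.h>
    #include <string.h>

    static bool bgi_supported(const char *fw_version)
    {
            /* mirrors lines 1139-1143: BGI status needs fw >= 4.08 or >= 5.08 */
            return (fw_version[0] == '4' && strcmp(fw_version, "4.08") >= 0) ||
                   (fw_version[0] == '5' && strcmp(fw_version, "5.08") >= 0);
    }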
1198 static void myrb_unmap(struct myrb_hba *cb)
1200 if (cb->ldev_info_buf) {
1203 dma_free_coherent(&cb->pdev->dev, ldev_info_size,
1204 cb->ldev_info_buf, cb->ldev_info_addr);
1205 cb->ldev_info_buf = NULL;
1207 if (cb->err_table) {
1210 dma_free_coherent(&cb->pdev->dev, err_table_size,
1211 cb->err_table, cb->err_table_addr);
1212 cb->err_table = NULL;
1214 if (cb->enquiry) {
1215 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
1216 cb->enquiry, cb->enquiry_addr);
1217 cb->enquiry = NULL;
1219 if (cb->first_stat_mbox) {
1220 dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
1221 cb->first_stat_mbox, cb->stat_mbox_addr);
1222 cb->first_stat_mbox = NULL;
1224 if (cb->first_cmd_mbox) {
1225 dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
1226 cb->first_cmd_mbox, cb->cmd_mbox_addr);
1227 cb->first_cmd_mbox = NULL;
1234 static void myrb_cleanup(struct myrb_hba *cb)
1236 struct pci_dev *pdev = cb->pdev;
1239 myrb_unmap(cb);
1241 if (cb->mmio_base) {
1242 if (cb->disable_intr)
1243 cb->disable_intr(cb->io_base);
1244 iounmap(cb->mmio_base);
1246 if (cb->irq)
1247 free_irq(cb->irq, cb);
1248 if (cb->io_addr)
1249 release_region(cb->io_addr, 0x80);
1252 scsi_host_put(cb->host);
1258 struct myrb_hba *cb = shost_priv(shost);
1260 cb->reset(cb->io_base);
1268 struct myrb_hba *cb = shost_priv(shost);
1279 dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
1284 dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
1333 spin_lock_irqsave(&cb->queue_lock, flags);
1334 cb->qcmd(cb, cmd_blk);
1335 spin_unlock_irqrestore(&cb->queue_lock, flags);
1339 static void myrb_inquiry(struct myrb_hba *cb,
1350 if (cb->bus_width > 16)
1352 if (cb->bus_width > 8)
1354 memcpy(&inq[16], cb->model_name, 16);
1355 memcpy(&inq[32], cb->fw_version, 1);
1356 memcpy(&inq[33], &cb->fw_version[2], 2);
1357 memcpy(&inq[35], &cb->fw_version[7], 1);
1363 myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1385 put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
1391 if (cb->segment_size) {
1393 put_unaligned_be16(cb->segment_size, &mode_pg[14]);
1399 static void myrb_request_sense(struct myrb_hba *cb,
1407 static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1414 ldev_info->size, cb->ldev_block_size);
1416 put_unaligned_be32(cb->ldev_block_size, &data[4]);
1423 struct myrb_hba *cb = shost_priv(shost);
1453 myrb_inquiry(cb, scmd);
1468 myrb_mode_sense(cb, scmd, ldev_info);
1488 myrb_read_capacity(cb, scmd, ldev_info);
1492 myrb_request_sense(cb, scmd);
1575 hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
1600 spin_lock_irqsave(&cb->queue_lock, flags);
1601 cb->qcmd(cb, cmd_blk);
1602 spin_unlock_irqrestore(&cb->queue_lock, flags);
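In the queuecommand paths above, per-command DCDB and scatter/gather allocations come from the dma pools with GFP_ATOMIC, since ->queuecommand may run in atomic context, and submission is always the same three lines under queue_lock. Sketch of that fast path; the struct myrb_sge element type and the back-pressure return value are assumptions:

    static int demo_queue_rw(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
    {
            struct myrb_sge *hw_sgl;
            dma_addr_t hw_sgl_addr;
            unsigned long flags;

            hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
            if (!hw_sgl)
                    return SCSI_MLQUEUE_HOST_BUSY;  /* retry later; assumed */
            cmd_blk->sgl = hw_sgl;
            cmd_blk->sgl_addr = hw_sgl_addr;
            /* ... fill the SG list and the mailbox here ... */
            spin_lock_irqsave(&cb->queue_lock, flags);
            cb->qcmd(cb, cmd_blk);                  /* lines 1600-1602 */
            spin_unlock_irqrestore(&cb->queue_lock, flags);
            return 0;
    }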
1625 struct myrb_hba *cb = shost_priv(sdev->host);
1630 ldev_info = cb->ldev_info_buf + ldev_num;
1671 struct myrb_hba *cb = shost_priv(sdev->host);
1682 status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1753 struct myrb_hba *cb = shost_priv(sdev->host);
1755 geom[0] = cb->ldev_geom_heads;
1756 geom[1] = cb->ldev_geom_sectors;
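The bios_param hook only stores the geometry the controller advertised; cylinders follow from capacity. A sketch of the whole callback, assuming the conventional sector_div() idiom (sector_div() divides its first argument in place and returns the remainder, so the quotient is read back from capacity):

    static int demo_biosparam(struct scsi_device *sdev,
                              struct block_device *bdev,
                              sector_t capacity, int geom[])
    {
            struct myrb_hba *cb = shost_priv(sdev->host);

            geom[0] = cb->ldev_geom_heads;      /* line 1755 */
            geom[1] = cb->ldev_geom_sectors;    /* line 1756 */
            sector_div(capacity, geom[0] * geom[1]);    /* capacity /= h*s */
            geom[2] = capacity;                 /* cylinders */
            return 0;
    }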
1766 struct myrb_hba *cb = shost_priv(sdev->host);
1787 status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1811 struct myrb_hba *cb = shost_priv(sdev->host);
1841 status = myrb_set_pdev_state(cb, sdev, new_state);
1901 struct myrb_hba *cb = shost_priv(sdev->host);
1908 status = myrb_get_rbld_progress(cb, &rbld_buf);
1923 struct myrb_hba *cb = shost_priv(sdev->host);
1937 status = myrb_get_rbld_progress(cb, NULL);
1944 mutex_lock(&cb->dcmd_mutex);
1945 cmd_blk = &cb->dcmd_blk;
1952 status = myrb_exec_cmd(cb, cmd_blk);
1953 mutex_unlock(&cb->dcmd_mutex);
1955 struct pci_dev *pdev = cb->pdev;
1972 mutex_lock(&cb->dcmd_mutex);
1973 cmd_blk = &cb->dcmd_blk;
1980 status = myrb_exec_cmd(cb, cmd_blk);
1982 mutex_unlock(&cb->dcmd_mutex);
2028 struct myrb_hba *cb = shost_priv(sdev->host);
2044 status = myrb_get_rbld_progress(cb, &rbld_buf);
2051 mutex_lock(&cb->dcmd_mutex);
2052 cmd_blk = &cb->dcmd_blk;
2060 status = myrb_exec_cmd(cb, cmd_blk);
2061 mutex_unlock(&cb->dcmd_mutex);
2063 struct pci_dev *pdev = cb->pdev;
2079 mutex_lock(&cb->dcmd_mutex);
2080 cmd_blk = &cb->dcmd_blk;
2087 status = myrb_exec_cmd(cb, cmd_blk);
2089 mutex_unlock(&cb->dcmd_mutex);
2141 struct myrb_hba *cb = shost_priv(shost);
2143 return snprintf(buf, 20, "%u\n", cb->ctlr_num);
2151 struct myrb_hba *cb = shost_priv(shost);
2153 return snprintf(buf, 16, "%s\n", cb->fw_version);
2161 struct myrb_hba *cb = shost_priv(shost);
2163 return snprintf(buf, 16, "%s\n", cb->model_name);
2171 struct myrb_hba *cb = shost_priv(shost);
2174 status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2240 struct myrb_hba *cb = shost_priv(sdev->host);
2248 status = myrb_get_rbld_progress(cb, &rbld_buf);
2267 struct myrb_hba *cb = shost_priv(sdev->host);
2275 status = myrb_get_rbld_progress(cb, NULL);
2302 static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
2314 dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
2319 dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
2372 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2385 struct myrb_hba *cb = container_of(work,
2387 struct Scsi_Host *shost = cb->host;
2392 if (cb->new_ev_seq > cb->old_ev_seq) {
2393 int event = cb->old_ev_seq;
2397 cb->new_ev_seq, event);
2398 myrb_get_event(cb, event);
2399 cb->old_ev_seq = event + 1;
2401 } else if (cb->need_err_info) {
2402 cb->need_err_info = false;
2404 myrb_get_errtable(cb);
2406 } else if (cb->need_rbld && cb->rbld_first) {
2407 cb->need_rbld = false;
2410 myrb_update_rbld_progress(cb);
2412 } else if (cb->need_ldev_info) {
2413 cb->need_ldev_info = false;
2416 myrb_get_ldev_info(cb);
2418 } else if (cb->need_rbld) {
2419 cb->need_rbld = false;
2422 myrb_update_rbld_progress(cb);
2424 } else if (cb->need_cc_status) {
2425 cb->need_cc_status = false;
2428 myrb_get_cc_progress(cb);
2430 } else if (cb->need_bgi_status) {
2431 cb->need_bgi_status = false;
2433 myrb_bgi_control(cb);
2437 mutex_lock(&cb->dma_mutex);
2438 myrb_hba_enquiry(cb);
2439 mutex_unlock(&cb->dma_mutex);
2440 if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
2441 cb->need_err_info || cb->need_rbld ||
2442 cb->need_ldev_info || cb->need_cc_status ||
2443 cb->need_bgi_status) {
2450 cb->primary_monitor_time = jiffies;
2451 queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
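The monitor work function above is a self-re-arming state machine: each pass handles exactly one pending need_* flag (or, failing that, refreshes the enquiry that sets those flags) and then queues itself again, so no single pass holds the workqueue for long. Skeleton of that structure with the interval policy reduced to a constant:

    static void demo_monitor(struct work_struct *work)
    {
            struct myrb_hba *cb = container_of(work, struct myrb_hba,
                                               monitor_work.work);

            if (cb->need_err_info) {            /* one flag per pass */
                    cb->need_err_info = false;
                    myrb_get_errtable(cb);
            } else if (cb->need_cc_status) {
                    cb->need_cc_status = false;
                    myrb_get_cc_progress(cb);
            } else {
                    mutex_lock(&cb->dma_mutex);
                    myrb_hba_enquiry(cb);       /* refresh flags for next pass */
                    mutex_unlock(&cb->dma_mutex);
            }
            cb->primary_monitor_time = jiffies;
            queue_delayed_work(cb->work_q, &cb->monitor_work, HZ);
    }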
2462 static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
2465 struct pci_dev *pdev = cb->pdev;
2668 struct myrb_hba *cb, void __iomem *base)
2680 myrb_err_status(cb, error, parm0, parm1))
2690 if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
2697 cb->qcmd = myrb_qcmd;
2698 cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
2699 if (cb->dual_mode_interface)
2700 cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
2702 cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
2703 cb->disable_intr = DAC960_LA_disable_intr;
2704 cb->reset = DAC960_LA_reset_ctrl;
2711 struct myrb_hba *cb = arg;
2712 void __iomem *base = cb->io_base;
2716 spin_lock_irqsave(&cb->queue_lock, flags);
2718 next_stat_mbox = cb->next_stat_mbox;
2725 cmd_blk = &cb->dcmd_blk;
2727 cmd_blk = &cb->mcmd_blk;
2729 scmd = scsi_host_find_tag(cb->host, id - 3);
2736 dev_err(&cb->pdev->dev,
2740 if (++next_stat_mbox > cb->last_stat_mbox)
2741 next_stat_mbox = cb->first_stat_mbox;
2745 myrb_handle_cmdblk(cb, cmd_blk);
2747 myrb_handle_scsi(cb, cmd_blk, scmd);
2750 cb->next_stat_mbox = next_stat_mbox;
2751 spin_unlock_irqrestore(&cb->queue_lock, flags);
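The LA handler drains the status-mailbox ring entirely under queue_lock: id 1 is the driver command block, id 2 the monitor's, and anything else maps back to a SCSI tag as id - 3 (line 2729). Condensed skeleton; the per-slot valid/id/status field names and the scsi_cmd_priv() lookup are assumptions about myrb.h, and the controller-specific interrupt acknowledge is elided:

    static irqreturn_t demo_intr_handler(int irq, void *arg)
    {
            struct myrb_hba *cb = arg;
            struct myrb_stat_mbox *next;
            unsigned long flags;

            spin_lock_irqsave(&cb->queue_lock, flags);
            /* ... acknowledge the interrupt on cb->io_base here ... */
            next = cb->next_stat_mbox;
            while (next->valid) {                       /* assumed field */
                    unsigned char id = next->id;        /* assumed field */
                    struct scsi_cmnd *scmd = NULL;
                    struct myrb_cmdblk *cmd_blk = NULL;

                    if (id == 1)                        /* driver command */
                            cmd_blk = &cb->dcmd_blk;
                    else if (id == 2)                   /* monitor command */
                            cmd_blk = &cb->mcmd_blk;
                    else {                              /* SCSI tag, line 2729 */
                            scmd = scsi_host_find_tag(cb->host, id - 3);
                            if (scmd)
                                    cmd_blk = scsi_cmd_priv(scmd);
                    }
                    if (cmd_blk)
                            cmd_blk->status = next->status;     /* assumed */
                    memset(next, 0, sizeof(*next));     /* return slot to hw */
                    if (++next > cb->last_stat_mbox)    /* lines 2740-2741 */
                            next = cb->first_stat_mbox;
                    if (scmd)
                            myrb_handle_scsi(cb, cmd_blk, scmd);
                    else if (cmd_blk)
                            myrb_handle_cmdblk(cb, cmd_blk);
            }
            cb->next_stat_mbox = next;
            spin_unlock_irqrestore(&cb->queue_lock, flags);
            return IRQ_HANDLED;
    }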
2916 struct myrb_hba *cb, void __iomem *base)
2928 myrb_err_status(cb, error, parm0, parm1))
2938 if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
2945 cb->qcmd = myrb_qcmd;
2946 cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
2947 if (cb->dual_mode_interface)
2948 cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
2950 cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
2951 cb->disable_intr = DAC960_PG_disable_intr;
2952 cb->reset = DAC960_PG_reset_ctrl;
2959 struct myrb_hba *cb = arg;
2960 void __iomem *base = cb->io_base;
2964 spin_lock_irqsave(&cb->queue_lock, flags);
2966 next_stat_mbox = cb->next_stat_mbox;
2973 cmd_blk = &cb->dcmd_blk;
2975 cmd_blk = &cb->mcmd_blk;
2977 scmd = scsi_host_find_tag(cb->host, id - 3);
2984 dev_err(&cb->pdev->dev,
2988 if (++next_stat_mbox > cb->last_stat_mbox)
2989 next_stat_mbox = cb->first_stat_mbox;
2992 myrb_handle_cmdblk(cb, cmd_blk);
2994 myrb_handle_scsi(cb, cmd_blk, scmd);
2996 cb->next_stat_mbox = next_stat_mbox;
2997 spin_unlock_irqrestore(&cb->queue_lock, flags);
3100 static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3102 void __iomem *base = cb->io_base;
3112 struct myrb_hba *cb, void __iomem *base)
3117 if (!request_region(cb->io_addr, 0x80, "myrb")) {
3119 (unsigned long)cb->io_addr);
3129 myrb_err_status(cb, error, parm0, parm1))
3139 if (!myrb_enable_mmio(cb, NULL)) {
3146 cb->qcmd = DAC960_PD_qcmd;
3147 cb->disable_intr = DAC960_PD_disable_intr;
3148 cb->reset = DAC960_PD_reset_ctrl;
3155 struct myrb_hba *cb = arg;
3156 void __iomem *base = cb->io_base;
3159 spin_lock_irqsave(&cb->queue_lock, flags);
3166 cmd_blk = &cb->dcmd_blk;
3168 cmd_blk = &cb->mcmd_blk;
3170 scmd = scsi_host_find_tag(cb->host, id - 3);
3177 dev_err(&cb->pdev->dev,
3184 myrb_handle_cmdblk(cb, cmd_blk);
3186 myrb_handle_scsi(cb, cmd_blk, scmd);
3188 spin_unlock_irqrestore(&cb->queue_lock, flags);
3239 static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3241 void __iomem *base = cb->io_base;
3278 struct myrb_hba *cb, void __iomem *base)
3283 if (!request_region(cb->io_addr, 0x80, "myrb")) {
3285 (unsigned long)cb->io_addr);
3295 myrb_err_status(cb, error, parm0, parm1))
3305 if (!myrb_enable_mmio(cb, NULL)) {
3312 cb->qcmd = DAC960_P_qcmd;
3313 cb->disable_intr = DAC960_PD_disable_intr;
3314 cb->reset = DAC960_PD_reset_ctrl;
3321 struct myrb_hba *cb = arg;
3322 void __iomem *base = cb->io_base;
3325 spin_lock_irqsave(&cb->queue_lock, flags);
3335 cmd_blk = &cb->dcmd_blk;
3337 cmd_blk = &cb->mcmd_blk;
3339 scmd = scsi_host_find_tag(cb->host, id - 3);
3346 dev_err(&cb->pdev->dev,
3360 myrb_translate_enquiry(cb->enquiry);
3382 myrb_handle_cmdblk(cb, cmd_blk);
3384 myrb_handle_scsi(cb, cmd_blk, scmd);
3386 spin_unlock_irqrestore(&cb->queue_lock, flags);
3404 struct myrb_hba *cb = NULL;
3413 cb = shost_priv(shost);
3414 mutex_init(&cb->dcmd_mutex);
3415 mutex_init(&cb->dma_mutex);
3416 cb->pdev = pdev;
3417 cb->host = shost;
3427 cb->io_addr = pci_resource_start(pdev, 0);
3428 cb->pci_addr = pci_resource_start(pdev, 1);
3430 cb->pci_addr = pci_resource_start(pdev, 0);
3432 pci_set_drvdata(pdev, cb);
3433 spin_lock_init(&cb->queue_lock);
3436 cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
3437 if (cb->mmio_base == NULL) {
3443 cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
3444 if (privdata->hw_init(pdev, cb, cb->io_base))
3447 if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
3452 cb->irq = pdev->irq;
3453 return cb;
3458 myrb_cleanup(cb);
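myrb_detect() maps the controller registers with page granularity: ioremap() is given the page-aligned BAR address, and the in-page offset is added back to form io_base (lines 3436-3443). Just that step, isolated; mmio_size and the BAR index are placeholders:

    static void __iomem *demo_map_ctrl(struct myrb_hba *cb,
                                       struct pci_dev *pdev, size_t mmio_size)
    {
            cb->pci_addr = pci_resource_start(pdev, 0);
            cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
            if (!cb->mmio_base)
                    return NULL;
            /* register window = mapping base + offset within the page */
            cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
            return cb->io_base;
    }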
3464 struct myrb_hba *cb;
3467 cb = myrb_detect(dev, entry);
3468 if (!cb)
3471 ret = myrb_get_hba_config(cb);
3473 myrb_cleanup(cb);
3477 if (!myrb_create_mempools(dev, cb)) {
3482 ret = scsi_add_host(cb->host, &dev->dev);
3485 myrb_destroy_mempools(cb);
3488 scsi_scan_host(cb->host);
3491 myrb_cleanup(cb);
3498 struct myrb_hba *cb = pci_get_drvdata(pdev);
3500 shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
3501 myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
3502 myrb_cleanup(cb);
3503 myrb_destroy_mempools(cb);