Lines matching refs: cb — references to the struct myrb_hba control block in the Mylex DAC960 driver (drivers/scsi/myrb.c). The leading number on each line is the line number in that source file.
90 static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
95 elem_size = cb->host->sg_tablesize * elem_align;
96 cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
98 if (cb->sg_pool == NULL) {
99 shost_printk(KERN_ERR, cb->host,
104 cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
107 if (!cb->dcdb_pool) {
108 dma_pool_destroy(cb->sg_pool);
109 cb->sg_pool = NULL;
110 shost_printk(KERN_ERR, cb->host,
115 snprintf(cb->work_q_name, sizeof(cb->work_q_name),
116 "myrb_wq_%d", cb->host->host_no);
117 cb->work_q = create_singlethread_workqueue(cb->work_q_name);
118 if (!cb->work_q) {
119 dma_pool_destroy(cb->dcdb_pool);
120 cb->dcdb_pool = NULL;
121 dma_pool_destroy(cb->sg_pool);
122 cb->sg_pool = NULL;
123 shost_printk(KERN_ERR, cb->host,
131 INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
132 queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
140 static void myrb_destroy_mempools(struct myrb_hba *cb)
142 cancel_delayed_work_sync(&cb->monitor_work);
143 destroy_workqueue(cb->work_q);
145 dma_pool_destroy(cb->sg_pool);
146 dma_pool_destroy(cb->dcdb_pool);
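The myrb_create_mempools() matches (90-132) show the driver's allocate-or-unwind setup: two DMA pools, then a single-threaded workqueue, with each failure path destroying whatever was already built; myrb_destroy_mempools() (140-146) runs the same steps in reverse. A minimal sketch of that shape, assuming a reduced control block and caller-supplied element sizes (the real driver derives them from sg_tablesize and its struct sizes):

#include <linux/dmapool.h>
#include <linux/pci.h>
#include <linux/workqueue.h>

/* Reduced stand-in for struct myrb_hba; the real one has many more fields. */
struct sketch_hba {
	struct dma_pool *sg_pool;
	struct dma_pool *dcdb_pool;
	struct workqueue_struct *work_q;
	struct delayed_work monitor_work;
	char work_q_name[20];
};

static void sketch_monitor(struct work_struct *work)
{
	/* periodic health checks would go here; see lines 2416-2482 */
}

static bool sketch_create_mempools(struct pci_dev *pdev, struct sketch_hba *cb,
				   size_t sg_elem_size, size_t sg_elem_align,
				   size_t dcdb_size, int host_no)
{
	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
				      sg_elem_size, sg_elem_align, 0);
	if (!cb->sg_pool)
		return false;

	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
					dcdb_size, sizeof(unsigned int), 0);
	if (!cb->dcdb_pool)
		goto fail_sg_pool;

	snprintf(cb->work_q_name, sizeof(cb->work_q_name),
		 "myrb_wq_%d", host_no);
	cb->work_q = create_singlethread_workqueue(cb->work_q_name);
	if (!cb->work_q)
		goto fail_dcdb_pool;

	/* First monitor pass one jiffy from now; it reschedules itself. */
	INIT_DELAYED_WORK(&cb->monitor_work, sketch_monitor);
	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
	return true;

fail_dcdb_pool:
	dma_pool_destroy(cb->dcdb_pool);
	cb->dcdb_pool = NULL;
fail_sg_pool:
	dma_pool_destroy(cb->sg_pool);
	cb->sg_pool = NULL;
	return false;
}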
163 static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
165 void __iomem *base = cb->io_base;
167 union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;
169 cb->write_cmd_mbox(next_mbox, mbox);
170 if (cb->prev_cmd_mbox1->words[0] == 0 ||
171 cb->prev_cmd_mbox2->words[0] == 0)
172 cb->get_cmd_mbox(base);
173 cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
174 cb->prev_cmd_mbox1 = next_mbox;
175 if (++next_mbox > cb->last_cmd_mbox)
176 next_mbox = cb->first_cmd_mbox;
177 cb->next_cmd_mbox = next_mbox;
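Lines 163-177 are the heart of command submission: a producer-side ring of mailbox slots in coherent memory. Reconstructed from the matched lines with comments; write_cmd_mbox/get_cmd_mbox are the driver's per-controller method pointers (set during hw_init, lines 2758-2762 and 3036-3040), and the reading of words[0] as a consumed-slot marker is an inference from this code:

static void myrb_qcmd_annotated(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;

	/* Copy the command into the next free ring slot. */
	cb->write_cmd_mbox(next_mbox, mbox);

	/* words[0] == 0 appears to mark a slot the controller has already
	 * consumed; if either previously queued slot has drained, the
	 * controller may have gone idle, so nudge it to scan again. */
	if (cb->prev_cmd_mbox1->words[0] == 0 ||
	    cb->prev_cmd_mbox2->words[0] == 0)
		cb->get_cmd_mbox(base);

	/* Rotate the bookkeeping and wrap at the end of the ring. */
	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
	cb->prev_cmd_mbox1 = next_mbox;
	if (++next_mbox > cb->last_cmd_mbox)
		next_mbox = cb->first_cmd_mbox;
	cb->next_cmd_mbox = next_mbox;
}

Every caller holds cb->queue_lock around cb->qcmd() (lines 193-195, 1334-1336, 1628-1630), so the ring pointers are never raced.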
185 static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
193 spin_lock_irqsave(&cb->queue_lock, flags);
194 cb->qcmd(cb, cmd_blk);
195 spin_unlock_irqrestore(&cb->queue_lock, flags);
207 static unsigned short myrb_exec_type3(struct myrb_hba *cb,
210 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
214 mutex_lock(&cb->dcmd_mutex);
219 status = myrb_exec_cmd(cb, cmd_blk);
220 mutex_unlock(&cb->dcmd_mutex);
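myrb_exec_type3 (207-220) shows why cb->dcmd_mutex exists: every "direct" command reuses the single preallocated cb->dcmd_blk, so the mutex serializes all slow-path callers. A sketch; the myrb_reset_cmd() helper, the enum myrb_cmd_opcode type, and the MYRB_DCMD_TAG value are taken from the driver's conventions and should be treated as assumptions here:

static unsigned short sketch_exec_type3(struct myrb_hba *cb,
					enum myrb_cmd_opcode op,
					dma_addr_t addr)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;	/* shared, not per-call */
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_DCMD_TAG;
	mbox->type3.opcode = op;
	mbox->type3.addr = addr;
	status = myrb_exec_cmd(cb, cmd_blk);	/* queue and wait for completion */
	mutex_unlock(&cb->dcmd_mutex);
	return status;
}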
229 static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
233 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
238 pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
241 if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
244 mutex_lock(&cb->dcmd_mutex);
251 status = myrb_exec_cmd(cb, cmd_blk);
252 mutex_unlock(&cb->dcmd_mutex);
253 dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
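The type3D variant (229-256) wraps a streaming DMA mapping around the command: map, check dma_mapping_error() before touching hardware, unmap after completion. A sketch under the same caveats as above; the DMA direction and the failure status code are assumptions:

static unsigned short sketch_exec_type3D(struct myrb_hba *cb,
					 enum myrb_cmd_opcode op,
					 struct scsi_device *sdev,
					 struct myrb_pdev_state *pdev_info)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	dma_addr_t pdev_info_addr;

	/* Streaming mapping: valid only until the matching unmap. */
	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
					sizeof(struct myrb_pdev_state),
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
		return MYRB_STATUS_SUBSYS_FAILED;	/* assumed status code */

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.opcode = op;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.addr = pdev_info_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);

	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
			 sizeof(struct myrb_pdev_state), DMA_BIDIRECTIONAL);
	return status;
}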
280 * @cb: pointer to the hba structure
285 static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
287 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
293 ev_buf = dma_alloc_coherent(&cb->pdev->dev,
306 status = myrb_exec_cmd(cb, cmd_blk);
308 shost_printk(KERN_INFO, cb->host,
321 shost_printk(KERN_CRIT, cb->host,
326 shost_printk(KERN_CRIT, cb->host,
332 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
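myrb_get_event (285-332) uses the other DMA style: a short-lived coherent buffer the controller fills while the CPU can read it without sync operations. The skeleton, with the actual event-log command elided:

static void sketch_get_event(struct myrb_hba *cb, unsigned int event)
{
	struct myrb_log_entry *ev_buf;
	dma_addr_t ev_addr;

	/* Coherent memory: no dma_sync_*()/unmap dance, at the cost of
	 * uncached (or snooped) access; fine for a one-shot buffer. */
	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
				    sizeof(struct myrb_log_entry),
				    &ev_addr, GFP_KERNEL);
	if (!ev_buf)
		return;

	/* ... build the event-log command with ev_addr in cb->mcmd_blk,
	 * run it via myrb_exec_cmd(), and report ev_buf contents ... */

	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
			  ev_buf, ev_addr);
}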
341 static void myrb_get_errtable(struct myrb_hba *cb)
343 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
348 memcpy(&old_table, cb->err_table, sizeof(old_table));
353 mbox->type3.addr = cb->err_table_addr;
354 status = myrb_exec_cmd(cb, cmd_blk);
356 struct myrb_error_entry *table = cb->err_table;
361 shost_for_each_device(sdev, cb->host) {
362 if (sdev->channel >= myrb_logical_channel(cb->host))
388 static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
391 int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
392 struct Scsi_Host *shost = cb->host;
394 status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
395 cb->ldev_info_addr);
401 struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
439 static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
442 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
448 rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
458 status = myrb_exec_cmd(cb, cmd_blk);
461 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
472 static void myrb_update_rbld_progress(struct myrb_hba *cb)
477 status = myrb_get_rbld_progress(cb, &rbld_buf);
479 cb->last_rbld_status == MYRB_STATUS_SUCCESS)
486 sdev = scsi_device_lookup(cb->host,
487 myrb_logical_channel(cb->host),
524 cb->last_rbld_status = status;
533 static void myrb_get_cc_progress(struct myrb_hba *cb)
535 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
541 rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
545 cb->need_cc_status = true;
552 status = myrb_exec_cmd(cb, cmd_blk);
560 sdev = scsi_device_lookup(cb->host,
561 myrb_logical_channel(cb->host),
571 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
580 static void myrb_bgi_control(struct myrb_hba *cb)
582 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
589 bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
592 shost_printk(KERN_ERR, cb->host,
601 status = myrb_exec_cmd(cb, cmd_blk);
602 last_bgi = &cb->bgi_status;
603 sdev = scsi_device_lookup(cb->host,
604 myrb_logical_channel(cb->host),
641 memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
644 if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
647 cb->bgi_status.status = MYRB_BGI_INVALID;
650 if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
655 cb->bgi_status.status = MYRB_BGI_INVALID;
660 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
671 static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
676 memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));
678 status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
682 new = cb->enquiry;
687 shost_printk(KERN_CRIT, cb->host,
695 shost_printk(KERN_CRIT, cb->host,
700 shost_printk(KERN_CRIT, cb->host,
704 cb->new_ev_seq = new->ev_seq;
705 cb->need_err_info = true;
706 shost_printk(KERN_INFO, cb->host,
708 cb->old_ev_seq, cb->new_ev_seq,
716 shost_printk(KERN_INFO, cb->host,
721 cb->need_ldev_info = true;
725 time_after_eq(jiffies, cb->secondary_monitor_time
727 cb->need_bgi_status = cb->bgi_status_supported;
728 cb->secondary_monitor_time = jiffies;
734 cb->need_rbld = true;
735 cb->rbld_first = (new->ldev_critical < old.ldev_critical);
740 shost_printk(KERN_INFO, cb->host,
747 cb->need_cc_status = true;
750 shost_printk(KERN_INFO, cb->host,
754 shost_printk(KERN_INFO, cb->host,
758 shost_printk(KERN_INFO, cb->host,
762 shost_printk(KERN_INFO, cb->host,
766 shost_printk(KERN_INFO, cb->host,
771 cb->need_cc_status = true;
781 static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
784 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
788 mutex_lock(&cb->dcmd_mutex);
794 status = myrb_exec_cmd(cb, cmd_blk);
795 mutex_unlock(&cb->dcmd_mutex);
808 static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
810 void __iomem *base = cb->io_base;
811 struct pci_dev *pdev = cb->pdev;
826 cb->enquiry = dma_alloc_coherent(&pdev->dev,
828 &cb->enquiry_addr, GFP_KERNEL);
829 if (!cb->enquiry)
834 cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
835 &cb->err_table_addr, GFP_KERNEL);
836 if (!cb->err_table)
840 cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
841 &cb->ldev_info_addr, GFP_KERNEL);
842 if (!cb->ldev_info_buf)
852 cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
853 cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
854 cb->cmd_mbox_size,
855 &cb->cmd_mbox_addr,
857 if (!cb->first_cmd_mbox)
860 cmd_mbox_mem = cb->first_cmd_mbox;
862 cb->last_cmd_mbox = cmd_mbox_mem;
863 cb->next_cmd_mbox = cb->first_cmd_mbox;
864 cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
865 cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
868 cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
870 cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
871 cb->stat_mbox_size,
872 &cb->stat_mbox_addr,
874 if (!cb->first_stat_mbox)
877 stat_mbox_mem = cb->first_stat_mbox;
879 cb->last_stat_mbox = stat_mbox_mem;
880 cb->next_stat_mbox = cb->first_stat_mbox;
883 cb->dual_mode_interface = true;
887 mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
888 mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
892 cb->dual_mode_interface = false;
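Lines 852-880 in myrb_enable_mmio carve a coherent allocation into the command ring that myrb_qcmd() rotates (the status ring at 868-880 is set up the same way). The command-ring half, reconstructed; note how prev_cmd_mbox1/2 are seeded at the tail so the free-slot test passes on the very first submission:

static bool sketch_init_cmd_ring(struct pci_dev *pdev, struct myrb_hba *cb)
{
	union myrb_cmd_mbox *mem;

	cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev, cb->cmd_mbox_size,
						&cb->cmd_mbox_addr, GFP_KERNEL);
	if (!cb->first_cmd_mbox)
		return false;

	mem = cb->first_cmd_mbox;
	mem += MYRB_CMD_MBOX_COUNT - 1;
	cb->last_cmd_mbox = mem;
	cb->next_cmd_mbox = cb->first_cmd_mbox;
	/* Seed the 'previous' pointers at the tail so the drained-slot
	 * check in myrb_qcmd() sees both slots idle on first use. */
	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
	return true;
}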
913 static int myrb_get_hba_config(struct myrb_hba *cb)
919 struct Scsi_Host *shost = cb->host;
920 struct pci_dev *pdev = cb->pdev;
928 shost_printk(KERN_ERR, cb->host,
935 shost_printk(KERN_ERR, cb->host,
941 mutex_lock(&cb->dma_mutex);
942 status = myrb_hba_enquiry(cb);
943 mutex_unlock(&cb->dma_mutex);
945 shost_printk(KERN_WARNING, cb->host,
950 status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
952 shost_printk(KERN_WARNING, cb->host,
957 status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
959 shost_printk(KERN_WARNING, cb->host,
964 status = myrb_get_ldev_info(cb);
966 shost_printk(KERN_WARNING, cb->host,
977 strcpy(cb->model_name, "DAC960PU");
979 strcpy(cb->model_name, "DAC960PD");
982 strcpy(cb->model_name, "DAC960PL");
985 strcpy(cb->model_name, "DAC960PG");
988 strcpy(cb->model_name, "DAC960PJ");
991 strcpy(cb->model_name, "DAC960PR");
994 strcpy(cb->model_name, "DAC960PT");
997 strcpy(cb->model_name, "DAC960PTL0");
1000 strcpy(cb->model_name, "DAC960PRL");
1003 strcpy(cb->model_name, "DAC960PTL1");
1006 strcpy(cb->model_name, "eXtremeRAID 1100");
1009 shost_printk(KERN_WARNING, cb->host,
1047 enquiry2->fw.major_version = cb->enquiry->fw_major_version;
1048 enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
1052 snprintf(cb->fw_version, sizeof(cb->fw_version),
1065 strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
1066 shost_printk(KERN_WARNING, cb->host,
1068 cb->fw_version);
1092 cb->bus_width = 32;
1094 cb->bus_width = 16;
1096 cb->bus_width = 8;
1097 cb->ldev_block_size = enquiry2->ldev_block_size;
1101 cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
1110 shost->can_queue = cb->enquiry->max_tcq;
1126 cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
1128 cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
1131 cb->ldev_geom_heads = 255;
1132 cb->ldev_geom_sectors = 63;
1134 cb->ldev_geom_heads = 128;
1135 cb->ldev_geom_sectors = 32;
1141 if ((cb->fw_version[0] == '4' &&
1142 strcmp(cb->fw_version, "4.08") >= 0) ||
1143 (cb->fw_version[0] == '5' &&
1144 strcmp(cb->fw_version, "5.08") >= 0)) {
1145 cb->bgi_status_supported = true;
1146 myrb_bgi_control(cb);
1148 cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
1152 shost_printk(KERN_INFO, cb->host,
1153 "Configuring %s PCI RAID Controller\n", cb->model_name);
1154 shost_printk(KERN_INFO, cb->host,
1156 cb->fw_version, memsize);
1157 if (cb->io_addr == 0)
1158 shost_printk(KERN_INFO, cb->host,
1160 (unsigned long)cb->pci_addr, cb->irq);
1162 shost_printk(KERN_INFO, cb->host,
1164 (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
1165 cb->irq);
1166 shost_printk(KERN_INFO, cb->host,
1168 cb->host->can_queue, cb->host->max_sectors);
1169 shost_printk(KERN_INFO, cb->host,
1171 cb->host->can_queue, cb->host->sg_tablesize,
1173 shost_printk(KERN_INFO, cb->host,
1175 cb->stripe_size, cb->segment_size,
1176 cb->ldev_geom_heads, cb->ldev_geom_sectors,
1177 cb->safte_enabled ?
1179 shost_printk(KERN_INFO, cb->host,
1181 pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
1182 cb->host->max_id);
1184 shost_printk(KERN_INFO, cb->host,
1186 cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
1200 static void myrb_unmap(struct myrb_hba *cb)
1202 if (cb->ldev_info_buf) {
1205 dma_free_coherent(&cb->pdev->dev, ldev_info_size,
1206 cb->ldev_info_buf, cb->ldev_info_addr);
1207 cb->ldev_info_buf = NULL;
1209 if (cb->err_table) {
1212 dma_free_coherent(&cb->pdev->dev, err_table_size,
1213 cb->err_table, cb->err_table_addr);
1214 cb->err_table = NULL;
1216 if (cb->enquiry) {
1217 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
1218 cb->enquiry, cb->enquiry_addr);
1219 cb->enquiry = NULL;
1221 if (cb->first_stat_mbox) {
1222 dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
1223 cb->first_stat_mbox, cb->stat_mbox_addr);
1224 cb->first_stat_mbox = NULL;
1226 if (cb->first_cmd_mbox) {
1227 dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
1228 cb->first_cmd_mbox, cb->cmd_mbox_addr);
1229 cb->first_cmd_mbox = NULL;
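myrb_unmap (1200-1229) frees in roughly the reverse order of allocation and NULLs each pointer behind its guard, which makes the teardown idempotent and safe to call from half-initialised error paths. Each stanza has the same shape:

if (cb->enquiry) {
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
			  cb->enquiry, cb->enquiry_addr);
	cb->enquiry = NULL;	/* guard stays false on a second call */
}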
1236 static void myrb_cleanup(struct myrb_hba *cb)
1238 struct pci_dev *pdev = cb->pdev;
1241 myrb_unmap(cb);
1243 if (cb->mmio_base) {
1244 if (cb->disable_intr)
1245 cb->disable_intr(cb->io_base);
1246 iounmap(cb->mmio_base);
1248 if (cb->irq)
1249 free_irq(cb->irq, cb);
1250 if (cb->io_addr)
1251 release_region(cb->io_addr, 0x80);
1254 scsi_host_put(cb->host);
1260 struct myrb_hba *cb = shost_priv(shost);
1262 cb->reset(cb->io_base);
1269 struct myrb_hba *cb = shost_priv(shost);
1280 dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
1285 dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
1334 spin_lock_irqsave(&cb->queue_lock, flags);
1335 cb->qcmd(cb, cmd_blk);
1336 spin_unlock_irqrestore(&cb->queue_lock, flags);
1340 static void myrb_inquiry(struct myrb_hba *cb,
1351 if (cb->bus_width > 16)
1353 if (cb->bus_width > 8)
1355 memcpy(&inq[16], cb->model_name, 16);
1356 memcpy(&inq[32], cb->fw_version, 1);
1357 memcpy(&inq[33], &cb->fw_version[2], 2);
1358 memcpy(&inq[35], &cb->fw_version[7], 1);
1364 myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1386 put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
1392 if (cb->segment_size) {
1394 put_unaligned_be16(cb->segment_size, &mode_pg[14]);
1400 static void myrb_request_sense(struct myrb_hba *cb,
1409 static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1416 ldev_info->size, cb->ldev_block_size);
1418 put_unaligned_be32(cb->ldev_block_size, &data[4]);
1425 struct myrb_hba *cb = shost_priv(shost);
1458 myrb_inquiry(cb, scmd);
1476 myrb_mode_sense(cb, scmd, ldev_info);
1502 myrb_read_capacity(cb, scmd, ldev_info);
1506 myrb_request_sense(cb, scmd);
1603 hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
1628 spin_lock_irqsave(&cb->queue_lock, flags);
1629 cb->qcmd(cb, cmd_blk);
1630 spin_unlock_irqrestore(&cb->queue_lock, flags);
1653 struct myrb_hba *cb = shost_priv(sdev->host);
1658 ldev_info = cb->ldev_info_buf + ldev_num;
1699 struct myrb_hba *cb = shost_priv(sdev->host);
1710 status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1781 struct myrb_hba *cb = shost_priv(sdev->host);
1783 geom[0] = cb->ldev_geom_heads;
1784 geom[1] = cb->ldev_geom_sectors;
1794 struct myrb_hba *cb = shost_priv(sdev->host);
1815 status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1839 struct myrb_hba *cb = shost_priv(sdev->host);
1869 status = myrb_set_pdev_state(cb, sdev, new_state);
1929 struct myrb_hba *cb = shost_priv(sdev->host);
1936 status = myrb_get_rbld_progress(cb, &rbld_buf);
1951 struct myrb_hba *cb = shost_priv(sdev->host);
1965 status = myrb_get_rbld_progress(cb, NULL);
1972 mutex_lock(&cb->dcmd_mutex);
1973 cmd_blk = &cb->dcmd_blk;
1980 status = myrb_exec_cmd(cb, cmd_blk);
1981 mutex_unlock(&cb->dcmd_mutex);
1983 struct pci_dev *pdev = cb->pdev;
2000 mutex_lock(&cb->dcmd_mutex);
2001 cmd_blk = &cb->dcmd_blk;
2008 status = myrb_exec_cmd(cb, cmd_blk);
2010 mutex_unlock(&cb->dcmd_mutex);
2056 struct myrb_hba *cb = shost_priv(sdev->host);
2072 status = myrb_get_rbld_progress(cb, &rbld_buf);
2079 mutex_lock(&cb->dcmd_mutex);
2080 cmd_blk = &cb->dcmd_blk;
2088 status = myrb_exec_cmd(cb, cmd_blk);
2089 mutex_unlock(&cb->dcmd_mutex);
2091 struct pci_dev *pdev = cb->pdev;
2107 mutex_lock(&cb->dcmd_mutex);
2108 cmd_blk = &cb->dcmd_blk;
2115 status = myrb_exec_cmd(cb, cmd_blk);
2117 mutex_unlock(&cb->dcmd_mutex);
2169 struct myrb_hba *cb = shost_priv(shost);
2171 return snprintf(buf, 20, "%u\n", cb->ctlr_num);
2179 struct myrb_hba *cb = shost_priv(shost);
2181 return snprintf(buf, 16, "%s\n", cb->fw_version);
2189 struct myrb_hba *cb = shost_priv(shost);
2191 return snprintf(buf, 16, "%s\n", cb->model_name);
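The sysfs handlers at 2169-2191 are the stock show-attribute pattern: recover the Scsi_Host from the class device, then the private HBA via shost_priv(). A sketch of one of them, assuming DEVICE_ATTR_RO wiring:

static ssize_t firmware_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", cb->fw_version);
}
static DEVICE_ATTR_RO(firmware);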
2199 struct myrb_hba *cb = shost_priv(shost);
2202 status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2264 struct myrb_hba *cb = shost_priv(sdev->host);
2272 status = myrb_get_rbld_progress(cb, &rbld_buf);
2291 struct myrb_hba *cb = shost_priv(sdev->host);
2299 status = myrb_get_rbld_progress(cb, NULL);
2326 static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
2338 dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
2343 dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
2403 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2416 struct myrb_hba *cb = container_of(work,
2418 struct Scsi_Host *shost = cb->host;
2423 if (cb->new_ev_seq > cb->old_ev_seq) {
2424 int event = cb->old_ev_seq;
2428 cb->new_ev_seq, event);
2429 myrb_get_event(cb, event);
2430 cb->old_ev_seq = event + 1;
2432 } else if (cb->need_err_info) {
2433 cb->need_err_info = false;
2435 myrb_get_errtable(cb);
2437 } else if (cb->need_rbld && cb->rbld_first) {
2438 cb->need_rbld = false;
2441 myrb_update_rbld_progress(cb);
2443 } else if (cb->need_ldev_info) {
2444 cb->need_ldev_info = false;
2447 myrb_get_ldev_info(cb);
2449 } else if (cb->need_rbld) {
2450 cb->need_rbld = false;
2453 myrb_update_rbld_progress(cb);
2455 } else if (cb->need_cc_status) {
2456 cb->need_cc_status = false;
2459 myrb_get_cc_progress(cb);
2461 } else if (cb->need_bgi_status) {
2462 cb->need_bgi_status = false;
2464 myrb_bgi_control(cb);
2468 mutex_lock(&cb->dma_mutex);
2469 myrb_hba_enquiry(cb);
2470 mutex_unlock(&cb->dma_mutex);
2471 if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
2472 cb->need_err_info || cb->need_rbld ||
2473 cb->need_ldev_info || cb->need_cc_status ||
2474 cb->need_bgi_status) {
2481 cb->primary_monitor_time = jiffies;
2482 queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
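The monitor (2416-2482) is a self-rescheduling delayed work: each pass handles at most one pending need_* flag, re-runs the enquiry under dma_mutex, and requeues itself, sooner if anything is still outstanding. A sketch of the tail; the interval values are assumptions:

static void sketch_monitor_tail(struct myrb_hba *cb)
{
	unsigned long interval = 10 * HZ;	/* assumed idle cadence */

	mutex_lock(&cb->dma_mutex);
	myrb_hba_enquiry(cb);		/* refreshes the need_* flags */
	mutex_unlock(&cb->dma_mutex);

	if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
	    cb->need_err_info || cb->need_rbld ||
	    cb->need_ldev_info || cb->need_cc_status ||
	    cb->need_bgi_status)
		interval = HZ;		/* poll sooner while work is pending */

	cb->primary_monitor_time = jiffies;
	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
}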
2493 static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
2496 struct pci_dev *pdev = cb->pdev;
2728 struct myrb_hba *cb, void __iomem *base)
2740 myrb_err_status(cb, error, parm0, parm1))
2750 if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
2757 cb->qcmd = myrb_qcmd;
2758 cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
2759 if (cb->dual_mode_interface)
2760 cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
2762 cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
2763 cb->disable_intr = DAC960_LA_disable_intr;
2764 cb->reset = DAC960_LA_reset_ctrl;
2771 struct myrb_hba *cb = arg;
2772 void __iomem *base = cb->io_base;
2776 spin_lock_irqsave(&cb->queue_lock, flags);
2778 next_stat_mbox = cb->next_stat_mbox;
2785 cmd_blk = &cb->dcmd_blk;
2787 cmd_blk = &cb->mcmd_blk;
2789 scmd = scsi_host_find_tag(cb->host, id - 3);
2796 dev_err(&cb->pdev->dev,
2800 if (++next_stat_mbox > cb->last_stat_mbox)
2801 next_stat_mbox = cb->first_stat_mbox;
2805 myrb_handle_cmdblk(cb, cmd_blk);
2807 myrb_handle_scsi(cb, cmd_blk, scmd);
2810 cb->next_stat_mbox = next_stat_mbox;
2811 spin_unlock_irqrestore(&cb->queue_lock, flags);
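The LA interrupt path (2771-2811, mirrored for PG at 3049-3087) drains the status ring under queue_lock, maps each completion tag to a command block, and completes it; the tag convention (1 = dcmd, 2 = mcmd, >= 3 = SCSI tag + 3) follows the 'id - 3' in the listing. Reconstructed with comments; the struct myrb_stat_mbox layout (valid/id/status) and the controller-specific interrupt ack are assumptions:

static irqreturn_t sketch_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	/* controller-specific interrupt ack on cb->io_base goes here */
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		/* Hand the slot back to the controller, then wrap. */
		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrb_handle_cmdblk(cb, cmd_blk);
			else
				myrb_handle_scsi(cb, cmd_blk, scmd);
		}
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}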
3006 struct myrb_hba *cb, void __iomem *base)
3018 myrb_err_status(cb, error, parm0, parm1))
3028 if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
3035 cb->qcmd = myrb_qcmd;
3036 cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
3037 if (cb->dual_mode_interface)
3038 cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
3040 cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
3041 cb->disable_intr = DAC960_PG_disable_intr;
3042 cb->reset = DAC960_PG_reset_ctrl;
3049 struct myrb_hba *cb = arg;
3050 void __iomem *base = cb->io_base;
3054 spin_lock_irqsave(&cb->queue_lock, flags);
3056 next_stat_mbox = cb->next_stat_mbox;
3063 cmd_blk = &cb->dcmd_blk;
3065 cmd_blk = &cb->mcmd_blk;
3067 scmd = scsi_host_find_tag(cb->host, id - 3);
3074 dev_err(&cb->pdev->dev,
3078 if (++next_stat_mbox > cb->last_stat_mbox)
3079 next_stat_mbox = cb->first_stat_mbox;
3082 myrb_handle_cmdblk(cb, cmd_blk);
3084 myrb_handle_scsi(cb, cmd_blk, scmd);
3086 cb->next_stat_mbox = next_stat_mbox;
3087 spin_unlock_irqrestore(&cb->queue_lock, flags);
3202 static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3204 void __iomem *base = cb->io_base;
3214 struct myrb_hba *cb, void __iomem *base)
3219 if (!request_region(cb->io_addr, 0x80, "myrb")) {
3221 (unsigned long)cb->io_addr);
3231 myrb_err_status(cb, error, parm0, parm1))
3241 if (!myrb_enable_mmio(cb, NULL)) {
3248 cb->qcmd = DAC960_PD_qcmd;
3249 cb->disable_intr = DAC960_PD_disable_intr;
3250 cb->reset = DAC960_PD_reset_ctrl;
3257 struct myrb_hba *cb = arg;
3258 void __iomem *base = cb->io_base;
3261 spin_lock_irqsave(&cb->queue_lock, flags);
3268 cmd_blk = &cb->dcmd_blk;
3270 cmd_blk = &cb->mcmd_blk;
3272 scmd = scsi_host_find_tag(cb->host, id - 3);
3279 dev_err(&cb->pdev->dev,
3286 myrb_handle_cmdblk(cb, cmd_blk);
3288 myrb_handle_scsi(cb, cmd_blk, scmd);
3290 spin_unlock_irqrestore(&cb->queue_lock, flags);
3341 static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3343 void __iomem *base = cb->io_base;
3380 struct myrb_hba *cb, void __iomem *base)
3385 if (!request_region(cb->io_addr, 0x80, "myrb")) {
3387 (unsigned long)cb->io_addr);
3397 myrb_err_status(cb, error, parm0, parm1))
3407 if (!myrb_enable_mmio(cb, NULL)) {
3414 cb->qcmd = DAC960_P_qcmd;
3415 cb->disable_intr = DAC960_PD_disable_intr;
3416 cb->reset = DAC960_PD_reset_ctrl;
3423 struct myrb_hba *cb = arg;
3424 void __iomem *base = cb->io_base;
3427 spin_lock_irqsave(&cb->queue_lock, flags);
3437 cmd_blk = &cb->dcmd_blk;
3439 cmd_blk = &cb->mcmd_blk;
3441 scmd = scsi_host_find_tag(cb->host, id - 3);
3448 dev_err(&cb->pdev->dev,
3462 myrb_translate_enquiry(cb->enquiry);
3484 myrb_handle_cmdblk(cb, cmd_blk);
3486 myrb_handle_scsi(cb, cmd_blk, scmd);
3488 spin_unlock_irqrestore(&cb->queue_lock, flags);
3506 struct myrb_hba *cb = NULL;
3515 cb = shost_priv(shost);
3516 mutex_init(&cb->dcmd_mutex);
3517 mutex_init(&cb->dma_mutex);
3518 cb->pdev = pdev;
3519 cb->host = shost;
3529 cb->io_addr = pci_resource_start(pdev, 0);
3530 cb->pci_addr = pci_resource_start(pdev, 1);
3532 cb->pci_addr = pci_resource_start(pdev, 0);
3534 pci_set_drvdata(pdev, cb);
3535 spin_lock_init(&cb->queue_lock);
3538 cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
3539 if (cb->mmio_base == NULL) {
3545 cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
3546 if (privdata->hw_init(pdev, cb, cb->io_base))
3549 if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
3554 cb->irq = pdev->irq;
3555 return cb;
3560 myrb_cleanup(cb);
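myrb_detect (3506-3555) must ioremap on a page boundary even though the BAR's register block may not be page-aligned; lines 3538-3545 map the aligned window and add the sub-page offset back. The same math as a standalone helper (name and mmio_size parameter are illustrative):

#include <linux/io.h>
#include <linux/pci.h>

static void __iomem *sketch_map_regs(struct pci_dev *pdev, int bar,
				     size_t mmio_size,
				     void __iomem **mmio_base)
{
	resource_size_t pci_addr = pci_resource_start(pdev, bar);

	/* Map from the page boundary below the register block... */
	*mmio_base = ioremap(pci_addr & PAGE_MASK, mmio_size);
	if (!*mmio_base)
		return NULL;
	/* ...then step back up to the registers themselves. */
	return *mmio_base + (pci_addr & ~PAGE_MASK);
}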
3566 struct myrb_hba *cb;
3569 cb = myrb_detect(dev, entry);
3570 if (!cb)
3573 ret = myrb_get_hba_config(cb);
3575 myrb_cleanup(cb);
3579 if (!myrb_create_mempools(dev, cb)) {
3584 ret = scsi_add_host(cb->host, &dev->dev);
3587 myrb_destroy_mempools(cb);
3590 scsi_scan_host(cb->host);
3593 myrb_cleanup(cb);
3600 struct myrb_hba *cb = pci_get_drvdata(pdev);
3602 shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
3603 myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
3604 myrb_cleanup(cb);
3605 myrb_destroy_mempools(cb);
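Finally, the remove path (3600-3605): flush the controller cache while command submission still works, tear down the MMIO/IRQ/DMA state, and only then stop the monitor and destroy the pools. Reconstructed with comments:

static void sketch_remove(struct pci_dev *pdev)
{
	struct myrb_hba *cb = pci_get_drvdata(pdev);

	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);	/* needs a live controller */
	myrb_cleanup(cb);		/* unmap DMA, free IRQ/region, put host */
	myrb_destroy_mempools(cb);	/* cancels monitor work, frees pools */
}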