Lines matching refs: hba
39 static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
40 static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
42 static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
43 static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
44 static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
46 static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
52 req = readl(&hba->u.itl.iop->inbound_queue);
59 writel(req, &hba->u.itl.iop->outbound_queue);
60 readl(&hba->u.itl.iop->outbound_intstatus);
67 static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
69 return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
72 static int iop_wait_ready_mvfrey(struct hptiop_hba *hba, u32 millisec)
74 return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
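
A note on the readiness checks above: the ITL path polls the inbound queue register until something other than IOPMU_QUEUE_EMPTY appears, while the MV and MVFrey paths simply reuse iop_send_sync_msg() with a NOP message. A minimal user-space sketch of the same poll-with-deadline pattern (the poll_ready() predicate and millisecond clock are stand-ins, not driver code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    static uint64_t now_ms(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    /* Poll a readiness predicate until it succeeds or the deadline passes. */
    static int wait_ready(bool (*poll_ready)(void), uint32_t millisec)
    {
            uint64_t deadline = now_ms() + millisec;

            do {
                    if (poll_ready())
                            return 0;
            } while (now_ms() < deadline);

            return -1;      /* timed out */
    }
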
77 static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
80 hptiop_host_request_callback_itl(hba,
83 hptiop_iop_request_callback_itl(hba, tag);
86 static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
90 while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
94 hptiop_request_callback_itl(hba, req);
99 ((char __iomem *)hba->u.itl.iop + req);
103 hptiop_request_callback_itl(hba, req);
108 hptiop_request_callback_itl(hba, req);
113 static int iop_intr_itl(struct hptiop_hba *hba)
115 struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
116 void __iomem *plx = hba->u.itl.plx;
130 hptiop_message_callback(hba, msg);
135 hptiop_drain_outbound_queue_itl(hba);
161 static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
163 u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
169 memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
170 writel(head, &hba->u.mv.mu->inbound_head);
172 &hba->u.mv.regs->inbound_doorbell);
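
mv_inbound_write() above copies an 8-byte request descriptor into the device-resident inbound queue, advances the head index, and rings the doorbell. A host-memory model of the same sequence (QUEUE_LEN and the doorbell value are assumptions, and the wrap logic sits on lines this listing omits; the real queue lives in BAR space and is written with memcpy_toio()):

    #include <stdint.h>
    #include <string.h>

    #define QUEUE_LEN 512   /* assumed; the driver uses MVIOP_QUEUE_LEN */

    struct inbound_queue {
            volatile uint32_t head;
            uint64_t slots[QUEUE_LEN];
            volatile uint32_t doorbell;
    };

    static void inbound_write(struct inbound_queue *q, uint64_t desc)
    {
            uint32_t head = q->head;
            uint32_t next = head + 1;

            if (next == QUEUE_LEN)
                    next = 0;

            memcpy(&q->slots[head], &desc, 8);  /* stands in for memcpy_toio() */
            q->head = next;
            q->doorbell = 1;    /* notify the IOP; the real bit is hardware-defined */
    }
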
175 static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
187 hba->msg_done = 1;
191 req = hba->reqs[tag >> 8].req_virt;
195 hptiop_finish_scsi_req(hba, tag >> 8, req);
203 static int iop_intr_mv(struct hptiop_hba *hba)
208 status = readl(&hba->u.mv.regs->outbound_doorbell);
209 writel(~status, &hba->u.mv.regs->outbound_doorbell);
213 msg = readl(&hba->u.mv.mu->outbound_msg);
215 hptiop_message_callback(hba, msg);
222 while ((tag = mv_outbound_read(hba->u.mv.mu)))
223 hptiop_request_callback_mv(hba, tag);
230 static void hptiop_request_callback_mvfrey(struct hptiop_hba *hba, u32 _tag)
238 hba->msg_done = 1;
242 req = hba->reqs[(_tag >> 4) & 0xff].req_virt;
245 hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req);
253 static int iop_intr_mvfrey(struct hptiop_hba *hba)
258 if (hba->initialized)
259 writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
261 status = readl(&(hba->u.mvfrey.mu->f0_doorbell));
263 writel(status, &(hba->u.mvfrey.mu->f0_doorbell));
265 u32 msg = readl(&(hba->u.mvfrey.mu->cpu_to_f0_msg_a));
267 hptiop_message_callback(hba, msg);
272 status = readl(&(hba->u.mvfrey.mu->isr_cause));
274 writel(status, &(hba->u.mvfrey.mu->isr_cause));
276 cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
277 cur_rptr = hba->u.mvfrey.outlist_rptr;
280 if (cur_rptr == hba->u.mvfrey.list_count)
283 _tag = hba->u.mvfrey.outlist[cur_rptr].val;
285 hptiop_request_callback_mvfrey(hba, _tag);
288 hba->u.mvfrey.outlist_rptr = cur_rptr;
289 } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
292 if (hba->initialized)
293 writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
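
iop_intr_mvfrey() consumes completions from a ring whose producer index the device publishes into a shadow word (*outlist_cptr, masked with 0xff to strip the toggle bit); it walks outlist from outlist_rptr up to that index, wrapping at list_count, and re-reads the shadow to catch completions that landed meanwhile. An illustrative consumer with hypothetical types:

    #include <stdint.h>

    struct out_ring {
            volatile uint32_t *shadow_cptr; /* updated by the device */
            uint32_t rptr;                  /* last entry consumed */
            uint32_t count;                 /* entries in the ring */
            uint32_t *entries;
    };

    static void drain_ring(struct out_ring *r, void (*complete)(uint32_t tag))
    {
            uint32_t cptr, cur;

            do {
                    cptr = *r->shadow_cptr & 0xff;  /* strip flag bits */
                    cur = r->rptr;
                    while (cur != cptr) {
                            if (++cur == r->count)
                                    cur = 0;
                            complete(r->entries[cur]);
                    }
                    r->rptr = cur;
            } while (cptr != (*r->shadow_cptr & 0xff));
    }
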
298 static int iop_send_sync_request_itl(struct hptiop_hba *hba,
306 writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
307 &hba->u.itl.iop->inbound_queue);
308 readl(&hba->u.itl.iop->outbound_intstatus);
311 iop_intr_itl(hba);
320 static int iop_send_sync_request_mv(struct hptiop_hba *hba,
323 struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
326 hba->msg_done = 0;
328 mv_inbound_write(hba->u.mv.internal_req_phy |
329 MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);
332 iop_intr_mv(hba);
333 if (hba->msg_done)
340 static int iop_send_sync_request_mvfrey(struct hptiop_hba *hba,
344 hba->u.mvfrey.internal_req.req_virt;
347 hba->msg_done = 0;
349 hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req));
352 iop_intr_mvfrey(hba);
353 if (hba->msg_done)
357 return hba->msg_done ? 0 : -1;
360 static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
362 writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
363 readl(&hba->u.itl.iop->outbound_intstatus);
366 static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
368 writel(msg, &hba->u.mv.mu->inbound_msg);
369 writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
370 readl(&hba->u.mv.regs->inbound_doorbell);
373 static void hptiop_post_msg_mvfrey(struct hptiop_hba *hba, u32 msg)
375 writel(msg, &(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
376 readl(&(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
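
The readl() following each writel() in these post_msg paths is the standard flush for posted PCI writes: reading any register on the same path forces buffered writes out to the device before the driver proceeds. A sketch with plain volatile accessors standing in for the kernel's writel()/readl() on ioremapped BAR space:

    #include <stdint.h>

    static inline void mmio_write32(volatile uint32_t *reg, uint32_t val)
    {
            *reg = val;
    }

    static inline uint32_t mmio_read32(volatile uint32_t *reg)
    {
            return *reg;
    }

    /* Write a register, then read it back so the posted write reaches
     * the device before execution continues. */
    static void post_and_flush(volatile uint32_t *reg, uint32_t val)
    {
            mmio_write32(reg, val);
            (void)mmio_read32(reg);
    }
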
379 static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
383 hba->msg_done = 0;
384 hba->ops->disable_intr(hba);
385 hba->ops->post_msg(hba, msg);
388 spin_lock_irq(hba->host->host_lock);
389 hba->ops->iop_intr(hba);
390 spin_unlock_irq(hba->host->host_lock);
391 if (hba->msg_done)
396 hba->ops->enable_intr(hba);
397 return hba->msg_done ? 0 : -1;
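
iop_send_sync_msg() is the driver's synchronous handshake: clear msg_done, mask interrupts, post the message, then poll the interrupt handler by hand under the host lock until the firmware's acknowledgement sets msg_done or the timeout expires. A self-contained sketch of that shape (the ops table, lock, and sleep are stand-ins for the driver's hba->ops, host_lock, and msleep()):

    #include <pthread.h>
    #include <stdint.h>
    #include <time.h>

    struct ctrl;

    struct ctrl_ops {
            void (*disable_intr)(struct ctrl *);
            void (*enable_intr)(struct ctrl *);
            void (*post_msg)(struct ctrl *, uint32_t);
            int  (*iop_intr)(struct ctrl *);
    };

    struct ctrl {
            const struct ctrl_ops *ops;
            pthread_mutex_t lock;
            volatile int msg_done;
    };

    static void sleep_ms(unsigned int ms)
    {
            struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

            nanosleep(&ts, NULL);
    }

    static int send_sync_msg(struct ctrl *c, uint32_t msg, uint32_t millisec)
    {
            uint32_t i;

            c->msg_done = 0;
            c->ops->disable_intr(c);
            c->ops->post_msg(c, msg);

            for (i = 0; i < millisec; i++) {
                    pthread_mutex_lock(&c->lock);
                    c->ops->iop_intr(c);    /* poll in place of the ISR */
                    pthread_mutex_unlock(&c->lock);
                    if (c->msg_done)
                            break;
                    sleep_ms(1);
            }

            c->ops->enable_intr(c);
            return c->msg_done ? 0 : -1;
    }
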
400 static int iop_get_config_itl(struct hptiop_hba *hba,
406 req32 = readl(&hba->u.itl.iop->inbound_queue);
411 ((unsigned long)hba->u.itl.iop + req32);
418 if (iop_send_sync_request_itl(hba, req, 20000)) {
424 writel(req32, &hba->u.itl.iop->outbound_queue);
428 static int iop_get_config_mv(struct hptiop_hba *hba,
431 struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;
441 if (iop_send_sync_request_mv(hba, 0, 20000)) {
450 static int iop_get_config_mvfrey(struct hptiop_hba *hba,
453 struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;
472 static int iop_set_config_itl(struct hptiop_hba *hba,
478 req32 = readl(&hba->u.itl.iop->inbound_queue);
483 ((unsigned long)hba->u.itl.iop + req32);
495 if (iop_send_sync_request_itl(hba, req, 20000)) {
500 writel(req32, &hba->u.itl.iop->outbound_queue);
504 static int iop_set_config_mv(struct hptiop_hba *hba,
507 struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;
518 if (iop_send_sync_request_mv(hba, 0, 20000)) {
526 static int iop_set_config_mvfrey(struct hptiop_hba *hba,
530 hba->u.mvfrey.internal_req.req_virt;
541 if (iop_send_sync_request_mvfrey(hba, 0, 20000)) {
549 static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
552 &hba->u.itl.iop->outbound_intmask);
555 static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
558 &hba->u.mv.regs->outbound_intmask);
561 static void hptiop_enable_intr_mvfrey(struct hptiop_hba *hba)
563 writel(CPU_TO_F0_DRBL_MSG_BIT, &(hba->u.mvfrey.mu->f0_doorbell_enable));
564 writel(0x1, &(hba->u.mvfrey.mu->isr_enable));
565 writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
568 static int hptiop_initialize_iop(struct hptiop_hba *hba)
571 hba->ops->enable_intr(hba);
573 hba->initialized = 1;
576 if (iop_send_sync_msg(hba,
579 hba->host->host_no);
585 static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
590 struct pci_dev *pcidev = hba->pcidev;
595 hba->host->host_no);
605 hba->host->host_no);
611 static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
613 struct pci_dev *pcidev = hba->pcidev;
614 hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
615 if (hba->u.itl.iop == NULL)
618 hba->u.itl.plx = hba->u.itl.iop;
619 hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
620 if (hba->u.itl.iop == NULL) {
621 iounmap(hba->u.itl.plx);
628 static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
630 if (hba->u.itl.plx)
631 iounmap(hba->u.itl.plx);
632 iounmap(hba->u.itl.iop);
635 static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
637 hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
638 if (hba->u.mv.regs == NULL)
641 hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
642 if (hba->u.mv.mu == NULL) {
643 iounmap(hba->u.mv.regs);
650 static int hptiop_map_pci_bar_mvfrey(struct hptiop_hba *hba)
652 hba->u.mvfrey.config = hptiop_map_pci_bar(hba, 0);
653 if (hba->u.mvfrey.config == NULL)
656 hba->u.mvfrey.mu = hptiop_map_pci_bar(hba, 2);
657 if (hba->u.mvfrey.mu == NULL) {
658 iounmap(hba->u.mvfrey.config);
665 static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
667 iounmap(hba->u.mv.regs);
668 iounmap(hba->u.mv.mu);
671 static void hptiop_unmap_pci_bar_mvfrey(struct hptiop_hba *hba)
673 iounmap(hba->u.mvfrey.config);
674 iounmap(hba->u.mvfrey.mu);
677 static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
683 hba->msg_done = 1;
685 if (!hba->initialized)
689 atomic_set(&hba->resetting, 0);
690 wake_up(&hba->reset_wq);
693 hba->msg_done = 1;
696 static struct hptiop_request *get_req(struct hptiop_hba *hba)
700 dprintk("get_req: req=%p\n", hba->req_list);
702 ret = hba->req_list;
704 hba->req_list = ret->next;
709 static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
712 req->next = hba->req_list;
713 hba->req_list = req;
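
get_req() and free_req() manage the preallocated requests as a singly linked LIFO free list: get pops the head, free pushes it back. Callers run under the host lock, so the list itself needs no locking. A minimal equivalent with hypothetical types:

    struct request_slot {
            struct request_slot *next;
            /* per-request state lives here */
    };

    struct pool {
            struct request_slot *free_list;
    };

    static struct request_slot *pool_get(struct pool *p)
    {
            struct request_slot *r = p->free_list;

            if (r)
                    p->free_list = r->next;
            return r;
    }

    static void pool_put(struct pool *p, struct request_slot *r)
    {
            r->next = p->free_list;
            p->free_list = r;
    }
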
716 static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
729 scp = hba->reqs[tag].scp;
773 free_req(hba, &hba->reqs[tag]);
776 static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
781 if (hba->iopintf_v2) {
783 req = hba->reqs[tag].req_virt;
788 req = hba->reqs[tag].req_virt;
791 hptiop_finish_scsi_req(hba, tag, req);
794 static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
801 ((unsigned long)hba->u.itl.iop + tag);
830 writel(tag, &hba->u.itl.iop->outbound_queue);
835 struct hptiop_hba *hba = dev_id;
839 spin_lock_irqsave(hba->host->host_lock, flags);
840 handled = hba->ops->iop_intr(hba);
841 spin_unlock_irqrestore(hba->host->host_lock, flags);
849 struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
861 BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
865 hba->ops->host_phy_flag;
873 static void hptiop_post_req_itl(struct hptiop_hba *hba,
882 if (hba->iopintf_v2) {
894 &hba->u.itl.iop->inbound_queue);
897 &hba->u.itl.iop->inbound_queue);
900 static void hptiop_post_req_mv(struct hptiop_hba *hba,
921 MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
924 static void hptiop_post_req_mvfrey(struct hptiop_hba *hba,
938 hba->u.mvfrey.inlist_wptr++;
939 index = hba->u.mvfrey.inlist_wptr & 0x3fff;
941 if (index == hba->u.mvfrey.list_count) {
943 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
944 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
947 hba->u.mvfrey.inlist[index].addr =
949 hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
950 writel(hba->u.mvfrey.inlist_wptr,
951 &(hba->u.mvfrey.mu->inbound_write_ptr));
952 readl(&(hba->u.mvfrey.mu->inbound_write_ptr));
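
On the producer side, hptiop_post_req_mvfrey() keeps the slot index in the low 14 bits of inlist_wptr and flips a toggle bit each time the index wraps, so producer and consumer can tell the current pass over the ring from the previous one. A sketch of the index update (the toggle bit's value mirrors CL_POINTER_TOGGLE but is an assumption here):

    #include <stdint.h>

    #define INDEX_MASK      0x3fffu
    #define POINTER_TOGGLE  0x4000u /* assumed flag bit */

    static uint32_t advance_wptr(uint32_t wptr, uint32_t list_count)
    {
            wptr++;
            if ((wptr & INDEX_MASK) == list_count) {
                    wptr &= ~INDEX_MASK;    /* index back to slot 0 */
                    wptr ^= POINTER_TOGGLE; /* mark a new pass over the ring */
            }
            return wptr;
    }
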
955 static int hptiop_reset_comm_itl(struct hptiop_hba *hba)
960 static int hptiop_reset_comm_mv(struct hptiop_hba *hba)
965 static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba)
967 u32 list_count = hba->u.mvfrey.list_count;
969 if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
975 writel(cpu_to_le32(hba->u.mvfrey.inlist_phy & 0xffffffff),
976 &(hba->u.mvfrey.mu->inbound_base));
977 writel(cpu_to_le32((hba->u.mvfrey.inlist_phy >> 16) >> 16),
978 &(hba->u.mvfrey.mu->inbound_base_high));
980 writel(cpu_to_le32(hba->u.mvfrey.outlist_phy & 0xffffffff),
981 &(hba->u.mvfrey.mu->outbound_base));
982 writel(cpu_to_le32((hba->u.mvfrey.outlist_phy >> 16) >> 16),
983 &(hba->u.mvfrey.mu->outbound_base_high));
985 writel(cpu_to_le32(hba->u.mvfrey.outlist_cptr_phy & 0xffffffff),
986 &(hba->u.mvfrey.mu->outbound_shadow_base));
987 writel(cpu_to_le32((hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16),
988 &(hba->u.mvfrey.mu->outbound_shadow_base_high));
990 hba->u.mvfrey.inlist_wptr = (list_count - 1) | CL_POINTER_TOGGLE;
991 *hba->u.mvfrey.outlist_cptr = (list_count - 1) | CL_POINTER_TOGGLE;
992 hba->u.mvfrey.outlist_rptr = list_count - 1;
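
hptiop_reset_comm_mvfrey() programs each 64-bit DMA base address as two 32-bit halves. The (phy >> 16) >> 16 idiom extracts the high word without shifting by 32, which would be undefined behaviour when dma_addr_t is a 32-bit type. Sketch:

    #include <stdint.h>

    static void split_dma_addr(uint64_t phy, uint32_t *lo, uint32_t *hi)
    {
            *lo = (uint32_t)(phy & 0xffffffff);
            *hi = (uint32_t)((phy >> 16) >> 16);    /* high word, no 32-bit shift */
    }
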
999 struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
1004 _req = get_req(hba);
1026 (scp->device->id > hba->max_devices) ||
1027 ((scp->device->id == (hba->max_devices - 1)) && scp->device->lun)) {
1029 free_req(hba, _req);
1050 hba->ops->post_req(hba, _req);
1066 static int hptiop_reset_hba(struct hptiop_hba *hba)
1068 if (atomic_xchg(&hba->resetting, 1) == 0) {
1069 atomic_inc(&hba->reset_count);
1070 hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
1073 wait_event_timeout(hba->reset_wq,
1074 atomic_read(&hba->resetting) == 0, 60 * HZ);
1076 if (atomic_read(&hba->resetting)) {
1078 printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
1082 if (iop_send_sync_msg(hba,
1085 hba->host->host_no);
1093 struct hptiop_hba * hba = (struct hptiop_hba *)scp->device->host->hostdata;
1098 return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
1104 struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;
1106 if (queue_depth > hba->max_requests)
1107 queue_depth = hba->max_requests;
1121 struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
1124 hba->firmware_version >> 24,
1125 (hba->firmware_version >> 16) & 0xff,
1126 (hba->firmware_version >> 8) & 0xff,
1127 hba->firmware_version & 0xff);
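
The firmware version shown above is packed one field per byte, most significant first. A sketch of the unpacking used by the show routine:

    #include <stdint.h>
    #include <stdio.h>

    static void print_fw_version(uint32_t v)
    {
            printf("%u.%u.%u.%u\n",
                   v >> 24, (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
    }
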
1177 static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba)
1182 static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
1184 hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
1185 0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
1186 if (hba->u.mv.internal_req)
1192 static int hptiop_internal_memalloc_mvfrey(struct hptiop_hba *hba)
1194 u32 list_count = readl(&hba->u.mvfrey.mu->inbound_conf_ctl);
1198 BUG_ON(hba->max_request_size == 0);
1207 hba->u.mvfrey.list_count = list_count;
1208 hba->u.mvfrey.internal_mem_size = 0x800 +
1213 p = dma_alloc_coherent(&hba->pcidev->dev,
1214 hba->u.mvfrey.internal_mem_size, &phy, GFP_KERNEL);
1218 hba->u.mvfrey.internal_req.req_virt = p;
1219 hba->u.mvfrey.internal_req.req_shifted_phy = phy >> 5;
1220 hba->u.mvfrey.internal_req.scp = NULL;
1221 hba->u.mvfrey.internal_req.next = NULL;
1226 hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
1227 hba->u.mvfrey.inlist_phy = phy;
1232 hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
1233 hba->u.mvfrey.outlist_phy = phy;
1238 hba->u.mvfrey.outlist_cptr = (__le32 *)p;
1239 hba->u.mvfrey.outlist_cptr_phy = phy;
1244 static int hptiop_internal_memfree_itl(struct hptiop_hba *hba)
1249 static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
1251 if (hba->u.mv.internal_req) {
1252 dma_free_coherent(&hba->pcidev->dev, 0x800,
1253 hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
1259 static int hptiop_internal_memfree_mvfrey(struct hptiop_hba *hba)
1261 if (hba->u.mvfrey.internal_req.req_virt) {
1262 dma_free_coherent(&hba->pcidev->dev,
1263 hba->u.mvfrey.internal_mem_size,
1264 hba->u.mvfrey.internal_req.req_virt,
1266 hba->u.mvfrey.internal_req.req_shifted_phy << 5);
1275 struct hptiop_hba *hba;
1320 hba = (struct hptiop_hba *)host->hostdata;
1321 memset(hba, 0, sizeof(struct hptiop_hba));
1323 hba->ops = iop_ops;
1324 hba->pcidev = pcidev;
1325 hba->host = host;
1326 hba->initialized = 0;
1327 hba->iopintf_v2 = 0;
1329 atomic_set(&hba->resetting, 0);
1330 atomic_set(&hba->reset_count, 0);
1332 init_waitqueue_head(&hba->reset_wq);
1333 init_waitqueue_head(&hba->ioctl_wq);
1341 if (hba->ops->map_pci_bar(hba))
1344 if (hba->ops->iop_wait_ready(hba, 20000)) {
1346 hba->host->host_no);
1350 if (hba->ops->family == MV_BASED_IOP) {
1351 if (hba->ops->internal_memalloc(hba)) {
1353 hba->host->host_no);
1358 if (hba->ops->get_config(hba, &iop_config)) {
1360 hba->host->host_no);
1364 hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
1366 hba->max_devices = le32_to_cpu(iop_config.max_devices);
1367 hba->max_request_size = le32_to_cpu(iop_config.request_size);
1368 hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
1369 hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
1370 hba->interface_version = le32_to_cpu(iop_config.interface_version);
1371 hba->sdram_size = le32_to_cpu(iop_config.sdram_size);
1373 if (hba->ops->family == MVFREY_BASED_IOP) {
1374 if (hba->ops->internal_memalloc(hba)) {
1376 hba->host->host_no);
1379 if (hba->ops->reset_comm(hba)) {
1381 hba->host->host_no);
1386 if (hba->firmware_version > 0x01020000 ||
1387 hba->interface_version > 0x01020000)
1388 hba->iopintf_v2 = 1;
1398 sg_list, hba->max_sg_descriptors);
1407 if (hba->ops->set_config(hba, &set_config)) {
1409 hba->host->host_no);
1416 driver_name, hba)) {
1418 hba->host->host_no, pcidev->irq);
1424 dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);
1426 hba->req_size = req_size;
1427 hba->req_list = NULL;
1429 for (i = 0; i < hba->max_requests; i++) {
1431 hba->req_size + 0x20,
1436 hba->host->host_no);
1440 hba->dma_coherent[i] = start_virt;
1441 hba->dma_coherent_handle[i] = start_phy;
1449 hba->reqs[i].next = NULL;
1450 hba->reqs[i].req_virt = start_virt;
1451 hba->reqs[i].req_shifted_phy = start_phy >> 5;
1452 hba->reqs[i].index = i;
1453 free_req(hba, &hba->reqs[i]);
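
Each request buffer above is over-allocated by 0x20 bytes (hba->req_size + 0x20) so its bus address can sit on a 32-byte boundary, and the firmware is handed that address shifted right by 5 (req_shifted_phy). A sketch of the alignment arithmetic (the rounding itself happens on lines not shown in this listing):

    #include <stdint.h>

    static uint64_t align_for_firmware(uint64_t phy, uint64_t *virt_adjust)
    {
            uint64_t aligned = (phy + 0x1f) & ~(uint64_t)0x1f;

            *virt_adjust = aligned - phy;   /* add to the CPU pointer as well */
            return aligned >> 5;            /* 32-byte units, as the firmware expects */
    }
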
1457 if (hptiop_initialize_iop(hba))
1462 hba->host->host_no);
1468 dprintk("scsi%d: hptiop_probe succeeded\n", hba->host->host_no);
1472 for (i = 0; i < hba->max_requests; i++) {
1473 if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
1474 dma_free_coherent(&hba->pcidev->dev,
1475 hba->req_size + 0x20,
1476 hba->dma_coherent[i],
1477 hba->dma_coherent_handle[i]);
1482 free_irq(hba->pcidev->irq, hba);
1485 hba->ops->internal_memfree(hba);
1487 hba->ops->unmap_pci_bar(hba);
1505 struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
1507 dprintk("hptiop_shutdown(%p)\n", hba);
1510 if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
1512 hba->host->host_no);
1515 hba->ops->disable_intr(hba);
1518 static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
1522 int_mask = readl(&hba->u.itl.iop->outbound_intmask);
1525 &hba->u.itl.iop->outbound_intmask);
1526 readl(&hba->u.itl.iop->outbound_intmask);
1529 static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
1531 writel(0, &hba->u.mv.regs->outbound_intmask);
1532 readl(&hba->u.mv.regs->outbound_intmask);
1535 static void hptiop_disable_intr_mvfrey(struct hptiop_hba *hba)
1537 writel(0, &(hba->u.mvfrey.mu->f0_doorbell_enable));
1538 readl(&(hba->u.mvfrey.mu->f0_doorbell_enable));
1539 writel(0, &(hba->u.mvfrey.mu->isr_enable));
1540 readl(&(hba->u.mvfrey.mu->isr_enable));
1541 writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
1542 readl(&(hba->u.mvfrey.mu->pcie_f0_int_enable));
1548 struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
1551 dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);
1557 free_irq(hba->pcidev->irq, hba);
1559 for (i = 0; i < hba->max_requests; i++) {
1560 if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
1561 dma_free_coherent(&hba->pcidev->dev,
1562 hba->req_size + 0x20,
1563 hba->dma_coherent[i],
1564 hba->dma_coherent_handle[i]);
1569 hba->ops->internal_memfree(hba);
1571 hba->ops->unmap_pci_bar(hba);
1573 pci_release_regions(hba->pcidev);
1574 pci_set_drvdata(hba->pcidev, NULL);
1575 pci_disable_device(hba->pcidev);