Lines matching refs: ioa_cfg (uses of struct ipr_ioa_cfg, the ipr SCSI driver's per-adapter configuration block)
589 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
592 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
593 trace_entry = &ioa_cfg->trace[trace_index];
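
The two hits above (592-593) show the driver's lock-free trace ring: a shared atomic counter is bumped once per event and masked down to an array index, so concurrent CPUs each get a unique slot and old entries are silently overwritten. A minimal sketch of that pattern, assuming (as the masking requires) a power-of-two ring size; the names and the 256-entry size here are hypothetical:

    #include <linux/atomic.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    struct trace_entry { u8 type; u32 data; };
    static struct trace_entry trace[256];            /* hypothetical size */
    static atomic_t trace_index = ATOMIC_INIT(-1);   /* first slot is 0 */
    #define TRACE_INDEX_MASK (ARRAY_SIZE(trace) - 1)

    static struct trace_entry *next_trace_slot(void)
    {
            /* atomic_add_return() hands each caller a unique ticket;
             * the mask maps tickets onto ring slots, wrapping cleanly
             * because the ring size is a power of two. */
            u32 idx = atomic_add_return(1, &trace_index) & TRACE_INDEX_MASK;
            return &trace[idx];
    }
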
616 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
618 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
620 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
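
Hits 616-620 are the first of many showing the driver's baseline locking convention: nearly every multi-step path in this listing brackets its work with spin_lock_irqsave()/spin_unlock_irqrestore() on the Scsi_Host lock, since the same state is touched from interrupt context. A minimal sketch of the convention (the function name and body are illustrative):

    #include <linux/spinlock.h>
    #include <scsi/scsi_host.h>

    static void do_under_host_lock(struct Scsi_Host *host)
    {
            unsigned long lock_flags;  /* irqsave needs a per-call flags word */

            spin_lock_irqsave(host->host_lock, lock_flags);
            /* ... touch adapter state shared with the interrupt path ... */
            spin_unlock_irqrestore(host->host_lock, lock_flags);
    }
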
645 if (ipr_cmd->ioa_cfg->sis64) {
704 * @ioa_cfg: ioa config struct
710 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
713 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
720 * @ioa_cfg: ioa config struct
729 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
735 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
736 spin_lock(&ioa_cfg->hrrq[i]._lock);
737 ioa_cfg->hrrq[i].allow_interrupts = 0;
738 spin_unlock(&ioa_cfg->hrrq[i]._lock);
742 if (ioa_cfg->sis64)
743 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
745 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
748 if (ioa_cfg->sis64)
749 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
750 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
751 readl(ioa_cfg->regs.sense_interrupt_reg);
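
ipr_mask_and_clear_interrupts() (729-751) shows two idioms that recur throughout the listing: each HRRQ's allow_interrupts flag is cleared under that queue's private _lock, and the mask register is written 64 bits wide (writeq) on SIS-64 chips but 32 bits (writel) otherwise, with a trailing readl() to force the posted writes to complete. Condensed from the hits above:

    /* Mask everything; the register is 64 bits wide on SIS-64 hardware. */
    if (ioa_cfg->sis64)
            writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
    else
            writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

    /* MMIO writes can be posted; reading back any register on the same
     * device guarantees they have reached the adapter before we go on. */
    readl(ioa_cfg->regs.sense_interrupt_reg);
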
756 * @ioa_cfg: ioa config struct
761 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
763 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
768 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
769 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
770 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
774 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
780 * @ioa_cfg: ioa config struct
785 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
787 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
790 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
791 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
792 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
846 * @ioa_cfg: ioa config struct
853 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
859 for_each_hrrq(hrrq, ioa_cfg) {
896 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
899 if (ioa_cfg->sis64) {
907 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
909 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
982 if (ipr_cmd->ioa_cfg->sis64) {
1019 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1024 spin_unlock_irq(ioa_cfg->host->host_lock);
1026 spin_lock_irq(ioa_cfg->host->host_lock);
1029 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1033 if (ioa_cfg->hrrq_num == 1)
1036 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1037 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
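
ipr_get_hrrq_index() (1029-1037) picks a host response queue per command: queue 0 is reserved for initialization and error paths, and the remaining queues are chosen round-robin off an atomic counter. The arithmetic, lifted from the hits above into a standalone helper:

    /* Round-robin over queues 1..hrrq_num-1; queue 0 is reserved. */
    static int pick_hrrq(struct ipr_ioa_cfg *ioa_cfg)
    {
            int hrrq;

            if (ioa_cfg->hrrq_num == 1)
                    return 0;               /* single queue: no choice */

            hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
            return (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
    }
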
1044 * @ioa_cfg: ioa config struct
1055 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1061 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1062 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1064 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1088 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
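
ipr_send_hcam() (1055-1088) shows the hostrcb buffer lifecycle: while the adapter owns a buffer it sits on hostrcb_pending_q; if commands are not currently allowed it goes straight back to hostrcb_free_q. The list discipline, with the command construction elided:

    if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
            ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
            /* keep the buffer findable while the adapter owns it */
            list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
            /* ... build and issue the HCAM command ... */
    } else {
            /* adapter not accepting commands: recycle the buffer */
            list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
    }
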
1104 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1115 if (ioa_cfg->sis64) {
1130 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1138 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1139 ioa_cfg->max_devs_supported);
1140 set_bit(res->target, ioa_cfg->target_ids);
1147 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1148 ioa_cfg->max_devs_supported);
1149 set_bit(res->target, ioa_cfg->array_ids);
1152 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1153 ioa_cfg->max_devs_supported);
1154 set_bit(res->target, ioa_cfg->vset_ids);
1156 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1157 ioa_cfg->max_devs_supported);
1158 set_bit(res->target, ioa_cfg->target_ids);
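
Lines 1138-1158 allocate SIS-64 target numbers out of per-class bitmaps (target_ids, array_ids, vset_ids) with find_first_zero_bit()/set_bit(); the matching teardown at 1318-1328 returns them with clear_bit(). A minimal sketch of such a bitmap ID allocator, with a hypothetical MAX_IDS bound and an exhaustion check the quoted lines leave to the caller:

    #include <linux/bitmap.h>
    #include <linux/bitops.h>

    #define MAX_IDS 1024                    /* hypothetical bound */
    static DECLARE_BITMAP(target_ids, MAX_IDS);

    static int alloc_target_id(void)
    {
            /* find_first_zero_bit() scans for a free slot; set_bit()
             * claims it. Callers serialize under the host lock. */
            int id = find_first_zero_bit(target_ids, MAX_IDS);

            if (id >= MAX_IDS)
                    return -1;              /* namespace exhausted */
            set_bit(id, target_ids);
            return id;
    }

    static void free_target_id(int id)
    {
            clear_bit(id, target_ids);
    }
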
1186 if (res->ioa_cfg->sis64) {
1227 * @ioa_cfg: ioa config struct
1235 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1241 p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1260 if (res->ioa_cfg->sis64) {
1284 ipr_format_res_path(res->ioa_cfg,
1312 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1314 if (!ioa_cfg->sis64)
1318 clear_bit(res->target, ioa_cfg->array_ids);
1320 clear_bit(res->target, ioa_cfg->vset_ids);
1322 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1325 clear_bit(res->target, ioa_cfg->target_ids);
1328 clear_bit(res->target, ioa_cfg->target_ids);
1333 * @ioa_cfg: ioa config struct
1339 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1348 if (ioa_cfg->sis64) {
1356 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1364 if (list_empty(&ioa_cfg->free_res_q)) {
1365 ipr_send_hcam(ioa_cfg,
1371 res = list_entry(ioa_cfg->free_res_q.next,
1376 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1385 schedule_work(&ioa_cfg->work_q);
1388 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1392 schedule_work(&ioa_cfg->work_q);
1395 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1410 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1420 dev_err(&ioa_cfg->pdev->dev,
1423 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1425 ipr_handle_config_change(ioa_cfg, hostrcb);
1533 * @ioa_cfg: ioa config struct
1539 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1544 if (ioa_cfg->sis64)
1569 * @ioa_cfg: ioa config struct
1575 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1601 * @ioa_cfg: ioa config struct
1607 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1625 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1641 * @ioa_cfg: ioa config struct
1647 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1684 * @ioa_cfg: ioa config struct
1690 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1708 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1731 * @ioa_cfg: ioa config struct
1737 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1751 ioa_cfg->host->host_no,
1772 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1773 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1782 * @ioa_cfg: ioa config struct
1788 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1802 ioa_cfg->host->host_no,
1822 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1823 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1837 * @ioa_cfg: ioa config struct
1844 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1851 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1865 * @ioa_cfg: ioa config struct
1871 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1876 if (ioa_cfg->sis64)
1887 ipr_log_hex_data(ioa_cfg, error->data,
1895 * @ioa_cfg: ioa config struct
1901 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1913 ipr_log_hex_data(ioa_cfg, error->data,
2014 ipr_format_res_path(hostrcb->ioa_cfg,
2022 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2164 ipr_format_res_path(hostrcb->ioa_cfg,
2174 ipr_format_res_path(hostrcb->ioa_cfg,
2182 * @ioa_cfg: ioa config struct
2188 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2214 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2219 * @ioa_cfg: ioa config struct
2225 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2240 ipr_format_res_path(ioa_cfg, error->last_res_path,
2262 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2265 ipr_format_res_path(ioa_cfg,
2275 * @ioa_cfg: ioa config struct
2281 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2308 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2313 * @ioa_cfg: ioa config struct
2319 static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2328 ipr_log_hex_data(ioa_cfg, error->data,
2335 * @ioa_cfg: ioa config struct
2341 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2344 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2350 * @ioa_cfg: ioa config struct
2356 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2376 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2378 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2381 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2408 * @ioa_cfg: ioa config struct
2416 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2427 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2429 if (ioa_cfg->sis64)
2434 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2437 scsi_report_bus_reset(ioa_cfg->host,
2451 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2458 ioa_cfg->errors_logged++;
2460 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2467 ipr_log_cache_error(ioa_cfg, hostrcb);
2470 ipr_log_config_error(ioa_cfg, hostrcb);
2474 ipr_log_array_error(ioa_cfg, hostrcb);
2477 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2480 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2483 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2487 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2490 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2493 ipr_log_fabric_error(ioa_cfg, hostrcb);
2496 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2499 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2503 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2506 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2509 ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
2514 ipr_log_generic_error(ioa_cfg, hostrcb);
2549 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2554 if (ioa_cfg->sis64)
2563 ipr_handle_log_data(ioa_cfg, hostrcb);
2565 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2568 dev_err(&ioa_cfg->pdev->dev,
2572 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2573 schedule_work(&ioa_cfg->work_q);
2574 hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2576 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2593 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2596 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2598 ioa_cfg->errors_logged++;
2599 dev_err(&ioa_cfg->pdev->dev,
2602 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2603 ioa_cfg->sdt_state = GET_DUMP;
2605 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2606 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2608 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2626 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2629 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2631 ioa_cfg->errors_logged++;
2632 dev_err(&ioa_cfg->pdev->dev,
2635 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2636 ioa_cfg->sdt_state = GET_DUMP;
2638 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2640 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2641 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2644 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2683 * @ioa_cfg: ioa config struct
2693 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2700 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2718 * @ioa_cfg: ioa config struct
2726 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2733 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2751 * @ioa_cfg: ioa config struct
2759 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2766 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2767 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
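
ipr_get_sis64_dump_data_section() (2759-2767) reads dump memory through a two-register window: write a word address into dump_addr_reg, read the word back from dump_data_reg, and byte-swap it into the big-endian dump buffer. The loop, assuming the length is counted in 32-bit words:

    /* Indirect register window: address in, data out, one word at a time. */
    for (i = 0; i < length_in_words; i++) {
            writel(start_addr + (i * 4), ioa_cfg->regs.dump_addr_reg);
            *dest++ = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
    }
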
2776 * @ioa_cfg: ioa config struct
2784 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2791 if (ioa_cfg->sis64)
2792 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2797 ioa_cfg->regs.set_uproc_interrupt_reg32);
2800 if (ipr_wait_iodbg_ack(ioa_cfg,
2802 dev_err(&ioa_cfg->pdev->dev,
2809 ioa_cfg->regs.clr_interrupt_reg);
2812 writel(start_addr, ioa_cfg->ioa_mailbox);
2816 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2820 if (ipr_wait_iodbg_ack(ioa_cfg,
2822 dev_err(&ioa_cfg->pdev->dev,
2828 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2835 ioa_cfg->regs.clr_interrupt_reg);
2841 ioa_cfg->regs.set_uproc_interrupt_reg32);
2844 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2848 ioa_cfg->regs.clr_interrupt_reg);
2853 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2868 * @ioa_cfg: ioa config struct
2877 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2884 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2886 if (ioa_cfg->sis64)
2912 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2913 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2916 rc = ipr_get_ldump_data_section(ioa_cfg,
2921 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2953 * @ioa_cfg: ioa config struct
2959 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2962 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2970 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2979 * @ioa_cfg: ioa config struct
2985 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3000 * @ioa_cfg: ioa config struct
3006 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3015 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3021 * @ioa_cfg: ioa config struct
3027 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3036 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3042 * @ioa_cfg: ioa config struct
3048 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3062 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3064 if (ioa_cfg->sdt_state != READ_DUMP) {
3065 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3069 if (ioa_cfg->sis64) {
3070 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3072 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3075 start_addr = readl(ioa_cfg->ioa_mailbox);
3077 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3078 dev_err(&ioa_cfg->pdev->dev,
3080 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3084 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3096 ipr_dump_version_data(ioa_cfg, driver_dump);
3097 ipr_dump_location_data(ioa_cfg, driver_dump);
3098 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3099 ipr_dump_trace_data(ioa_cfg, driver_dump);
3116 if (ioa_cfg->sis64) {
3126 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3132 dev_err(&ioa_cfg->pdev->dev,
3136 ioa_cfg->sdt_state = DUMP_OBTAINED;
3137 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3148 if (ioa_cfg->sis64)
3153 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3163 if (ioa_cfg->sis64)
3181 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3194 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3199 ioa_cfg->sdt_state = DUMP_OBTAINED;
3204 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3217 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3222 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3223 ioa_cfg->dump = NULL;
3224 ioa_cfg->sdt_state = INACTIVE;
3225 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3240 struct ipr_ioa_cfg *ioa_cfg =
3246 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3251 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3252 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3256 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3262 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3265 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3268 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3275 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3281 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3282 scsi_add_device(ioa_cfg->host, bus, target, lun);
3283 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3288 ioa_cfg->scan_done = 1;
3289 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3290 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3309 struct ipr_ioa_cfg *ioa_cfg =
3313 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3315 if (ioa_cfg->sdt_state == READ_DUMP) {
3316 dump = ioa_cfg->dump;
3318 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3322 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3323 ipr_get_ioa_dump(ioa_cfg, dump);
3326 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3327 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3328 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3329 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3333 if (ioa_cfg->scsi_unblock) {
3334 ioa_cfg->scsi_unblock = 0;
3335 ioa_cfg->scsi_blocked = 0;
3336 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3337 scsi_unblock_requests(ioa_cfg->host);
3338 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3339 if (ioa_cfg->scsi_blocked)
3340 scsi_block_requests(ioa_cfg->host);
3343 if (!ioa_cfg->scan_enabled) {
3344 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3348 schedule_work(&ioa_cfg->scsi_add_work_q);
3350 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3373 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3377 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3378 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3380 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3408 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3409 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3413 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3418 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3443 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3447 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3448 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3449 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3468 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3471 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3472 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3473 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3504 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3511 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3512 while (ioa_cfg->in_reset_reload) {
3513 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3514 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3515 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3518 ioa_cfg->errors_logged = 0;
3519 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3521 if (ioa_cfg->in_reset_reload) {
3522 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3523 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3528 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3532 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3533 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3535 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
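
The reset store path above (3504-3535) shows the driver's standard way of sleeping on reset completion: host_lock cannot be held across wait_event(), so the lock is dropped, the caller sleeps on reset_wait_q until in_reset_reload clears, and the lock is re-taken before state is examined again. The loop, as it appears in the hits:

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    while (ioa_cfg->in_reset_reload) {
            /* never sleep with a spinlock held */
            spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
            wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
            spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    }
    /* reset is idle here and the lock is held; safe to proceed */
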
3561 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3565 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3566 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3570 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3591 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3598 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3599 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3601 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3602 spin_lock(&ioa_cfg->hrrq[i]._lock);
3603 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3604 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3607 ioa_cfg->reset_retries = 0;
3608 ioa_cfg->in_ioa_bringdown = 0;
3609 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3611 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3612 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3643 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3650 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3651 if (!ioa_cfg->in_reset_reload)
3652 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3653 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3654 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3681 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3686 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3707 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3712 if (!ioa_cfg->sis64) {
3713 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3720 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
3724 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3725 dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");
3729 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3730 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3731 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3735 ioa_cfg->iopoll_weight = user_iopoll_weight;
3736 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3737 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3738 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3739 ioa_cfg->iopoll_weight, ipr_iopoll);
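
The irq_poll weight store path (3707-3739) re-arms polling when the weight changes: existing pollers on queues 1..hrrq_num-1 are disabled first, then re-initialized with the new budget (queue 0 never polls, and polling only applies to SIS-64 adapters with multiple vectors). The disable/re-init sequence from the hits, with ipr_iopoll as the driver's poll callback:

    /* Tear down the old pollers before changing the weight... */
    if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1)
            for (i = 1; i < ioa_cfg->hrrq_num; i++)
                    irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);

    ioa_cfg->iopoll_weight = user_iopoll_weight;

    /* ...then bring them back with the new budget. */
    if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1)
            for (i = 1; i < ioa_cfg->hrrq_num; i++)
                    irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
                                  ioa_cfg->iopoll_weight, ipr_iopoll);
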
3928 * @ioa_cfg: ioa config struct
3936 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3941 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3942 while (ioa_cfg->in_reset_reload) {
3943 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3944 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3945 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3948 if (ioa_cfg->ucode_sglist) {
3949 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3950 dev_err(&ioa_cfg->pdev->dev,
3955 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3960 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3961 dev_err(&ioa_cfg->pdev->dev,
3966 ioa_cfg->ucode_sglist = sglist;
3967 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3968 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3969 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3971 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3972 ioa_cfg->ucode_sglist = NULL;
3973 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
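
ipr_update_ioa_ucode() (3936-3973) is the microcode download skeleton: wait out any reset in progress, refuse concurrent updates, dma_map_sg() the image, point ucode_sglist at it, then trigger a NORMAL-shutdown reset (the reset job does the actual download), wait for it to finish, and clear the pointer. An outline with the host_lock bracketing elided; the num_sg field name is an assumption from context:

    /* Hand the mapped image to the reset engine, which downloads it. */
    sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
                                    sglist->scatterlist,
                                    sglist->num_sg, DMA_TO_DEVICE);
    if (!sglist->num_dma_sg)
            return -EIO;                    /* mapping failed */

    ioa_cfg->ucode_sglist = sglist;
    ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
    wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
    ioa_cfg->ucode_sglist = NULL;           /* download done (or failed) */
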
3994 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4012 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4013 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4024 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4032 dev_err(&ioa_cfg->pdev->dev,
4039 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4070 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4074 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4075 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4076 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4094 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4099 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4100 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4103 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4108 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4118 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4122 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4123 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4126 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4131 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4132 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4179 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4189 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4190 dump = ioa_cfg->dump;
4192 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4193 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4197 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4223 if (ioa_cfg->sis64)
4264 * @ioa_cfg: ioa config struct
4269 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4282 if (ioa_cfg->sis64)
4298 dump->ioa_cfg = ioa_cfg;
4300 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4302 if (INACTIVE != ioa_cfg->sdt_state) {
4303 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4309 ioa_cfg->dump = dump;
4310 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4311 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4312 ioa_cfg->dump_taken = 1;
4313 schedule_work(&ioa_cfg->work_q);
4315 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4322 * @ioa_cfg: ioa config struct
4327 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4334 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4335 dump = ioa_cfg->dump;
4337 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4341 ioa_cfg->dump = NULL;
4342 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4368 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4375 rc = ipr_alloc_dump(ioa_cfg);
4377 rc = ipr_free_dump(ioa_cfg);
4397 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4426 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4431 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4435 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4460 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4466 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4468 if (res && ioa_cfg->sis64)
4473 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4476 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4500 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4505 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4507 if (res && ioa_cfg->sis64)
4512 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4536 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4541 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4547 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4572 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4577 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4583 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4602 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4607 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4620 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4689 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4692 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4710 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4712 if (ioa_cfg->sis64) {
4715 clear_bit(starget->id, ioa_cfg->array_ids);
4717 clear_bit(starget->id, ioa_cfg->vset_ids);
4719 clear_bit(starget->id, ioa_cfg->target_ids);
4733 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4736 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4756 struct ipr_ioa_cfg *ioa_cfg;
4759 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4761 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4767 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4781 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4786 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4802 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4804 if (ioa_cfg->sis64)
4806 ipr_format_res_path(ioa_cfg,
4810 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4828 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4835 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4849 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4854 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4895 * @ioa_cfg: ioa config struct
4902 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4916 for_each_hrrq(hrrq, ioa_cfg) {
4919 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
4936 for_each_hrrq(hrrq, ioa_cfg) {
4939 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
4951 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4964 struct ipr_ioa_cfg *ioa_cfg;
4969 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4970 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4972 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4973 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4974 dev_err(&ioa_cfg->pdev->dev,
4977 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4978 ioa_cfg->sdt_state = GET_DUMP;
4981 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4982 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4983 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4987 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4992 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4999 * @ioa_cfg: ioa config struct
5010 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5019 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5023 if (ipr_cmd->ioa_cfg->sis64)
5051 struct ipr_ioa_cfg *ioa_cfg;
5056 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5064 if (ioa_cfg->in_reset_reload)
5066 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5072 rc = ipr_device_reset(ioa_cfg, res);
5083 struct ipr_ioa_cfg *ioa_cfg;
5086 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5097 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5113 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5117 if (!ioa_cfg->sis64)
5118 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5120 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5153 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5158 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5159 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5160 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5165 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5175 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5191 struct ipr_ioa_cfg *ioa_cfg;
5199 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5206 if (ioa_cfg->in_reset_reload ||
5207 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5217 readl(ioa_cfg->regs.sense_interrupt_reg);
5222 for_each_hrrq(hrrq, ioa_cfg) {
5225 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5226 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5238 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5278 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5282 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5284 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5301 struct ipr_ioa_cfg *ioa_cfg;
5305 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5312 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5319 * @ioa_cfg: ioa config struct
5325 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5331 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5338 if (ioa_cfg->sis64) {
5339 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5340 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5344 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5345 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5346 list_del(&ioa_cfg->reset_cmd->queue);
5347 del_timer(&ioa_cfg->reset_cmd->timer);
5348 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5358 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5359 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5361 list_del(&ioa_cfg->reset_cmd->queue);
5362 del_timer(&ioa_cfg->reset_cmd->timer);
5363 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5365 if (ioa_cfg->clear_isr) {
5367 dev_err(&ioa_cfg->pdev->dev,
5369 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5370 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5375 ioa_cfg->ioa_unit_checked = 1;
5377 dev_err(&ioa_cfg->pdev->dev,
5380 dev_err(&ioa_cfg->pdev->dev,
5383 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5384 ioa_cfg->sdt_state = GET_DUMP;
5386 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5387 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5395 * @ioa_cfg: ioa config struct
5402 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5404 ioa_cfg->errors_logged++;
5405 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5407 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5408 ioa_cfg->sdt_state = GET_DUMP;
5410 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5419 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5435 ipr_isr_eh(ioa_cfg,
5441 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5499 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5519 if (!ioa_cfg->clear_isr)
5526 ioa_cfg->regs.clr_interrupt_reg32);
5527 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5532 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5536 ipr_isr_eh(ioa_cfg,
5545 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5567 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5581 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5608 * @ioa_cfg: ioa config struct
5614 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5632 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5660 * @ioa_cfg: ioa config struct
5666 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5683 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5797 if (ipr_cmd->ioa_cfg->sis64)
5900 * @ioa_cfg: ioa config struct
5911 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5927 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5935 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5947 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5950 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5952 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6074 if (ipr_cmd->ioa_cfg->sis64)
6087 * @ioa_cfg: ioa config struct
6096 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6112 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6151 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6205 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6222 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6224 ipr_erp_start(ioa_cfg, ipr_cmd);
6226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6245 struct ipr_ioa_cfg *ioa_cfg;
6254 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6259 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6260 hrrq = &ioa_cfg->hrrq[hrrq_id];
6327 if (ioa_cfg->sis64)
6328 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6330 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6378 struct ipr_ioa_cfg *ioa_cfg;
6381 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6384 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6429 * @ioa_cfg: ioa cfg struct
6438 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6442 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6451 #define ipr_invalid_adapter(ioa_cfg) 0
6466 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6470 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6472 ioa_cfg->scsi_unblock = 1;
6473 schedule_work(&ioa_cfg->work_q);
6476 ioa_cfg->in_reset_reload = 0;
6477 ioa_cfg->reset_retries = 0;
6478 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6479 spin_lock(&ioa_cfg->hrrq[i]._lock);
6480 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6481 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6486 wake_up_all(&ioa_cfg->reset_wait_q);
6505 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6510 ioa_cfg->in_reset_reload = 0;
6511 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6512 spin_lock(&ioa_cfg->hrrq[j]._lock);
6513 ioa_cfg->hrrq[j].allow_cmds = 1;
6514 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6517 ioa_cfg->reset_cmd = NULL;
6518 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6520 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6526 schedule_work(&ioa_cfg->work_q);
6529 list_del_init(&ioa_cfg->hostrcb[j]->queue);
6531 ipr_send_hcam(ioa_cfg,
6533 ioa_cfg->hostrcb[j]);
6535 ipr_send_hcam(ioa_cfg,
6537 ioa_cfg->hostrcb[j]);
6540 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6541 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6543 ioa_cfg->reset_retries = 0;
6545 wake_up_all(&ioa_cfg->reset_wait_q);
6547 ioa_cfg->scsi_unblock = 1;
6548 schedule_work(&ioa_cfg->work_q);
6583 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6584 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6590 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6607 ioa_cfg->vpd_cbs_dma +
6615 if (!ioa_cfg->sis64)
6666 * @ioa_cfg: ioa config struct
6674 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6691 dev_err(&ioa_cfg->pdev->dev,
6702 * @ioa_cfg: ioa config struct
6711 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6717 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6718 ioa_cfg->bus_attr[i].bus_width);
6720 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6721 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6727 * @ioa_cfg: ioa config struct
6735 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6753 dev_err(&ioa_cfg->pdev->dev,
6759 bus_attr = &ioa_cfg->bus_attr[i];
6810 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6811 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6815 ipr_scsi_bus_speed_limit(ioa_cfg);
6816 ipr_check_term_power(ioa_cfg, mode_pages);
6817 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6822 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6826 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6871 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6874 dev_err(&ioa_cfg->pdev->dev,
6878 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6895 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6900 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6920 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6924 0x28, ioa_cfg->vpd_cbs_dma +
6948 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6949 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6964 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7008 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7012 0x24, ioa_cfg->vpd_cbs_dma +
7039 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7046 if (ioa_cfg->sis64)
7047 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7049 flag = ioa_cfg->u.cfg_table->hdr.flags;
7052 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7054 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7057 if (ioa_cfg->sis64)
7058 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7060 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7063 if (ioa_cfg->sis64)
7064 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7066 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7071 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7078 if (list_empty(&ioa_cfg->free_res_q)) {
7079 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7084 res = list_entry(ioa_cfg->free_res_q.next,
7086 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7100 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7106 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7109 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7130 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7132 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7133 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7137 ioa_cfg->dual_raid = 1;
7138 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7145 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7146 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7147 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7149 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7192 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7193 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7284 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7285 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7286 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7294 (ioa_cfg->vpd_cbs_dma
7317 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7318 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7319 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7327 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7348 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7355 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7374 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7380 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7382 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7384 if (ipr_invalid_adapter(ioa_cfg)) {
7385 dev_err(&ioa_cfg->pdev->dev,
7389 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7390 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7392 &ioa_cfg->hrrq->hrrq_free_q);
7400 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7418 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7424 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7443 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7449 if (ioa_cfg->identify_hrrq_index == 0)
7450 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7452 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7453 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7459 if (ioa_cfg->sis64)
7462 if (ioa_cfg->nvectors == 1)
7482 ioa_cfg->identify_hrrq_index;
7484 if (ioa_cfg->sis64) {
7497 ioa_cfg->identify_hrrq_index;
7502 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7529 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7532 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7534 if (ioa_cfg->reset_cmd == ipr_cmd) {
7539 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7570 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7571 * @ioa_cfg: ioa cfg struct
7576 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7580 for_each_hrrq(hrrq, ioa_cfg) {
7593 ioa_cfg->identify_hrrq_index = 0;
7594 if (ioa_cfg->hrrq_num == 1)
7595 atomic_set(&ioa_cfg->hrrq_index, 0);
7597 atomic_set(&ioa_cfg->hrrq_index, 1);
7600 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7615 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7618 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7633 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7634 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7635 stage_time = ioa_cfg->transop_timeout;
7638 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7643 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7644 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7671 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7678 ipr_init_ioa_mem(ioa_cfg);
7680 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7681 spin_lock(&ioa_cfg->hrrq[i]._lock);
7682 ioa_cfg->hrrq[i].allow_interrupts = 1;
7683 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7685 if (ioa_cfg->sis64) {
7687 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7688 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7691 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7695 ioa_cfg->regs.clr_interrupt_mask_reg32);
7696 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7701 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7703 if (ioa_cfg->sis64) {
7706 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7708 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7710 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7712 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7714 if (ioa_cfg->sis64) {
7719 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7741 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7743 if (ioa_cfg->sdt_state == GET_DUMP)
7744 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7745 else if (ioa_cfg->sdt_state == READ_DUMP)
7746 ioa_cfg->sdt_state = ABORT_DUMP;
7748 ioa_cfg->dump_timeout = 1;
7756 * @ioa_cfg: ioa config struct
7764 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7766 ioa_cfg->errors_logged++;
7767 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7772 * @ioa_cfg: ioa config struct
7780 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7788 mailbox = readl(ioa_cfg->ioa_mailbox);
7790 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7791 ipr_unit_check_no_data(ioa_cfg);
7796 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7802 ipr_unit_check_no_data(ioa_cfg);
7814 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7819 rc = ipr_get_ldump_data_section(ioa_cfg,
7825 ipr_handle_log_data(ioa_cfg, hostrcb);
7828 ioa_cfg->sdt_state == GET_DUMP)
7829 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7831 ipr_unit_check_no_data(ioa_cfg);
7833 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7847 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7850 ioa_cfg->ioa_unit_checked = 0;
7851 ipr_get_unit_check_buffer(ioa_cfg);
7861 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7865 if (ioa_cfg->sdt_state != GET_DUMP)
7868 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
7869 (readl(ioa_cfg->regs.sense_interrupt_reg) &
7873 dev_err(&ioa_cfg->pdev->dev,
7876 ioa_cfg->sdt_state = READ_DUMP;
7877 ioa_cfg->dump_timeout = 0;
7878 if (ioa_cfg->sis64)
7883 schedule_work(&ioa_cfg->work_q);
7908 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7911 ioa_cfg->pdev->state_saved = true;
7912 pci_restore_state(ioa_cfg->pdev);
7914 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7919 ipr_fail_all_ops(ioa_cfg);
7921 if (ioa_cfg->sis64) {
7923 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7924 readl(ioa_cfg->regs.endian_swap_reg);
7927 if (ioa_cfg->ioa_unit_checked) {
7928 if (ioa_cfg->sis64) {
7933 ioa_cfg->ioa_unit_checked = 0;
7934 ipr_get_unit_check_buffer(ioa_cfg);
7941 if (ioa_cfg->in_ioa_bringdown) {
7943 } else if (ioa_cfg->sdt_state == GET_DUMP) {
7965 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7968 if (ioa_cfg->cfg_locked)
7969 pci_cfg_access_unlock(ioa_cfg->pdev);
7970 ioa_cfg->cfg_locked = 0;
7987 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7991 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7993 ioa_cfg->regs.set_uproc_interrupt_reg32);
7995 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8002 if (ioa_cfg->cfg_locked)
8003 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8004 ioa_cfg->cfg_locked = 0;
8041 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8042 struct pci_dev *pdev = ioa_cfg->pdev;
8050 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8051 if (ioa_cfg->reset_cmd == ipr_cmd)
8053 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8068 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8072 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8089 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8092 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8093 ioa_cfg->cfg_locked = 1;
8094 ipr_cmd->job_step = ioa_cfg->reset;
8102 ipr_cmd->job_step = ioa_cfg->reset;
8103 dev_err(&ioa_cfg->pdev->dev,
8122 ipr_cmd->ioa_cfg->cfg_locked = 0;
8130 * @ioa_cfg: ioa config struct
8135 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8139 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8160 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8163 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8188 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8193 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8196 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8197 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8221 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8225 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8242 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8251 for_each_hrrq(hrrq, ioa_cfg) {
8255 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8281 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8285 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8291 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
8337 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8338 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8340 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8359 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8360 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8376 if (ioa_cfg->sis64)
8402 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8411 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8421 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8449 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8454 if (ioa_cfg->reset_cmd != ipr_cmd) {
8478 * @ioa_cfg: ioa config struct
8490 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8497 ioa_cfg->in_reset_reload = 1;
8498 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8499 spin_lock(&ioa_cfg->hrrq[i]._lock);
8500 ioa_cfg->hrrq[i].allow_cmds = 0;
8501 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8504 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8505 ioa_cfg->scsi_unblock = 0;
8506 ioa_cfg->scsi_blocked = 1;
8507 scsi_block_requests(ioa_cfg->host);
8510 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8511 ioa_cfg->reset_cmd = ipr_cmd;
8520 * @ioa_cfg: ioa config struct
8530 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8535 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8538 if (ioa_cfg->in_reset_reload) {
8539 if (ioa_cfg->sdt_state == GET_DUMP)
8540 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8541 else if (ioa_cfg->sdt_state == READ_DUMP)
8542 ioa_cfg->sdt_state = ABORT_DUMP;
8545 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8546 dev_err(&ioa_cfg->pdev->dev,
8549 ioa_cfg->reset_retries = 0;
8550 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8551 spin_lock(&ioa_cfg->hrrq[i]._lock);
8552 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8553 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8557 if (ioa_cfg->in_ioa_bringdown) {
8558 ioa_cfg->reset_cmd = NULL;
8559 ioa_cfg->in_reset_reload = 0;
8560 ipr_fail_all_ops(ioa_cfg);
8561 wake_up_all(&ioa_cfg->reset_wait_q);
8563 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8564 ioa_cfg->scsi_unblock = 1;
8565 schedule_work(&ioa_cfg->work_q);
8569 ioa_cfg->in_ioa_bringdown = 1;
8574 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8588 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8592 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8593 spin_lock(&ioa_cfg->hrrq[i]._lock);
8594 ioa_cfg->hrrq[i].allow_interrupts = 0;
8595 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8613 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8615 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8616 if (!ioa_cfg->probe_done)
8618 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8633 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8635 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8636 if (ioa_cfg->probe_done)
8637 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8638 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8652 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8654 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8655 if (ioa_cfg->probe_done) {
8656 if (ioa_cfg->needs_warm_reset)
8657 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8659 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8662 wake_up_all(&ioa_cfg->eeh_wait_q);
8663 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8677 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8680 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8681 if (ioa_cfg->probe_done) {
8682 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8683 ioa_cfg->sdt_state = ABORT_DUMP;
8684 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8685 ioa_cfg->in_ioa_bringdown = 1;
8686 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8687 spin_lock(&ioa_cfg->hrrq[i]._lock);
8688 ioa_cfg->hrrq[i].allow_cmds = 0;
8689 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8692 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8694 wake_up_all(&ioa_cfg->eeh_wait_q);
8695 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8726 * @ioa_cfg: ioa cfg struct
8734 static void ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8739 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8740 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8741 ioa_cfg->probe_done = 1;
8742 if (ioa_cfg->needs_hard_reset) {
8743 ioa_cfg->needs_hard_reset = 0;
8744 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8746 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8748 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8755 * @ioa_cfg: ioa config struct
8760 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8764 if (ioa_cfg->ipr_cmnd_list) {
8766 if (ioa_cfg->ipr_cmnd_list[i])
8767 dma_pool_free(ioa_cfg->ipr_cmd_pool,
8768 ioa_cfg->ipr_cmnd_list[i],
8769 ioa_cfg->ipr_cmnd_list_dma[i]);
8771 ioa_cfg->ipr_cmnd_list[i] = NULL;
8775 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
8777 kfree(ioa_cfg->ipr_cmnd_list);
8778 kfree(ioa_cfg->ipr_cmnd_list_dma);
8779 ioa_cfg->ipr_cmnd_list = NULL;
8780 ioa_cfg->ipr_cmnd_list_dma = NULL;
8781 ioa_cfg->ipr_cmd_pool = NULL;
8786 * @ioa_cfg: ioa cfg struct
8791 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8795 kfree(ioa_cfg->res_entries);
8796 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
8797 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8798 ipr_free_cmd_blks(ioa_cfg);
8800 for (i = 0; i < ioa_cfg->hrrq_num; i++)
8801 dma_free_coherent(&ioa_cfg->pdev->dev,
8802 sizeof(u32) * ioa_cfg->hrrq[i].size,
8803 ioa_cfg->hrrq[i].host_rrq,
8804 ioa_cfg->hrrq[i].host_rrq_dma);
8806 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
8807 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
8810 dma_free_coherent(&ioa_cfg->pdev->dev,
8812 ioa_cfg->hostrcb[i],
8813 ioa_cfg->hostrcb_dma[i]);
8816 ipr_free_dump(ioa_cfg);
8817 kfree(ioa_cfg->trace);
8822 * @ioa_cfg: ipr cfg struct
8830 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
8832 struct pci_dev *pdev = ioa_cfg->pdev;
8835 for (i = 0; i < ioa_cfg->nvectors; i++)
8836 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
8842 * @ioa_cfg: ioa config struct
8850 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8852 struct pci_dev *pdev = ioa_cfg->pdev;
8855 ipr_free_irqs(ioa_cfg);
8856 if (ioa_cfg->reset_work_q)
8857 destroy_workqueue(ioa_cfg->reset_work_q);
8858 iounmap(ioa_cfg->hdw_dma_regs);
8860 ipr_free_mem(ioa_cfg);
8861 scsi_host_put(ioa_cfg->host);
8868 * @ioa_cfg: ioa config struct
8873 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8880 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
8883 if (!ioa_cfg->ipr_cmd_pool)
8886 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8887 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8889 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8890 ipr_free_cmd_blks(ioa_cfg);
8894 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8895 if (ioa_cfg->hrrq_num > 1) {
8898 ioa_cfg->hrrq[i].min_cmd_id = 0;
8899 ioa_cfg->hrrq[i].max_cmd_id =
8904 (ioa_cfg->hrrq_num - 1);
8905 ioa_cfg->hrrq[i].min_cmd_id =
8908 ioa_cfg->hrrq[i].max_cmd_id =
8914 ioa_cfg->hrrq[i].min_cmd_id = 0;
8915 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8917 ioa_cfg->hrrq[i].size = entries_each_hrrq;
8920 BUG_ON(ioa_cfg->hrrq_num == 0);
8923 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8925 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8926 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
8930 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
8934 ipr_free_cmd_blks(ioa_cfg);
8938 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8939 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8943 if (ioa_cfg->sis64)
8949 if (ioa_cfg->sis64) {
8963 ipr_cmd->ioa_cfg = ioa_cfg;
8968 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
8970 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
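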
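
ipr_alloc_cmd_blks() (8873-8970) carves every command block out of one DMA-coherent pool: dma_pool_create() once per adapter, dma_pool_zalloc() per command, with both the CPU pointer and the DMA handle remembered so ipr_free_cmd_blks() (8760-8781) can return each block and destroy the pool. A minimal sketch of the pool discipline; struct cmd_blk, the arrays, and the 32-byte alignment are hypothetical stand-ins:

    #include <linux/dmapool.h>

    #define NBLKS 100                       /* hypothetical count */
    struct cmd_blk { u8 raw[512]; };        /* stand-in block layout */
    static struct dma_pool *pool;
    static struct cmd_blk *blk[NBLKS];
    static dma_addr_t blk_dma[NBLKS];

    static int alloc_cmd_blks(struct pci_dev *pdev)
    {
            int i;

            pool = dma_pool_create("cmdblks", &pdev->dev,
                                   sizeof(struct cmd_blk), 32, 0);
            if (!pool)
                    return -ENOMEM;

            for (i = 0; i < NBLKS; i++) {
                    blk[i] = dma_pool_zalloc(pool, GFP_KERNEL, &blk_dma[i]);
                    if (!blk[i]) {
                            while (i--)     /* unwind partial allocation */
                                    dma_pool_free(pool, blk[i], blk_dma[i]);
                            dma_pool_destroy(pool);
                            return -ENOMEM;
                    }
            }
            return 0;
    }
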
8979 * @ioa_cfg: ioa config struct
8984 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8986 struct pci_dev *pdev = ioa_cfg->pdev;
8990 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
8994 if (!ioa_cfg->res_entries)
8997 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8998 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8999 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9002 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9004 &ioa_cfg->vpd_cbs_dma,
9007 if (!ioa_cfg->vpd_cbs)
9010 if (ipr_alloc_cmd_blks(ioa_cfg))
9013 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9014 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9015 sizeof(u32) * ioa_cfg->hrrq[i].size,
9016 &ioa_cfg->hrrq[i].host_rrq_dma,
9019 if (!ioa_cfg->hrrq[i].host_rrq) {
9022 sizeof(u32) * ioa_cfg->hrrq[i].size,
9023 ioa_cfg->hrrq[i].host_rrq,
9024 ioa_cfg->hrrq[i].host_rrq_dma);
9027 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9030 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9031 ioa_cfg->cfg_table_size,
9032 &ioa_cfg->cfg_table_dma,
9035 if (!ioa_cfg->u.cfg_table)
9039 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9041 &ioa_cfg->hostrcb_dma[i],
9044 if (!ioa_cfg->hostrcb[i])
9047 ioa_cfg->hostrcb[i]->hostrcb_dma =
9048 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9049 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9050 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9053 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9057 if (!ioa_cfg->trace)
9068 ioa_cfg->hostrcb[i],
9069 ioa_cfg->hostrcb_dma[i]);
9071 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9072 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9074 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9076 sizeof(u32) * ioa_cfg->hrrq[i].size,
9077 ioa_cfg->hrrq[i].host_rrq,
9078 ioa_cfg->hrrq[i].host_rrq_dma);
9081 ipr_free_cmd_blks(ioa_cfg);
9084 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9086 kfree(ioa_cfg->res_entries);
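
The trailing fragments of ipr_alloc_mem() (from the hostrcb dma_free_coherent() onward) are its error-unwind labels, not a second free routine: each allocation stage jumps to the label that releases everything acquired before it, in reverse order. The idiom, sketched with a reduced set of stages and illustrative label names:

/* Sketch of the allocate-forward / unwind-backward idiom. */
static int example_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
        struct pci_dev *pdev = ioa_cfg->pdev;

        ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
                                              sizeof(struct ipr_misc_cbs),
                                              &ioa_cfg->vpd_cbs_dma,
                                              GFP_KERNEL);
        if (!ioa_cfg->vpd_cbs)
                goto out;

        if (ipr_alloc_cmd_blks(ioa_cfg))
                goto out_free_vpd_cbs;

        ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
                                                  ioa_cfg->cfg_table_size,
                                                  &ioa_cfg->cfg_table_dma,
                                                  GFP_KERNEL);
        if (!ioa_cfg->u.cfg_table)
                goto out_free_cmd_blks;

        return 0;

out_free_cmd_blks:
        ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
        dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
                          ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out:
        return -ENOMEM;
}
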
9092 * @ioa_cfg: ioa config struct
9097 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9102 ioa_cfg->bus_attr[i].bus = i;
9103 ioa_cfg->bus_attr[i].qas_enabled = 0;
9104 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9106 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9108 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
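
The two max_xfer_rate assignments are the arms of a bounds check on the ipr_max_speed module parameter. A hedged reconstruction of the loop (IPR_MAX_NUM_BUSES is assumed as the driver's bus-count bound):

/* Sketch: clamp the ipr_max_speed parameter to the speed table,
 * falling back to the U160 rate when it is out of range. */
for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
        ioa_cfg->bus_attr[i].bus = i;
        ioa_cfg->bus_attr[i].qas_enabled = 0;
        ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
        if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
                ioa_cfg->bus_attr[i].max_xfer_rate =
                        ipr_max_bus_speeds[ipr_max_speed];
        else
                ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
}
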
9114 * @ioa_cfg: ioa config struct
9119 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9125 p = &ioa_cfg->chip_cfg->regs;
9126 t = &ioa_cfg->regs;
9127 base = ioa_cfg->hdw_dma_regs;
9146 if (ioa_cfg->sis64) {
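
ipr_init_regs() is pure pointer arithmetic: p points at the chip-specific table of register byte offsets, t at the live register set, and base at the ioremapped BAR, so each register becomes base + offset. A sketch (the two struct type names are assumptions; the register field names match ones used elsewhere in this listing):

const struct ipr_interrupt_offsets *p = &ioa_cfg->chip_cfg->regs;
struct ipr_interrupts *t = &ioa_cfg->regs;
void __iomem *base = ioa_cfg->hdw_dma_regs;

/* One ioremapped address per register the chip table describes. */
t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
t->clr_interrupt_reg = base + p->clr_interrupt_reg;
t->sense_interrupt_reg = base + p->sense_interrupt_reg;
if (ioa_cfg->sis64)
        /* 64-bit chips additionally map 32-bit register aliases */
        t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
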
9156 * @ioa_cfg: ioa config struct
9163 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9168 ioa_cfg->host = host;
9169 ioa_cfg->pdev = pdev;
9170 ioa_cfg->log_level = ipr_log_level;
9171 ioa_cfg->doorbell = IPR_DOORBELL;
9172 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9173 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9174 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9175 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9176 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9177 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9179 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9180 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9181 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9182 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9183 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9184 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9185 INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9186 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9187 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9188 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9189 ioa_cfg->sdt_state = INACTIVE;
9191 ipr_initialize_bus_attr(ioa_cfg);
9192 ioa_cfg->max_devs_supported = ipr_max_devs;
9194 if (ioa_cfg->sis64) {
9199 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9200 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9202 * ioa_cfg->max_devs_supported)));
9208 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9209 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9211 * ioa_cfg->max_devs_supported)));
9216 host->can_queue = ioa_cfg->max_cmds;
9217 pci_set_drvdata(pdev, ioa_cfg);
9219 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9220 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9221 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9222 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9224 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9226 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
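
The two .lock assignments are the arms of an elided conditional: HRRQ 0 shares the SCSI midlayer's host_lock, while every additional queue gets its private _lock so completion processing can proceed in parallel. Reconstructed:

for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
        INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
        INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
        spin_lock_init(&ioa_cfg->hrrq[i]._lock);
        if (i == 0) /* queue 0 shares the SCSI midlayer's host lock */
                ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
        else        /* the rest lock independently for parallelism */
                ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
}
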
9252 * @ioa_cfg: ioa config struct
9257 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9259 struct pci_dev *pdev = ioa_cfg->pdev;
9262 wait_event_timeout(ioa_cfg->eeh_wait_q,
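
The wait is conditional and bounded: the function only sleeps if EEH has actually taken the PCI channel offline, and it gives up after a timeout so bringdown cannot hang forever. A hedged sketch (the timeout constant name is per the driver; the pci_restore_state() call is an assumption about the recovery path):

#include <linux/pci.h>

static void example_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
{
        struct pci_dev *pdev = ioa_cfg->pdev;

        if (pci_channel_offline(pdev)) {
                wait_event_timeout(ioa_cfg->eeh_wait_q,
                                   !pci_channel_offline(pdev),
                                   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
                pci_restore_state(pdev); /* assumed: re-apply saved config space */
        }
}
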
9269 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9271 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9273 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9274 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9275 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9276 ioa_cfg->vectors_info[vec_idx].
9277 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9281 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
9286 for (i = 1; i < ioa_cfg->nvectors; i++) {
9290 ioa_cfg->vectors_info[i].desc,
9291 &ioa_cfg->hrrq[i]);
9295 &ioa_cfg->hrrq[i]);
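
The loop starts at vector 1 because vector 0 is requested separately in the probe path below; on a mid-loop failure, the final fragment above is the unwind that releases the vectors already claimed. A sketch (ipr_isr_mhrrq is the driver's handler for the extra queues):

static int example_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
                                          struct pci_dev *pdev)
{
        int i, rc;

        for (i = 1; i < ioa_cfg->nvectors; i++) {
                rc = request_irq(pci_irq_vector(pdev, i), ipr_isr_mhrrq, 0,
                                 ioa_cfg->vectors_info[i].desc,
                                 &ioa_cfg->hrrq[i]);
                if (rc) {
                        /* unwind: free the vectors already requested */
                        while (--i >= 1)
                                free_irq(pci_irq_vector(pdev, i),
                                         &ioa_cfg->hrrq[i]);
                        return rc;
                }
        }
        return 0;
}
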
9315 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9318 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9319 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9321 ioa_cfg->msi_received = 1;
9322 wake_up(&ioa_cfg->msi_wait_q);
9324 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9330 * @ioa_cfg: ioa config struct
9340 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9348 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9349 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9350 ioa_cfg->msi_received = 0;
9351 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9352 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9353 readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9354 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9356 rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9363 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9364 readl(ioa_cfg->regs.sense_interrupt_reg);
9365 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9366 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9367 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9369 if (!ioa_cfg->msi_received) {
9376 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9378 free_irq(irq, ioa_cfg);
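
ipr_test_msi() proves delivery before committing to MSI/MSI-X: it installs the throwaway ipr_test_intr handler above, pokes the IO debug acknowledge bit so the adapter raises exactly one interrupt, and gives the handler up to a second (HZ jiffies) to set msi_received. If the flag never flips, the driver falls back to legacy INTx. A condensed sketch (masking and error paths trimmed; the -EOPNOTSUPP fallback value is an assumption):

int irq = pci_irq_vector(pdev, 0);
int rc;

ioa_cfg->msi_received = 0;
rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
if (rc)
        return rc;

writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
       ioa_cfg->regs.sense_interrupt_reg32);  /* fire one interrupt */
readl(ioa_cfg->regs.sense_interrupt_reg);     /* flush the posted write */
wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);

free_irq(irq, ioa_cfg);
return ioa_cfg->msi_received ? 0 : -EOPNOTSUPP;
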
9395 struct ipr_ioa_cfg *ioa_cfg;
9407 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9415 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9416 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9418 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9420 if (!ioa_cfg->ipr_chip) {
9427 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9428 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9429 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9430 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9433 ioa_cfg->transop_timeout = ipr_transop_timeout;
9435 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9437 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9439 ioa_cfg->revid = pdev->revision;
9441 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9456 ipr_wait_for_pci_err_recovery(ioa_cfg);
9462 ipr_wait_for_pci_err_recovery(ioa_cfg);
9476 ioa_cfg->hdw_dma_regs = ipr_regs;
9477 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9478 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9480 ipr_init_regs(ioa_cfg);
9482 if (ioa_cfg->sis64) {
9498 ioa_cfg->chip_cfg->cache_line_size);
9502 ipr_wait_for_pci_err_recovery(ioa_cfg);
9508 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9509 ipr_wait_for_pci_err_recovery(ioa_cfg);
9518 if (ioa_cfg->ipr_chip->has_msi)
9522 ipr_wait_for_pci_err_recovery(ioa_cfg);
9525 ioa_cfg->nvectors = rc;
9528 ioa_cfg->clear_isr = 1;
9533 ipr_wait_for_pci_err_recovery(ioa_cfg);
9542 rc = ipr_test_msi(ioa_cfg, pdev);
9546 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
9550 ipr_wait_for_pci_err_recovery(ioa_cfg);
9553 ioa_cfg->nvectors = 1;
9554 ioa_cfg->clear_isr = 1;
9561 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9565 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9568 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9571 rc = ipr_alloc_mem(ioa_cfg);
9591 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9592 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9593 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9595 ioa_cfg->needs_hard_reset = 1;
9597 ioa_cfg->needs_hard_reset = 1;
9599 ioa_cfg->ioa_unit_checked = 1;
9601 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9602 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9603 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9606 name_msi_vectors(ioa_cfg);
9608 ioa_cfg->vectors_info[0].desc,
9609 &ioa_cfg->hrrq[0]);
9611 rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
9615 IPR_NAME, &ioa_cfg->hrrq[0]);
9624 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9625 ioa_cfg->needs_warm_reset = 1;
9626 ioa_cfg->reset = ipr_reset_slot_reset;
9628 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
9631 if (!ioa_cfg->reset_work_q) {
9637 ioa_cfg->reset = ipr_reset_start_bist;
9640 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9648 ipr_free_irqs(ioa_cfg);
9650 ipr_free_mem(ioa_cfg);
9652 ipr_wait_for_pci_err_recovery(ioa_cfg);
9667 * @ioa_cfg: ioa config struct
9679 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9683 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9684 ioa_cfg->sdt_state = ABORT_DUMP;
9685 ioa_cfg->reset_retries = 0;
9686 ioa_cfg->in_ioa_bringdown = 1;
9687 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9703 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9708 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9709 while (ioa_cfg->in_reset_reload) {
9710 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9711 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9712 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9715 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9716 spin_lock(&ioa_cfg->hrrq[i]._lock);
9717 ioa_cfg->hrrq[i].removing_ioa = 1;
9718 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9721 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9723 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9724 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9725 flush_work(&ioa_cfg->work_q);
9726 if (ioa_cfg->reset_work_q)
9727 flush_workqueue(ioa_cfg->reset_work_q);
9728 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9729 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9732 list_del(&ioa_cfg->queue);
9735 if (ioa_cfg->sdt_state == ABORT_DUMP)
9736 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9737 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9739 ipr_free_all_resources(ioa_cfg);
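
The while loop near the top of __ipr_remove() is the driver's standard pattern for waiting out an in-flight reset: in_reset_reload may only be tested under host_lock, but wait_event() sleeps, so the lock is dropped for the wait and retaken before re-testing. The same idiom reappears in ipr_shutdown() below:

/* Sketch of the drop-lock-and-wait idiom: */
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
while (ioa_cfg->in_reset_reload) {
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
}
/* host_lock held here, and no reset is in flight */
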
9755 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9759 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9761 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9763 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
9765 scsi_remove_host(ioa_cfg->host);
9782 struct ipr_ioa_cfg *ioa_cfg;
9791 ioa_cfg = pci_get_drvdata(pdev);
9792 ipr_probe_ioa_part2(ioa_cfg);
9794 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9801 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9805 scsi_remove_host(ioa_cfg->host);
9810 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
9814 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9816 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9818 scsi_remove_host(ioa_cfg->host);
9823 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9827 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
9829 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9831 scsi_remove_host(ioa_cfg->host);
9835 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9836 ioa_cfg->scan_enabled = 1;
9837 schedule_work(&ioa_cfg->work_q);
9838 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9840 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
9842 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9843 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
9844 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
9845 ioa_cfg->iopoll_weight, ipr_iopoll);
9849 scsi_scan_host(ioa_cfg->host);
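
On SIS64 adapters with multiple vectors, completion handling for queues 1..n is deferred through irq_poll rather than done entirely in hard-irq context; ipr_shutdown() below disables the same instances. A sketch of the irq_poll_fn shape consumed by irq_poll_init() (ipr_iopoll is the driver's real callback; the body here is illustrative):

#include <linux/irq_poll.h>

static int example_iopoll(struct irq_poll *iop, int budget)
{
        int completed = 0;

        /* ... reap up to 'budget' HRRQ completions here ... */

        if (completed < budget)
                irq_poll_complete(iop); /* done: let the vector fire again */
        return completed;
}
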
9866 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9871 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9872 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9873 ioa_cfg->iopoll_weight = 0;
9874 for (i = 1; i < ioa_cfg->hrrq_num; i++)
9875 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
9878 while (ioa_cfg->in_reset_reload) {
9879 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9880 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9881 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9884 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
9887 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
9888 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9889 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9890 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
9891 ipr_free_irqs(ioa_cfg);
9892 pci_disable_device(ioa_cfg->pdev);
10049 struct ipr_ioa_cfg *ioa_cfg;
10057 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10058 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10059 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10060 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10061 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10065 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10072 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
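
These final fragments are from ipr_halt(), the driver's reboot notifier: at shutdown it walks ipr_ioa_head and sends each live adapter an IOA shutdown command, skipping adapters that cannot accept commands and the SIS64 fast-reboot case. A hedged sketch of how such a callback is hooked up (the notifier_block layout is standard kernel API; ipr_halt is the callback named in the fragments):

#include <linux/notifier.h>
#include <linux/reboot.h>

static struct notifier_block ipr_notifier = {
        .notifier_call = ipr_halt,
};

/* register_reboot_notifier(&ipr_notifier) at module init,
 * unregister_reboot_notifier(&ipr_notifier) at module exit. */
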