Lines Matching refs:ioa_cfg
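The entries below are cross-reference hits for the ipr_ioa_cfg structure; by the function and register names this appears to be the IBM Power RAID (ipr) SCSI/SATA adapter driver, drivers/scsi/ipr.c. Each entry is the source line number followed by the matching line of code.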
590 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
593 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
594 trace_entry = &ioa_cfg->trace[trace_index];
598 if (ipr_cmd->ioa_cfg->sis64)
621 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
623 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
625 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
651 if (ipr_cmd->ioa_cfg->sis64) {
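The fragment at 621–625 shows the locking discipline used throughout the driver: every touch of shared ioa_cfg state from process context happens under the Scsi_Host lock with interrupts saved. A minimal sketch of the pattern (illustrative, not the verbatim kernel function):

    unsigned long lock_flags;

    /* Serialize against the interrupt path before touching ioa_cfg state. */
    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    /* ... inspect or update shared adapter state ... */
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);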
713 * @ioa_cfg: ioa config struct
719 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
722 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
729 * @ioa_cfg: ioa config struct
738 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
744 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
745 spin_lock(&ioa_cfg->hrrq[i]._lock);
746 ioa_cfg->hrrq[i].allow_interrupts = 0;
747 spin_unlock(&ioa_cfg->hrrq[i]._lock);
751 if (ioa_cfg->sis64)
752 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
754 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
757 if (ioa_cfg->sis64)
758 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
759 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
760 readl(ioa_cfg->regs.sense_interrupt_reg);
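ipr_mask_and_clear_interrupts() (738–760) first flips the per-queue allow_interrupts flags under each HRRQ lock, then masks and clears the hardware registers, with 64-bit (sis64) and 32-bit variants of the MMIO write. A condensed sketch; the trailing readl() forces the posted writes out to the adapter:

    int i;

    for (i = 0; i < ioa_cfg->hrrq_num; i++) {
            spin_lock(&ioa_cfg->hrrq[i]._lock);
            ioa_cfg->hrrq[i].allow_interrupts = 0;  /* stop servicing completions */
            spin_unlock(&ioa_cfg->hrrq[i]._lock);
    }

    if (ioa_cfg->sis64)
            writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);  /* 64-bit chips */
    else
            writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);  /* 32-bit chips */

    readl(ioa_cfg->regs.sense_interrupt_reg);   /* flush posted MMIO writes */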
765 * @ioa_cfg: ioa config struct
770 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
772 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
777 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
778 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
779 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
783 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
789 * @ioa_cfg: ioa config struct
794 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
796 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
799 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
800 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
801 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
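Both PCI-X helpers (770–801) share the probe-then-access shape: locate the capability, bail if absent, then read or write the PCI-X command word, treating anything other than PCIBIOS_SUCCESSFUL as fatal. Sketched for the save path, assuming the surrounding (elided) lines check for a missing capability:

    int pos = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
    u16 pcix_cmd;

    if (!pos)                       /* adapter is not PCI-X */
            return -EIO;

    if (pci_read_config_word(ioa_cfg->pdev, pos + PCI_X_CMD,
                             &pcix_cmd) != PCIBIOS_SUCCESSFUL)
            return -EIO;

    /* Enable data parity error recovery and relaxed ordering (cf. 783). */
    pcix_cmd |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;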
897 * @ioa_cfg: ioa config struct
904 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
910 for_each_hrrq(hrrq, ioa_cfg) {
949 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
952 if (ioa_cfg->sis64) {
960 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
962 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
1035 if (ipr_cmd->ioa_cfg->sis64) {
1072 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1077 spin_unlock_irq(ioa_cfg->host->host_lock);
1079 spin_lock_irq(ioa_cfg->host->host_lock);
1082 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1086 if (ioa_cfg->hrrq_num == 1)
1089 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1090 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
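ipr_get_hrrq_index() (1082–1090) is the submission-side queue selector: HRRQ 0 is kept for internal/initialization commands, and normal I/O round-robins across queues 1..hrrq_num-1 via an atomic counter. Restated as a self-contained sketch:

    static int hrrq_index_sketch(struct ipr_ioa_cfg *ioa_cfg)
    {
            int hrrq;

            if (ioa_cfg->hrrq_num == 1)
                    return 0;       /* single queue: everything on HRRQ 0 */

            /* Skip HRRQ 0; spread I/O over the remaining queues. */
            hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
            return (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
    }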
1097 * @ioa_cfg: ioa config struct
1108 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1114 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1115 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1117 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1141 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1183 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1195 if (ioa_cfg->sis64) {
1211 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1219 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1220 ioa_cfg->max_devs_supported);
1221 set_bit(res->target, ioa_cfg->target_ids);
1228 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1229 ioa_cfg->max_devs_supported);
1230 set_bit(res->target, ioa_cfg->array_ids);
1233 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1234 ioa_cfg->max_devs_supported);
1235 set_bit(res->target, ioa_cfg->vset_ids);
1237 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1238 ioa_cfg->max_devs_supported);
1239 set_bit(res->target, ioa_cfg->target_ids);
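On sis64 adapters the resource-setup code (1219–1239) hands out target IDs from per-class bitmaps (array_ids, vset_ids, target_ids); the clear_bit() calls at 1407–1417 release them. The find-then-claim pair is not atomic on its own and is presumably serialized by the host lock:

    /* Claim the lowest free ID for this device class (vset shown). */
    res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                      ioa_cfg->max_devs_supported);
    set_bit(res->target, ioa_cfg->vset_ids);

    /* ... later, on device removal ... */
    clear_bit(res->target, ioa_cfg->vset_ids);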
1270 if (res->ioa_cfg->sis64) {
1311 * @ioa_cfg: ioa config struct
1319 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1325 p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1345 if (res->ioa_cfg->sis64) {
1370 ipr_format_res_path(res->ioa_cfg,
1401 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1403 if (!ioa_cfg->sis64)
1407 clear_bit(res->target, ioa_cfg->array_ids);
1409 clear_bit(res->target, ioa_cfg->vset_ids);
1411 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1414 clear_bit(res->target, ioa_cfg->target_ids);
1417 clear_bit(res->target, ioa_cfg->target_ids);
1422 * @ioa_cfg: ioa config struct
1428 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1437 if (ioa_cfg->sis64) {
1445 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1453 if (list_empty(&ioa_cfg->free_res_q)) {
1454 ipr_send_hcam(ioa_cfg,
1460 res = list_entry(ioa_cfg->free_res_q.next,
1465 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1474 schedule_work(&ioa_cfg->work_q);
1477 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1481 schedule_work(&ioa_cfg->work_q);
1484 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
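ipr_handle_config_change() (1428–1484) moves resource entries between the free and used queues as hot-plug HCAMs arrive, re-arming the HCAM either way. The list discipline, sketched (struct name per the driver; the real code splits the move into list_del plus list_add_tail around re-initializing the entry):

    struct ipr_resource_entry *res;

    if (list_empty(&ioa_cfg->free_res_q)) {
            /* No spare entries: just re-arm the notification and bail. */
            ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
            return;
    }

    res = list_first_entry(&ioa_cfg->free_res_q,
                           struct ipr_resource_entry, queue);
    list_move_tail(&res->queue, &ioa_cfg->used_res_q);  /* device arrived */
    /* on removal: list_move_tail(&res->queue, &ioa_cfg->free_res_q); */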
1499 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1509 dev_err(&ioa_cfg->pdev->dev,
1512 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1514 ipr_handle_config_change(ioa_cfg, hostrcb);
1622 * @ioa_cfg: ioa config struct
1628 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1633 if (ioa_cfg->sis64)
1658 * @ioa_cfg: ioa config struct
1664 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1690 * @ioa_cfg: ioa config struct
1696 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1714 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1730 * @ioa_cfg: ioa config struct
1736 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1773 * @ioa_cfg: ioa config struct
1779 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1797 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1820 * @ioa_cfg: ioa config struct
1826 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1840 ioa_cfg->host->host_no,
1861 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1862 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1871 * @ioa_cfg: ioa config struct
1877 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1891 ioa_cfg->host->host_no,
1911 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1912 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1926 * @ioa_cfg: ioa config struct
1933 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1940 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1954 * @ioa_cfg: ioa config struct
1960 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1965 if (ioa_cfg->sis64)
1976 ipr_log_hex_data(ioa_cfg, error->data,
1984 * @ioa_cfg: ioa config struct
1990 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2002 ipr_log_hex_data(ioa_cfg, error->data,
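ipr_log_hex_data() (1933–1940) and the loggers built on it are gated by ioa_cfg->log_level, so raw dumps only appear when the administrator raises the level above the default. A sketch of the gate and dump loop; the driver's own ipr_err macro is shown here as printk, and len (in bytes) is assumed already clamped to a multiple of 16:

    int i;

    if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
            return;                         /* hex dumps are opt-in */

    for (i = 0; i < len / 4; i += 4)        /* four 32-bit words per row */
            printk(KERN_ERR "%08X: %08X %08X %08X %08X\n", i * 4,
                   be32_to_cpu(data[i]), be32_to_cpu(data[i + 1]),
                   be32_to_cpu(data[i + 2]), be32_to_cpu(data[i + 3]));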
2103 ipr_format_res_path(hostrcb->ioa_cfg,
2111 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2253 ipr_format_res_path(hostrcb->ioa_cfg,
2263 ipr_format_res_path(hostrcb->ioa_cfg,
2271 * @ioa_cfg: ioa config struct
2277 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2303 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2308 * @ioa_cfg: ioa config struct
2314 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2329 ipr_format_res_path(ioa_cfg, error->last_res_path,
2351 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2354 ipr_format_res_path(ioa_cfg,
2364 * @ioa_cfg: ioa config struct
2370 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2397 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2402 * @ioa_cfg: ioa config struct
2408 static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2417 ipr_log_hex_data(ioa_cfg, error->data,
2424 * @ioa_cfg: ioa config struct
2430 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2433 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2439 * @ioa_cfg: ioa config struct
2445 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2465 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2467 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2470 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2497 * @ioa_cfg: ioa config struct
2505 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2516 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2518 if (ioa_cfg->sis64)
2523 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2526 scsi_report_bus_reset(ioa_cfg->host,
2540 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2547 ioa_cfg->errors_logged++;
2549 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2556 ipr_log_cache_error(ioa_cfg, hostrcb);
2559 ipr_log_config_error(ioa_cfg, hostrcb);
2563 ipr_log_array_error(ioa_cfg, hostrcb);
2566 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2569 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2572 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2576 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2579 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2582 ipr_log_fabric_error(ioa_cfg, hostrcb);
2585 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2588 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2592 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2595 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2598 ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
2603 ipr_log_generic_error(ioa_cfg, hostrcb);
2638 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2643 if (ioa_cfg->sis64)
2652 ipr_handle_log_data(ioa_cfg, hostrcb);
2654 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2657 dev_err(&ioa_cfg->pdev->dev,
2661 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2662 schedule_work(&ioa_cfg->work_q);
2663 hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2665 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2682 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2685 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2687 ioa_cfg->errors_logged++;
2688 dev_err(&ioa_cfg->pdev->dev,
2691 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2692 ioa_cfg->sdt_state = GET_DUMP;
2694 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2695 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2697 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2715 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2718 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2720 ioa_cfg->errors_logged++;
2721 dev_err(&ioa_cfg->pdev->dev,
2724 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2725 ioa_cfg->sdt_state = GET_DUMP;
2727 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2729 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2730 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2733 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
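The two error-interrupt handlers above (2682–2733) share one recovery shape: count the error, promote any pending dump request, then kick an adapter reset unless one is already in flight (or this command is the reset itself). As a pattern:

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

    ioa_cfg->errors_logged++;
    dev_err(&ioa_cfg->pdev->dev, "adapter error\n");    /* placeholder message */

    if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
            ioa_cfg->sdt_state = GET_DUMP;      /* promote the pending dump */

    if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
            ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);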
2772 * @ioa_cfg: ioa config struct
2782 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2789 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2807 * @ioa_cfg: ioa config struct
2815 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2822 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2840 * @ioa_cfg: ioa config struct
2848 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2855 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2856 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2865 * @ioa_cfg: ioa config struct
2873 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2880 if (ioa_cfg->sis64)
2881 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2886 ioa_cfg->regs.set_uproc_interrupt_reg32);
2889 if (ipr_wait_iodbg_ack(ioa_cfg,
2891 dev_err(&ioa_cfg->pdev->dev,
2898 ioa_cfg->regs.clr_interrupt_reg);
2901 writel(start_addr, ioa_cfg->ioa_mailbox);
2905 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2909 if (ipr_wait_iodbg_ack(ioa_cfg,
2911 dev_err(&ioa_cfg->pdev->dev,
2917 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2924 ioa_cfg->regs.clr_interrupt_reg);
2930 ioa_cfg->regs.set_uproc_interrupt_reg32);
2933 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2937 ioa_cfg->regs.clr_interrupt_reg);
2942 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2957 * @ioa_cfg: ioa config struct
2966 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2973 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2975 if (ioa_cfg->sis64)
3001 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3002 if (ioa_cfg->sdt_state == ABORT_DUMP) {
3005 rc = ipr_get_ldump_data_section(ioa_cfg,
3010 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3042 * @ioa_cfg: ioa config struct
3048 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3051 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3059 driver_dump->ioa_type_entry.type = ioa_cfg->type;
3068 * @ioa_cfg: ioa config struct
3074 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3089 * @ioa_cfg: ioa config struct
3095 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3104 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3110 * @ioa_cfg: ioa config struct
3116 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3125 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3131 * @ioa_cfg: ioa config struct
3137 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3151 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3153 if (ioa_cfg->sdt_state != READ_DUMP) {
3154 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3158 if (ioa_cfg->sis64) {
3159 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3161 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3164 start_addr = readl(ioa_cfg->ioa_mailbox);
3166 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3167 dev_err(&ioa_cfg->pdev->dev,
3169 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3173 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3185 ipr_dump_version_data(ioa_cfg, driver_dump);
3186 ipr_dump_location_data(ioa_cfg, driver_dump);
3187 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3188 ipr_dump_trace_data(ioa_cfg, driver_dump);
3205 if (ioa_cfg->sis64) {
3215 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3221 dev_err(&ioa_cfg->pdev->dev,
3225 ioa_cfg->sdt_state = DUMP_OBTAINED;
3226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3237 if (ioa_cfg->sis64)
3242 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3252 if (ioa_cfg->sis64)
3270 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3283 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3288 ioa_cfg->sdt_state = DUMP_OBTAINED;
3293 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3306 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3311 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3312 ioa_cfg->dump = NULL;
3313 ioa_cfg->sdt_state = INACTIVE;
3314 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3329 struct ipr_ioa_cfg *ioa_cfg =
3335 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3340 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3341 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3345 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3351 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3354 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3357 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3364 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3370 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3371 scsi_add_device(ioa_cfg->host, bus, target, lun);
3372 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3377 ioa_cfg->scan_done = 1;
3378 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3379 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3398 struct ipr_ioa_cfg *ioa_cfg =
3402 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3404 if (ioa_cfg->sdt_state == READ_DUMP) {
3405 dump = ioa_cfg->dump;
3407 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3411 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3412 ipr_get_ioa_dump(ioa_cfg, dump);
3415 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3416 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3417 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3418 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3422 if (ioa_cfg->scsi_unblock) {
3423 ioa_cfg->scsi_unblock = 0;
3424 ioa_cfg->scsi_blocked = 0;
3425 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3426 scsi_unblock_requests(ioa_cfg->host);
3427 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3428 if (ioa_cfg->scsi_blocked)
3429 scsi_block_requests(ioa_cfg->host);
3432 if (!ioa_cfg->scan_enabled) {
3433 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3437 schedule_work(&ioa_cfg->scsi_add_work_q);
3439 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
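The worker-thread fragment at 3422–3429 defers scsi_unblock_requests() out of the locked region: the flag is consumed under the lock, the unblock runs unlocked, and a re-check catches a reset that re-blocked the host in the window. Annotated:

    if (ioa_cfg->scsi_unblock) {
            ioa_cfg->scsi_unblock = 0;
            ioa_cfg->scsi_blocked = 0;
            spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
            scsi_unblock_requests(ioa_cfg->host);   /* must not hold host_lock */
            spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
            if (ioa_cfg->scsi_blocked)              /* re-blocked while unlocked? */
                    scsi_block_requests(ioa_cfg->host);
    }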
3462 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3466 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3467 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3469 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3497 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3498 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3502 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3507 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3532 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3536 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3537 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3538 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3557 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3560 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3561 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3562 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3593 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3600 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3601 while (ioa_cfg->in_reset_reload) {
3602 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3603 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3604 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3607 ioa_cfg->errors_logged = 0;
3608 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3610 if (ioa_cfg->in_reset_reload) {
3611 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3612 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3617 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3621 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3622 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3624 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
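The reset-trigger path at 3600–3612 is the driver's standard drain-fire-wait idiom, recurring near-verbatim at 4035–4063, 5296–5315, and 6785–6830: wait out any in-flight reset (dropping the lock so it can finish), start the new one, then wait for that too. As a standalone pattern:

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    while (ioa_cfg->in_reset_reload) {
            /* Drop the lock so the running reset job can complete. */
            spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
            wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
            spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    }

    ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
    wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);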
3650 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3654 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3655 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3659 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3680 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3687 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3688 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3690 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3691 spin_lock(&ioa_cfg->hrrq[i]._lock);
3692 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3693 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3696 ioa_cfg->reset_retries = 0;
3697 ioa_cfg->in_ioa_bringdown = 0;
3698 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3700 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3701 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3732 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3739 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3740 if (!ioa_cfg->in_reset_reload)
3741 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3742 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3743 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3770 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3775 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3796 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3801 if (!ioa_cfg->sis64) {
3802 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3809 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
3813 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3814 dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");
3818 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3819 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3820 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3824 ioa_cfg->iopoll_weight = user_iopoll_weight;
3825 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3826 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3827 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3828 ioa_cfg->iopoll_weight, ipr_iopoll);
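The iopoll-weight store (3796–3828) only applies on sis64 adapters with multiple vectors: it disables the existing per-queue pollers, records the new weight, then re-creates them. irq_poll_init()/irq_poll_disable() are the stock lib/irq_poll interfaces. Condensed:

    if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1)
            for (i = 1; i < ioa_cfg->hrrq_num; i++)
                    irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); /* stop old pollers */

    ioa_cfg->iopoll_weight = user_iopoll_weight;

    if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1)
            for (i = 1; i < ioa_cfg->hrrq_num; i++)
                    irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
                                  ioa_cfg->iopoll_weight, ipr_iopoll);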
4022 * @ioa_cfg: ioa config struct
4030 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4035 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4036 while (ioa_cfg->in_reset_reload) {
4037 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4038 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4039 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4042 if (ioa_cfg->ucode_sglist) {
4043 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4044 dev_err(&ioa_cfg->pdev->dev,
4049 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4054 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4055 dev_err(&ioa_cfg->pdev->dev,
4060 ioa_cfg->ucode_sglist = sglist;
4061 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4062 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4063 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4065 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4066 ioa_cfg->ucode_sglist = NULL;
4067 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
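ipr_update_ioa_ucode() (4030–4067) publishes the microcode scatterlist in ioa_cfg->ucode_sglist, triggers a normal-shutdown reset (the download happens inside the reset job), and waits for completion. The mapping step it guards, sketched; dma_map_sg() returning 0 is the failure case tested at 4054:

    sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
                                    sglist->scatterlist,
                                    sglist->num_sg, DMA_TO_DEVICE);
    if (!sglist->num_dma_sg) {
            dev_err(&ioa_cfg->pdev->dev, "Failed to map microcode buffer\n");
            return -EIO;
    }

    ioa_cfg->ucode_sglist = sglist;     /* picked up by the reset job */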
4088 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4106 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4107 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4118 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4126 dev_err(&ioa_cfg->pdev->dev,
4133 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4164 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4168 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4169 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4170 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4188 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4193 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4194 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4197 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4202 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4212 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4216 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4217 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4220 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4225 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4271 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4281 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4282 dump = ioa_cfg->dump;
4284 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4285 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4289 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4315 if (ioa_cfg->sis64)
4356 * @ioa_cfg: ioa config struct
4361 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4374 if (ioa_cfg->sis64)
4390 dump->ioa_cfg = ioa_cfg;
4392 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4394 if (INACTIVE != ioa_cfg->sdt_state) {
4395 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4401 ioa_cfg->dump = dump;
4402 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4403 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4404 ioa_cfg->dump_taken = 1;
4405 schedule_work(&ioa_cfg->work_q);
4407 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4414 * @ioa_cfg: ioa config struct
4419 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4426 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4427 dump = ioa_cfg->dump;
4429 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4433 ioa_cfg->dump = NULL;
4434 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4460 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4467 rc = ipr_alloc_dump(ioa_cfg);
4469 rc = ipr_free_dump(ioa_cfg);
4489 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4502 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4506 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4511 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4529 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4534 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4538 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4563 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4569 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4571 if (res && ioa_cfg->sis64)
4576 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4579 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4603 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4608 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4610 if (res && ioa_cfg->sis64)
4615 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4639 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4644 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4650 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4675 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4680 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4686 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4705 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4710 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4723 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4790 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4793 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4818 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4824 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4829 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4834 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4836 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4837 sata_port->ioa_cfg = ioa_cfg;
4849 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4866 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4868 if (ioa_cfg->sis64) {
4871 clear_bit(starget->id, ioa_cfg->array_ids);
4873 clear_bit(starget->id, ioa_cfg->vset_ids);
4875 clear_bit(starget->id, ioa_cfg->target_ids);
4895 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4898 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4918 struct ipr_ioa_cfg *ioa_cfg;
4921 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4923 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4932 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4946 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4952 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4970 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4977 if (ioa_cfg->sis64)
4979 ipr_format_res_path(ioa_cfg,
4983 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5032 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5039 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5051 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5056 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5114 * @ioa_cfg: ioa config struct
5121 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5135 for_each_hrrq(hrrq, ioa_cfg) {
5138 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5155 for_each_hrrq(hrrq, ioa_cfg) {
5158 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5170 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5183 struct ipr_ioa_cfg *ioa_cfg;
5188 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5189 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5191 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5192 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5193 dev_err(&ioa_cfg->pdev->dev,
5196 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5197 ioa_cfg->sdt_state = GET_DUMP;
5200 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5201 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5202 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5206 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5211 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5218 * @ioa_cfg: ioa config struct
5230 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5240 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5244 if (ipr_cmd->ioa_cfg->sis64) {
5263 if (ipr_cmd->ioa_cfg->sis64)
5290 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5296 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5297 while (ioa_cfg->in_reset_reload) {
5298 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5299 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5300 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5305 rc = ipr_device_reset(ioa_cfg, res);
5307 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5309 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5311 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5312 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5313 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5315 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5318 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5338 struct ipr_ioa_cfg *ioa_cfg;
5345 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5353 if (ioa_cfg->in_reset_reload)
5355 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5358 for_each_hrrq(hrrq, ioa_cfg) {
5361 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5387 rc = ipr_device_reset(ioa_cfg, res);
5398 struct ipr_ioa_cfg *ioa_cfg;
5401 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5413 rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5415 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5432 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5436 if (!ioa_cfg->sis64)
5437 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5439 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5472 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5477 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5478 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5479 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5484 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5494 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5510 struct ipr_ioa_cfg *ioa_cfg;
5518 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5525 if (ioa_cfg->in_reset_reload ||
5526 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5536 readl(ioa_cfg->regs.sense_interrupt_reg);
5541 for_each_hrrq(hrrq, ioa_cfg) {
5544 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5545 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5557 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5597 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5601 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5603 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5620 struct ipr_ioa_cfg *ioa_cfg;
5624 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5631 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5638 * @ioa_cfg: ioa config struct
5644 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5650 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5657 if (ioa_cfg->sis64) {
5658 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5659 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5663 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5664 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5665 list_del(&ioa_cfg->reset_cmd->queue);
5666 del_timer(&ioa_cfg->reset_cmd->timer);
5667 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5677 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5678 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5680 list_del(&ioa_cfg->reset_cmd->queue);
5681 del_timer(&ioa_cfg->reset_cmd->timer);
5682 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5684 if (ioa_cfg->clear_isr) {
5686 dev_err(&ioa_cfg->pdev->dev,
5688 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5689 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5694 ioa_cfg->ioa_unit_checked = 1;
5696 dev_err(&ioa_cfg->pdev->dev,
5699 dev_err(&ioa_cfg->pdev->dev,
5702 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5703 ioa_cfg->sdt_state = GET_DUMP;
5705 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5706 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5714 * @ioa_cfg: ioa config struct
5721 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5723 ioa_cfg->errors_logged++;
5724 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5726 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5727 ioa_cfg->sdt_state = GET_DUMP;
5729 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5738 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5754 ipr_isr_eh(ioa_cfg,
5760 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5818 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5838 if (!ioa_cfg->clear_isr)
5845 ioa_cfg->regs.clr_interrupt_reg32);
5846 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5851 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5855 ipr_isr_eh(ioa_cfg,
5864 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5886 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5900 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5927 * @ioa_cfg: ioa config struct
5933 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5951 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5979 * @ioa_cfg: ioa config struct
5985 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
6002 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
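Both IOADL builders (5933–6002) open the same way: map the SCSI command's scatterlist and treat a negative return as failure. scsi_dma_map() returns the number of mapped segments (0 for no-data commands) or a negative value on failure. Shared preamble, sketched:

    int nseg = scsi_dma_map(scsi_cmd);

    if (nseg < 0) {
            dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
            return -1;
    }

    /* ...then walk scsi_sglist(scsi_cmd) filling ioadl/ioadl64 entries... */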
6116 if (ipr_cmd->ioa_cfg->sis64)
6219 * @ioa_cfg: ioa config struct
6230 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6246 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6254 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6266 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6269 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6271 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6393 if (ipr_cmd->ioa_cfg->sis64)
6406 * @ioa_cfg: ioa config struct
6415 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6431 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6470 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6524 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6541 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6543 ipr_erp_start(ioa_cfg, ipr_cmd);
6545 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6564 struct ipr_ioa_cfg *ioa_cfg;
6573 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6579 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6581 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6585 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6586 hrrq = &ioa_cfg->hrrq[hrrq_id];
6653 if (ioa_cfg->sis64)
6654 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6656 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6728 struct ipr_ioa_cfg *ioa_cfg;
6731 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6734 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6781 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6785 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6786 while (ioa_cfg->in_reset_reload) {
6787 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6788 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6789 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6792 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6795 rc = ipr_device_reset(ioa_cfg, res);
6807 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6821 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6826 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6827 while (ioa_cfg->in_reset_reload) {
6828 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6829 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6830 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6833 for_each_hrrq(hrrq, ioa_cfg) {
6837 ipr_device_reset(ioa_cfg, sata_port->res);
6843 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6884 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6891 if (ipr_cmd->ioa_cfg->sis64)
6897 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6900 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
7013 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7018 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
7019 hrrq = &ioa_cfg->hrrq[hrrq_id];
7056 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7080 if (ioa_cfg->sis64) {
7098 if (ioa_cfg->sis64)
7202 * @ioa_cfg: ioa cfg struct
7211 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7215 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7224 #define ipr_invalid_adapter(ioa_cfg) 0
7239 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7243 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7245 ioa_cfg->scsi_unblock = 1;
7246 schedule_work(&ioa_cfg->work_q);
7249 ioa_cfg->in_reset_reload = 0;
7250 ioa_cfg->reset_retries = 0;
7251 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7252 spin_lock(&ioa_cfg->hrrq[i]._lock);
7253 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7254 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7259 wake_up_all(&ioa_cfg->reset_wait_q);
7278 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7283 ioa_cfg->in_reset_reload = 0;
7284 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7285 spin_lock(&ioa_cfg->hrrq[j]._lock);
7286 ioa_cfg->hrrq[j].allow_cmds = 1;
7287 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7290 ioa_cfg->reset_cmd = NULL;
7291 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7293 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7299 schedule_work(&ioa_cfg->work_q);
7302 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7304 ipr_send_hcam(ioa_cfg,
7306 ioa_cfg->hostrcb[j]);
7308 ipr_send_hcam(ioa_cfg,
7310 ioa_cfg->hostrcb[j]);
7313 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7314 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7316 ioa_cfg->reset_retries = 0;
7318 wake_up_all(&ioa_cfg->reset_wait_q);
7320 ioa_cfg->scsi_unblock = 1;
7321 schedule_work(&ioa_cfg->work_q);
7356 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7357 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7363 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7380 ioa_cfg->vpd_cbs_dma +
7388 if (!ioa_cfg->sis64)
7439 * @ioa_cfg: ioa config struct
7447 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7464 dev_err(&ioa_cfg->pdev->dev,
7475 * @ioa_cfg: ioa config struct
7484 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7490 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7491 ioa_cfg->bus_attr[i].bus_width);
7493 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7494 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7500 * @ioa_cfg: ioa config struct
7508 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7526 dev_err(&ioa_cfg->pdev->dev,
7532 bus_attr = &ioa_cfg->bus_attr[i];
7583 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7584 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7588 ipr_scsi_bus_speed_limit(ioa_cfg);
7589 ipr_check_term_power(ioa_cfg, mode_pages);
7590 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7595 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7599 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7644 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7647 dev_err(&ioa_cfg->pdev->dev,
7651 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7668 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7673 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7693 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7697 0x28, ioa_cfg->vpd_cbs_dma +
7721 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7722 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7737 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7781 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7785 0x24, ioa_cfg->vpd_cbs_dma +
7812 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7819 if (ioa_cfg->sis64)
7820 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7822 flag = ioa_cfg->u.cfg_table->hdr.flags;
7825 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7827 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7830 if (ioa_cfg->sis64)
7831 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7833 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7836 if (ioa_cfg->sis64)
7837 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7839 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7844 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7851 if (list_empty(&ioa_cfg->free_res_q)) {
7852 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7857 res = list_entry(ioa_cfg->free_res_q.next,
7859 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7873 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7879 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7882 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7903 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7905 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7906 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7910 ioa_cfg->dual_raid = 1;
7911 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7918 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7919 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7920 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7922 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7965 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7966 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8057 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8058 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8059 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8067 (ioa_cfg->vpd_cbs_dma
8090 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8091 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8092 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8100 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8121 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8128 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8147 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8153 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8155 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8157 if (ipr_invalid_adapter(ioa_cfg)) {
8158 dev_err(&ioa_cfg->pdev->dev,
8162 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8163 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8165 &ioa_cfg->hrrq->hrrq_free_q);
8173 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8191 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8197 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8216 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8222 if (ioa_cfg->identify_hrrq_index == 0)
8223 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8225 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8226 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8232 if (ioa_cfg->sis64)
8235 if (ioa_cfg->nvectors == 1)
8255 ioa_cfg->identify_hrrq_index;
8257 if (ioa_cfg->sis64) {
8270 ioa_cfg->identify_hrrq_index;
8275 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8302 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8305 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8307 if (ioa_cfg->reset_cmd == ipr_cmd) {
8312 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8343 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8344 * @ioa_cfg: ioa cfg struct
8349 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8353 for_each_hrrq(hrrq, ioa_cfg) {
8366 ioa_cfg->identify_hrrq_index = 0;
8367 if (ioa_cfg->hrrq_num == 1)
8368 atomic_set(&ioa_cfg->hrrq_index, 0);
8370 atomic_set(&ioa_cfg->hrrq_index, 1);
8373 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8388 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8391 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8406 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8407 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8408 stage_time = ioa_cfg->transop_timeout;
8411 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8416 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8417 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8444 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8451 ipr_init_ioa_mem(ioa_cfg);
8453 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8454 spin_lock(&ioa_cfg->hrrq[i]._lock);
8455 ioa_cfg->hrrq[i].allow_interrupts = 1;
8456 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8458 if (ioa_cfg->sis64) {
8460 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8461 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8464 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8468 ioa_cfg->regs.clr_interrupt_mask_reg32);
8469 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8474 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8476 if (ioa_cfg->sis64) {
8479 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8481 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8483 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8485 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8487 if (ioa_cfg->sis64) {
8492 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8514 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8516 if (ioa_cfg->sdt_state == GET_DUMP)
8517 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8518 else if (ioa_cfg->sdt_state == READ_DUMP)
8519 ioa_cfg->sdt_state = ABORT_DUMP;
8521 ioa_cfg->dump_timeout = 1;
8529 * @ioa_cfg: ioa config struct
8537 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8539 ioa_cfg->errors_logged++;
8540 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8545 * @ioa_cfg: ioa config struct
8553 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8561 mailbox = readl(ioa_cfg->ioa_mailbox);
8563 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8564 ipr_unit_check_no_data(ioa_cfg);
8569 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8575 ipr_unit_check_no_data(ioa_cfg);
8587 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8592 rc = ipr_get_ldump_data_section(ioa_cfg,
8598 ipr_handle_log_data(ioa_cfg, hostrcb);
8601 ioa_cfg->sdt_state == GET_DUMP)
8602 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8604 ipr_unit_check_no_data(ioa_cfg);
8606 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8620 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8623 ioa_cfg->ioa_unit_checked = 0;
8624 ipr_get_unit_check_buffer(ioa_cfg);
8634 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8638 if (ioa_cfg->sdt_state != GET_DUMP)
8641 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8642 (readl(ioa_cfg->regs.sense_interrupt_reg) &
8646 dev_err(&ioa_cfg->pdev->dev,
8649 ioa_cfg->sdt_state = READ_DUMP;
8650 ioa_cfg->dump_timeout = 0;
8651 if (ioa_cfg->sis64)
8656 schedule_work(&ioa_cfg->work_q);
8681 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8684 ioa_cfg->pdev->state_saved = true;
8685 pci_restore_state(ioa_cfg->pdev);
8687 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8692 ipr_fail_all_ops(ioa_cfg);
8694 if (ioa_cfg->sis64) {
8696 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8697 readl(ioa_cfg->regs.endian_swap_reg);
8700 if (ioa_cfg->ioa_unit_checked) {
8701 if (ioa_cfg->sis64) {
8706 ioa_cfg->ioa_unit_checked = 0;
8707 ipr_get_unit_check_buffer(ioa_cfg);
8714 if (ioa_cfg->in_ioa_bringdown) {
8716 } else if (ioa_cfg->sdt_state == GET_DUMP) {
8738 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8741 if (ioa_cfg->cfg_locked)
8742 pci_cfg_access_unlock(ioa_cfg->pdev);
8743 ioa_cfg->cfg_locked = 0;
8760 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8764 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8766 ioa_cfg->regs.set_uproc_interrupt_reg32);
8768 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8775 if (ioa_cfg->cfg_locked)
8776 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8777 ioa_cfg->cfg_locked = 0;
8814 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8815 struct pci_dev *pdev = ioa_cfg->pdev;
8823 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8824 if (ioa_cfg->reset_cmd == ipr_cmd)
8826 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8841 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8845 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8862 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8865 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8866 ioa_cfg->cfg_locked = 1;
8867 ipr_cmd->job_step = ioa_cfg->reset;
8875 ipr_cmd->job_step = ioa_cfg->reset;
8876 dev_err(&ioa_cfg->pdev->dev,
8895 ipr_cmd->ioa_cfg->cfg_locked = 0;
8903 * @ioa_cfg: ioa config struct
8908 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8912 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8933 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8936 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8961 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8966 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8969 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8970 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8994 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8998 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9015 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9024 for_each_hrrq(hrrq, ioa_cfg) {
9028 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9054 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9058 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9064 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9110 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9111 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9113 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9132 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9133 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9149 if (ioa_cfg->sis64)
9175 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9184 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9194 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9222 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9227 if (ioa_cfg->reset_cmd != ipr_cmd) {
9251 * @ioa_cfg: ioa config struct
9263 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9270 ioa_cfg->in_reset_reload = 1;
9271 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9272 spin_lock(&ioa_cfg->hrrq[i]._lock);
9273 ioa_cfg->hrrq[i].allow_cmds = 0;
9274 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9277 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9278 ioa_cfg->scsi_unblock = 0;
9279 ioa_cfg->scsi_blocked = 1;
9280 scsi_block_requests(ioa_cfg->host);
9283 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9284 ioa_cfg->reset_cmd = ipr_cmd;
9293 * @ioa_cfg: ioa config struct
9303 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9308 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9311 if (ioa_cfg->in_reset_reload) {
9312 if (ioa_cfg->sdt_state == GET_DUMP)
9313 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9314 else if (ioa_cfg->sdt_state == READ_DUMP)
9315 ioa_cfg->sdt_state = ABORT_DUMP;
9318 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9319 dev_err(&ioa_cfg->pdev->dev,
9322 ioa_cfg->reset_retries = 0;
9323 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9324 spin_lock(&ioa_cfg->hrrq[i]._lock);
9325 ioa_cfg->hrrq[i].ioa_is_dead = 1;
9326 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9330 if (ioa_cfg->in_ioa_bringdown) {
9331 ioa_cfg->reset_cmd = NULL;
9332 ioa_cfg->in_reset_reload = 0;
9333 ipr_fail_all_ops(ioa_cfg);
9334 wake_up_all(&ioa_cfg->reset_wait_q);
9336 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9337 ioa_cfg->scsi_unblock = 1;
9338 schedule_work(&ioa_cfg->work_q);
9342 ioa_cfg->in_ioa_bringdown = 1;
9347 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9361 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9365 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9366 spin_lock(&ioa_cfg->hrrq[i]._lock);
9367 ioa_cfg->hrrq[i].allow_interrupts = 0;
9368 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9386 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9388 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9389 if (!ioa_cfg->probe_done)
9391 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9406 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9408 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9409 if (ioa_cfg->probe_done)
9410 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9411 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9425 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9427 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9428 if (ioa_cfg->probe_done) {
9429 if (ioa_cfg->needs_warm_reset)
9430 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9432 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9435 wake_up_all(&ioa_cfg->eeh_wait_q);
9436 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9450 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9453 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9454 if (ioa_cfg->probe_done) {
9455 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9456 ioa_cfg->sdt_state = ABORT_DUMP;
9457 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9458 ioa_cfg->in_ioa_bringdown = 1;
9459 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9460 spin_lock(&ioa_cfg->hrrq[i]._lock);
9461 ioa_cfg->hrrq[i].allow_cmds = 0;
9462 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9465 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9467 wake_up_all(&ioa_cfg->eeh_wait_q);
9468 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9500 * @ioa_cfg: ioa cfg struct
9508 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9514 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9515 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9516 ioa_cfg->probe_done = 1;
9517 if (ioa_cfg->needs_hard_reset) {
9518 ioa_cfg->needs_hard_reset = 0;
9519 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9521 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9523 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9531 * @ioa_cfg: ioa config struct
9536 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9540 if (ioa_cfg->ipr_cmnd_list) {
9542 if (ioa_cfg->ipr_cmnd_list[i])
9543 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9544 ioa_cfg->ipr_cmnd_list[i],
9545 ioa_cfg->ipr_cmnd_list_dma[i]);
9547 ioa_cfg->ipr_cmnd_list[i] = NULL;
9551 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9553 kfree(ioa_cfg->ipr_cmnd_list);
9554 kfree(ioa_cfg->ipr_cmnd_list_dma);
9555 ioa_cfg->ipr_cmnd_list = NULL;
9556 ioa_cfg->ipr_cmnd_list_dma = NULL;
9557 ioa_cfg->ipr_cmd_pool = NULL;
9562 * @ioa_cfg: ioa cfg struct
9567 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9571 kfree(ioa_cfg->res_entries);
9572 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9573 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9574 ipr_free_cmd_blks(ioa_cfg);
9576 for (i = 0; i < ioa_cfg->hrrq_num; i++)
9577 dma_free_coherent(&ioa_cfg->pdev->dev,
9578 sizeof(u32) * ioa_cfg->hrrq[i].size,
9579 ioa_cfg->hrrq[i].host_rrq,
9580 ioa_cfg->hrrq[i].host_rrq_dma);
9582 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9583 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9586 dma_free_coherent(&ioa_cfg->pdev->dev,
9588 ioa_cfg->hostrcb[i],
9589 ioa_cfg->hostrcb_dma[i]);
9592 ipr_free_dump(ioa_cfg);
9593 kfree(ioa_cfg->trace);
9598 * @ioa_cfg: ioa config struct
9606 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9608 struct pci_dev *pdev = ioa_cfg->pdev;
9611 for (i = 0; i < ioa_cfg->nvectors; i++)
9612 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
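ipr_free_irqs undoes one request_irq() per vector handed out by pci_alloc_irq_vectors(); pci_irq_vector() maps a vector index back to a Linux IRQ number, and the dev_id (&ioa_cfg->hrrq[i]) must match the one passed at request time. A sketch of the paired setup/unwind, with hypothetical handler and queue names:

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    #define MAX_Q 16
    static struct my_q { int id; } my_queues[MAX_Q];    /* hypothetical */

    static irqreturn_t my_isr(int irq, void *dev_id)
    {
            return IRQ_HANDLED;             /* stub */
    }

    static int setup_vectors(struct pci_dev *pdev, int want)
    {
            int i, rc, n;

            n = pci_alloc_irq_vectors(pdev, 1, want,
                                      PCI_IRQ_MSIX | PCI_IRQ_MSI);
            if (n < 0)
                    return n;
            if (n > MAX_Q)
                    n = MAX_Q;

            for (i = 0; i < n; i++) {
                    rc = request_irq(pci_irq_vector(pdev, i), my_isr, 0,
                                     "my-drv", &my_queues[i]);
                    if (rc)
                            goto unwind;
            }
            return n;

    unwind:                                 /* free only what succeeded */
            while (--i >= 0)
                    free_irq(pci_irq_vector(pdev, i), &my_queues[i]);
            pci_free_irq_vectors(pdev);
            return rc;
    }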
9618 * @ioa_cfg: ioa config struct
9626 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9628 struct pci_dev *pdev = ioa_cfg->pdev;
9631 ipr_free_irqs(ioa_cfg);
9632 if (ioa_cfg->reset_work_q)
9633 destroy_workqueue(ioa_cfg->reset_work_q);
9634 iounmap(ioa_cfg->hdw_dma_regs);
9636 ipr_free_mem(ioa_cfg);
9637 scsi_host_put(ioa_cfg->host);
9644 * @ioa_cfg: ioa config struct
9649 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9656 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9659 if (!ioa_cfg->ipr_cmd_pool)
9662 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9663 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9665 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9666 ipr_free_cmd_blks(ioa_cfg);
9670 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9671 if (ioa_cfg->hrrq_num > 1) {
9674 ioa_cfg->hrrq[i].min_cmd_id = 0;
9675 ioa_cfg->hrrq[i].max_cmd_id =
9680 (ioa_cfg->hrrq_num - 1);
9681 ioa_cfg->hrrq[i].min_cmd_id =
9684 ioa_cfg->hrrq[i].max_cmd_id =
9690 ioa_cfg->hrrq[i].min_cmd_id = 0;
9691 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9693 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9696 BUG_ON(ioa_cfg->hrrq_num == 0);
9699 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9701 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9702 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9706 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9710 ipr_free_cmd_blks(ioa_cfg);
9714 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9715 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9719 if (ioa_cfg->sis64)
9725 if (ioa_cfg->sis64) {
9739 ipr_cmd->ioa_cfg = ioa_cfg;
9744 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9746 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
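ipr_alloc_cmd_blks carves every command block out of a single DMA pool, so each block comes back with both a kernel pointer and the bus address the adapter will use. A minimal sketch of that dma_pool lifecycle, with hypothetical size and alignment:

    #include <linux/dmapool.h>

    #define BLK_SZ    512                   /* hypothetical */
    #define BLK_ALIGN 512

    static int alloc_one_blk(struct device *dev)
    {
            struct dma_pool *pool;
            dma_addr_t dma;
            void *blk;

            pool = dma_pool_create("my-cmds", dev, BLK_SZ, BLK_ALIGN, 0);
            if (!pool)
                    return -ENOMEM;

            blk = dma_pool_zalloc(pool, GFP_KERNEL, &dma);  /* zeroed */
            if (!blk) {
                    dma_pool_destroy(pool);
                    return -ENOMEM;
            }

            /* hand 'dma' to the hardware, keep 'blk' for the CPU side */

            dma_pool_free(pool, blk, dma);
            dma_pool_destroy(pool);
            return 0;
    }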
9755 * @ioa_cfg: ioa config struct
9760 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9762 struct pci_dev *pdev = ioa_cfg->pdev;
9766 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9770 if (!ioa_cfg->res_entries)
9773 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9774 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9775 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9778 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9780 &ioa_cfg->vpd_cbs_dma,
9783 if (!ioa_cfg->vpd_cbs)
9786 if (ipr_alloc_cmd_blks(ioa_cfg))
9789 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9790 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9791 sizeof(u32) * ioa_cfg->hrrq[i].size,
9792 &ioa_cfg->hrrq[i].host_rrq_dma,
9795 if (!ioa_cfg->hrrq[i].host_rrq) {
9798 sizeof(u32) * ioa_cfg->hrrq[i].size,
9799 ioa_cfg->hrrq[i].host_rrq,
9800 ioa_cfg->hrrq[i].host_rrq_dma);
9803 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9806 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9807 ioa_cfg->cfg_table_size,
9808 &ioa_cfg->cfg_table_dma,
9811 if (!ioa_cfg->u.cfg_table)
9815 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9817 &ioa_cfg->hostrcb_dma[i],
9820 if (!ioa_cfg->hostrcb[i])
9823 ioa_cfg->hostrcb[i]->hostrcb_dma =
9824 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9825 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9826 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9829 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9833 if (!ioa_cfg->trace)
9844 ioa_cfg->hostrcb[i],
9845 ioa_cfg->hostrcb_dma[i]);
9847 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9848 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9850 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9852 sizeof(u32) * ioa_cfg->hrrq[i].size,
9853 ioa_cfg->hrrq[i].host_rrq,
9854 ioa_cfg->hrrq[i].host_rrq_dma);
9857 ipr_free_cmd_blks(ioa_cfg);
9860 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9862 kfree(ioa_cfg->res_entries);
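The error path of ipr_alloc_mem is the classic goto unwind ladder: each allocation gets a cleanup label, and any failure jumps to the label that releases everything allocated so far, in reverse order. Condensed to three hypothetical allocations:

    #include <linux/slab.h>

    static int layered_alloc(void **a, void **b, void **c)
    {
            *a = kzalloc(64, GFP_KERNEL);
            if (!*a)
                    goto out;
            *b = kzalloc(64, GFP_KERNEL);
            if (!*b)
                    goto out_free_a;
            *c = kzalloc(64, GFP_KERNEL);
            if (!*c)
                    goto out_free_b;
            return 0;

    out_free_b:                             /* reverse allocation order */
            kfree(*b);
    out_free_a:
            kfree(*a);
    out:
            return -ENOMEM;
    }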
9868 * @ioa_cfg: ioa config struct
9873 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9878 ioa_cfg->bus_attr[i].bus = i;
9879 ioa_cfg->bus_attr[i].qas_enabled = 0;
9880 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9882 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9884 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9890 * @ioa_cfg: ioa config struct
9895 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9901 p = &ioa_cfg->chip_cfg->regs;
9902 t = &ioa_cfg->regs;
9903 base = ioa_cfg->hdw_dma_regs;
9922 if (ioa_cfg->sis64) {
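ipr_init_regs resolves a per-chip table of register offsets (ioa_cfg->chip_cfg->regs) into usable __iomem pointers by adding each offset to the ioremapped base. The idea in miniature, with a hypothetical two-register table:

    #include <linux/io.h>

    struct my_reg_offsets {                 /* hypothetical per-chip table */
            unsigned long set_mask;
            unsigned long sense_irq;
    };

    struct my_regs {                        /* resolved pointers */
            void __iomem *set_mask;
            void __iomem *sense_irq;
    };

    static void resolve_regs(struct my_regs *t,
                             const struct my_reg_offsets *p,
                             void __iomem *base)
    {
            t->set_mask  = base + p->set_mask;   /* offset -> pointer */
            t->sense_irq = base + p->sense_irq;
    }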
9932 * @ioa_cfg: ioa config struct
9939 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9944 ioa_cfg->host = host;
9945 ioa_cfg->pdev = pdev;
9946 ioa_cfg->log_level = ipr_log_level;
9947 ioa_cfg->doorbell = IPR_DOORBELL;
9948 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9949 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9950 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9951 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9952 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9953 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9955 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9956 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9957 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9958 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9959 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9960 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9961 INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9962 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9963 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9964 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9965 ioa_cfg->sdt_state = INACTIVE;
9967 ipr_initialize_bus_attr(ioa_cfg);
9968 ioa_cfg->max_devs_supported = ipr_max_devs;
9970 if (ioa_cfg->sis64) {
9975 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9976 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9978 * ioa_cfg->max_devs_supported)));
9984 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9985 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9987 * ioa_cfg->max_devs_supported)));
9992 host->can_queue = ioa_cfg->max_cmds;
9993 pci_set_drvdata(pdev, ioa_cfg);
9995 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9996 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9997 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9998 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
10000 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
10002 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
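The tail of ipr_init_ioa_cfg gives each hrrq a lock pointer: queue 0 shares the SCSI host lock, the others get their own _lock, and hot-path code simply takes hrrq->lock without caring which. A sketch of the indirection with a hypothetical queue struct:

    #include <linux/spinlock.h>

    struct my_q {                           /* hypothetical hrrq stand-in */
            spinlock_t _lock;
            spinlock_t *lock;               /* what the hot path takes */
    };

    static void init_queues(struct my_q *q, int n, spinlock_t *shared)
    {
            int i;

            for (i = 0; i < n; i++) {
                    spin_lock_init(&q[i]._lock);
                    /* queue 0 serializes against the shared host lock */
                    q[i].lock = (i == 0) ? shared : &q[i]._lock;
            }
    }
    /* hot path: spin_lock_irqsave(q->lock, flags); ... */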
10028 * @ioa_cfg: ioa config struct
10033 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10035 struct pci_dev *pdev = ioa_cfg->pdev;
10038 wait_event_timeout(ioa_cfg->eeh_wait_q,
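ipr_wait_for_pci_err_recovery parks the caller on eeh_wait_q until the PCI error handlers above wake_up_all() it, or a timeout expires. The generic shape of that handshake, with hypothetical names:

    #include <linux/wait.h>
    #include <linux/jiffies.h>

    static DECLARE_WAIT_QUEUE_HEAD(recovery_wq);    /* hypothetical */
    static bool recovered;

    /* waiter: nonzero return means the condition came true in time */
    static long wait_for_recovery(void)
    {
            return wait_event_timeout(recovery_wq, recovered, 30 * HZ);
    }

    /* error-handler side, once recovery is done: */
    static void signal_recovery(void)
    {
            recovered = true;
            wake_up_all(&recovery_wq);
    }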
10045 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10047 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10049 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10050 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10051 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10052 ioa_cfg->vectors_info[vec_idx].
10053 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
10057 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10062 for (i = 1; i < ioa_cfg->nvectors; i++) {
10066 ioa_cfg->vectors_info[i].desc,
10067 &ioa_cfg->hrrq[i]);
10071 &ioa_cfg->hrrq[i]);
10091 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10095 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
10096 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10098 ioa_cfg->msi_received = 1;
10099 wake_up(&ioa_cfg->msi_wait_q);
10101 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10107 * @ioa_cfg: ioa config struct
10117 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10125 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10126 init_waitqueue_head(&ioa_cfg->msi_wait_q);
10127 ioa_cfg->msi_received = 0;
10128 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10129 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
10130 readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10131 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10133 rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10140 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10141 readl(ioa_cfg->regs.sense_interrupt_reg);
10142 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10143 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10144 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10146 if (!ioa_cfg->msi_received) {
10153 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10155 free_irq(irq, ioa_cfg);
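ipr_test_msi is a delivery self-test: install a throwaway handler, unmask only the IO debug-acknowledge interrupt, poke the adapter so it fires, then wait_event_timeout() on a flag the handler sets; if the flag never appears, MSI is assumed broken and the driver falls back. The skeleton of that pattern (register poke and names hypothetical):

    #include <linux/interrupt.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(test_wq);        /* hypothetical */
    static int test_irq_seen;

    static void trigger_test_interrupt(void)
    {
            /* hardware-specific poke, omitted */
    }

    static irqreturn_t test_isr(int irq, void *dev_id)
    {
            test_irq_seen = 1;
            wake_up(&test_wq);
            return IRQ_HANDLED;
    }

    /* returns 0 if the interrupt arrived, -EOPNOTSUPP if it never did */
    static int self_test_irq(int irq)
    {
            int rc = request_irq(irq, test_isr, 0, "irq-test",
                                 &test_irq_seen);

            if (rc)
                    return rc;
            trigger_test_interrupt();
            wait_event_timeout(test_wq, test_irq_seen, HZ);
            free_irq(irq, &test_irq_seen);
            return test_irq_seen ? 0 : -EOPNOTSUPP;
    }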
10172 struct ipr_ioa_cfg *ioa_cfg;
10184 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10192 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10193 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10194 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10196 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10198 if (!ioa_cfg->ipr_chip) {
10205 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10206 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10207 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10208 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10211 ioa_cfg->transop_timeout = ipr_transop_timeout;
10213 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10215 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10217 ioa_cfg->revid = pdev->revision;
10219 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10234 ipr_wait_for_pci_err_recovery(ioa_cfg);
10240 ipr_wait_for_pci_err_recovery(ioa_cfg);
10254 ioa_cfg->hdw_dma_regs = ipr_regs;
10255 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10256 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10258 ipr_init_regs(ioa_cfg);
10260 if (ioa_cfg->sis64) {
10276 ioa_cfg->chip_cfg->cache_line_size);
10280 ipr_wait_for_pci_err_recovery(ioa_cfg);
10286 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10287 ipr_wait_for_pci_err_recovery(ioa_cfg);
10296 if (ioa_cfg->ipr_chip->has_msi)
10300 ipr_wait_for_pci_err_recovery(ioa_cfg);
10303 ioa_cfg->nvectors = rc;
10306 ioa_cfg->clear_isr = 1;
10311 ipr_wait_for_pci_err_recovery(ioa_cfg);
10320 rc = ipr_test_msi(ioa_cfg, pdev);
10324 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
10328 ipr_wait_for_pci_err_recovery(ioa_cfg);
10331 ioa_cfg->nvectors = 1;
10332 ioa_cfg->clear_isr = 1;
10339 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10343 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10346 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10349 rc = ipr_alloc_mem(ioa_cfg);
10369 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10370 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10371 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10373 ioa_cfg->needs_hard_reset = 1;
10375 ioa_cfg->needs_hard_reset = 1;
10377 ioa_cfg->ioa_unit_checked = 1;
10379 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10380 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10381 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10384 name_msi_vectors(ioa_cfg);
10386 ioa_cfg->vectors_info[0].desc,
10387 &ioa_cfg->hrrq[0]);
10389 rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10393 IPR_NAME, &ioa_cfg->hrrq[0]);
10402 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10403 ioa_cfg->needs_warm_reset = 1;
10404 ioa_cfg->reset = ipr_reset_slot_reset;
10406 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10409 if (!ioa_cfg->reset_work_q) {
10415 ioa_cfg->reset = ipr_reset_start_bist;
10418 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10426 ipr_free_irqs(ioa_cfg);
10428 ipr_free_mem(ioa_cfg);
10430 ipr_wait_for_pci_err_recovery(ioa_cfg);
10445 * @ioa_cfg: ioa config struct
10457 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10461 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10462 ioa_cfg->sdt_state = ABORT_DUMP;
10463 ioa_cfg->reset_retries = 0;
10464 ioa_cfg->in_ioa_bringdown = 1;
10465 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10481 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10486 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10487 while (ioa_cfg->in_reset_reload) {
10488 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10489 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10490 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10493 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10494 spin_lock(&ioa_cfg->hrrq[i]._lock);
10495 ioa_cfg->hrrq[i].removing_ioa = 1;
10496 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10499 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10501 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10502 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10503 flush_work(&ioa_cfg->work_q);
10504 if (ioa_cfg->reset_work_q)
10505 flush_workqueue(ioa_cfg->reset_work_q);
10506 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10507 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10510 list_del(&ioa_cfg->queue);
10513 if (ioa_cfg->sdt_state == ABORT_DUMP)
10514 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10515 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10517 ipr_free_all_resources(ioa_cfg);
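__ipr_remove opens with the drop-lock-and-wait idiom: wait_event() may sleep, so the loop releases host_lock, sleeps until in_reset_reload clears, then retakes the lock before re-checking. The pattern in isolation, with hypothetical state:

    #include <linux/spinlock.h>
    #include <linux/wait.h>

    static DEFINE_SPINLOCK(state_lock);             /* hypothetical */
    static DECLARE_WAIT_QUEUE_HEAD(idle_wq);
    static int busy;        /* waker clears this, then wake_up(&idle_wq) */

    /* enter with state_lock held; return with it held and !busy */
    static void wait_until_idle_locked(unsigned long *flags)
    {
            while (busy) {
                    spin_unlock_irqrestore(&state_lock, *flags);
                    wait_event(idle_wq, !busy);     /* sleeps: lock dropped */
                    spin_lock_irqsave(&state_lock, *flags);
            }
    }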
10533 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10537 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10539 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10541 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10543 scsi_remove_host(ioa_cfg->host);
10560 struct ipr_ioa_cfg *ioa_cfg;
10569 ioa_cfg = pci_get_drvdata(pdev);
10570 rc = ipr_probe_ioa_part2(ioa_cfg);
10577 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10584 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10588 scsi_remove_host(ioa_cfg->host);
10593 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10597 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10599 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10601 scsi_remove_host(ioa_cfg->host);
10606 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10610 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10612 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10614 scsi_remove_host(ioa_cfg->host);
10618 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10619 ioa_cfg->scan_enabled = 1;
10620 schedule_work(&ioa_cfg->work_q);
10621 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10623 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10625 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10626 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10627 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10628 ioa_cfg->iopoll_weight, ipr_iopoll);
10632 scsi_scan_host(ioa_cfg->host);
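The end of ipr_probe arms irq_poll on each secondary hrrq: the ISR can hand completion reaping to softirq context with irq_poll_sched(), and the poll callback processes at most 'budget' completions per call, invoking irq_poll_complete() once the queue drains. The callback shape, with a hypothetical reap function:

    #include <linux/irq_poll.h>

    static struct irq_poll my_iop;                  /* hypothetical */

    static int reap_completions(int budget)
    {
            return 0;                       /* stub: number reaped */
    }

    static int my_poll(struct irq_poll *iop, int budget)
    {
            int done = reap_completions(budget);

            if (done < budget)
                    irq_poll_complete(iop); /* drained: leave poll mode */
            return done;
    }

    static void setup_poll(void)
    {
            /* mirrors irq_poll_init(&hrrq->iopoll, weight, ipr_iopoll) */
            irq_poll_init(&my_iop, 64, my_poll);
    }
    /* in the ISR: irq_poll_sched(&my_iop); */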
10649 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10654 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10655 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10656 ioa_cfg->iopoll_weight = 0;
10657 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10658 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10661 while (ioa_cfg->in_reset_reload) {
10662 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10663 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10664 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10667 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10670 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10671 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10672 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10673 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10674 ipr_free_irqs(ioa_cfg);
10675 pci_disable_device(ioa_cfg->pdev);
10832 struct ipr_ioa_cfg *ioa_cfg;
10840 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10841 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10842 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10843 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10844 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10848 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10855 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
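ipr_halt runs from a reboot notifier (note the SYS_RESTART check), giving each adapter a shutdown command before the system goes down. Registration sketch with a hypothetical callback:

    #include <linux/notifier.h>
    #include <linux/reboot.h>

    static int my_halt(struct notifier_block *nb, unsigned long event,
                       void *buf)
    {
            if (event != SYS_RESTART && event != SYS_HALT &&
                event != SYS_POWER_OFF)
                    return NOTIFY_DONE;
            /* quiesce adapters here */
            return NOTIFY_OK;
    }

    static struct notifier_block my_notifier = {
            .notifier_call = my_halt,
    };

    /* module init: register_reboot_notifier(&my_notifier);
     * module exit: unregister_reboot_notifier(&my_notifier); */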