Lines Matching defs:ctrl_info

55 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
57 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
58 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
60 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
63 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
66 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
70 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
71 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
72 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
73 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
75 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
76 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
77 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
214 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
216 return !ctrl_info->controller_online;
219 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
221 if (ctrl_info->controller_online)
222 if (!sis_is_firmware_running(ctrl_info))
223 pqi_take_ctrl_offline(ctrl_info);
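
These two helpers form the driver's polling-style health check: long-running wait loops call pqi_check_ctrl_health() on every iteration (which may itself take the controller offline if the firmware has died) and then bail out through pqi_ctrl_offline(). A minimal sketch of that caller pattern, with condition_met() standing in for whatever the loop is actually waiting on:

    /* Illustrative wait loop; mirrors the pqi_wait_for_* helpers below. */
    while (!condition_met(ctrl_info)) {
        pqi_check_ctrl_health(ctrl_info);   /* may take the ctrl offline */
        if (pqi_ctrl_offline(ctrl_info))
            return -ENXIO;
        usleep_range(1000, 2000);           /* illustrative poll interval */
    }
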
232 struct pqi_ctrl_info *ctrl_info)
234 return sis_read_driver_scratch(ctrl_info);
237 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
240 sis_write_driver_scratch(ctrl_info, mode);
243 static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
245 ctrl_info->block_device_reset = true;
248 static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
250 return ctrl_info->block_device_reset;
253 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
255 return ctrl_info->block_requests;
258 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
260 ctrl_info->block_requests = true;
261 scsi_block_requests(ctrl_info->scsi_host);
264 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
266 ctrl_info->block_requests = false;
267 wake_up_all(&ctrl_info->block_requests_wait);
268 pqi_retry_raid_bypass_requests(ctrl_info);
269 scsi_unblock_requests(ctrl_info->scsi_host);
272 static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
277 if (!pqi_ctrl_blocked(ctrl_info))
280 atomic_inc(&ctrl_info->num_blocked_threads);
283 wait_event(ctrl_info->block_requests_wait,
284 !pqi_ctrl_blocked(ctrl_info));
290 wait_event_timeout(ctrl_info->block_requests_wait,
291 !pqi_ctrl_blocked(ctrl_info),
296 atomic_dec(&ctrl_info->num_blocked_threads);
301 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
303 while (atomic_read(&ctrl_info->num_busy_threads) >
304 atomic_read(&ctrl_info->num_blocked_threads))
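
The block/unblock pair above and this busy/blocked accounting form a simple quiesce protocol: a thread wanting exclusive access blocks new requests, then spins until every thread that was already busy has parked itself on block_requests_wait, i.e. until num_blocked_threads has risen to meet num_busy_threads. A sketch of both sides, using only helpers that appear in this listing:

    /* Quiescing side: stop new I/O, wait for in-flight submitters to park. */
    pqi_ctrl_block_requests(ctrl_info);
    pqi_ctrl_wait_until_quiesced(ctrl_info);
    /* ... exclusive work: reset, OFA, etc. ... */
    pqi_ctrl_unblock_requests(ctrl_info);

    /* Submitter side (see the event worker around line 3179 below):
     * pqi_ctrl_busy()/pqi_ctrl_unbusy() bump num_busy_threads, and
     * pqi_wait_if_ctrl_blocked() raises num_blocked_threads while parked. */
    pqi_ctrl_busy(ctrl_info);
    pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
    /* ... submit the request ... */
    pqi_ctrl_unbusy(ctrl_info);
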
328 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
330 ctrl_info->in_ofa = true;
333 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
335 ctrl_info->in_ofa = false;
338 static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
340 return ctrl_info->in_ofa;
348 static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
351 return device->in_remove && !ctrl_info->in_shutdown;
354 static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
356 ctrl_info->in_shutdown = true;
359 static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
361 return ctrl_info->in_shutdown;
365 struct pqi_ctrl_info *ctrl_info, unsigned long delay)
367 if (pqi_ctrl_offline(ctrl_info))
369 if (pqi_ctrl_in_ofa(ctrl_info))
372 schedule_delayed_work(&ctrl_info->rescan_work, delay);
375 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
377 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
383 struct pqi_ctrl_info *ctrl_info)
385 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
388 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
390 cancel_delayed_work_sync(&ctrl_info->rescan_work);
393 static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
395 cancel_work_sync(&ctrl_info->event_work);
398 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
400 if (!ctrl_info->heartbeat_counter)
403 return readl(ctrl_info->heartbeat_counter);
406 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
408 if (!ctrl_info->soft_reset_status)
411 return readb(ctrl_info->soft_reset_status);
414 static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
419 if (!ctrl_info->soft_reset_status)
422 status = pqi_read_soft_reset_status(ctrl_info);
424 writeb(status, ctrl_info->soft_reset_status);
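
pqi_clear_soft_reset_status() is a read-modify-write of a byte-wide MMIO register: read the current status, clear the caller's bits, write the byte back. Reconstructed from the fragments above; the mask parameter's name is an assumption, since the listing elides the second line of the signature:

    static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
        u8 clear)   /* bits to clear; parameter name assumed */
    {
        u8 status;

        if (!ctrl_info->soft_reset_status)
            return;

        status = pqi_read_soft_reset_status(ctrl_info);
        status &= ~clear;   /* drop only the requested bits */
        writeb(status, ctrl_info->soft_reset_status);
    }
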
464 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
545 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
564 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
577 struct pqi_ctrl_info *ctrl_info)
580 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
583 io_request = &ctrl_info->io_request_pool[i];
587 i = (i + 1) % ctrl_info->max_io_slots;
591 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
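
pqi_alloc_io_request() starts scanning the pool at a racily read cursor; the "benignly racy" comment is apt because correctness comes from a per-slot atomic refcount (not shown in the listing), not from the cursor. A sketch of the loop these fragments imply:

    struct pqi_io_request *io_request;
    u16 i = ctrl_info->next_io_request_slot;    /* benignly racy */

    while (1) {
        io_request = &ctrl_info->io_request_pool[i];
        /* First claimant sees 0 -> 1; losers back off, try the next slot. */
        if (atomic_inc_return(&io_request->refcount) == 1)
            break;
        atomic_dec(&io_request->refcount);
        i = (i + 1) % ctrl_info->max_io_slots;
    }
    /* Racy cursor update: at worst two CPUs start at the same slot and
     * one simply walks a little further on its next allocation. */
    ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
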
603 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
611 rc = pqi_build_raid_path_request(ctrl_info, &request,
617 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
620 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
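
pqi_send_scsi_raid_request() is the driver's standard synchronous CDB shape: build the request IU and DMA-map the single data buffer, submit synchronously, unmap. A condensed sketch; the argument lists are abbreviated from what the fragments above suggest:

    struct pqi_raid_path_request request;
    enum dma_data_direction dir;
    int rc;

    rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
        buffer, buffer_length, vpd_page, &dir);  /* maps the buffer */
    if (rc)
        return rc;

    rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
        error_info, timeout_msecs);

    pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
    return rc;
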
627 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
630 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
634 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
638 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
642 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
645 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
649 static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
652 return pqi_send_ctrl_raid_request(ctrl_info,
657 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
660 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
664 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
673 rc = pqi_build_raid_path_request(ctrl_info, &request,
683 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
686 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
691 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
701 if (pqi_ctrl_offline(ctrl_info))
710 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
718 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
722 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
728 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
737 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
744 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
753 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
756 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
774 struct pqi_ctrl_info *ctrl_info)
802 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
823 struct pqi_ctrl_info *ctrl_info)
865 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
877 struct pqi_ctrl_info *ctrl_info;
879 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
882 if (pqi_ctrl_offline(ctrl_info))
885 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
887 dev_warn(&ctrl_info->pci_dev->dev,
890 schedule_delayed_work(&ctrl_info->update_time_work,
895 struct pqi_ctrl_info *ctrl_info)
897 schedule_delayed_work(&ctrl_info->update_time_work, 0);
901 struct pqi_ctrl_info *ctrl_info)
903 cancel_delayed_work_sync(&ctrl_info->update_time_work);
906 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
909 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
913 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
929 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
950 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
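
pqi_report_phys_logical_luns() is a classic two-pass SCSI query: issue the report with only a header-sized buffer to learn the list length, allocate a buffer that large, then reissue into the full buffer. Sketch of the shape (field names assumed):

    /* Pass 1: header only, to learn how much data the controller holds. */
    rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
        sizeof(*report_lun_header));
    if (rc)
        goto out;

    lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
    lun_data_length = sizeof(*report_lun_header) + lun_list_length;

    lun_data = kmalloc(lun_data_length, GFP_KERNEL);
    if (!lun_data) {
        rc = -ENOMEM;
        goto out;
    }

    /* Pass 2: fetch the whole list into the correctly sized buffer. */
    rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
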
976 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
979 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
983 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
986 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
989 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
1000 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
1002 dev_err(&ctrl_info->pci_dev->dev,
1005 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
1007 dev_err(&ctrl_info->pci_dev->dev,
1098 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
1109 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1122 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1162 dev_warn(&ctrl_info->pci_dev->dev,
1170 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1181 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1198 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1206 dev_warn(&ctrl_info->pci_dev->dev,
1215 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1229 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
1240 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1254 pqi_get_raid_map(ctrl_info, device) == 0)
1265 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1279 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1303 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
1311 rc = pqi_identify_physical_device(ctrl_info, device,
1342 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
1353 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1370 pqi_get_raid_level(ctrl_info, device);
1371 pqi_get_raid_bypass_status(ctrl_info, device);
1372 pqi_get_volume_status(ctrl_info, device);
1382 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1392 rc = pqi_get_logical_device_info(ctrl_info, device);
1394 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
1399 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1496 dev_info(&ctrl_info->pci_dev->dev,
1498 ctrl_info->scsi_host->host_no,
1504 struct pqi_ctrl_info *ctrl_info;
1506 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1509 pqi_scan_scsi_devices(ctrl_info);
1512 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1518 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1521 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1528 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1535 rc = pqi_device_wait_for_pending_io(ctrl_info, device, PQI_PENDING_IO_TIMEOUT_SECS);
1537 dev_err(&ctrl_info->pci_dev->dev,
1539 ctrl_info->scsi_host->host_no, device->bus,
1551 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1556 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1582 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1587 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
1612 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1619 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
1668 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
1744 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1749 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1751 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1765 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1785 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1788 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1794 find_result = pqi_scsi_find_entry(ctrl_info, device,
1825 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1841 &ctrl_info->scsi_device_list);
1847 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1849 if (pqi_ctrl_in_ofa(ctrl_info))
1850 pqi_ctrl_ofa_done(ctrl_info);
1855 pqi_dev_info(ctrl_info, "offline", device);
1856 pqi_show_volume_status(ctrl_info, device);
1860 pqi_remove_device(ctrl_info, device);
1863 pqi_dev_info(ctrl_info, "removed", device);
1872 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1891 rc = pqi_add_device(ctrl_info, device);
1893 pqi_dev_info(ctrl_info, "added", device);
1895 dev_warn(&ctrl_info->pci_dev->dev,
1897 ctrl_info->scsi_host->host_no,
1900 pqi_fixup_botched_add(ctrl_info, device);
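
pqi_update_device_list() reconciles a freshly scanned list against ctrl_info->scsi_device_list under the list spinlock, and only touches the SCSI midlayer after dropping it. In outline (a sketch of the flow, not the full function):

    spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

    /* 1. Classify each scanned device via pqi_scsi_find_entry():
     *    same / changed / new.
     * 2. Existing devices that matched nothing move to a local remove list.
     * 3. Genuinely new devices are spliced into scsi_device_list. */

    spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

    /* Outside the lock: pqi_remove_device() for removals (it may sleep in
     * pqi_device_wait_for_pending_io()), pqi_add_device() for additions,
     * and pqi_fixup_botched_add() to unwind a failed scsi_add_device(). */
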
1953 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1977 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2004 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2031 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2039 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2094 rc = pqi_get_device_info(ctrl_info, device, id_phys);
2096 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2102 dev_warn(&ctrl_info->pci_dev->dev,
2107 dev_warn(&ctrl_info->pci_dev->dev,
2137 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2156 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2160 if (pqi_ctrl_offline(ctrl_info))
2163 if (!mutex_trylock(&ctrl_info->scan_mutex)) {
2164 pqi_schedule_rescan_worker_delayed(ctrl_info);
2167 rc = pqi_update_scsi_devices(ctrl_info);
2169 pqi_schedule_rescan_worker_delayed(ctrl_info);
2170 mutex_unlock(&ctrl_info->scan_mutex);
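
pqi_scan_scsi_devices() never blocks on scan_mutex: if another scan is in flight it reschedules itself through the delayed rescan worker instead. The full pattern, with the contended return code an assumption:

    if (!mutex_trylock(&ctrl_info->scan_mutex)) {
        /* Someone else is scanning; come back later. */
        pqi_schedule_rescan_worker_delayed(ctrl_info);
        rc = -EINPROGRESS;      /* return code assumed */
    } else {
        rc = pqi_update_scsi_devices(ctrl_info);
        if (rc)
            pqi_schedule_rescan_worker_delayed(ctrl_info);
        mutex_unlock(&ctrl_info->scan_mutex);
    }
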
2178 struct pqi_ctrl_info *ctrl_info;
2180 ctrl_info = shost_to_hba(shost);
2181 if (pqi_ctrl_in_ofa(ctrl_info))
2184 pqi_scan_scsi_devices(ctrl_info);
2192 struct pqi_ctrl_info *ctrl_info;
2194 ctrl_info = shost_priv(shost);
2196 return !mutex_is_locked(&ctrl_info->scan_mutex);
2199 static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
2201 mutex_lock(&ctrl_info->scan_mutex);
2202 mutex_unlock(&ctrl_info->scan_mutex);
2205 static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
2207 mutex_lock(&ctrl_info->lun_reset_mutex);
2208 mutex_unlock(&ctrl_info->lun_reset_mutex);
2211 static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
2213 mutex_lock(&ctrl_info->ofa_mutex);
2214 mutex_unlock(&ctrl_info->ofa_mutex);
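
All three pqi_wait_until_*_finished() helpers use the same idiom: taking and immediately releasing a mutex is a barrier that waits out whichever holder is currently inside the critical section, without doing any work of its own. Annotated:

    static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
    {
        mutex_lock(&ctrl_info->scan_mutex);   /* blocks until the scan drops it */
        mutex_unlock(&ctrl_info->scan_mutex); /* we never needed it ourselves */
    }
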
2244 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2566 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2584 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2591 pqi_registers = ctrl_info->pqi_registers;
2600 dev_err(&ctrl_info->pci_dev->dev,
2612 dev_err(&ctrl_info->pci_dev->dev,
2624 dev_err(&ctrl_info->pci_dev->dev,
2645 struct pqi_ctrl_info *ctrl_info;
2653 ctrl_info = shost_to_hba(sdev->host);
2654 pqi_schedule_rescan_worker(ctrl_info);
2655 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
2656 path, ctrl_info->scsi_host->host_no, device->bus,
2731 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
2738 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2746 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2885 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
2887 pqi_take_ctrl_offline(ctrl_info);
2890 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
2904 if (oq_pi >= ctrl_info->num_elements_per_oq) {
2905 pqi_invalid_response(ctrl_info);
2906 dev_err(&ctrl_info->pci_dev->dev,
2908 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
2919 if (request_id >= ctrl_info->max_io_slots) {
2920 pqi_invalid_response(ctrl_info);
2921 dev_err(&ctrl_info->pci_dev->dev,
2923 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
2927 io_request = &ctrl_info->io_request_pool[request_id];
2929 pqi_invalid_response(ctrl_info);
2930 dev_err(&ctrl_info->pci_dev->dev,
2961 io_request->error_info = ctrl_info->error_buffer +
2967 pqi_invalid_response(ctrl_info);
2968 dev_err(&ctrl_info->pci_dev->dev,
2980 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
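
pqi_process_io_intr() is a single-consumer ring walk: the controller publishes its producer index (oq_pi) into host memory, the driver keeps a private consumer copy (oq_ci), and every element is validated before use; any inconsistency sends the controller offline via pqi_invalid_response(). Skeleton of the loop:

    oq_ci = queue_group->oq_ci_copy;

    while (1) {
        oq_pi = readl(queue_group->oq_pi);  /* controller-written PI */
        if (oq_pi >= ctrl_info->num_elements_per_oq) {
            pqi_invalid_response(ctrl_info);    /* corrupt PI: go offline */
            return -1;
        }
        if (oq_pi == oq_ci)
            break;                          /* ring drained */

        /* ... range-check request_id against max_io_slots, look up the
         * io_request, dispatch on the response IU type ... */

        oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
    }

    queue_group->oq_ci_copy = oq_ci;
    writel(oq_ci, queue_group->oq_ci);      /* hand elements back */
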
3004 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3013 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3023 ctrl_info->num_elements_per_iq))
3029 if (pqi_ctrl_offline(ctrl_info))
3038 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3050 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3064 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3071 struct pqi_ctrl_info *ctrl_info)
3079 status = pqi_read_soft_reset_status(ctrl_info);
3087 dev_err(&ctrl_info->pci_dev->dev,
3092 if (!sis_is_firmware_running(ctrl_info))
3099 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
3107 dev_info(&ctrl_info->pci_dev->dev,
3108 "resetting controller %u\n", ctrl_info->ctrl_id);
3109 sis_soft_reset(ctrl_info);
3112 rc = pqi_ofa_ctrl_restart(ctrl_info);
3113 pqi_ofa_free_host_buffer(ctrl_info);
3114 dev_info(&ctrl_info->pci_dev->dev,
3116 ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
3119 pqi_ofa_ctrl_unquiesce(ctrl_info);
3120 dev_info(&ctrl_info->pci_dev->dev,
3122 ctrl_info->ctrl_id, "ABORTED");
3125 pqi_ofa_free_host_buffer(ctrl_info);
3126 pqi_take_ctrl_offline(ctrl_info);
3131 static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3139 mutex_lock(&ctrl_info->ofa_mutex);
3142 dev_info(&ctrl_info->pci_dev->dev,
3144 ctrl_info->ctrl_id);
3145 pqi_ofa_ctrl_quiesce(ctrl_info);
3146 pqi_acknowledge_event(ctrl_info, event);
3147 if (ctrl_info->soft_reset_handshake_supported) {
3148 status = pqi_poll_for_soft_reset_status(ctrl_info);
3149 pqi_process_soft_reset(ctrl_info, status);
3151 pqi_process_soft_reset(ctrl_info,
3156 pqi_acknowledge_event(ctrl_info, event);
3157 pqi_ofa_setup_host_buffer(ctrl_info,
3159 pqi_ofa_host_memory_update(ctrl_info);
3161 pqi_ofa_free_host_buffer(ctrl_info);
3162 pqi_acknowledge_event(ctrl_info, event);
3163 dev_info(&ctrl_info->pci_dev->dev,
3165 ctrl_info->ctrl_id, event->ofa_cancel_reason);
3168 mutex_unlock(&ctrl_info->ofa_mutex);
3174 struct pqi_ctrl_info *ctrl_info;
3177 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3179 pqi_ctrl_busy(ctrl_info);
3180 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
3181 if (pqi_ctrl_offline(ctrl_info))
3184 pqi_schedule_rescan_worker_delayed(ctrl_info);
3186 event = ctrl_info->events;
3191 pqi_ctrl_unbusy(ctrl_info);
3192 pqi_ofa_process_event(ctrl_info, event);
3195 pqi_acknowledge_event(ctrl_info, event);
3201 pqi_ctrl_unbusy(ctrl_info);
3210 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
3213 pqi_check_ctrl_health(ctrl_info);
3214 if (pqi_ctrl_offline(ctrl_info))
3217 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3218 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3220 if (num_interrupts == ctrl_info->previous_num_interrupts) {
3221 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3222 dev_err(&ctrl_info->pci_dev->dev,
3225 pqi_take_ctrl_offline(ctrl_info);
3229 ctrl_info->previous_num_interrupts = num_interrupts;
3232 ctrl_info->previous_heartbeat_count = heartbeat_count;
3233 mod_timer(&ctrl_info->heartbeat_timer,
3237 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3239 if (!ctrl_info->heartbeat_counter)
3242 ctrl_info->previous_num_interrupts =
3243 atomic_read(&ctrl_info->num_interrupts);
3244 ctrl_info->previous_heartbeat_count =
3245 pqi_read_heartbeat_counter(ctrl_info);
3247 ctrl_info->heartbeat_timer.expires =
3249 add_timer(&ctrl_info->heartbeat_timer);
3252 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3254 del_timer_sync(&ctrl_info->heartbeat_timer);
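
The heartbeat timer is a dual-signal watchdog: the controller is declared dead only when both the firmware heartbeat counter and the driver's interrupt count have stalled across one timer period; otherwise the timer re-arms. Condensed from the handler above (the interval constant's name is assumed):

    num_interrupts = atomic_read(&ctrl_info->num_interrupts);
    heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);

    if (num_interrupts == ctrl_info->previous_num_interrupts &&
        heartbeat_count == ctrl_info->previous_heartbeat_count) {
        dev_err(&ctrl_info->pci_dev->dev, "controller heartbeat stopped\n");
        pqi_take_ctrl_offline(ctrl_info);
        return;
    }

    ctrl_info->previous_num_interrupts = num_interrupts;
    ctrl_info->previous_heartbeat_count = heartbeat_count;
    mod_timer(&ctrl_info->heartbeat_timer,
        jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);    /* constant assumed */
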
3291 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3301 event_queue = &ctrl_info->event_queue;
3308 pqi_invalid_response(ctrl_info);
3309 dev_err(&ctrl_info->pci_dev->dev,
3325 event = &ctrl_info->events[event_index];
3340 schedule_work(&ctrl_info->event_work);
3348 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
3355 pqi_registers = ctrl_info->pqi_registers;
3367 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3370 switch (ctrl_info->irq_mode) {
3376 pqi_configure_legacy_intx(ctrl_info, true);
3377 sis_enable_intx(ctrl_info);
3386 pqi_configure_legacy_intx(ctrl_info, false);
3387 sis_enable_msix(ctrl_info);
3392 pqi_configure_legacy_intx(ctrl_info, false);
3399 sis_enable_msix(ctrl_info);
3402 pqi_configure_legacy_intx(ctrl_info, true);
3403 sis_enable_intx(ctrl_info);
3411 ctrl_info->irq_mode = new_mode;
3416 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3421 switch (ctrl_info->irq_mode) {
3427 readl(&ctrl_info->pqi_registers->legacy_intx_status);
3444 struct pqi_ctrl_info *ctrl_info;
3450 ctrl_info = queue_group->ctrl_info;
3452 if (!pqi_is_valid_irq(ctrl_info))
3455 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3459 if (irq == ctrl_info->event_irq) {
3460 num_events_handled = pqi_process_event_intr(ctrl_info);
3468 atomic_inc(&ctrl_info->num_interrupts);
3470 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3471 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3477 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3479 struct pci_dev *pci_dev = ctrl_info->pci_dev;
3483 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3485 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3487 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
3494 ctrl_info->num_msix_vectors_initialized++;
3500 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3504 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3505 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3506 &ctrl_info->queue_groups[i]);
3508 ctrl_info->num_msix_vectors_initialized = 0;
3511 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3515 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
3516 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3519 dev_err(&ctrl_info->pci_dev->dev,
3525 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
3526 ctrl_info->irq_mode = IRQ_MODE_MSIX;
3530 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3532 if (ctrl_info->num_msix_vectors_enabled) {
3533 pci_free_irq_vectors(ctrl_info->pci_dev);
3534 ctrl_info->num_msix_vectors_enabled = 0;
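
Interrupt setup follows the modern PCI flow: pci_alloc_irq_vectors() with a min/max range (fewer vectors than queue groups is tolerated; pqi_calculate_queue_resources() is rerun to shrink, see line 7269 below), pci_irq_vector() to map vector index to Linux IRQ number, and one request_irq() per queue group. Sketch (the handler name is assumed):

    num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
        PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
        PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
    if (num_vectors_enabled < 0)
        return num_vectors_enabled;
    ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;

    for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
        rc = request_irq(pci_irq_vector(ctrl_info->pci_dev, i),
            pqi_irq_handler, 0,     /* handler name assumed */
            DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
        if (rc)
            return rc;
        ctrl_info->num_msix_vectors_initialized++;
    }
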
3538 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3554 ctrl_info->num_elements_per_iq;
3557 ctrl_info->num_elements_per_oq;
3558 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3559 num_outbound_queues = ctrl_info->num_queue_groups;
3560 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3592 ctrl_info->queue_memory_base =
3593 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3594 &ctrl_info->queue_memory_base_dma_handle,
3597 if (!ctrl_info->queue_memory_base)
3600 ctrl_info->queue_memory_length = alloc_length;
3602 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3605 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3606 queue_group = &ctrl_info->queue_groups[i];
3609 ctrl_info->queue_memory_base_dma_handle +
3610 (element_array - ctrl_info->queue_memory_base);
3616 ctrl_info->queue_memory_base_dma_handle +
3617 (element_array - ctrl_info->queue_memory_base);
3623 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3624 queue_group = &ctrl_info->queue_groups[i];
3627 ctrl_info->queue_memory_base_dma_handle +
3628 (element_array - ctrl_info->queue_memory_base);
3634 ctrl_info->event_queue.oq_element_array = element_array;
3635 ctrl_info->event_queue.oq_element_array_bus_addr =
3636 ctrl_info->queue_memory_base_dma_handle +
3637 (element_array - ctrl_info->queue_memory_base);
3644 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3645 queue_group = &ctrl_info->queue_groups[i];
3648 ctrl_info->queue_memory_base_dma_handle +
3650 (void __iomem *)ctrl_info->queue_memory_base);
3656 ctrl_info->queue_memory_base_dma_handle +
3658 (void __iomem *)ctrl_info->queue_memory_base);
3664 ctrl_info->queue_memory_base_dma_handle +
3666 (void __iomem *)ctrl_info->queue_memory_base);
3672 ctrl_info->event_queue.oq_pi = next_queue_index;
3673 ctrl_info->event_queue.oq_pi_bus_addr =
3674 ctrl_info->queue_memory_base_dma_handle +
3676 (void __iomem *)ctrl_info->queue_memory_base);
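
All operational queues live in a single dma_alloc_coherent() region: the driver walks a cursor through it, aligning each piece with PTR_ALIGN, and derives every bus address as the DMA handle plus the cursor's offset from the CPU base. The recurring step, extracted (the alignment constant comes from the driver's headers):

    element_array = PTR_ALIGN(element_array,
        PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);

    queue_group->iq_element_array[RAID_PATH] = element_array;
    queue_group->iq_element_array_bus_addr[RAID_PATH] =
        ctrl_info->queue_memory_base_dma_handle +
        (element_array - ctrl_info->queue_memory_base);

    element_array += element_array_length;  /* advance the cursor */
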
3681 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3691 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3692 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3699 ctrl_info->event_queue.oq_id = next_oq_id++;
3700 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3701 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3702 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3703 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3710 ctrl_info->event_queue.int_msg_num = 0;
3711 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3712 ctrl_info->queue_groups[i].int_msg_num = i;
3714 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3715 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3716 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3717 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3718 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3722 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3731 ctrl_info->admin_queue_memory_base =
3732 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3733 &ctrl_info->admin_queue_memory_base_dma_handle,
3736 if (!ctrl_info->admin_queue_memory_base)
3739 ctrl_info->admin_queue_memory_length = alloc_length;
3741 admin_queues = &ctrl_info->admin_queues;
3742 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3753 ctrl_info->admin_queue_memory_base_dma_handle +
3755 ctrl_info->admin_queue_memory_base);
3757 ctrl_info->admin_queue_memory_base_dma_handle +
3759 ctrl_info->admin_queue_memory_base);
3761 ctrl_info->admin_queue_memory_base_dma_handle +
3763 ctrl_info->admin_queue_memory_base);
3765 ctrl_info->admin_queue_memory_base_dma_handle +
3767 (void __iomem *)ctrl_info->admin_queue_memory_base);
3775 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3783 pqi_registers = ctrl_info->pqi_registers;
3784 admin_queues = &ctrl_info->admin_queues;
3817 admin_queues->iq_pi = ctrl_info->iomem_base +
3820 admin_queues->oq_ci = ctrl_info->iomem_base +
3827 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3834 admin_queues = &ctrl_info->admin_queues;
3854 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3862 admin_queues = &ctrl_info->admin_queues;
3872 dev_err(&ctrl_info->pci_dev->dev,
3876 if (!sis_is_firmware_running(ctrl_info))
3891 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3930 ctrl_info->num_elements_per_iq))
3940 ctrl_info->num_elements_per_iq - iq_pi;
3954 ctrl_info->num_elements_per_iq;
3973 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3985 pqi_check_ctrl_health(ctrl_info);
3986 if (pqi_ctrl_offline(ctrl_info)) {
4026 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4043 if (down_interruptible(&ctrl_info->sync_request_sem))
4047 down(&ctrl_info->sync_request_sem);
4050 if (down_timeout(&ctrl_info->sync_request_sem,
4063 pqi_ctrl_busy(ctrl_info);
4064 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
4066 pqi_ctrl_unbusy(ctrl_info);
4071 if (pqi_ctrl_offline(ctrl_info)) {
4072 pqi_ctrl_unbusy(ctrl_info);
4077 atomic_inc(&ctrl_info->sync_cmds_outstanding);
4079 io_request = pqi_alloc_io_request(ctrl_info);
4095 pqi_start_io(ctrl_info,
4096 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4099 pqi_ctrl_unbusy(ctrl_info);
4102 pqi_wait_for_completion_io(ctrl_info, &wait);
4106 dev_warn(&ctrl_info->pci_dev->dev,
4125 atomic_dec(&ctrl_info->sync_cmds_outstanding);
4127 up(&ctrl_info->sync_request_sem);
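
pqi_submit_raid_request_synchronous() throttles callers with a counting semaphore sized to the reserved I/O slots, supporting interruptible, timed, and plain down variants, then pairs pqi_start_io() with a completion that the response path signals. In outline (flag name assumed):

    if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
        if (down_interruptible(&ctrl_info->sync_request_sem))
            return -ERESTARTSYS;
    } else {
        down(&ctrl_info->sync_request_sem);
    }

    pqi_ctrl_busy(ctrl_info);
    /* ... honor pqi_wait_if_ctrl_blocked(), allocate an io_request,
     * attach a completion, copy the caller's IU into its buffer ... */
    pqi_start_io(ctrl_info,
        &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
        io_request);
    pqi_ctrl_unbusy(ctrl_info);

    pqi_wait_for_completion_io(ctrl_info, &wait);   /* checks ctrl health */

    up(&ctrl_info->sync_request_sem);
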
4152 struct pqi_ctrl_info *ctrl_info,
4158 pqi_submit_admin_request(ctrl_info, request);
4160 rc = pqi_poll_for_admin_response(ctrl_info, response);
4169 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4191 rc = pqi_map_single(ctrl_info->pci_dev,
4198 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4201 pqi_pci_unmap(ctrl_info->pci_dev,
4213 ctrl_info->max_inbound_queues =
4215 ctrl_info->max_elements_per_iq =
4217 ctrl_info->max_iq_element_length =
4220 ctrl_info->max_outbound_queues =
4222 ctrl_info->max_elements_per_oq =
4224 ctrl_info->max_oq_element_length =
4231 ctrl_info->max_inbound_iu_length_per_firmware =
4234 ctrl_info->inbound_spanning_supported =
4236 ctrl_info->outbound_spanning_supported =
4245 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4247 if (ctrl_info->max_iq_element_length <
4249 dev_err(&ctrl_info->pci_dev->dev,
4251 ctrl_info->max_iq_element_length,
4256 if (ctrl_info->max_oq_element_length <
4258 dev_err(&ctrl_info->pci_dev->dev,
4260 ctrl_info->max_oq_element_length,
4265 if (ctrl_info->max_inbound_iu_length_per_firmware <
4267 dev_err(&ctrl_info->pci_dev->dev,
4269 ctrl_info->max_inbound_iu_length_per_firmware,
4274 if (!ctrl_info->inbound_spanning_supported) {
4275 dev_err(&ctrl_info->pci_dev->dev,
4280 if (ctrl_info->outbound_spanning_supported) {
4281 dev_err(&ctrl_info->pci_dev->dev,
4289 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4296 event_queue = &ctrl_info->event_queue;
4321 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4326 event_queue->oq_ci = ctrl_info->iomem_base +
4334 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4342 queue_group = &ctrl_info->queue_groups[group_number];
4360 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4366 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4369 dev_err(&ctrl_info->pci_dev->dev,
4374 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4395 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4401 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4404 dev_err(&ctrl_info->pci_dev->dev,
4409 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4429 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4432 dev_err(&ctrl_info->pci_dev->dev,
4451 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4459 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4462 dev_err(&ctrl_info->pci_dev->dev,
4467 queue_group->oq_ci = ctrl_info->iomem_base +
4475 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4480 rc = pqi_create_event_queue(ctrl_info);
4482 dev_err(&ctrl_info->pci_dev->dev,
4487 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4488 rc = pqi_create_queue_group(ctrl_info, i);
4490 dev_err(&ctrl_info->pci_dev->dev,
4492 i, ctrl_info->num_queue_groups);
4504 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4527 rc = pqi_map_single(ctrl_info->pci_dev,
4534 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4537 pqi_pci_unmap(ctrl_info->pci_dev,
4548 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4563 rc = pqi_map_single(ctrl_info->pci_dev,
4570 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4573 pqi_pci_unmap(ctrl_info->pci_dev,
4583 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4585 return pqi_configure_events(ctrl_info, true);
4588 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4590 return pqi_configure_events(ctrl_info, false);
4593 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4600 if (!ctrl_info->io_request_pool)
4603 dev = &ctrl_info->pci_dev->dev;
4604 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4605 io_request = ctrl_info->io_request_pool;
4607 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4617 kfree(ctrl_info->io_request_pool);
4618 ctrl_info->io_request_pool = NULL;
4621 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4624 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
4625 ctrl_info->error_buffer_length,
4626 &ctrl_info->error_buffer_dma_handle,
4628 if (!ctrl_info->error_buffer)
4634 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4643 ctrl_info->io_request_pool =
4644 kcalloc(ctrl_info->max_io_slots,
4645 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4647 if (!ctrl_info->io_request_pool) {
4648 dev_err(&ctrl_info->pci_dev->dev,
4653 dev = &ctrl_info->pci_dev->dev;
4654 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4655 io_request = ctrl_info->io_request_pool;
4657 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4659 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4662 dev_err(&ctrl_info->pci_dev->dev,
4672 dev_err(&ctrl_info->pci_dev->dev,
4687 pqi_free_all_io_requests(ctrl_info);
4697 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4702 ctrl_info->scsi_ml_can_queue =
4703 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4704 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4706 ctrl_info->error_buffer_length =
4707 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4710 max_transfer_size = min(ctrl_info->max_transfer_size,
4713 max_transfer_size = min(ctrl_info->max_transfer_size,
4721 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4725 ctrl_info->sg_chain_buffer_length =
4728 ctrl_info->sg_tablesize = max_sg_entries;
4729 ctrl_info->max_sectors = max_transfer_size / 512;
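
The sizing here is plain arithmetic: every outstanding request gets an I/O slot and an error-buffer element, and the midlayer cap falls straight out of the transfer limit; for example, a 1 MiB max_transfer_size yields max_sectors = 1048576 / 512 = 2048.
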
4732 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4744 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4745 ctrl_info->max_outbound_queues - 1);
4749 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4753 ctrl_info->num_queue_groups = num_queue_groups;
4754 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
4760 ctrl_info->max_inbound_iu_length =
4761 (ctrl_info->max_inbound_iu_length_per_firmware /
4766 (ctrl_info->max_inbound_iu_length /
4773 ctrl_info->max_elements_per_iq);
4777 ctrl_info->max_elements_per_oq);
4779 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4780 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4782 ctrl_info->max_sg_per_iu =
4783 ((ctrl_info->max_inbound_iu_length -
4800 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4825 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4864 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4890 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4941 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
5013 dev_err(&ctrl_info->pci_dev->dev,
5019 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5025 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5030 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5036 io_request = pqi_alloc_io_request(ctrl_info);
5038 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5042 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
5044 if (!pqi_ctrl_blocked(ctrl_info))
5045 schedule_work(&ctrl_info->raid_bypass_retry_work);
5052 struct pqi_ctrl_info *ctrl_info;
5067 ctrl_info = shost_to_hba(scmd->device->host);
5068 if (pqi_ctrl_offline(ctrl_info))
5075 struct pqi_ctrl_info *ctrl_info,
5080 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5083 &ctrl_info->raid_bypass_retry_list);
5086 &ctrl_info->raid_bypass_retry_list);
5087 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5103 struct pqi_ctrl_info *ctrl_info;
5108 ctrl_info = shost_to_hba(scmd->device->host);
5110 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
5111 pqi_schedule_bypass_retry(ctrl_info);
5118 struct pqi_ctrl_info *ctrl_info;
5130 ctrl_info = shost_to_hba(scmd->device->host);
5135 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5140 struct pqi_ctrl_info *ctrl_info)
5145 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5147 &ctrl_info->raid_bypass_retry_list,
5151 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5156 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
5161 pqi_ctrl_busy(ctrl_info);
5164 if (pqi_ctrl_blocked(ctrl_info))
5166 io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
5171 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
5173 pqi_schedule_bypass_retry(ctrl_info);
5178 pqi_ctrl_unbusy(ctrl_info);
5183 struct pqi_ctrl_info *ctrl_info;
5185 ctrl_info = container_of(work, struct pqi_ctrl_info,
5187 pqi_retry_raid_bypass_requests(ctrl_info);
5191 struct pqi_ctrl_info *ctrl_info)
5195 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5196 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
5197 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
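
The RAID-bypass retry machinery is a spinlock-protected FIFO: failed bypass commands are queued at the head or tail (per the at_head flag visible above), and the retry worker pops one entry at a time, requeueing at the head and rescheduling itself if the controller becomes blocked mid-walk. The queue/pop pair, roughly:

    /* Queue (pqi_add_to_raid_bypass_retry_list): */
    spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
    if (at_head)
        list_add(&io_request->request_list_entry,
            &ctrl_info->raid_bypass_retry_list);
    else
        list_add_tail(&io_request->request_list_entry,
            &ctrl_info->raid_bypass_retry_list);
    spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);

    /* Pop (pqi_next_queued_raid_bypass_request): */
    spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
    io_request = list_first_entry_or_null(
        &ctrl_info->raid_bypass_retry_list,
        struct pqi_io_request, request_list_entry);
    if (io_request)
        list_del(&io_request->request_list_entry);
    spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
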
5217 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5221 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5225 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5234 io_request = pqi_alloc_io_request(ctrl_info);
5268 dev_err(&ctrl_info->pci_dev->dev,
5284 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5290 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5295 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5301 if (hw_queue > ctrl_info->max_hw_queue_index)
5334 struct pqi_ctrl_info *ctrl_info;
5341 ctrl_info = shost_to_hba(shost);
5351 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info,
5358 pqi_ctrl_busy(ctrl_info);
5359 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
5360 pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
5371 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5372 queue_group = &ctrl_info->queue_groups[hw_queue];
5378 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
5386 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5389 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5391 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5395 pqi_ctrl_unbusy(ctrl_info);
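
The queuecommand fast path selects a queue group by hardware queue, tries the RAID bypass path first for eligible logical volumes, and falls back to the plain RAID path when bypass cannot be used; physical devices with AIO enabled go straight to the AIO path. Condensed from the fragments above:

    hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
    queue_group = &ctrl_info->queue_groups[hw_queue];

    if (pqi_is_logical_device(device)) {
        raid_bypassed = false;
        if (device->raid_bypass_enabled) {
            rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
                scmd, queue_group);
            if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
                raid_bypassed = true;
        }
        if (!raid_bypassed)
            rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
                queue_group);
    } else {
        if (device->aio_enabled)
            rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
                queue_group);
        else
            rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
                queue_group);
    }
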
5402 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
5419 pqi_check_ctrl_health(ctrl_info);
5420 if (pqi_ctrl_offline(ctrl_info))
5429 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
5438 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5439 queue_group = &ctrl_info->queue_groups[i];
5441 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
5452 pqi_check_ctrl_health(ctrl_info);
5453 if (pqi_ctrl_offline(ctrl_info))
5463 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5475 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5476 queue_group = &ctrl_info->queue_groups[i];
5506 static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
5516 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5517 queue_group = &ctrl_info->queue_groups[i];
5544 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5552 pqi_check_ctrl_health(ctrl_info);
5553 if (pqi_ctrl_offline(ctrl_info))
5557 dev_err(&ctrl_info->pci_dev->dev,
5568 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5580 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5581 list_for_each_entry(device, &ctrl_info->scsi_device_list,
5588 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5594 pqi_check_ctrl_health(ctrl_info);
5595 if (pqi_ctrl_offline(ctrl_info))
5600 dev_err(&ctrl_info->pci_dev->dev,
5611 static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
5613 while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
5614 pqi_check_ctrl_health(ctrl_info);
5615 if (pqi_ctrl_offline(ctrl_info))
5634 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5646 pqi_check_ctrl_health(ctrl_info);
5647 if (pqi_ctrl_offline(ctrl_info)) {
5656 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
5664 io_request = pqi_alloc_io_request(ctrl_info);
5678 if (ctrl_info->tmf_iu_timeout_supported)
5682 pqi_start_io(ctrl_info,
5683 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5686 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5701 static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5709 rc = pqi_lun_reset(ctrl_info, device);
5717 rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
5722 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5727 mutex_lock(&ctrl_info->lun_reset_mutex);
5729 pqi_ctrl_block_requests(ctrl_info);
5730 pqi_ctrl_wait_until_quiesced(ctrl_info);
5731 pqi_fail_io_queued_for_device(ctrl_info, device);
5732 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5734 pqi_ctrl_unblock_requests(ctrl_info);
5739 rc = _pqi_device_reset(ctrl_info, device);
5743 mutex_unlock(&ctrl_info->lun_reset_mutex);
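
pqi_device_reset() serializes resets on lun_reset_mutex and brackets the LUN reset with a full quiesce: block new requests, wait for submitters to park, fail I/O already queued for the device, drain the inbound queues, unblock, then perform the reset and wait out pending I/O. Outline:

    mutex_lock(&ctrl_info->lun_reset_mutex);

    pqi_ctrl_block_requests(ctrl_info);
    pqi_ctrl_wait_until_quiesced(ctrl_info);
    pqi_fail_io_queued_for_device(ctrl_info, device);
    rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
    pqi_ctrl_unblock_requests(ctrl_info);

    if (rc == 0)
        rc = _pqi_device_reset(ctrl_info, device);  /* LUN reset + drain */

    mutex_unlock(&ctrl_info->lun_reset_mutex);
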
5752 struct pqi_ctrl_info *ctrl_info;
5756 ctrl_info = shost_to_hba(shost);
5759 dev_err(&ctrl_info->pci_dev->dev,
5763 pqi_check_ctrl_health(ctrl_info);
5764 if (pqi_ctrl_offline(ctrl_info) ||
5765 pqi_device_reset_blocked(ctrl_info)) {
5770 pqi_wait_until_ofa_finished(ctrl_info);
5772 atomic_inc(&ctrl_info->sync_cmds_outstanding);
5773 rc = pqi_device_reset(ctrl_info, device);
5774 atomic_dec(&ctrl_info->sync_cmds_outstanding);
5777 dev_err(&ctrl_info->pci_dev->dev,
5789 struct pqi_ctrl_info *ctrl_info;
5793 ctrl_info = shost_to_hba(sdev->host);
5795 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5800 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5807 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5825 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5832 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5835 ctrl_info->pci_dev, 0);
5852 struct pqi_ctrl_info *ctrl_info;
5854 ctrl_info = shost_to_hba(sdev->host);
5856 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5865 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5868 pqi_dev_info(ctrl_info, "removed", device);
5873 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5883 pci_dev = ctrl_info->pci_dev;
5983 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5994 if (pqi_ctrl_offline(ctrl_info))
6065 rc = pqi_map_single(ctrl_info->pci_dev,
6076 if (ctrl_info->raid_iu_timeout_supported)
6079 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6083 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6128 struct pqi_ctrl_info *ctrl_info;
6130 ctrl_info = shost_to_hba(sdev->host);
6132 if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info))
6139 rc = pqi_scan_scsi_devices(ctrl_info);
6142 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6148 rc = pqi_passthru_ioctl(ctrl_info, arg);
6162 struct pqi_ctrl_info *ctrl_info;
6165 ctrl_info = shost_to_hba(shost);
6167 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6181 struct pqi_ctrl_info *ctrl_info;
6184 ctrl_info = shost_to_hba(shost);
6186 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6193 struct pqi_ctrl_info *ctrl_info;
6196 ctrl_info = shost_to_hba(shost);
6198 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6205 struct pqi_ctrl_info *ctrl_info;
6208 ctrl_info = shost_to_hba(shost);
6210 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6286 struct pqi_ctrl_info *ctrl_info;
6293 ctrl_info = shost_to_hba(sdev->host);
6295 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6299 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6310 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6323 struct pqi_ctrl_info *ctrl_info;
6330 ctrl_info = shost_to_hba(sdev->host);
6332 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6336 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6342 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6352 struct pqi_ctrl_info *ctrl_info;
6365 ctrl_info = shost_to_hba(sdev->host);
6367 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6371 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6388 ctrl_info->scsi_host->host_no,
6427 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6435 struct pqi_ctrl_info *ctrl_info;
6442 ctrl_info = shost_to_hba(sdev->host);
6444 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6448 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6454 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6462 struct pqi_ctrl_info *ctrl_info;
6468 ctrl_info = shost_to_hba(sdev->host);
6470 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6474 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6482 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6490 struct pqi_ctrl_info *ctrl_info;
6497 ctrl_info = shost_to_hba(sdev->host);
6499 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6503 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6512 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6520 struct pqi_ctrl_info *ctrl_info;
6527 ctrl_info = shost_to_hba(sdev->host);
6529 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6533 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6539 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6581 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
6586 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
6588 dev_err(&ctrl_info->pci_dev->dev,
6590 ctrl_info->ctrl_id);
6601 shost->max_sectors = ctrl_info->max_sectors;
6602 shost->can_queue = ctrl_info->scsi_ml_can_queue;
6604 shost->sg_tablesize = ctrl_info->sg_tablesize;
6606 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
6608 shost->nr_hw_queues = ctrl_info->num_queue_groups;
6610 shost->hostdata[0] = (unsigned long)ctrl_info;
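
Note on line 6586: sizeof(ctrl_info) is the size of a pointer, not of struct pqi_ctrl_info, and that appears deliberate; line 6610 stores only the ctrl_info pointer in shost->hostdata[0], so pointer-sized private host data is all the driver needs.
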
6612 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
6614 dev_err(&ctrl_info->pci_dev->dev,
6616 ctrl_info->ctrl_id);
6620 rc = pqi_add_sas_host(shost, ctrl_info);
6622 dev_err(&ctrl_info->pci_dev->dev,
6624 ctrl_info->ctrl_id);
6628 ctrl_info->scsi_host = shost;
6640 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
6644 pqi_delete_sas_host(ctrl_info);
6646 shost = ctrl_info->scsi_host;
6654 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
6662 pqi_registers = ctrl_info->pqi_registers;
6671 pqi_check_ctrl_health(ctrl_info);
6672 if (pqi_ctrl_offline(ctrl_info)) {
6685 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
6690 if (ctrl_info->pqi_reset_quiesce_supported) {
6691 rc = sis_pqi_reset_quiesce(ctrl_info);
6693 dev_err(&ctrl_info->pci_dev->dev,
6704 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
6706 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
6708 dev_err(&ctrl_info->pci_dev->dev,
6714 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
6723 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
6727 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
6729 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
6737 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
6746 rc = pqi_identify_controller(ctrl_info, identify);
6750 memcpy(ctrl_info->firmware_version, identify->firmware_version,
6752 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
6753 snprintf(ctrl_info->firmware_version +
6754 strlen(ctrl_info->firmware_version),
6755 sizeof(ctrl_info->firmware_version) - strlen(ctrl_info->firmware_version),
6758 memcpy(ctrl_info->model, identify->product_id,
6760 ctrl_info->model[sizeof(identify->product_id)] = '\0';
6762 memcpy(ctrl_info->vendor, identify->vendor_id,
6764 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
6773 struct pqi_ctrl_info *ctrl_info;
6826 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
6843 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6847 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
6863 return pqi_config_table_update(ctrl_info,
6873 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
6877 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
6881 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
6887 dev_info(&ctrl_info->pci_dev->dev,
6892 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
6896 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
6901 ctrl_info->soft_reset_handshake_supported =
6905 ctrl_info->raid_iu_timeout_supported =
6909 ctrl_info->tmf_iu_timeout_supported =
6914 pqi_firmware_feature_status(ctrl_info, firmware_feature);
6917 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
6921 firmware_feature->feature_status(ctrl_info, firmware_feature);
6958 struct pqi_ctrl_info *ctrl_info;
6964 ctrl_info = section_info->ctrl_info;
6975 pqi_firmware_feature_update(ctrl_info,
6990 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
6993 dev_err(&ctrl_info->pci_dev->dev,
6998 pqi_firmware_feature_update(ctrl_info,
7012 pqi_firmware_feature_update(ctrl_info,
7036 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
7045 table_length = ctrl_info->config_table_length;
7051 dev_err(&ctrl_info->pci_dev->dev,
7060 table_iomem_addr = ctrl_info->iomem_base +
7061 ctrl_info->config_table_offset;
7064 section_info.ctrl_info = ctrl_info;
7082 dev_warn(&ctrl_info->pci_dev->dev,
7085 ctrl_info->heartbeat_counter =
7093 ctrl_info->soft_reset_status =
7112 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
7116 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
7117 rc = pqi_reset(ctrl_info);
7120 rc = sis_reenable_sis_mode(ctrl_info);
7122 dev_err(&ctrl_info->pci_dev->dev,
7126 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7136 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
7138 if (!sis_is_firmware_running(ctrl_info))
7141 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
7144 if (sis_is_kernel_up(ctrl_info)) {
7145 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7149 return pqi_revert_to_sis_mode(ctrl_info);
7154 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
7159 sis_soft_reset(ctrl_info);
7162 rc = pqi_force_sis_mode(ctrl_info);
7171 rc = sis_wait_for_ctrl_ready(ctrl_info);
7179 rc = sis_get_ctrl_properties(ctrl_info);
7181 dev_err(&ctrl_info->pci_dev->dev,
7186 rc = sis_get_pqi_capabilities(ctrl_info);
7188 dev_err(&ctrl_info->pci_dev->dev,
7194 if (ctrl_info->max_outstanding_requests >
7196 ctrl_info->max_outstanding_requests =
7199 if (ctrl_info->max_outstanding_requests >
7201 ctrl_info->max_outstanding_requests =
7205 pqi_calculate_io_resources(ctrl_info);
7207 rc = pqi_alloc_error_buffer(ctrl_info);
7209 dev_err(&ctrl_info->pci_dev->dev,
7219 rc = sis_init_base_struct_addr(ctrl_info);
7221 dev_err(&ctrl_info->pci_dev->dev,
7227 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7229 dev_err(&ctrl_info->pci_dev->dev,
7235 ctrl_info->pqi_mode_enabled = true;
7236 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7238 rc = pqi_alloc_admin_queues(ctrl_info);
7240 dev_err(&ctrl_info->pci_dev->dev,
7245 rc = pqi_create_admin_queues(ctrl_info);
7247 dev_err(&ctrl_info->pci_dev->dev,
7252 rc = pqi_report_device_capability(ctrl_info);
7254 dev_err(&ctrl_info->pci_dev->dev,
7259 rc = pqi_validate_device_capability(ctrl_info);
7263 pqi_calculate_queue_resources(ctrl_info);
7265 rc = pqi_enable_msix_interrupts(ctrl_info);
7269 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
7270 ctrl_info->max_msix_vectors =
7271 ctrl_info->num_msix_vectors_enabled;
7272 pqi_calculate_queue_resources(ctrl_info);
7275 rc = pqi_alloc_io_resources(ctrl_info);
7279 rc = pqi_alloc_operational_queues(ctrl_info);
7281 dev_err(&ctrl_info->pci_dev->dev,
7286 pqi_init_operational_queues(ctrl_info);
7288 rc = pqi_request_irqs(ctrl_info);
7292 rc = pqi_create_queues(ctrl_info);
7296 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7298 ctrl_info->controller_online = true;
7300 rc = pqi_process_config_table(ctrl_info);
7304 pqi_start_heartbeat_timer(ctrl_info);
7306 rc = pqi_enable_events(ctrl_info);
7308 dev_err(&ctrl_info->pci_dev->dev,
7314 rc = pqi_register_scsi(ctrl_info);
7318 rc = pqi_get_ctrl_product_details(ctrl_info);
7320 dev_err(&ctrl_info->pci_dev->dev,
7325 rc = pqi_get_ctrl_serial_number(ctrl_info);
7327 dev_err(&ctrl_info->pci_dev->dev,
7332 rc = pqi_set_diag_rescan(ctrl_info);
7334 dev_err(&ctrl_info->pci_dev->dev,
7339 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7341 dev_err(&ctrl_info->pci_dev->dev,
7346 pqi_schedule_update_time_worker(ctrl_info);
7348 pqi_scan_scsi_devices(ctrl_info);
7353 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
7359 admin_queues = &ctrl_info->admin_queues;
7364 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
7365 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
7366 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
7367 ctrl_info->queue_groups[i].oq_ci_copy = 0;
7369 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
7370 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
7371 writel(0, ctrl_info->queue_groups[i].oq_pi);
7374 event_queue = &ctrl_info->event_queue;
7379 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
7383 rc = pqi_force_sis_mode(ctrl_info);
7391 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
7399 rc = sis_get_ctrl_properties(ctrl_info);
7401 dev_err(&ctrl_info->pci_dev->dev,
7406 rc = sis_get_pqi_capabilities(ctrl_info);
7408 dev_err(&ctrl_info->pci_dev->dev,
7418 rc = sis_init_base_struct_addr(ctrl_info);
7420 dev_err(&ctrl_info->pci_dev->dev,
7426 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7428 dev_err(&ctrl_info->pci_dev->dev,
7434 ctrl_info->pqi_mode_enabled = true;
7435 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7437 pqi_reinit_queues(ctrl_info);
7439 rc = pqi_create_admin_queues(ctrl_info);
7441 dev_err(&ctrl_info->pci_dev->dev,
7446 rc = pqi_create_queues(ctrl_info);
7450 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7452 ctrl_info->controller_online = true;
7453 pqi_ctrl_unblock_requests(ctrl_info);
7455 rc = pqi_process_config_table(ctrl_info);
7459 pqi_start_heartbeat_timer(ctrl_info);
7461 rc = pqi_enable_events(ctrl_info);
7463 dev_err(&ctrl_info->pci_dev->dev,
7468 rc = pqi_get_ctrl_product_details(ctrl_info);
7470 dev_err(&ctrl_info->pci_dev->dev,
7475 rc = pqi_set_diag_rescan(ctrl_info);
7477 dev_err(&ctrl_info->pci_dev->dev,
7482 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7484 dev_err(&ctrl_info->pci_dev->dev,
7489 pqi_schedule_update_time_worker(ctrl_info);
7491 pqi_scan_scsi_devices(ctrl_info);
7507 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
7512 rc = pci_enable_device(ctrl_info->pci_dev);
7514 dev_err(&ctrl_info->pci_dev->dev,
7524 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
7526 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
7530 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
7532 dev_err(&ctrl_info->pci_dev->dev,
7537 ctrl_info->iomem_base = ioremap(pci_resource_start(
7538 ctrl_info->pci_dev, 0),
7540 if (!ctrl_info->iomem_base) {
7541 dev_err(&ctrl_info->pci_dev->dev,
7550 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
7553 dev_err(&ctrl_info->pci_dev->dev,
7559 pci_set_master(ctrl_info->pci_dev);
7561 ctrl_info->registers = ctrl_info->iomem_base;
7562 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
7564 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
7569 pci_release_regions(ctrl_info->pci_dev);
7571 pci_disable_device(ctrl_info->pci_dev);
7576 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
7578 iounmap(ctrl_info->iomem_base);
7579 pci_release_regions(ctrl_info->pci_dev);
7580 if (pci_is_enabled(ctrl_info->pci_dev))
7581 pci_disable_device(ctrl_info->pci_dev);
7582 pci_set_drvdata(ctrl_info->pci_dev, NULL);
7587 struct pqi_ctrl_info *ctrl_info;
7589 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
7591 if (!ctrl_info)
7594 mutex_init(&ctrl_info->scan_mutex);
7595 mutex_init(&ctrl_info->lun_reset_mutex);
7596 mutex_init(&ctrl_info->ofa_mutex);
7598 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
7599 spin_lock_init(&ctrl_info->scsi_device_list_lock);
7601 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
7602 atomic_set(&ctrl_info->num_interrupts, 0);
7603 atomic_set(&ctrl_info->sync_cmds_outstanding, 0);
7605 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
7606 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
7608 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
7609 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
7611 sema_init(&ctrl_info->sync_request_sem,
7613 init_waitqueue_head(&ctrl_info->block_requests_wait);
7615 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
7616 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
7617 INIT_WORK(&ctrl_info->raid_bypass_retry_work,
7620 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
7621 ctrl_info->irq_mode = IRQ_MODE_NONE;
7622 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
7624 return ctrl_info;
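
pqi_alloc_ctrl_info() (7587-7624) allocates the per-controller state NUMA-locally and initializes every lock, list, work item, and counter before the pointer escapes. A trimmed sketch of the pattern; the fragments above list the full initializer set, only the opening and closing are reconstructed here:

static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
{
        struct pqi_ctrl_info *ctrl_info;

        /* Keep hot per-controller state on the adapter's NUMA node. */
        ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
                GFP_KERNEL, numa_node);
        if (!ctrl_info)
                return NULL;

        /* Every synchronization object is ready before the structure is
         * published; see fragments 7594-7617 for the complete list. */
        mutex_init(&ctrl_info->scan_mutex);
        INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
        spin_lock_init(&ctrl_info->scsi_device_list_lock);
        INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
        timer_setup(&ctrl_info->heartbeat_timer,
                pqi_heartbeat_timer_handler, 0);
        init_waitqueue_head(&ctrl_info->block_requests_wait);

        /* Monotonic controller id; kzalloc already zeroed everything else. */
        ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
        ctrl_info->irq_mode = IRQ_MODE_NONE;
        ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;

        return ctrl_info;
}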
7627 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
7629 kfree(ctrl_info);
7632 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
7634 pqi_free_irqs(ctrl_info);
7635 pqi_disable_msix_interrupts(ctrl_info);
7638 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
7640 pqi_stop_heartbeat_timer(ctrl_info);
7641 pqi_free_interrupts(ctrl_info);
7642 if (ctrl_info->queue_memory_base)
7643 dma_free_coherent(&ctrl_info->pci_dev->dev,
7644 ctrl_info->queue_memory_length,
7645 ctrl_info->queue_memory_base,
7646 ctrl_info->queue_memory_base_dma_handle);
7647 if (ctrl_info->admin_queue_memory_base)
7648 dma_free_coherent(&ctrl_info->pci_dev->dev,
7649 ctrl_info->admin_queue_memory_length,
7650 ctrl_info->admin_queue_memory_base,
7651 ctrl_info->admin_queue_memory_base_dma_handle);
7652 pqi_free_all_io_requests(ctrl_info);
7653 if (ctrl_info->error_buffer)
7654 dma_free_coherent(&ctrl_info->pci_dev->dev,
7655 ctrl_info->error_buffer_length,
7656 ctrl_info->error_buffer,
7657 ctrl_info->error_buffer_dma_handle);
7658 if (ctrl_info->iomem_base)
7659 pqi_cleanup_pci_init(ctrl_info);
7660 pqi_free_ctrl_info(ctrl_info);
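
pqi_free_ctrl_resources() (7638-7660) serves both probe failure and removal, so every dma_free_coherent() is guarded: a NULL pointer simply means that allocation stage was never reached. A commented excerpt of one guarded release, names exactly as in the fragments:

        /* Safe on a partially initialized ctrl_info: queue_memory_base
         * stays NULL until the queue memory is allocated, so this
         * teardown is harmless for early probe failures. */
        if (ctrl_info->queue_memory_base)
                dma_free_coherent(&ctrl_info->pci_dev->dev,
                        ctrl_info->queue_memory_length,
                        ctrl_info->queue_memory_base,
                        ctrl_info->queue_memory_base_dma_handle);

The same triplet pattern (length, virtual address, DMA handle) repeats for the admin queue memory and the error buffer before the PCI resources and the ctrl_info itself are released.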
7663 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
7665 pqi_cancel_rescan_worker(ctrl_info);
7666 pqi_cancel_update_time_worker(ctrl_info);
7667 pqi_unregister_scsi(ctrl_info);
7668 if (ctrl_info->pqi_mode_enabled)
7669 pqi_revert_to_sis_mode(ctrl_info);
7670 pqi_free_ctrl_resources(ctrl_info);
7673 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
7675 pqi_cancel_update_time_worker(ctrl_info);
7676 pqi_cancel_rescan_worker(ctrl_info);
7677 pqi_wait_until_lun_reset_finished(ctrl_info);
7678 pqi_wait_until_scan_finished(ctrl_info);
7679 pqi_ctrl_ofa_start(ctrl_info);
7680 pqi_ctrl_block_requests(ctrl_info);
7681 pqi_ctrl_wait_until_quiesced(ctrl_info);
7682 pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
7683 pqi_fail_io_queued_for_all_devices(ctrl_info);
7684 pqi_wait_until_inbound_queues_empty(ctrl_info);
7685 pqi_stop_heartbeat_timer(ctrl_info);
7686 ctrl_info->pqi_mode_enabled = false;
7687 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7690 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
7692 pqi_ofa_free_host_buffer(ctrl_info);
7693 ctrl_info->pqi_mode_enabled = true;
7694 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7695 ctrl_info->controller_online = true;
7696 pqi_ctrl_unblock_requests(ctrl_info);
7697 pqi_start_heartbeat_timer(ctrl_info);
7698 pqi_schedule_update_time_worker(ctrl_info);
7699 pqi_clear_soft_reset_status(ctrl_info,
7701 pqi_scan_scsi_devices(ctrl_info);
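
Fragments 7673-7701 show that OFA quiesce and unquiesce are deliberate mirrors: quiesce stops background workers first, drains I/O, and drops to SIS mode last, while unquiesce re-enters PQI mode first and restarts workers afterwards. A hedged sketch of the unquiesce side; the second argument to pqi_clear_soft_reset_status() is not in the matched lines and is an assumption:

static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
{
        pqi_ofa_free_host_buffer(ctrl_info);

        /* Reverse of quiesce: the mode flags flip back before I/O resumes. */
        ctrl_info->pqi_mode_enabled = true;
        pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
        ctrl_info->controller_online = true;

        pqi_ctrl_unblock_requests(ctrl_info);
        pqi_start_heartbeat_timer(ctrl_info);
        pqi_schedule_update_time_worker(ctrl_info);

        /* 7699: the status bit to clear is elided in the fragment. */
        pqi_clear_soft_reset_status(ctrl_info,
                PQI_SOFT_RESET_ABORT /* assumed */);

        pqi_scan_scsi_devices(ctrl_info);
}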
7704 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
7714 dev = &ctrl_info->pci_dev->dev;
7719 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7724 ctrl_info->pqi_ofa_chunk_virt_addr =
7726 if (!ctrl_info->pqi_ofa_chunk_virt_addr)
7732 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
7736 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
7757 ctrl_info->pqi_ofa_chunk_virt_addr[i],
7760 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
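
pqi_ofa_alloc_mem() (7704-7760) builds the OFA host buffer from fixed-size DMA chunks and rolls back on partial failure. A simplified sketch of that allocate-with-rollback pattern, using a hypothetical parallel dma_handles[] array for clarity; the real code records the handles in the OFA SG descriptors instead:

static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
        u32 total_size, u32 chunk_size)
{
        struct device *dev = &ctrl_info->pci_dev->dev;
        u32 num_chunks = DIV_ROUND_UP(total_size, chunk_size);
        dma_addr_t *dma_handles;        /* hypothetical, for the sketch */
        int i;

        ctrl_info->pqi_ofa_chunk_virt_addr =
                kcalloc(num_chunks, sizeof(void *), GFP_KERNEL);
        dma_handles = kcalloc(num_chunks, sizeof(*dma_handles), GFP_KERNEL);
        if (!ctrl_info->pqi_ofa_chunk_virt_addr || !dma_handles)
                goto out_free;

        for (i = 0; i < num_chunks; i++) {
                ctrl_info->pqi_ofa_chunk_virt_addr[i] =
                        dma_alloc_coherent(dev, chunk_size,
                                &dma_handles[i], GFP_KERNEL);
                if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
                        goto out_unwind;
                /* Real code: record this chunk's address and length in the
                 * SG descriptors of ctrl_info->pqi_ofa_mem_virt_addr here. */
        }

        /* In the real driver the handles live on in the SG descriptors. */
        kfree(dma_handles);
        return 0;

out_unwind:
        /* Free only the chunks that were actually allocated. */
        while (--i >= 0)
                dma_free_coherent(dev, chunk_size,
                        ctrl_info->pqi_ofa_chunk_virt_addr[i],
                        dma_handles[i]);
out_free:
        kfree(dma_handles);
        kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
        ctrl_info->pqi_ofa_chunk_virt_addr = NULL;
        return -ENOMEM;
}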
7767 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
7774 ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
7778 if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
7784 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
7790 dev = &ctrl_info->pci_dev->dev;
7793 &ctrl_info->pqi_ofa_mem_dma_handle,
7804 ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
7806 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
7814 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
7820 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7832 dma_free_coherent(&ctrl_info->pci_dev->dev,
7834 ctrl_info->pqi_ofa_chunk_virt_addr[i],
7837 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7840 dma_free_coherent(&ctrl_info->pci_dev->dev,
7842 ctrl_info->pqi_ofa_mem_dma_handle);
7843 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
7846 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
7854 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7867 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
7874 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
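
The host-memory-update fragments (7846-7874) show the driver handing the firmware the DMA address of the OFA buffer through a synchronous vendor-general request. A hedged sketch; the struct and field names follow smartpqi conventions but their exact layout, and the trailing arguments at 7874, are assumptions:

static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
{
        struct pqi_vendor_general_request request;
        struct pqi_ofa_memory *ofap;

        memset(&request, 0, sizeof(request));

        ofap = ctrl_info->pqi_ofa_mem_virt_addr;

        /* IU type and function-code setup elided; those values are
         * firmware ABI and not visible in the matched lines. */

        if (ofap) {
                /* Multi-byte fields are little-endian on the wire, hence
                 * the put_unaligned_le*() accessors (7867). */
                put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
                        &request.data.ofa_memory_allocation.buffer_address);
        }

        /* Block until the firmware acknowledges the new buffer. */
        return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
                0, NULL, NO_TIMEOUT);
}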
7878 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
7881 return pqi_ctrl_init_resume(ctrl_info);
7904 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
7910 for (i = 0; i < ctrl_info->max_io_slots; i++) {
7911 io_request = &ctrl_info->io_request_pool[i];
7929 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
7932 pqi_stop_heartbeat_timer(ctrl_info);
7933 pqi_free_interrupts(ctrl_info);
7934 pqi_cancel_rescan_worker(ctrl_info);
7935 pqi_cancel_update_time_worker(ctrl_info);
7936 pqi_ctrl_wait_until_quiesced(ctrl_info);
7937 pqi_fail_all_outstanding_requests(ctrl_info);
7938 pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
7939 pqi_ctrl_unblock_requests(ctrl_info);
7944 struct pqi_ctrl_info *ctrl_info;
7946 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
7947 pqi_take_ctrl_offline_deferred(ctrl_info);
7950 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
7952 if (!ctrl_info->controller_online)
7955 ctrl_info->controller_online = false;
7956 ctrl_info->pqi_mode_enabled = false;
7957 pqi_ctrl_block_requests(ctrl_info);
7959 sis_shutdown_ctrl(ctrl_info);
7960 pci_disable_device(ctrl_info->pci_dev);
7961 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
7962 schedule_work(&ctrl_info->ctrl_offline_work);
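
Fragments 7929-7962 split taking a controller offline into two phases: pqi_take_ctrl_offline() does only what is safe from any context (flag flips, blocking new requests, a SIS shutdown, disabling the PCI device), then punts the blocking teardown to a work item. A sketch of the pair; the early return at 7952 makes the call idempotent, which matters because the health check can fire repeatedly:

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
        /* Idempotent: later health-check failures become no-ops. */
        if (!ctrl_info->controller_online)
                return;

        ctrl_info->controller_online = false;
        ctrl_info->pqi_mode_enabled = false;
        pqi_ctrl_block_requests(ctrl_info);

        sis_shutdown_ctrl(ctrl_info);
        pci_disable_device(ctrl_info->pci_dev);
        dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");

        /* The sleeping work (timer and IRQ teardown, failing all
         * outstanding requests) runs later from process context. */
        schedule_work(&ctrl_info->ctrl_offline_work);
}

static void pqi_ctrl_offline_worker(struct work_struct *work)
{
        struct pqi_ctrl_info *ctrl_info;

        ctrl_info = container_of(work, struct pqi_ctrl_info,
                ctrl_offline_work);
        pqi_take_ctrl_offline_deferred(ctrl_info);
}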
7983 struct pqi_ctrl_info *ctrl_info;
8007 ctrl_info = pqi_alloc_ctrl_info(node);
8008 if (!ctrl_info) {
8014 ctrl_info->pci_dev = pci_dev;
8016 rc = pqi_pci_init(ctrl_info);
8020 rc = pqi_ctrl_init(ctrl_info);
8027 pqi_remove_ctrl(ctrl_info);
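
The probe fragments (7983-8027) lean on pqi_remove_ctrl() as a catch-all error path: because every teardown in pqi_free_ctrl_resources() is NULL-guarded, one exit routine is safe at any failure depth. A hedged sketch of that shape; the device-table matching is elided and the NUMA-node lookup is an assumption:

static int pqi_pci_probe(struct pci_dev *pci_dev,
        const struct pci_device_id *id)
{
        int rc;
        int node = dev_to_node(&pci_dev->dev);  /* assumed lookup */
        struct pqi_ctrl_info *ctrl_info;

        ctrl_info = pqi_alloc_ctrl_info(node);
        if (!ctrl_info) {
                dev_err(&pci_dev->dev,
                        "failed to allocate controller info block\n");
                return -ENOMEM;
        }

        ctrl_info->pci_dev = pci_dev;

        rc = pqi_pci_init(ctrl_info);
        if (rc)
                goto error;

        rc = pqi_ctrl_init(ctrl_info);
        if (rc)
                goto error;

        return 0;

error:
        /* One unwinding path; the NULL guards in the free routines make
         * it correct regardless of how far initialization got. */
        pqi_remove_ctrl(ctrl_info);
        return rc;
}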
8034 struct pqi_ctrl_info *ctrl_info;
8036 ctrl_info = pci_get_drvdata(pci_dev);
8037 if (!ctrl_info)
8040 ctrl_info->in_shutdown = true;
8042 pqi_remove_ctrl(ctrl_info);
8045 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
8051 for (i = 0; i < ctrl_info->max_io_slots; i++) {
8052 io_request = &ctrl_info->io_request_pool[i];
8064 struct pqi_ctrl_info *ctrl_info;
8066 ctrl_info = pci_get_drvdata(pci_dev);
8067 if (!ctrl_info) {
8073 pqi_disable_events(ctrl_info);
8074 pqi_wait_until_ofa_finished(ctrl_info);
8075 pqi_cancel_update_time_worker(ctrl_info);
8076 pqi_cancel_rescan_worker(ctrl_info);
8077 pqi_cancel_event_worker(ctrl_info);
8079 pqi_ctrl_shutdown_start(ctrl_info);
8080 pqi_ctrl_wait_until_quiesced(ctrl_info);
8082 rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
8089 pqi_ctrl_block_device_reset(ctrl_info);
8090 pqi_wait_until_lun_reset_finished(ctrl_info);
8096 rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
8101 pqi_ctrl_block_requests(ctrl_info);
8103 rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
8110 pqi_crash_if_pending_command(ctrl_info);
8111 pqi_reset(ctrl_info);
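
The shutdown fragments (8034-8111) drain in dependency order: stop the event and worker machinery, quiesce, wait out in-flight I/O and LUN resets, flush the cache with the SHUTDOWN reason so the firmware knows power may drop, then verify nothing is pending before the final reset. A commented excerpt of the core ordering; the error logging between steps is elided:

        pqi_disable_events(ctrl_info);
        pqi_wait_until_ofa_finished(ctrl_info);
        pqi_cancel_update_time_worker(ctrl_info);
        pqi_cancel_rescan_worker(ctrl_info);
        pqi_cancel_event_worker(ctrl_info);

        pqi_ctrl_shutdown_start(ctrl_info);
        pqi_ctrl_wait_until_quiesced(ctrl_info);

        /* Outstanding I/O must retire before the cache flush, or the
         * flush could race with writes still in flight. */
        rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);

        pqi_ctrl_block_device_reset(ctrl_info);
        pqi_wait_until_lun_reset_finished(ctrl_info);

        /* SHUTDOWN (vs. SUSPEND at 8150) tells firmware power may be cut. */
        rc = pqi_flush_cache(ctrl_info, SHUTDOWN);

        pqi_ctrl_block_requests(ctrl_info);
        rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);

        /* Refuse to reset over a still-queued command (8045-8052 scan the
         * io_request pool); resetting now would silently drop it. */
        pqi_crash_if_pending_command(ctrl_info);
        pqi_reset(ctrl_info);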
8140 struct pqi_ctrl_info *ctrl_info;
8142 ctrl_info = pci_get_drvdata(pci_dev);
8144 pqi_disable_events(ctrl_info);
8145 pqi_cancel_update_time_worker(ctrl_info);
8146 pqi_cancel_rescan_worker(ctrl_info);
8147 pqi_wait_until_scan_finished(ctrl_info);
8148 pqi_wait_until_lun_reset_finished(ctrl_info);
8149 pqi_wait_until_ofa_finished(ctrl_info);
8150 pqi_flush_cache(ctrl_info, SUSPEND);
8151 pqi_ctrl_block_requests(ctrl_info);
8152 pqi_ctrl_wait_until_quiesced(ctrl_info);
8153 pqi_wait_until_inbound_queues_empty(ctrl_info);
8154 pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
8155 pqi_stop_heartbeat_timer(ctrl_info);
8163 ctrl_info->controller_online = false;
8164 ctrl_info->pqi_mode_enabled = false;
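
The suspend fragments (8140-8164) run nearly the same drain as shutdown, but flush with the SUSPEND reason and keep all host allocations so resume can rebuild quickly; clearing controller_online and pqi_mode_enabled at the end records that the hardware context is gone even though the host state remains. A sketch of the tail; the PCI power-state calls are assumptions, not matched lines:

        pqi_flush_cache(ctrl_info, SUSPEND);
        pqi_ctrl_block_requests(ctrl_info);
        pqi_ctrl_wait_until_quiesced(ctrl_info);
        pqi_wait_until_inbound_queues_empty(ctrl_info);
        pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
        pqi_stop_heartbeat_timer(ctrl_info);

        /* Host-side bookkeeping survives; hardware context does not. */
        ctrl_info->controller_online = false;
        ctrl_info->pqi_mode_enabled = false;

        /* Assumed wrap-up for a legacy PM suspend hook: */
        pci_save_state(pci_dev);
        pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));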
8172 struct pqi_ctrl_info *ctrl_info;
8174 ctrl_info = pci_get_drvdata(pci_dev);
8177 ctrl_info->max_hw_queue_index = 0;
8178 pqi_free_interrupts(ctrl_info);
8179 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
8182 &ctrl_info->queue_groups[0]);
8184 dev_err(&ctrl_info->pci_dev->dev,
8189 pqi_start_heartbeat_timer(ctrl_info);
8190 pqi_ctrl_unblock_requests(ctrl_info);
8197 return pqi_ctrl_init_resume(ctrl_info);
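
The resume fragments (8172-8197) show two paths: a light path that keeps the existing PQI state but falls back to a single legacy INTx vector (8177-8190), and the full pqi_ctrl_init_resume() re-initialization at 8197. A hedged sketch; the branch condition, the request_irq() details, and the PCI restore calls are assumptions:

static int pqi_resume(struct pci_dev *pci_dev)
{
        int rc;
        struct pqi_ctrl_info *ctrl_info;

        ctrl_info = pci_get_drvdata(pci_dev);

        /* Branch condition assumed: one path reuses the surviving PQI
         * state with a legacy interrupt, the other rebuilds everything. */
        if (pci_dev->current_state != PCI_D0) {
                ctrl_info->max_hw_queue_index = 0;
                pqi_free_interrupts(ctrl_info);
                pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);

                /* One shared legacy vector serving queue group 0 only. */
                rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
                        IRQF_SHARED, DRIVER_NAME_SHORT,
                        &ctrl_info->queue_groups[0]);
                if (rc) {
                        dev_err(&ctrl_info->pci_dev->dev,
                                "irq %u init failed\n", pci_dev->irq);
                        return rc;
                }

                pqi_start_heartbeat_timer(ctrl_info);
                pqi_ctrl_unblock_requests(ctrl_info);
                return 0;
        }

        /* Full power loss: restore PCI state, then redo the handshake. */
        pci_set_power_state(pci_dev, PCI_D0);
        pci_restore_state(pci_dev);

        return pqi_ctrl_init_resume(ctrl_info);
}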