Lines matching defs: ihost

181 static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
183 u32 get_value = ihost->completion_queue_get;
187 COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
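The test at 187 is the cycle-bit (phase-bit) idiom for a hardware-produced ring: the SCU writes each completion entry with a phase bit that flips on every wrap, so the driver detects a fresh entry by comparing the entry's phase against the phase recorded in its own get pointer, with no shared count register. A minimal userspace model of that check (RING_SIZE, CYCLE_BIT and struct ring are illustrative stand-ins, not the driver's types):

#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 128               /* power of two */
#define CYCLE_BIT 0x80000000u       /* phase bit, flipped by the producer on wrap */

struct ring {
	uint32_t entries[RING_SIZE];
	uint32_t get;               /* low bits: index; bit 31: expected phase */
};

/* An entry is fresh when its phase matches the phase the consumer
 * expects; compare with lines 183-187 above. */
static bool ring_has_entries(const struct ring *r)
{
	uint32_t idx = r->get & (RING_SIZE - 1);

	return (r->entries[idx] & CYCLE_BIT) == (r->get & CYCLE_BIT);
}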
193 static bool sci_controller_isr(struct isci_host *ihost)
195 if (sci_controller_completion_queue_has_entries(ihost))
202 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
210 spin_lock(&ihost->scic_lock);
211 if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
212 writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
213 writel(0, &ihost->smu_registers->interrupt_mask);
215 spin_unlock(&ihost->scic_lock);
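On the spurious-interrupt path, the handler briefly masks and then unmasks completion interrupts under scic_lock (212-213); per the commentary in the full source, this works around a hardware race in which an event arriving while the interrupt source is being cleared would otherwise never regenerate an MSI-X message. A sketch of the re-arm, with the 0xFF000000 completion mask taken from the listing:

#include <linux/io.h>

/* Toggling the mask makes the SCU re-issue an interrupt message for any
 * event that raced with the handler, instead of leaving it latched. */
static void rearm_completion_irq(void __iomem *interrupt_mask)
{
	writel(0xFF000000, interrupt_mask);	/* mask completion sources */
	writel(0, interrupt_mask);		/* unmask; pending events re-fire */
}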
222 struct isci_host *ihost = data;
224 if (sci_controller_isr(ihost))
225 tasklet_schedule(&ihost->completion_tasklet);
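Lines 222-225 show the usual split between a hard-IRQ top half and a tasklet bottom half: the ISR only checks and acknowledges, then defers the completion-queue walk to softirq context. A generic sketch of that shape (the demo_* names are hypothetical, not the driver's):

#include <linux/interrupt.h>
#include <linux/types.h>

struct demo_host {
	struct tasklet_struct completion_tasklet;
};

static bool demo_completion_pending(struct demo_host *h)
{
	return true;	/* stand-in for the cycle-bit check sketched earlier */
}

/* Hard-IRQ top half: decide cheaply whether work is pending, then defer
 * the actual completion processing to tasklet (softirq) context. */
static irqreturn_t demo_msix_isr(int vec, void *data)
{
	struct demo_host *h = data;

	if (!demo_completion_pending(h))
		return IRQ_NONE;	/* spurious: not ours */

	tasklet_schedule(&h->completion_tasklet);
	return IRQ_HANDLED;
}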
230 static bool sci_controller_error_isr(struct isci_host *ihost)
235 readl(&ihost->smu_registers->interrupt_status);
251 writel(0xff, &ihost->smu_registers->interrupt_mask);
252 writel(0, &ihost->smu_registers->interrupt_mask);
257 static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
260 struct isci_request *ireq = ihost->reqs[index];
265 ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
272 static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
283 ireq = ihost->reqs[index];
284 dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
293 idev = ihost->device_table[index];
294 dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
301 dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
307 static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
320 frame_header = ihost->uf_control.buffers.array[frame_index].header;
321 ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
328 sci_controller_release_frame(ihost, frame_index);
334 iphy = &ihost->phys[index];
346 iphy = &ihost->phys[index];
349 if (index < ihost->remote_node_entries)
350 idev = ihost->device_table[index];
357 sci_controller_release_frame(ihost, frame_index);
368 static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
380 dev_err(&ihost->pdev->dev,
384 ihost,
394 dev_err(&ihost->pdev->dev,
398 ihost,
403 ireq = ihost->reqs[index];
411 ireq = ihost->reqs[index];
415 dev_warn(&ihost->pdev->dev,
420 ihost,
426 idev = ihost->device_table[index];
430 dev_warn(&ihost->pdev->dev,
435 ihost,
452 iphy = &ihost->phys[index];
459 if (index < ihost->remote_node_entries) {
460 idev = ihost->device_table[index];
465 dev_err(&ihost->pdev->dev,
470 ihost,
477 dev_warn(&ihost->pdev->dev,
485 static void sci_controller_process_completions(struct isci_host *ihost)
494 dev_dbg(&ihost->pdev->dev,
497 ihost->completion_queue_get);
500 get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
501 get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
503 event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
504 event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;
508 == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
512 ent = ihost->completion_queue[get_index];
519 dev_dbg(&ihost->pdev->dev,
526 sci_controller_task_completion(ihost, ent);
530 sci_controller_sdma_completion(ihost, ent);
534 sci_controller_unsolicited_frame(ihost, ent);
538 sci_controller_event_completion(ihost, ent);
546 sci_controller_event_completion(ihost, ent);
550 dev_warn(&ihost->pdev->dev,
561 ihost->completion_queue_get =
569 writel(ihost->completion_queue_get,
570 &ihost->smu_registers->completion_queue_get);
574 dev_dbg(&ihost->pdev->dev,
577 ihost->completion_queue_get);
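The function at 485-577 walks the ring until the phase check fails, then publishes its progress by writing the updated get pointer (current phase | index) back to the completion_queue_get register at 569-570. A compact userspace model of that bookkeeping, reusing RING_SIZE and CYCLE_BIT from the first sketch; the real function additionally tracks a separate event get pointer with its own cycle bit (503-504), omitted here:

/* Consume entries until the phase check fails; return the new get value
 * (phase bit | index) for the caller to write back to the hardware. */
static uint32_t ring_consume(const uint32_t *entries, uint32_t get,
			     void (*handle)(uint32_t ent))
{
	for (;;) {
		uint32_t idx = get & (RING_SIZE - 1);
		uint32_t cycle = get & CYCLE_BIT;

		if ((entries[idx] & CYCLE_BIT) != cycle)
			break;			/* ring drained */

		handle(entries[idx]);

		if (++idx == RING_SIZE) {	/* wrap: flip expected phase */
			idx = 0;
			cycle ^= CYCLE_BIT;
		}
		get = cycle | idx;
	}
	return get;
}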
581 static void sci_controller_error_handler(struct isci_host *ihost)
586 readl(&ihost->smu_registers->interrupt_status);
589 sci_controller_completion_queue_has_entries(ihost)) {
591 sci_controller_process_completions(ihost);
592 writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
594 dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
597 sci_change_state(&ihost->sm, SCIC_FAILED);
605 writel(0, &ihost->smu_registers->interrupt_mask);
611 struct isci_host *ihost = data;
613 if (sci_controller_isr(ihost)) {
614 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
615 tasklet_schedule(&ihost->completion_tasklet);
617 } else if (sci_controller_error_isr(ihost)) {
618 spin_lock(&ihost->scic_lock);
619 sci_controller_error_handler(ihost);
620 spin_unlock(&ihost->scic_lock);
629 struct isci_host *ihost = data;
631 if (sci_controller_error_isr(ihost))
632 sci_controller_error_handler(ihost);
645 static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
648 dev_info(&ihost->pdev->dev,
650 clear_bit(IHOST_START_PENDING, &ihost->flags);
651 wake_up(&ihost->eventq);
657 struct isci_host *ihost = ha->lldd_ha;
659 if (test_bit(IHOST_START_PENDING, &ihost->flags))
679 static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
682 if (!ihost)
704 static void sci_controller_enable_interrupts(struct isci_host *ihost)
706 set_bit(IHOST_IRQ_ENABLED, &ihost->flags);
707 writel(0, &ihost->smu_registers->interrupt_mask);
710 void sci_controller_disable_interrupts(struct isci_host *ihost)
712 clear_bit(IHOST_IRQ_ENABLED, &ihost->flags);
713 writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
714 readl(&ihost->smu_registers->interrupt_mask); /* flush */
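The readl() at 714 is the standard posted-write flush: MMIO writes over PCI may be buffered, and a read from the same device forces them to complete before the CPU proceeds. The enable path at 706-707 needs no flush, but disable must be synchronous so callers can rely on no further interrupts arriving. A minimal illustration of the idiom:

#include <linux/io.h>

static void demo_disable_interrupts(void __iomem *interrupt_mask)
{
	writel(0xffffffff, interrupt_mask);	/* mask everything */
	(void)readl(interrupt_mask);		/* flush the posted write */
}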
717 static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
722 readl(&ihost->scu_registers->peg0.ptsg.control);
727 &ihost->scu_registers->peg0.ptsg.control);
730 static void sci_controller_assign_task_entries(struct isci_host *ihost)
740 readl(&ihost->smu_registers->task_context_assignment[0]);
743 (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) |
747 &ihost->smu_registers->task_context_assignment[0]);
751 static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
758 ihost->completion_queue_get = 0;
765 &ihost->smu_registers->completion_queue_control);
777 &ihost->smu_registers->completion_queue_get);
786 &ihost->smu_registers->completion_queue_put);
794 ihost->completion_queue[index] = 0x80000000;
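Lines 758 and 794 together establish the empty state: the get pointer starts at phase 0 while every queue entry is written with the cycle bit set (0x80000000, phase 1), so the phase comparison fails everywhere until the hardware posts real entries. In the model used above:

/* Prime the ring as empty: entries carry phase 1, the consumer's get
 * pointer starts at 0 (index 0, phase 0), so no entry looks fresh. */
static void ring_init(struct ring *r)
{
	for (unsigned int i = 0; i < RING_SIZE; i++)
		r->entries[i] = CYCLE_BIT;
	r->get = 0;
}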
798 static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
809 &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
818 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
822 &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
825 void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
827 if (ihost->sm.current_state_id == SCIC_STARTING) {
832 sci_change_state(&ihost->sm, SCIC_READY);
834 isci_host_start_complete(ihost, status);
861 bool is_controller_start_complete(struct isci_host *ihost)
866 struct isci_phy *iphy = &ihost->phys[i];
873 if (is_port_config_apc(ihost))
887 (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask))
902 static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
904 struct sci_oem_params *oem = &ihost->oem_parameters;
910 if (ihost->phy_startup_timer_pending)
913 if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
914 if (is_controller_start_complete(ihost)) {
915 sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
916 sci_del_timer(&ihost->phy_timer);
917 ihost->phy_startup_timer_pending = false;
920 iphy = &ihost->phys[ihost->next_phy_to_start];
924 ihost->next_phy_to_start++;
935 return sci_controller_start_next_phy(ihost);
942 sci_mod_timer(&ihost->phy_timer,
944 ihost->phy_startup_timer_pending = true;
946 dev_warn(&ihost->pdev->dev,
951 ihost->phys[ihost->next_phy_to_start].phy_index,
955 ihost->next_phy_to_start++;
964 struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
968 spin_lock_irqsave(&ihost->scic_lock, flags);
973 ihost->phy_startup_timer_pending = false;
976 status = sci_controller_start_next_phy(ihost);
980 spin_unlock_irqrestore(&ihost->scic_lock, flags);
983 static u16 isci_tci_active(struct isci_host *ihost)
985 return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
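isci_tci_active() leans on the kernel's circ_buf macros: allocation advances tci_head and free advances tci_tail (see 2551-2564 below), and because SCI_MAX_IO_REQUESTS is a power of two, the raw difference modulo the size yields the in-flight count even as the indices wrap. The macros expand to roughly:

/* From <linux/circ_buf.h>; size must be a power of two. */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

/* head - tail == allocations - frees, i.e. tags currently in flight. */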
988 static enum sci_status sci_controller_start(struct isci_host *ihost,
994 if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
995 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
996 __func__, ihost->sm.current_state_id);
1001 BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
1002 ihost->tci_head = 0;
1003 ihost->tci_tail = 0;
1004 for (index = 0; index < ihost->task_context_entries; index++)
1005 isci_tci_free(ihost, index);
1008 sci_remote_node_table_initialize(&ihost->available_remote_nodes,
1009 ihost->remote_node_entries);
1015 sci_controller_disable_interrupts(ihost);
1018 sci_controller_enable_port_task_scheduler(ihost);
1020 /* Assign all the task entries to this ihost's physical function */
1021 sci_controller_assign_task_entries(ihost);
1024 sci_controller_initialize_completion_queue(ihost);
1027 sci_controller_initialize_unsolicited_frame_queue(ihost);
1030 for (index = 0; index < ihost->logical_port_entries; index++) {
1031 struct isci_port *iport = &ihost->ports[index];
1038 sci_controller_start_next_phy(ihost);
1040 sci_mod_timer(&ihost->timer, timeout);
1042 sci_change_state(&ihost->sm, SCIC_STARTING);
1049 struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
1050 unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
1052 set_bit(IHOST_START_PENDING, &ihost->flags);
1054 spin_lock_irq(&ihost->scic_lock);
1055 sci_controller_start(ihost, tmo);
1056 sci_controller_enable_interrupts(ihost);
1057 spin_unlock_irq(&ihost->scic_lock);
1060 static void isci_host_stop_complete(struct isci_host *ihost)
1062 sci_controller_disable_interrupts(ihost);
1063 clear_bit(IHOST_STOP_PENDING, &ihost->flags);
1064 wake_up(&ihost->eventq);
1067 static void sci_controller_completion_handler(struct isci_host *ihost)
1070 if (sci_controller_completion_queue_has_entries(ihost))
1071 sci_controller_process_completions(ihost);
1074 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
1076 writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
1077 writel(0, &ihost->smu_registers->interrupt_mask);
1080 void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
1086 dev_dbg(&ihost->pdev->dev,
1092 dev_dbg(&ihost->pdev->dev,
1103 wake_up_all(&ihost->eventq);
1106 isci_free_tag(ihost, ireq->io_tag);
1118 struct isci_host *ihost = (struct isci_host *)data;
1121 spin_lock_irq(&ihost->scic_lock);
1122 sci_controller_completion_handler(ihost);
1123 spin_unlock_irq(&ihost->scic_lock);
1129 active = isci_tci_active(ihost) - SCI_MAX_PORTS;
1137 &ihost->smu_registers->interrupt_coalesce_control);
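The completion tasklet retunes interrupt coalescing on every pass: the coalesce count follows the number of outstanding task contexts (the - SCI_MAX_PORTS at 1129 discounts the driver's dummy TCs, per the full source's comments), and the timer field grows with ilog2 of the load, since each timer encoding step doubles the timeout. A hedged sketch of the computation; the bit layout of the control word below is invented, the real one comes from SMU_ICC_GEN_VAL():

#include <stdint.h>

static unsigned int ilog2_u32(uint32_t v)	/* floor(log2(v)) for v > 0 */
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Illustrative layout only: NUMBER in bits 15:8, TIMER in bits 4:0. */
static uint32_t coalesce_control(uint32_t active, uint32_t base_timer)
{
	return (active << 8) | (base_timer + ilog2_u32(active));
}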
1158 static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
1160 if (ihost->sm.current_state_id != SCIC_READY) {
1161 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1162 __func__, ihost->sm.current_state_id);
1166 sci_mod_timer(&ihost->timer, timeout);
1167 sci_change_state(&ihost->sm, SCIC_STOPPING);
1183 static enum sci_status sci_controller_reset(struct isci_host *ihost)
1185 switch (ihost->sm.current_state_id) {
1194 sci_change_state(&ihost->sm, SCIC_RESETTING);
1197 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1198 __func__, ihost->sm.current_state_id);
1203 static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
1212 phy_status = sci_phy_stop(&ihost->phys[index]);
1218 dev_warn(&ihost->pdev->dev,
1222 ihost->phys[index].phy_index, phy_status);
1232 * @ihost: host to take down
1241 void isci_host_deinit(struct isci_host *ihost)
1246 for (i = 0; i < isci_gpio_count(ihost); i++)
1247 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
1249 set_bit(IHOST_STOP_PENDING, &ihost->flags);
1251 spin_lock_irq(&ihost->scic_lock);
1252 sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
1253 spin_unlock_irq(&ihost->scic_lock);
1255 wait_for_stop(ihost);
1262 sci_controller_stop_phys(ihost);
1267 writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
1269 spin_lock_irq(&ihost->scic_lock);
1270 sci_controller_reset(ihost);
1271 spin_unlock_irq(&ihost->scic_lock);
1274 for (i = 0; i < ihost->logical_port_entries; i++) {
1275 struct isci_port *iport = &ihost->ports[i];
1281 struct isci_phy *iphy = &ihost->phys[i];
1285 del_timer_sync(&ihost->port_agent.timer.timer);
1287 del_timer_sync(&ihost->power_control.timer.timer);
1289 del_timer_sync(&ihost->timer.timer);
1291 del_timer_sync(&ihost->phy_timer.timer);
1312 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1314 sci_change_state(&ihost->sm, SCIC_RESET);
1319 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1321 sci_del_timer(&ihost->timer);
1349 sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
1434 &ihost->smu_registers->interrupt_coalesce_control);
1437 ihost->interrupt_coalesce_number = (u16)coalesce_number;
1438 ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
1446 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1450 val = readl(&ihost->smu_registers->clock_gating_control);
1455 writel(val, &ihost->smu_registers->clock_gating_control);
1458 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1463 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1466 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1469 static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
1475 for (index = 0; index < ihost->logical_port_entries; index++) {
1476 struct isci_port *iport = &ihost->ports[index];
1484 dev_warn(&ihost->pdev->dev,
1496 static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
1504 for (index = 0; index < ihost->remote_node_entries; index++) {
1505 if (ihost->device_table[index] != NULL) {
1507 device_status = sci_remote_device_stop(ihost->device_table[index], 0);
1511 dev_warn(&ihost->pdev->dev,
1516 ihost->device_table[index], device_status);
1526 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1528 sci_controller_stop_devices(ihost);
1529 sci_controller_stop_ports(ihost);
1531 if (!sci_controller_has_remote_devices_stopping(ihost))
1532 isci_host_stop_complete(ihost);
1537 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1539 sci_del_timer(&ihost->timer);
1542 static void sci_controller_reset_hardware(struct isci_host *ihost)
1545 sci_controller_disable_interrupts(ihost);
1548 writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
1554 writel(0x00000000, &ihost->smu_registers->completion_queue_get);
1557 writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
1560 writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
1565 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1567 sci_controller_reset_hardware(ihost);
1568 sci_change_state(&ihost->sm, SCIC_RESET);
1598 struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
1599 struct sci_base_state_machine *sm = &ihost->sm;
1602 spin_lock_irqsave(&ihost->scic_lock, flags);
1608 sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
1611 isci_host_stop_complete(ihost);
1613 dev_err(&ihost->pdev->dev,
1619 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1622 static enum sci_status sci_controller_construct(struct isci_host *ihost,
1628 sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
1630 ihost->scu_registers = scu_base;
1631 ihost->smu_registers = smu_base;
1633 sci_port_configuration_agent_construct(&ihost->port_agent);
1637 sci_port_construct(&ihost->ports[i], i, ihost);
1638 sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
1643 sci_phy_construct(&ihost->phys[i],
1644 &ihost->ports[SCI_MAX_PORTS], i);
1647 ihost->invalid_phy_mask = 0;
1649 sci_init_timer(&ihost->timer, controller_timeout);
1651 return sci_controller_reset(ihost);
1730 static u8 max_spin_up(struct isci_host *ihost)
1732 if (ihost->user_parameters.max_concurr_spinup)
1733 return min_t(u8, ihost->user_parameters.max_concurr_spinup,
1736 return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up,
1743 struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
1748 spin_lock_irqsave(&ihost->scic_lock, flags);
1753 ihost->power_control.phys_granted_power = 0;
1755 if (ihost->power_control.phys_waiting == 0) {
1756 ihost->power_control.timer_started = false;
1762 if (ihost->power_control.phys_waiting == 0)
1765 iphy = ihost->power_control.requesters[i];
1769 if (ihost->power_control.phys_granted_power >= max_spin_up(ihost))
1772 ihost->power_control.requesters[i] = NULL;
1773 ihost->power_control.phys_waiting--;
1774 ihost->power_control.phys_granted_power++;
1781 struct isci_phy *requester = ihost->power_control.requesters[j];
1794 ihost->power_control.requesters[j] = NULL;
1795 ihost->power_control.phys_waiting--;
1808 ihost->power_control.timer_started = true;
1811 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1814 void sci_controller_power_control_queue_insert(struct isci_host *ihost,
1819 if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
1820 ihost->power_control.phys_granted_power++;
1827 if (ihost->power_control.timer_started)
1828 sci_del_timer(&ihost->power_control.timer);
1830 sci_mod_timer(&ihost->power_control.timer,
1832 ihost->power_control.timer_started = true;
1844 current_phy = &ihost->phys[i];
1860 ihost->power_control.requesters[iphy->phy_index] = iphy;
1861 ihost->power_control.phys_waiting++;
1866 void sci_controller_power_control_queue_remove(struct isci_host *ihost,
1871 if (ihost->power_control.requesters[iphy->phy_index])
1872 ihost->power_control.phys_waiting--;
1874 ihost->power_control.requesters[iphy->phy_index] = NULL;
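The queue_insert/queue_remove pair implements staggered spin-up: at most max_spin_up(ihost) phys receive power immediately (1819-1820), and the rest park in a per-phy-index slot (1860-1861) until the power-control timer hands out further grants. A userspace model of the insert side; the types and the limit are illustrative, the driver reads its limit from user/OEM parameters:

#include <stdbool.h>

#define NPHYS		4
#define MAX_SPIN_UP	1	/* illustrative */

struct power_ctl {
	void *requesters[NPHYS];	/* waiting phys, indexed by phy number */
	unsigned int phys_waiting;
	unsigned int phys_granted_power;
};

/* Returns true if the phy may spin up now; otherwise it is queued and
 * the (not modeled) timer grants power as the budget frees up. */
static bool power_request(struct power_ctl *pc, unsigned int phy, void *iphy)
{
	if (pc->phys_granted_power < MAX_SPIN_UP) {
		pc->phys_granted_power++;
		return true;
	}
	if (!pc->requesters[phy]) {
		pc->requesters[phy] = iphy;
		pc->phys_waiting++;
	}
	return false;
}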
1895 static unsigned char *to_cable_select(struct isci_host *ihost)
1899 + ihost->id;
1901 return &ihost->oem_parameters.controller.cable_selection_mask;
1904 enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
1906 return decode_selection_byte(phy, *to_cable_select(ihost));
1923 static void sci_controller_afe_initialization(struct isci_host *ihost)
1925 struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
1926 const struct sci_oem_params *oem = &ihost->oem_parameters;
1927 struct pci_dev *pdev = ihost->pdev;
1930 unsigned char cable_selection_mask = *to_cable_select(ihost);
2124 static void sci_controller_initialize_power_control(struct isci_host *ihost)
2126 sci_init_timer(&ihost->power_control.timer, power_control_timeout);
2128 memset(ihost->power_control.requesters, 0,
2129 sizeof(ihost->power_control.requesters));
2131 ihost->power_control.phys_waiting = 0;
2132 ihost->power_control.phys_granted_power = 0;
2135 static enum sci_status sci_controller_initialize(struct isci_host *ihost)
2137 struct sci_base_state_machine *sm = &ihost->sm;
2141 if (ihost->sm.current_state_id != SCIC_RESET) {
2142 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2143 __func__, ihost->sm.current_state_id);
2149 sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
2151 ihost->next_phy_to_start = 0;
2152 ihost->phy_startup_timer_pending = false;
2154 sci_controller_initialize_power_control(ihost);
2161 sci_controller_afe_initialization(ihost);
2165 writel(0, &ihost->smu_registers->soft_reset_control);
2175 status = readl(&ihost->smu_registers->control_status);
2186 val = readl(&ihost->smu_registers->device_context_capacity);
2189 ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
2190 ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
2191 ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
2197 for (i = 0; i < ihost->logical_port_entries; i++) {
2199 *ptsg = &ihost->scu_registers->peg0.ptsg;
2205 val = readl(&ihost->scu_registers->sdma.pdma_configuration);
2207 writel(val, &ihost->scu_registers->sdma.pdma_configuration);
2209 val = readl(&ihost->scu_registers->sdma.cdma_configuration);
2211 writel(val, &ihost->scu_registers->sdma.cdma_configuration);
2218 result = sci_phy_initialize(&ihost->phys[i],
2219 &ihost->scu_registers->peg0.pe[i].tl,
2220 &ihost->scu_registers->peg0.pe[i].ll);
2225 for (i = 0; i < ihost->logical_port_entries; i++) {
2226 struct isci_port *iport = &ihost->ports[i];
2228 iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
2229 iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
2230 iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
2233 result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
2246 static int sci_controller_dma_alloc(struct isci_host *ihost)
2248 struct device *dev = &ihost->pdev->dev;
2253 if (ihost->completion_queue)
2257 ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma,
2259 if (!ihost->completion_queue)
2262 size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
2263 ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma,
2266 if (!ihost->remote_node_context_table)
2269 size = ihost->task_context_entries * sizeof(struct scu_task_context);
2270 ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma,
2272 if (!ihost->task_context_table)
2276 ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL);
2277 if (!ihost->ufi_buf)
2288 ireq->tc = &ihost->task_context_table[i];
2289 ireq->owning_controller = ihost;
2291 ireq->isci_host = ihost;
2292 ihost->reqs[i] = ireq;
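All four buffers in this function come from dmam_alloc_coherent(), the device-managed variant: each allocation is registered with devres against &ihost->pdev->dev, so the early error returns and eventual device teardown need no explicit free. The shape of such a call, with a hypothetical context struct:

#include <linux/device.h>
#include <linux/dma-mapping.h>

struct demo_rings {
	u32 *cq;		/* completion queue buffer, hypothetical */
	dma_addr_t cq_dma;
};

static int demo_alloc_rings(struct device *dev, struct demo_rings *r, size_t size)
{
	r->cq = dmam_alloc_coherent(dev, size, &r->cq_dma, GFP_KERNEL);
	if (!r->cq)
		return -ENOMEM;	/* nothing leaked: devres owns prior allocations */
	return 0;
}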
2298 static int sci_controller_mem_init(struct isci_host *ihost)
2300 int err = sci_controller_dma_alloc(ihost);
2305 writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
2306 writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
2308 writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
2309 writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);
2311 writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
2312 writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);
2314 sci_unsolicited_frame_control_construct(ihost);
2320 writel(lower_32_bits(ihost->uf_control.headers.physical_address),
2321 &ihost->scu_registers->sdma.uf_header_base_address_lower);
2322 writel(upper_32_bits(ihost->uf_control.headers.physical_address),
2323 &ihost->scu_registers->sdma.uf_header_base_address_upper);
2325 writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
2326 &ihost->scu_registers->sdma.uf_address_table_lower);
2327 writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
2328 &ihost->scu_registers->sdma.uf_address_table_upper);
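The base addresses at 2305-2328 are programmed as lower/upper 32-bit halves; lower_32_bits()/upper_32_bits() are the portable way to split a dma_addr_t, which may be 32 or 64 bits wide depending on the kernel configuration. The idiom:

#include <linux/io.h>
#include <linux/kernel.h>

/* Hand a (possibly 64-bit) DMA address to a device that takes it as two
 * 32-bit registers. A raw >> 32 on a 32-bit dma_addr_t would be
 * undefined; the helpers widen first, so this is safe on every config. */
static void demo_program_base(dma_addr_t dma, void __iomem *lo, void __iomem *hi)
{
	writel(lower_32_bits(dma), lo);
	writel(upper_32_bits(dma), hi);
}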
2335 * @ihost: host to init
2341 int isci_host_init(struct isci_host *ihost)
2346 spin_lock_irq(&ihost->scic_lock);
2347 status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
2348 spin_unlock_irq(&ihost->scic_lock);
2350 dev_err(&ihost->pdev->dev,
2357 spin_lock_irq(&ihost->scic_lock);
2358 status = sci_controller_initialize(ihost);
2359 spin_unlock_irq(&ihost->scic_lock);
2361 dev_warn(&ihost->pdev->dev,
2368 err = sci_controller_mem_init(ihost);
2373 writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
2374 for (i = 0; i < isci_gpio_count(ihost); i++)
2375 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
2376 writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
2381 void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
2384 switch (ihost->sm.current_state_id) {
2386 sci_del_timer(&ihost->phy_timer);
2387 ihost->phy_startup_timer_pending = false;
2388 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2390 sci_controller_start_next_phy(ihost);
2393 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2397 dev_dbg(&ihost->pdev->dev,
2400 ihost->sm.current_state_id);
2404 void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
2407 switch (ihost->sm.current_state_id) {
2410 ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
2414 dev_dbg(&ihost->pdev->dev,
2419 ihost->sm.current_state_id);
2423 bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
2427 for (index = 0; index < ihost->remote_node_entries; index++) {
2428 if ((ihost->device_table[index] != NULL) &&
2429 (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
2436 void sci_controller_remote_device_stopped(struct isci_host *ihost,
2439 if (ihost->sm.current_state_id != SCIC_STOPPING) {
2440 dev_dbg(&ihost->pdev->dev,
2443 ihost, idev,
2444 ihost->sm.current_state_id);
2448 if (!sci_controller_has_remote_devices_stopping(ihost))
2449 isci_host_stop_complete(ihost);
2452 void sci_controller_post_request(struct isci_host *ihost, u32 request)
2454 dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
2455 __func__, ihost->id, request);
2457 writel(request, &ihost->smu_registers->post_context_port);
2460 struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
2467 if (task_index < ihost->task_context_entries) {
2468 struct isci_request *ireq = ihost->reqs[task_index];
2473 if (task_sequence == ihost->io_request_sequence[task_index])
2495 enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
2503 &ihost->available_remote_nodes, remote_node_count
2507 ihost->device_table[node_index] = idev;
2517 void sci_controller_free_remote_node_context(struct isci_host *ihost,
2523 if (ihost->device_table[node_id] == idev) {
2524 ihost->device_table[node_id] = NULL;
2527 &ihost->available_remote_nodes, remote_node_count, node_id
2544 void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
2546 if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
2547 writel(ihost->uf_control.get,
2548 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
2551 void isci_tci_free(struct isci_host *ihost, u16 tci)
2553 u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
2555 ihost->tci_pool[tail] = tci;
2556 ihost->tci_tail = tail + 1;
2559 static u16 isci_tci_alloc(struct isci_host *ihost)
2561 u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
2562 u16 tci = ihost->tci_pool[head];
2564 ihost->tci_head = head + 1;
2568 static u16 isci_tci_space(struct isci_host *ihost)
2570 return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
2573 u16 isci_alloc_tag(struct isci_host *ihost)
2575 if (isci_tci_space(ihost)) {
2576 u16 tci = isci_tci_alloc(ihost);
2577 u8 seq = ihost->io_request_sequence[tci];
2585 enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
2591 if (isci_tci_active(ihost) == 0)
2594 if (seq == ihost->io_request_sequence[tci]) {
2595 ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
2597 isci_tci_free(ihost, tci);
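isci_alloc_tag()/isci_free_tag() pair each task-context index with a small per-index sequence number: free bumps the sequence (2595), so a stale copy of an old tag no longer matches io_request_sequence[tci] and is rejected (compare the lookup check at 2473). A userspace model; packing seq above tci with a 12-bit shift is an assumption for the sketch, the driver uses its own ISCI_TAG macros:

#include <stdbool.h>
#include <stdint.h>

#define POOL_SIZE	256	/* power of two, like SCI_MAX_IO_REQUESTS */
#define SEQ_MASK	0xf	/* small generation counter, like SCI_MAX_SEQ - 1 */

static uint8_t sequence[POOL_SIZE];	/* current generation per index */

static uint16_t make_tag(uint8_t seq, uint16_t tci)
{
	return (uint16_t)(seq << 12) | tci;	/* packing is illustrative */
}

static bool tag_is_current(uint16_t tag)
{
	uint16_t tci = tag & (POOL_SIZE - 1);

	return ((tag >> 12) & SEQ_MASK) == sequence[tci];
}

/* Freeing bumps the generation, instantly invalidating stale tag copies. */
static void retire_tag(uint16_t tag)
{
	uint16_t tci = tag & (POOL_SIZE - 1);

	if (tag_is_current(tag))
		sequence[tci] = (sequence[tci] + 1) & SEQ_MASK;
}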
2604 enum sci_status sci_controller_start_io(struct isci_host *ihost,
2610 if (ihost->sm.current_state_id != SCIC_READY) {
2611 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2612 __func__, ihost->sm.current_state_id);
2616 status = sci_remote_device_start_io(ihost, idev, ireq);
2621 sci_controller_post_request(ihost, ireq->post_context);
2625 enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
2635 if (ihost->sm.current_state_id != SCIC_READY) {
2636 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2637 __func__, ihost->sm.current_state_id);
2642 dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n",
2652 ihost, ireq->post_context |
2663 * @ihost: The handle to the controller object for which to complete the
2669 enum sci_status sci_controller_complete_io(struct isci_host *ihost,
2675 switch (ihost->sm.current_state_id) {
2680 status = sci_remote_device_complete_io(ihost, idev, ireq);
2687 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2688 __func__, ihost->sm.current_state_id);
2696 struct isci_host *ihost = ireq->owning_controller;
2698 if (ihost->sm.current_state_id != SCIC_READY) {
2699 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2700 __func__, ihost->sm.current_state_id);
2705 sci_controller_post_request(ihost, ireq->post_context);
2718 enum sci_status sci_controller_start_task(struct isci_host *ihost,
2724 if (ihost->sm.current_state_id != SCIC_READY) {
2725 dev_warn(&ihost->pdev->dev,
2732 status = sci_remote_device_start_task(ihost, idev, ireq);
2745 sci_controller_post_request(ihost, ireq->post_context);
2754 static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data)
2762 for (d = 0; d < isci_gpio_count(ihost); d++) {
2781 writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]);
2793 struct isci_host *ihost = sas_ha->lldd_ha;
2798 written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data);