Lines Matching defs:iport
73 static struct device *sciport_to_dev(struct isci_port *iport)
75 int i = iport->physical_port_index;
82 table = iport - i;
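Note: the sciport_to_dev() lines above (73-82) depend on each isci_port being an element of an array embedded in the host structure: subtracting the port index steps back to ports[0], and container_of() then recovers the owning host. Below is a minimal user-space sketch of that pointer arithmetic, using hypothetical demo_* types in place of isci_port/isci_host and offsetof() in place of the kernel's container_of().

/*
 * Illustrative only: demo_port/demo_host stand in for the driver's
 * isci_port/isci_host; the technique (index subtraction + offsetof)
 * mirrors what sciport_to_dev() does.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_port { int physical_port_index; };

struct demo_host {
        const char *name;
        struct demo_port ports[4];
};

static struct demo_host *port_to_host(struct demo_port *port)
{
        /* step back to ports[0], then back up to the containing host */
        struct demo_port *table = port - port->physical_port_index;

        return (struct demo_host *)((char *)table - offsetof(struct demo_host, ports));
}

int main(void)
{
        struct demo_host host = { .name = "host0" };
        int i;

        for (i = 0; i < 4; i++)
                host.ports[i].physical_port_index = i;

        printf("%s\n", port_to_host(&host.ports[2])->name); /* prints "host0" */
        return 0;
}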
88 static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
94 struct isci_phy *iphy = iport->phy_table[index];
102 static u32 sci_port_get_phys(struct isci_port *iport)
109 if (iport->phy_table[index])
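Note: sci_port_get_phys() (102-109) builds a phy mask with one bit per populated phy_table slot; the same bit-per-phy convention is used later for active_phy_mask and enabled_phy_mask. A small standalone sketch of the idiom follows; SCI_MAX_PHYS and the table contents here are stand-ins, not the driver's definitions.

/* Sketch of the bit-per-slot mask building used by sci_port_get_phys(). */
#include <stdint.h>
#include <stdio.h>

#define SCI_MAX_PHYS 4  /* stand-in value for this sketch */

static uint32_t get_phys_mask(void *phy_table[SCI_MAX_PHYS])
{
        uint32_t mask = 0;
        uint32_t index;

        for (index = 0; index < SCI_MAX_PHYS; index++)
                if (phy_table[index])           /* occupied slot -> set its bit */
                        mask |= 1u << index;

        return mask;
}

int main(void)
{
        int a, b;
        void *table[SCI_MAX_PHYS] = { &a, NULL, &b, NULL };

        printf("0x%x\n", (unsigned)get_phys_mask(table)); /* prints 0x5 */
        return 0;
}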
128 enum sci_status sci_port_get_properties(struct isci_port *iport,
131 if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
134 prop->index = iport->logical_port_index;
135 prop->phy_mask = sci_port_get_phys(iport);
136 sci_port_get_sas_address(iport, &prop->local.sas_address);
137 sci_port_get_protocols(iport, &prop->local.protocols);
138 sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);
143 static void sci_port_bcn_enable(struct isci_port *iport)
149 for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
150 iphy = iport->phy_table[i];
160 struct isci_port *iport,
169 sci_port_bcn_enable(iport);
173 struct isci_port *iport,
182 __func__, iport);
186 sci_port_get_properties(iport, &properties);
295 static void port_state_machine_change(struct isci_port *iport,
298 struct sci_base_state_machine *sm = &iport->sm;
302 iport->ready_exit = true;
305 iport->ready_exit = false;
363 bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
365 struct isci_host *ihost = iport->owning_controller;
372 if ((iport->physical_port_index == 1) && (phy_index != 1))
375 if (iport->physical_port_index == 3 && phy_index != 3)
378 if (iport->physical_port_index == 2 &&
383 if (iport->phy_table[index] && index != phy_index)
410 struct isci_port *iport,
413 if (iport->physical_port_index == 0) {
419 } else if (iport->physical_port_index == 1) {
423 } else if (iport->physical_port_index == 2) {
428 } else if (iport->physical_port_index == 3) {
445 static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
454 iphy = iport->phy_table[index];
455 if (iphy && sci_port_active_phy(iport, iphy))
462 static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
468 if (!iport->phy_table[iphy->phy_index] &&
470 sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
474 iport->logical_port_index = iport->physical_port_index;
475 iport->phy_table[iphy->phy_index] = iphy;
476 sci_phy_set_port(iphy, iport);
484 static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
487 if (iport->phy_table[iphy->phy_index] == iphy &&
488 phy_get_non_dummy_port(iphy) == iport) {
489 struct isci_host *ihost = iport->owning_controller;
493 iport->phy_table[iphy->phy_index] = NULL;
500 void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
507 if (iport->phy_table[index])
508 sci_phy_get_sas_address(iport->phy_table[index], sas);
511 void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
519 iphy = sci_port_get_a_connected_phy(iport);
543 static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
547 rnc = &iport->owning_controller->remote_node_context_table[rni];
556 rnc->ssp.logical_port_index = iport->physical_port_index;
571 static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
573 struct isci_host *ihost = iport->owning_controller;
581 task_context->logical_port_index = iport->physical_port_index;
586 task_context->remote_node_index = iport->reserved_rni;
591 static void sci_port_destroy_dummy_resources(struct isci_port *iport)
593 struct isci_host *ihost = iport->owning_controller;
595 if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
596 isci_free_tag(ihost, iport->reserved_tag);
598 if (iport->reserved_rni != SCU_DUMMY_INDEX)
600 1, iport->reserved_rni);
602 iport->reserved_rni = SCU_DUMMY_INDEX;
603 iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
606 void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
611 if (iport->active_phy_mask & (1 << index))
612 sci_phy_setup_transport(iport->phy_table[index], device_id);
616 static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
619 iport->enabled_phy_mask |= 1 << iphy->phy_index;
622 static void sci_port_activate_phy(struct isci_port *iport,
626 struct isci_host *ihost = iport->owning_controller;
631 iport->active_phy_mask |= 1 << iphy->phy_index;
636 isci_port_link_up(ihost, iport, iphy);
639 void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
642 struct isci_host *ihost = iport->owning_controller;
644 iport->active_phy_mask &= ~(1 << iphy->phy_index);
645 iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
646 if (!iport->active_phy_mask)
647 iport->last_active_phy = iphy->phy_index;
654 if (iport->owning_controller->oem_parameters.controller.mode_type ==
657 &iport->port_pe_configuration_register[iphy->phy_index]);
660 isci_port_link_down(ihost, iphy, iport);
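Note: the sci_port_deactivate_phy() lines (644-647) clear the phy's bit in both masks and record last_active_phy only once the port has no active phys left. A minimal sketch of that bookkeeping, with a stand-in struct for the relevant isci_port fields:

/* Illustrative struct and function names; only the mask/last-phy logic
 * mirrors lines 644-647 of the listing. */
#include <stdint.h>
#include <stdio.h>

struct demo_port {
        uint32_t active_phy_mask;
        uint32_t enabled_phy_mask;
        uint8_t last_active_phy;
};

static void deactivate_phy(struct demo_port *port, uint8_t phy_index)
{
        port->active_phy_mask &= ~(1u << phy_index);
        port->enabled_phy_mask &= ~(1u << phy_index);
        if (!port->active_phy_mask)             /* last phy just went down */
                port->last_active_phy = phy_index;
}

int main(void)
{
        struct demo_port port = { .active_phy_mask = 0x3, .enabled_phy_mask = 0x3 };

        deactivate_phy(&port, 0);               /* one phy still active */
        deactivate_phy(&port, 1);               /* mask now empty, phy 1 recorded */
        printf("last_active_phy=%u mask=0x%x\n",
               (unsigned)port.last_active_phy,
               (unsigned)port.active_phy_mask); /* prints last_active_phy=1 mask=0x0 */
        return 0;
}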
663 static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
665 struct isci_host *ihost = iport->owning_controller;
689 static void sci_port_general_link_up_handler(struct isci_port *iport,
696 sci_port_get_attached_sas_address(iport, &port_sas_address);
706 iport->active_phy_mask == 0) {
707 struct sci_base_state_machine *sm = &iport->sm;
709 sci_port_activate_phy(iport, iphy, flags);
711 port_state_machine_change(iport, SCI_PORT_READY);
713 sci_port_invalid_link_up(iport, iphy);
727 static bool sci_port_is_wide(struct isci_port *iport)
733 if (iport->phy_table[index] != NULL) {
755 bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy)
757 if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
759 if (sci_port_is_wide(iport)) {
760 sci_port_invalid_link_up(iport, iphy);
763 struct isci_host *ihost = iport->owning_controller;
776 struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
777 struct isci_host *ihost = iport->owning_controller;
786 current_state = iport->sm.current_state_id;
792 port_state_machine_change(iport, SCI_PORT_FAILED);
797 dev_err(sciport_to_dev(iport),
800 iport);
802 dev_dbg(sciport_to_dev(iport),
804 __func__, iport->physical_port_index);
809 dev_err(sciport_to_dev(iport),
811 "in state %d.\n", __func__, iport, current_state);
825 static void sci_port_update_viit_entry(struct isci_port *iport)
829 sci_port_get_sas_address(iport, &sas_address);
832 &iport->viit_registers->initiator_sas_address_hi);
834 &iport->viit_registers->initiator_sas_address_lo);
837 writel(0, &iport->viit_registers->reserved);
842 ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
844 &iport->viit_registers->status);
847 enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
857 iphy = iport->phy_table[index];
858 if (iphy && sci_port_active_phy(iport, iphy) &&
866 static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
870 pts_control_value = readl(&iport->port_task_scheduler_registers->control);
872 writel(pts_control_value, &iport->port_task_scheduler_registers->control);
884 static void sci_port_post_dummy_request(struct isci_port *iport)
886 struct isci_host *ihost = iport->owning_controller;
887 u16 tag = iport->reserved_tag;
895 iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
908 static void sci_port_abort_dummy_request(struct isci_port *iport)
910 struct isci_host *ihost = iport->owning_controller;
911 u16 tag = iport->reserved_tag;
919 iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
932 sci_port_resume_port_task_scheduler(struct isci_port *iport)
936 pts_control_value = readl(&iport->port_task_scheduler_registers->control);
938 writel(pts_control_value, &iport->port_task_scheduler_registers->control);
943 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
945 sci_port_suspend_port_task_scheduler(iport);
947 iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;
949 if (iport->active_phy_mask != 0) {
951 port_state_machine_change(iport,
959 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
960 sci_port_resume_port_task_scheduler(iport);
966 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
967 struct isci_host *ihost = iport->owning_controller;
970 __func__, iport->physical_port_index);
973 if (iport->phy_table[index]) {
974 writel(iport->physical_port_index,
975 &iport->port_pe_configuration_register[
976 iport->phy_table[index]->phy_index]);
977 if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0)
978 sci_port_resume_phy(iport, iport->phy_table[index]);
982 sci_port_update_viit_entry(iport);
988 sci_port_post_dummy_request(iport);
991 static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
993 struct isci_host *ihost = iport->owning_controller;
994 u8 phys_index = iport->physical_port_index;
996 u16 rni = iport->reserved_rni;
1026 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1027 struct isci_host *ihost = iport->owning_controller;
1034 sci_port_abort_dummy_request(iport);
1037 __func__, iport->physical_port_index);
1039 if (iport->ready_exit)
1040 sci_port_invalidate_dummy_remote_node(iport);
1045 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1046 struct isci_host *ihost = iport->owning_controller;
1048 if (iport->active_phy_mask == 0) {
1050 __func__, iport->physical_port_index);
1052 port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
1054 port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
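Note: the configuring-substate lines just above (1048-1054) show the ready-port substate rule: WAITING while active_phy_mask is empty, OPERATIONAL once at least one phy is active. A tiny sketch of that selection; the enum mirrors the SCI_PORT_SUB_* names in the listing and everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

enum port_substate {
        PORT_SUB_WAITING,       /* ready, but no active phys */
        PORT_SUB_OPERATIONAL,   /* ready with at least one active phy */
};

static enum port_substate select_substate(uint32_t active_phy_mask)
{
        return active_phy_mask ? PORT_SUB_OPERATIONAL : PORT_SUB_WAITING;
}

int main(void)
{
        printf("%d %d\n", select_substate(0), select_substate(1u << 2)); /* prints 0 1 */
        return 0;
}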
1057 enum sci_status sci_port_start(struct isci_port *iport)
1059 struct isci_host *ihost = iport->owning_controller;
1064 state = iport->sm.current_state_id;
1066 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1071 if (iport->assigned_device_count > 0) {
1080 if (iport->reserved_rni == SCU_DUMMY_INDEX) {
1085 sci_port_construct_dummy_rnc(iport, rni);
1088 iport->reserved_rni = rni;
1091 if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
1098 sci_port_construct_dummy_task(iport, tag);
1099 iport->reserved_tag = tag;
1103 phy_mask = sci_port_get_phys(iport);
1110 if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) {
1111 port_state_machine_change(iport,
1120 sci_port_destroy_dummy_resources(iport);
1125 enum sci_status sci_port_stop(struct isci_port *iport)
1129 state = iport->sm.current_state_id;
1137 port_state_machine_change(iport,
1141 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1147 static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
1154 state = iport->sm.current_state_id;
1156 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1163 iphy = iport->phy_table[phy_index];
1164 if (iphy && !sci_port_active_phy(iport, iphy)) {
1181 sci_mod_timer(&iport->timer, timeout);
1182 iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;
1184 port_state_machine_change(iport, SCI_PORT_RESETTING);
1197 enum sci_status sci_port_add_phy(struct isci_port *iport,
1203 sci_port_bcn_enable(iport);
1205 state = iport->sm.current_state_id;
1211 sci_port_get_sas_address(iport, &port_sas_address);
1225 return sci_port_set_phy(iport, iphy);
1229 status = sci_port_set_phy(iport, iphy);
1234 sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
1235 iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
1236 port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
1240 status = sci_port_set_phy(iport, iphy);
1244 sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);
1249 port_state_machine_change(iport,
1253 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1268 enum sci_status sci_port_remove_phy(struct isci_port *iport,
1274 state = iport->sm.current_state_id;
1278 return sci_port_clear_phy(iport, iphy);
1280 status = sci_port_clear_phy(iport, iphy);
1284 sci_port_deactivate_phy(iport, iphy, true);
1285 iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
1286 port_state_machine_change(iport,
1290 status = sci_port_clear_phy(iport, iphy);
1294 sci_port_deactivate_phy(iport, iphy, true);
1299 port_state_machine_change(iport,
1303 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1309 enum sci_status sci_port_link_up(struct isci_port *iport,
1314 state = iport->sm.current_state_id;
1320 sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);
1322 port_state_machine_change(iport,
1326 sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
1343 sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
1346 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1352 enum sci_status sci_port_link_down(struct isci_port *iport,
1357 state = iport->sm.current_state_id;
1360 sci_port_deactivate_phy(iport, iphy, true);
1366 if (iport->active_phy_mask == 0)
1367 port_state_machine_change(iport,
1373 sci_port_deactivate_phy(iport, iphy, false);
1376 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1382 enum sci_status sci_port_start_io(struct isci_port *iport,
1388 state = iport->sm.current_state_id;
1393 iport->started_request_count++;
1396 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1402 enum sci_status sci_port_complete_io(struct isci_port *iport,
1408 state = iport->sm.current_state_id;
1411 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1415 sci_port_decrement_request_count(iport);
1417 if (iport->started_request_count == 0)
1418 port_state_machine_change(iport,
1426 sci_port_decrement_request_count(iport);
1429 sci_port_decrement_request_count(iport);
1430 if (iport->started_request_count == 0) {
1431 port_state_machine_change(iport,
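Note: sci_port_start_io()/sci_port_complete_io() (1393, 1415-1431) count started requests per port, and a stopping port only transitions to STOPPED once the count drains to zero. A simplified sketch of that draining logic; the driver's sci_port_decrement_request_count() and exact state handling are not reproduced, and the underflow guard here is an assumption.

#include <stdint.h>
#include <stdio.h>

enum port_state { PORT_STOPPING, PORT_STOPPED };

struct demo_port {
        enum port_state state;
        uint32_t started_request_count;
};

static void start_io(struct demo_port *port)
{
        port->started_request_count++;
}

static void complete_io(struct demo_port *port)
{
        if (port->started_request_count)
                port->started_request_count--;
        /* a stopping port only becomes STOPPED once all I/O has drained */
        if (port->state == PORT_STOPPING && port->started_request_count == 0)
                port->state = PORT_STOPPED;
}

int main(void)
{
        struct demo_port port = { .state = PORT_STOPPING };

        start_io(&port);
        start_io(&port);
        complete_io(&port);
        printf("state=%d count=%u\n", port.state,
               (unsigned)port.started_request_count); /* state=0 count=1 */
        complete_io(&port);
        printf("state=%d count=%u\n", port.state,
               (unsigned)port.started_request_count); /* state=1 count=0 */
        return 0;
}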
1439 static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
1444 pts_control_value = readl(&iport->port_task_scheduler_registers->control);
1446 writel(pts_control_value, &iport->port_task_scheduler_registers->control);
1449 static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
1453 pts_control_value = readl(&iport->port_task_scheduler_registers->control);
1456 writel(pts_control_value, &iport->port_task_scheduler_registers->control);
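Note: sci_port_enable/disable_port_task_scheduler() (1439-1456) follow the usual read-modify-write pattern on the PTS control register. A user-space sketch of the pattern, where a plain variable and a made-up ENABLE bit stand in for the MMIO register and the driver's SCU_PTSxCR_GEN_BIT() values:

#include <stdint.h>
#include <stdio.h>

#define PTS_ENABLE_BIT (1u << 0)        /* illustrative bit position */

static uint32_t pts_control;            /* stands in for the MMIO control register */

static void pts_enable(void)
{
        uint32_t v = pts_control;       /* readl() in the driver */
        v |= PTS_ENABLE_BIT;
        pts_control = v;                /* writel() in the driver */
}

static void pts_disable(void)
{
        uint32_t v = pts_control;
        v &= ~PTS_ENABLE_BIT;
        pts_control = v;
}

int main(void)
{
        pts_enable();
        printf("0x%x\n", (unsigned)pts_control);  /* prints 0x1 */
        pts_disable();
        printf("0x%x\n", (unsigned)pts_control);  /* prints 0x0 */
        return 0;
}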
1459 static void sci_port_post_dummy_remote_node(struct isci_port *iport)
1461 struct isci_host *ihost = iport->owning_controller;
1462 u8 phys_index = iport->physical_port_index;
1464 u16 rni = iport->reserved_rni;
1489 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1491 if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
1496 sci_port_disable_port_task_scheduler(iport);
1502 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1505 sci_port_enable_port_task_scheduler(iport);
1510 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1511 struct isci_host *ihost = iport->owning_controller;
1514 prev_state = iport->sm.previous_state_id;
1516 isci_port_hard_reset_complete(iport, SCI_SUCCESS);
1519 __func__, iport->physical_port_index);
1522 sci_port_post_dummy_remote_node(iport);
1525 port_state_machine_change(iport,
1531 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1533 sci_del_timer(&iport->timer);
1538 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1540 sci_del_timer(&iport->timer);
1542 sci_port_destroy_dummy_resources(iport);
1547 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1549 isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
1552 void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout)
1555 u32 phy_mask = iport->active_phy_mask;
1558 ++iport->hang_detect_users;
1559 else if (iport->hang_detect_users > 1)
1560 --iport->hang_detect_users;
1562 iport->hang_detect_users = 0;
1564 if (timeout || (iport->hang_detect_users == 0)) {
1568 &iport->phy_table[phy_index]
1607 void sci_port_construct(struct isci_port *iport, u8 index,
1610 sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);
1612 iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
1613 iport->physical_port_index = index;
1614 iport->active_phy_mask = 0;
1615 iport->enabled_phy_mask = 0;
1616 iport->last_active_phy = 0;
1617 iport->ready_exit = false;
1619 iport->owning_controller = ihost;
1621 iport->started_request_count = 0;
1622 iport->assigned_device_count = 0;
1623 iport->hang_detect_users = 0;
1625 iport->reserved_rni = SCU_DUMMY_INDEX;
1626 iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
1628 sci_init_timer(&iport->timer, port_timeout);
1630 iport->port_task_scheduler_registers = NULL;
1633 iport->phy_table[index] = NULL;
1636 void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
1638 struct isci_host *ihost = iport->owning_controller;
1641 isci_port_bc_change_received(ihost, iport, iphy);
1644 static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport)
1646 wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state));
1649 int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
1656 dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
1657 __func__, iport);
1660 set_bit(IPORT_RESET_PENDING, &iport->state);
1663 status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
1668 wait_port_reset(ihost, iport);
1671 "%s: iport = %p; hard reset completion\n",
1672 __func__, iport);
1674 if (iport->hard_reset_status != SCI_SUCCESS) {
1678 "%s: iport = %p; hard reset failed (0x%x)\n",
1679 __func__, iport, iport->hard_reset_status);
1682 clear_bit(IPORT_RESET_PENDING, &iport->state);
1687 "%s: iport = %p; sci_port_hard_reset call"
1689 __func__, iport, status);
1697 struct isci_port *iport = dev->port->lldd_port;
1710 if (test_bit(IPORT_RESET_PENDING, &iport->state))
1713 rc = !!iport->active_phy_mask;
1723 struct isci_port *iport = phy->port->lldd_port;
1730 if (!iport)
1735 if (iport->active_phy_mask & 1 << i)
1742 __func__, (long) (iport - &ihost->ports[0]));
1750 struct isci_port *iport = NULL;
1761 iport = &ihost->ports[i];
1762 if (iport->active_phy_mask & 1 << iphy->phy_index)
1768 iport = NULL;
1770 port->lldd_port = iport;
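Note: the final lines (1750-1770) locate the port that owns a phy by scanning each port's active_phy_mask for the phy's bit, falling back to NULL when no port has claimed it. A standalone sketch of that lookup; SCI_MAX_PORTS and the structs are stand-ins for the driver's definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SCI_MAX_PORTS 4 /* stand-in value for this sketch */

struct demo_port { uint32_t active_phy_mask; };
struct demo_host { struct demo_port ports[SCI_MAX_PORTS]; };

static struct demo_port *port_for_phy(struct demo_host *host, uint8_t phy_index)
{
        int i;

        for (i = 0; i < SCI_MAX_PORTS; i++)
                if (host->ports[i].active_phy_mask & (1u << phy_index))
                        return &host->ports[i];

        return NULL;    /* no port has claimed this phy yet */
}

int main(void)
{
        struct demo_host host = { .ports = { { 0x1 }, { 0x6 }, { 0 }, { 0x8 } } };
        struct demo_port *port = port_for_phy(&host, 2);

        printf("port index %ld\n",
               port ? (long)(port - &host.ports[0]) : -1L); /* prints port index 1 */
        return 0;
}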