Lines Matching refs:sw

111 		if (tunnel->src_port->sw == in->sw &&
112 		    tunnel->dst_port->sw == out->sw) {
181 static void tb_add_dp_resources(struct tb_switch *sw)
183 struct tb_cm *tcm = tb_priv(sw->tb);
186 tb_switch_for_each_port(sw, port) {
190 if (!tb_switch_query_dp_resource(sw, port))
198 static void tb_remove_dp_resources(struct tb_switch *sw)
200 struct tb_cm *tcm = tb_priv(sw->tb);
204 tb_switch_for_each_port(sw, port) {
206 tb_remove_dp_resources(port->remote->sw);
210 if (port->sw == sw) {
244 static int tb_enable_clx(struct tb_switch *sw)
246 struct tb_cm *tcm = tb_priv(sw->tb);
258 while (sw && sw->config.depth > 1)
259 sw = tb_switch_parent(sw);
261 if (!sw)
264 if (sw->config.depth != 1)
273 if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
282 ret = tb_switch_clx_enable(sw, clx | TB_CL2);
284 ret = tb_switch_clx_enable(sw, clx);
289 static void tb_disable_clx(struct tb_switch *sw)
292 if (tb_switch_clx_disable(sw) < 0)
293 tb_sw_warn(sw, "failed to disable CL states\n");
294 sw = tb_switch_parent(sw);
295 } while (sw);
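
The matches at 258-295 show the CLx enable/disable paths walking the topology upward with tb_switch_parent() until the depth-1 router (or the root) is reached. A minimal self-contained sketch of that parent-walk pattern, using simplified stand-in types rather than the driver's real struct tb_switch:

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins; the real struct tb_switch and
 * tb_switch_parent() live in drivers/thunderbolt/. */
struct sw {
	int depth;		/* distance from the host router */
	struct sw *parent;	/* upstream router, NULL for the root */
};

static struct sw *parent_of(struct sw *s)
{
	return s->parent;
}

int main(void)
{
	struct sw root = { .depth = 0, .parent = NULL };
	struct sw hub  = { .depth = 1, .parent = &root };
	struct sw leaf = { .depth = 2, .parent = &hub };
	struct sw *s = &leaf;

	/* Walk up until the router directly below the host, as the
	 * CLx enable path does before checking sw->config.depth != 1. */
	while (s && s->depth > 1)
		s = parent_of(s);

	printf("stopped at depth %d\n", s ? s->depth : -1);
	return 0;
}
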
300 struct tb_switch *sw;
302 sw = tb_to_switch(dev);
303 if (!sw)
306 if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
310 if (tb_switch_clx_is_enabled(sw, TB_CL1))
315 ret = tb_switch_tmu_configure(sw, mode);
319 return tb_switch_tmu_enable(sw);
327 struct tb_switch *sw;
341 sw = tunnel->tb->root_switch;
342 device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
345 static int tb_enable_tmu(struct tb_switch *sw)
356 ret = tb_switch_tmu_configure(sw,
359 if (tb_switch_clx_is_enabled(sw, TB_CL1))
360 ret = tb_switch_tmu_configure(sw,
363 ret = tb_switch_tmu_configure(sw,
370 if (tb_switch_tmu_is_enabled(sw))
373 ret = tb_switch_tmu_disable(sw);
377 ret = tb_switch_tmu_post_time(sw);
381 return tb_switch_tmu_enable(sw);
384 static void tb_switch_discover_tunnels(struct tb_switch *sw,
388 struct tb *tb = sw->tb;
391 tb_switch_for_each_port(sw, port) {
416 tb_switch_for_each_port(sw, port) {
418 tb_switch_discover_tunnels(port->remote->sw, list,
433 struct tb_switch *parent = tunnel->dst_port->sw;
435 while (parent != tunnel->src_port->sw) {
444 pm_runtime_get_sync(&in->sw->dev);
445 pm_runtime_get_sync(&out->sw->dev);
454 if (tb_switch_is_usb4(port->sw))
461 if (tb_switch_is_usb4(port->sw))
471 struct tb_switch *sw = port->sw;
472 struct tb *tb = sw->tb;
486 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
489 tb_port_at(route, sw)->xdomain = xd;
496 * tb_find_unused_port() - return the first inactive port on @sw
497 * @sw: Switch to find the port on
500 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
505 tb_switch_for_each_port(sw, port) {
519 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
524 down = usb4_switch_map_usb3_down(sw, port);
553 struct tb_switch *sw;
556 if (dst_port->sw->config.depth > src_port->sw->config.depth)
557 sw = dst_port->sw;
559 sw = src_port->sw;
562 if (sw == tb->root_switch)
566 port = tb_port_at(tb_route(sw), tb->root_switch);
584 tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
610 link_speed = port->sw->link_speed;
612 * sw->link_width is from upstream perspective
616 if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
619 } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
623 up_bw = link_speed * port->sw->link_width * 1000;
751 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
753 struct tb_switch *parent = tb_switch_parent(sw);
764 up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
768 if (!sw->link_usb4)
775 port = tb_switch_downstream_port(sw);
834 static int tb_create_usb3_tunnels(struct tb_switch *sw)
842 if (tb_route(sw)) {
843 ret = tb_tunnel_usb3(sw->tb, sw);
848 tb_switch_for_each_port(sw, port) {
851 ret = tb_create_usb3_tunnels(port->remote->sw);
864 static void tb_scan_switch(struct tb_switch *sw)
868 pm_runtime_get_sync(&sw->dev);
870 tb_switch_for_each_port(sw, port)
873 pm_runtime_mark_last_busy(&sw->dev);
874 pm_runtime_put_autosuspend(&sw->dev);
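
The matches at 868-874 show the scan path bracketing the per-port work between a runtime-PM get and an autosuspend put. A small self-contained sketch of that bracket pattern, with stub functions standing in for pm_runtime_get_sync()/pm_runtime_mark_last_busy()/pm_runtime_put_autosuspend() (the real helpers take a struct device * and are kernel APIs):

#include <stdio.h>

/* Stubs standing in for the runtime PM helpers and the port loop. */
static void get_sync(const char *dev)        { printf("resume %s\n", dev); }
static void mark_last_busy(const char *dev)  { printf("mark busy %s\n", dev); }
static void put_autosuspend(const char *dev) { printf("allow suspend %s\n", dev); }

static void scan_port(int port) { printf("scan port %d\n", port); }

/* Keep the device resumed for the whole scan, then let it autosuspend. */
static void scan_switch(const char *dev, int nports)
{
	int i;

	get_sync(dev);
	for (i = 1; i <= nports; i++)
		scan_port(i);
	mark_last_busy(dev);
	put_autosuspend(dev);
}

int main(void)
{
	scan_switch("router0", 4);
	return 0;
}
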
882 struct tb_cm *tcm = tb_priv(port->sw->tb);
885 struct tb_switch *sw;
893 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
918 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
920 if (IS_ERR(sw)) {
926 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
931 if (tb_switch_configure(sw)) {
932 tb_switch_put(sw);
952 dev_set_uevent_suppress(&sw->dev, true);
960 sw->rpm = sw->generation > 1;
962 if (tb_switch_add(sw)) {
963 tb_switch_put(sw);
968 upstream_port = tb_upstream_port(sw);
977 tb_switch_lane_bonding_enable(sw);
979 tb_switch_configure_link(sw);
985 tb_sw_dbg(sw, "discovery, not touching CL states\n");
986 else if (tb_enable_clx(sw))
987 tb_sw_warn(sw, "failed to enable CL states\n");
989 if (tb_enable_tmu(sw))
990 tb_sw_warn(sw, "failed to enable TMU\n");
996 tb_switch_configuration_valid(sw);
1007 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
1008 tb_sw_warn(sw, "USB3 tunnel creation failed\n");
1010 tb_add_dp_resources(sw);
1011 tb_scan_switch(sw);
1042 tb_switch_dealloc_dp_resource(src_port->sw, src_port);
1044 pm_runtime_mark_last_busy(&dst_port->sw->dev);
1045 pm_runtime_put_autosuspend(&dst_port->sw->dev);
1046 pm_runtime_mark_last_busy(&src_port->sw->dev);
1047 pm_runtime_put_autosuspend(&src_port->sw->dev);
1083 static void tb_free_unplugged_children(struct tb_switch *sw)
1087 tb_switch_for_each_port(sw, port) {
1091 if (port->remote->sw->is_unplugged) {
1093 tb_remove_dp_resources(port->remote->sw);
1094 tb_switch_unconfigure_link(port->remote->sw);
1095 tb_switch_lane_bonding_disable(port->remote->sw);
1096 tb_switch_remove(port->remote->sw);
1101 tb_free_unplugged_children(port->remote->sw);
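
The matches at 1087-1101 show a depth-first walk: each port's remote switch is either torn down when marked unplugged (DP resources removed, link unconfigured, bonding disabled, switch removed) or recursed into. A compact sketch of that recursion over a mock tree; the types and helpers here are illustrative, not the driver's:

#include <stdio.h>
#include <stdbool.h>

#define MAX_PORTS 4

struct node {
	bool unplugged;
	struct node *child[MAX_PORTS];	/* stand-in for port->remote->sw */
};

/* Remove unplugged subtrees, recurse into the ones still present. */
static void free_unplugged_children(struct node *n)
{
	int i;

	for (i = 0; i < MAX_PORTS; i++) {
		struct node *c = n->child[i];

		if (!c)
			continue;
		if (c->unplugged) {
			/* The driver also drops DP resources and link config here. */
			printf("tearing down child %d\n", i);
			n->child[i] = NULL;
		} else {
			free_unplugged_children(c);
		}
	}
}

int main(void)
{
	struct node leaf = { .unplugged = true };
	struct node mid  = { .child = { &leaf } };
	struct node root = { .child = { &mid } };

	free_unplugged_children(&root);
	return 0;
}
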
1106 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
1115 if (tb_switch_is_usb4(sw)) {
1116 down = usb4_switch_map_pcie_down(sw, port);
1117 } else if (!tb_route(sw)) {
1125 if (tb_switch_is_cactus_ridge(sw) ||
1126 tb_switch_is_alpine_ridge(sw))
1128 else if (tb_switch_is_falcon_ridge(sw))
1130 else if (tb_switch_is_titan_ridge(sw))
1136 if (WARN_ON(index > sw->config.max_port_number))
1139 down = &sw->ports[index];
1152 return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
1215 if (in->sw->config.depth < out->sw->config.depth)
1253 host_port = tb_route(in->sw) ?
1254 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1271 if (host_port && tb_route(port->sw)) {
1274 p = tb_port_at(tb_route(port->sw), tb->root_switch);
1354 pm_runtime_get_sync(&in->sw->dev);
1355 pm_runtime_get_sync(&out->sw->dev);
1357 if (tb_switch_alloc_dp_resource(in->sw, in)) {
1411 tb_switch_dealloc_dp_resource(in->sw, in);
1413 pm_runtime_mark_last_busy(&out->sw->dev);
1414 pm_runtime_put_autosuspend(&out->sw->dev);
1415 pm_runtime_mark_last_busy(&in->sw->dev);
1416 pm_runtime_put_autosuspend(&in->sw->dev);
1490 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
1495 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1503 tb_switch_xhci_disconnect(sw);
1511 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
1517 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1525 port = tb_switch_downstream_port(sw);
1526 down = tb_find_pcie_down(tb_switch_parent(sw), port);
1545 if (tb_switch_pcie_l1_enable(sw))
1546 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
1548 if (tb_switch_xhci_connect(sw))
1549 tb_sw_warn(sw, "failed to connect xHCI\n");
1562 struct tb_switch *sw;
1565 sw = tb_to_switch(xd->dev.parent);
1566 dst_port = tb_port_at(xd->route, sw);
1575 tb_disable_clx(sw);
1598 tb_enable_clx(sw);
1611 struct tb_switch *sw;
1613 sw = tb_to_switch(xd->dev.parent);
1614 dst_port = tb_port_at(xd->route, sw);
1633 tb_enable_clx(sw);
1662 struct tb_switch *sw;
1672 sw = tb_switch_find_by_route(tb, ev->route);
1673 if (!sw) {
1679 if (ev->port > sw->config.max_port_number) {
1685 port = &sw->ports[ev->port];
1692 pm_runtime_get_sync(&sw->dev);
1699 tb_sw_set_unplugged(port->remote->sw);
1701 tb_remove_dp_resources(port->remote->sw);
1702 tb_switch_tmu_disable(port->remote->sw);
1703 tb_switch_unconfigure_link(port->remote->sw);
1704 tb_switch_lane_bonding_disable(port->remote->sw);
1705 tb_switch_remove(port->remote->sw);
1732 tb_sw_dbg(sw, "xHCI disconnect request\n");
1733 tb_switch_xhci_disconnect(sw);
1740 } else if (!port->port && sw->authorized) {
1741 tb_sw_dbg(sw, "xHCI connect request\n");
1742 tb_switch_xhci_connect(sw);
1754 pm_runtime_mark_last_busy(&sw->dev);
1755 pm_runtime_put_autosuspend(&sw->dev);
1758 tb_switch_put(sw);
1892 struct tb_switch *sw;
1900 sw = tb_switch_find_by_route(tb, ev->route);
1901 if (!sw) {
1907 in = &sw->ports[ev->port];
1940 if (in->sw->config.depth < out->sw->config.depth) {
1963 tb_switch_put(sw);
2069 struct tb_switch *sw = tb_to_switch(dev);
2076 if (sw->boot)
2077 sw->authorized = 1;
2163 static void tb_restore_children(struct tb_switch *sw)
2168 if (sw->is_unplugged)
2171 if (tb_enable_clx(sw))
2172 tb_sw_warn(sw, "failed to re-enable CL states\n");
2174 if (tb_enable_tmu(sw))
2175 tb_sw_warn(sw, "failed to restore TMU configuration\n");
2177 tb_switch_configuration_valid(sw);
2179 tb_switch_for_each_port(sw, port) {
2184 tb_switch_lane_bonding_enable(port->remote->sw);
2185 tb_switch_configure_link(port->remote->sw);
2187 tb_restore_children(port->remote->sw);
2250 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
2255 tb_switch_for_each_port(sw, port) {
2265 ret += tb_free_unplugged_xdomains(port->remote->sw);