Lines matching refs: tb
15 #include "tb.h"
42 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
44 return ((void *)tcm - sizeof(struct tb));
49 struct tb *tb;
62 group->tb = tcm_to_tb(tcm);
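The tcm_to_tb() helper at line 42 works because the connection manager's private data sits immediately after struct tb in a single allocation (tb_domain_alloc() at line 2439 passes sizeof(*tcm) as the private size), so tb_priv() and tcm_to_tb() are inverses. A minimal user-space model of that round trip; the struct members here are placeholders, not the real fields:

#include <stdio.h>
#include <stdlib.h>

/* Placeholder layouts: the real struct tb/tb_cm carry many more fields,
 * but the pointer arithmetic only depends on privdata[] coming last. */
struct tb {
	int index;
	unsigned long privdata[];	/* connection-manager private area */
};

struct tb_cm {
	int hotplug_active;
};

static inline void *tb_priv(struct tb *tb)
{
	return (void *)tb->privdata;
}

static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	/* inverse of tb_priv(): step back over the struct tb header */
	return (void *)((char *)tcm - sizeof(struct tb));
}

int main(void)
{
	struct tb *tb = calloc(1, sizeof(*tb) + sizeof(struct tb_cm));
	struct tb_cm *tcm;

	if (!tb)
		return 1;
	tcm = tb_priv(tb);
	printf("round-trip ok: %d\n", tcm_to_tb(tcm) == tb);
	free(tb);
	return 0;
}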
163 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
171 ev->tb = tb;
176 queue_work(tb->wq, &ev->work);
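Lines 163-176 show the deferral pattern used throughout the file: the event is captured into a small heap-allocated work item and pushed onto the domain workqueue, so tb_handle_hotplug() later runs it in process context under tb->lock. A sketch of the whole helper; the lines between the matches are reconstructed from this pattern, not quoted:

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;		/* dropped event; nothing else we can do */

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}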
183 struct tb_cm *tcm = tb_priv(sw->tb);
200 struct tb_cm *tcm = tb_priv(sw->tb);
217 static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
219 struct tb_cm *tcm = tb_priv(tb);
232 static void tb_discover_dp_resources(struct tb *tb)
234 struct tb_cm *tcm = tb_priv(tb);
239 tb_discover_dp_resource(tb, tunnel->dst_port);
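At lines 232-239 the driver records the DP OUT end of every DP tunnel discovered from the boot firmware, so that resources already in use are known to the connection manager and not handed out twice. A sketch of that pass; the list iteration and the tb_tunnel_is_dp() test are assumed from context:

static void tb_discover_dp_resources(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_discover_dp_resource(tb, tunnel->dst_port);
	}
}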
246 struct tb_cm *tcm = tb_priv(sw->tb);
341 sw = tunnel->tb->root_switch;
388 struct tb *tb = sw->tb;
396 tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
401 tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
405 tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
424 static void tb_discover_tunnels(struct tb *tb)
426 struct tb_cm *tcm = tb_priv(tb);
429 tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
472 struct tb *tb = sw->tb;
480 xd = tb_xdomain_find_by_route(tb, route);
486 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
530 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
534 struct tb_cm *tcm = tb_priv(tb);
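tb_find_tunnel() at line 530 is the lookup used all over the file: scan tcm->tunnel_list for a tunnel of the given type whose endpoints match, where a NULL src_port or dst_port acts as a wildcard (see the NULL arguments at lines 1175, 1499 and 1932). A sketch of that match loop:

static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    (!src_port || src_port == tunnel->src_port) &&
		    (!dst_port || dst_port == tunnel->dst_port))
			return tunnel;
	}

	return NULL;
}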
548 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
562 if (sw == tb->root_switch)
566 port = tb_port_at(tb_route(sw), tb->root_switch);
568 usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
572 return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
575 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
579 struct tb_cm *tcm = tb_priv(tb);
583 tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
587 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
712 static int tb_release_unused_usb3_bandwidth(struct tb *tb,
718 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
722 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
728 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
732 tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
738 ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
741 tb_warn(tb, "failed to calculate available bandwidth\n");
745 tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
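Lines 722-745 are the give-back half of bandwidth management: after a consumer is sized, whatever headroom remains on the shared first-hop USB3 tunnel is handed back to it. A reconstruction of the function; the final hand-back call is assumed to be tb_tunnel_reclaim_available_bandwidth():

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/* the first-hop tunnel bounds USB3 bandwidth for the whole branch */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
					      &available_down);
}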
751 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
756 struct tb_cm *tcm = tb_priv(tb);
760 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
792 ret = tb_release_unused_usb3_bandwidth(tb, down, up);
797 ret = tb_available_bandwidth(tb, down, up, &available_up,
805 tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
821 tb_reclaim_usb3_bandwidth(tb, down, up);
829 tb_reclaim_usb3_bandwidth(tb, down, up);
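tb_tunnel_usb3() (lines 751-829) follows the file's recurring allocate-under-headroom sequence: release the bandwidth the existing first-hop USB3 tunnel is not actually using, measure what is then available, size the new tunnel from that, and reclaim the remainder on both the success and failure paths. A skeletal version of that flow; the adapter lookup and bandwidth clamping are elided:

	/* down/up are the USB3 adapters at the two ends of the new link */
	ret = tb_release_unused_usb3_bandwidth(tb, down, up);
	if (ret)
		return ret;

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	tb_reclaim_usb3_bandwidth(tb, down, up);
	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	tb_reclaim_usb3_bandwidth(tb, down, up);
	return ret;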
843 ret = tb_tunnel_usb3(sw->tb, sw);
882 struct tb_cm *tcm = tb_priv(port->sw->tb);
893 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
918 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
1007 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
1023 struct tb *tb;
1031 tb = tunnel->tb;
1051 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
1068 static void tb_free_invalid_tunnels(struct tb *tb)
1070 struct tb_cm *tcm = tb_priv(tb);
1159 struct tb *tb = group->tb;
1163 tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
1175 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1188 ret = tb_release_unused_usb3_bandwidth(tb,
1198 ret = tb_available_bandwidth(tb, in, out, &estimated_up,
1225 tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
1228 tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
1231 static void tb_recalc_estimated_bandwidth(struct tb *tb)
1233 struct tb_cm *tcm = tb_priv(tb);
1236 tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
1245 tb_dbg(tb, "bandwidth re-calculation done\n");
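The recalculation entry point at lines 1231-1245 just fans out to every non-empty bandwidth group; the per-group work (lines 1159-1228) releases unused USB3 bandwidth, measures, and updates each DP tunnel's estimate. A sketch of the fan-out; the groups array in struct tb_cm is assumed:

static void tb_recalc_estimated_bandwidth(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int i;

	tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		if (!list_empty(&group->ports))
			tb_recalc_estimated_bandwidth_for_group(group);
	}

	tb_dbg(tb, "bandwidth re-calculation done\n");
}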
1248 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1251 struct tb_cm *tcm = tb_priv(tb);
1254 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1274 p = tb_port_at(tb_route(port->sw), tb->root_switch);
1285 static void tb_tunnel_dp(struct tb *tb)
1288 struct tb_cm *tcm = tb_priv(tb);
1293 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1301 tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
1316 out = tb_find_dp_out(tb, port);
1324 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
1328 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
1366 ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1368 tb_warn(tb, "failed to release unused bandwidth\n");
1372 ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
1376 tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
1379 tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
1392 tb_reclaim_usb3_bandwidth(tb, in, out);
1395 tb_recalc_estimated_bandwidth(tb);
1407 tb_reclaim_usb3_bandwidth(tb, in, out);
1419 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
1434 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
1442 tb_recalc_estimated_bandwidth(tb);
1443 tb_tunnel_dp(tb);
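When a DP resource disappears (lines 1419-1443) the handler finds the affected tunnel via the wildcard-capable tb_find_tunnel(), tears it down, then re-runs estimation and tb_tunnel_dp(): the freed adapters may let a previously starved DP IN/OUT pair tunnel instead. The core of that sequence, with the in/out lookup elided and the teardown helper name assumed to match the rest of the file:

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	if (tunnel)
		tb_deactivate_and_free_tunnel(tunnel);

	/* try to make use of the adapters that just became free */
	tb_recalc_estimated_bandwidth(tb);
	tb_tunnel_dp(tb);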
1446 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
1448 struct tb_cm *tcm = tb_priv(tb);
1464 tb_tunnel_dp(tb);
1467 static void tb_disconnect_and_release_dp(struct tb *tb)
1469 struct tb_cm *tcm = tb_priv(tb);
1490 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
1499 tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
1511 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
1514 struct tb_cm *tcm = tb_priv(tb);
1530 tunnel = tb_tunnel_alloc_pci(tb, up, down);
1555 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1559 struct tb_cm *tcm = tb_priv(tb);
1567 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1569 mutex_lock(&tb->lock);
1577 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
1592 mutex_unlock(&tb->lock);
1599 mutex_unlock(&tb->lock);
1604 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1608 struct tb_cm *tcm = tb_priv(tb);
1615 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1636 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1641 mutex_lock(&tb->lock);
1642 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
1645 mutex_unlock(&tb->lock);
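Lines 1636-1645 show the file's double-underscore locking convention: tb_disconnect_xdomain_paths() is the public entry that takes tb->lock around __tb_disconnect_xdomain_paths(), while callers that already hold the lock, such as the hotplug handler at line 1726, call the worker directly (there with -1 wildcards to drop every path). A sketch of the wrapper; the return-value handling is assumed:

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				       int transmit_path, int transmit_ring,
				       int receive_path, int receive_ring)
{
	mutex_lock(&tb->lock);
	__tb_disconnect_xdomain_paths(tb, xd, transmit_path, transmit_ring,
				      receive_path, receive_ring);
	mutex_unlock(&tb->lock);

	return 0;
}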
1655 * Executes on tb->wq.
1660 struct tb *tb = ev->tb;
1661 struct tb_cm *tcm = tb_priv(tb);
1666 pm_runtime_get_sync(&tb->dev);
1668 mutex_lock(&tb->lock);
1672 sw = tb_switch_find_by_route(tb, ev->route);
1674 tb_warn(tb,
1680 tb_warn(tb,
1687 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1700 tb_free_invalid_tunnels(tb);
1710 tb_recalc_estimated_bandwidth(tb);
1711 tb_tunnel_dp(tb);
1726 __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
1730 tb_dp_resource_unavailable(tb, port);
1750 tb_dp_resource_available(tb, port);
1760 mutex_unlock(&tb->lock);
1762 pm_runtime_mark_last_busy(&tb->dev);
1763 pm_runtime_put_autosuspend(&tb->dev);
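Both work handlers (tb_handle_hotplug starting at line 1660 and the bandwidth-request handler at line 1890) bracket their bodies the same way: hold a runtime-PM reference so the domain cannot suspend mid-update, and take tb->lock for the topology walk. The shape, with the body elided:

	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	/* ... resolve ev->route to a switch/port, handle plug/unplug ... */
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);	/* the work item owns its event allocation (assumed) */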
1774 struct tb *tb = tunnel->tb;
1855 ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1864 ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
1880 tb_reclaim_usb3_bandwidth(tb, in, out);
1890 struct tb *tb = ev->tb;
1891 struct tb_cm *tcm = tb_priv(tb);
1894 pm_runtime_get_sync(&tb->dev);
1896 mutex_lock(&tb->lock);
1900 sw = tb_switch_find_by_route(tb, ev->route);
1902 tb_warn(tb, "bandwidth request from non-existent router %llx\n",
1932 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1959 tb_recalc_estimated_bandwidth(tb);
1965 mutex_unlock(&tb->lock);
1967 pm_runtime_mark_last_busy(&tb->dev);
1968 pm_runtime_put_autosuspend(&tb->dev);
1973 static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
1981 ev->tb = tb;
1985 queue_work(tb->wq, &ev->work);
1988 static void tb_handle_notification(struct tb *tb, u64 route,
1996 if (tb_cfg_ack_notification(tb->ctl, route, error))
1997 tb_warn(tb, "could not ack notification on %llx\n",
2002 if (tb_cfg_ack_notification(tb->ctl, route, error))
2003 tb_warn(tb, "could not ack notification on %llx\n",
2005 tb_queue_dp_bandwidth_request(tb, route, error->port);
2019 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
2027 tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
2032 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
2036 if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
2037 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
2041 tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
2044 static void tb_stop(struct tb *tb)
2046 struct tb_cm *tcm = tb_priv(tb);
2062 tb_switch_remove(tb->root_switch);
2087 static int tb_start(struct tb *tb)
2089 struct tb_cm *tcm = tb_priv(tb);
2092 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
2093 if (IS_ERR(tb->root_switch))
2094 return PTR_ERR(tb->root_switch);
2104 tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
2106 tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
2108 ret = tb_switch_configure(tb->root_switch);
2110 tb_switch_put(tb->root_switch);
2115 ret = tb_switch_add(tb->root_switch);
2117 tb_switch_put(tb->root_switch);
2125 tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
2127 tb_switch_tmu_enable(tb->root_switch);
2129 tb_scan_switch(tb->root_switch);
2131 tb_discover_tunnels(tb);
2133 tb_discover_dp_resources(tb);
2138 tb_create_usb3_tunnels(tb->root_switch);
2140 tb_add_dp_resources(tb->root_switch);
2142 device_for_each_child(&tb->root_switch->dev, NULL,
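tb_start() (lines 2087-2142) is the one place the bring-up order is visible end to end. Condensed into a comment, keeping only steps present in the fragments above:

	/*
	 * tb_start() bring-up order:
	 *  1. allocate the root switch (failure here is fatal for the domain)
	 *  2. configure it and add it to the bus (tb_switch_configure/add),
	 *     dropping the reference on error
	 *  3. configure and enable TMU for time synchronization
	 *  4. scan downstream ports for attached devices (tb_scan_switch)
	 *  5. discover tunnels set up by the boot firmware, then the DP
	 *     resources those tunnels already consume
	 *  6. create USB3 tunnels for the daisy chain and register the
	 *     remaining DP resources
	 */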
2150 static int tb_suspend_noirq(struct tb *tb)
2152 struct tb_cm *tcm = tb_priv(tb);
2154 tb_dbg(tb, "suspending...\n");
2155 tb_disconnect_and_release_dp(tb);
2156 tb_switch_suspend(tb->root_switch, false);
2158 tb_dbg(tb, "suspend finished\n");
2194 static int tb_resume_noirq(struct tb *tb)
2196 struct tb_cm *tcm = tb_priv(tb);
2201 tb_dbg(tb, "resuming...\n");
2204 tb_switch_reset(tb->root_switch);
2206 tb_switch_resume(tb->root_switch);
2207 tb_free_invalid_tunnels(tb);
2208 tb_free_unplugged_children(tb->root_switch);
2209 tb_restore_children(tb->root_switch);
2217 tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
2240 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
2245 tb_dbg(tb, "resume finished\n");
2272 static int tb_freeze_noirq(struct tb *tb)
2274 struct tb_cm *tcm = tb_priv(tb);
2280 static int tb_thaw_noirq(struct tb *tb)
2282 struct tb_cm *tcm = tb_priv(tb);
2288 static void tb_complete(struct tb *tb)
2295 mutex_lock(&tb->lock);
2296 if (tb_free_unplugged_xdomains(tb->root_switch))
2297 tb_scan_switch(tb->root_switch);
2298 mutex_unlock(&tb->lock);
2301 static int tb_runtime_suspend(struct tb *tb)
2303 struct tb_cm *tcm = tb_priv(tb);
2305 mutex_lock(&tb->lock);
2306 tb_switch_suspend(tb->root_switch, true);
2308 mutex_unlock(&tb->lock);
2316 struct tb *tb = tcm_to_tb(tcm);
2318 mutex_lock(&tb->lock);
2319 if (tb->root_switch) {
2320 tb_free_unplugged_children(tb->root_switch);
2321 tb_free_unplugged_xdomains(tb->root_switch);
2323 mutex_unlock(&tb->lock);
2326 static int tb_runtime_resume(struct tb *tb)
2328 struct tb_cm *tcm = tb_priv(tb);
2331 mutex_lock(&tb->lock);
2332 tb_switch_resume(tb->root_switch);
2333 tb_free_invalid_tunnels(tb);
2334 tb_restore_children(tb->root_switch);
2338 mutex_unlock(&tb->lock);
2345 queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
2434 struct tb *tb_probe(struct tb_nhi *nhi)
2437 struct tb *tb;
2439 tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
2440 if (!tb)
2444 tb->security_level = TB_SECURITY_USER;
2446 tb->security_level = TB_SECURITY_NOPCIE;
2448 tb->cm_ops = &tb_cm_ops;
2450 tcm = tb_priv(tb);
2456 tb_dbg(tb, "using software connection manager\n");
2464 tb_warn(tb, "device links to tunneled native ports are missing!\n");
2466 return tb;
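tb_probe() ties the pieces together: one allocation covering the domain plus the tb_cm private area, the security level picked by policy, and the software connection manager ops table installed. A sketch; the policy condition and the tcm field initialization details are assumed:

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	/* struct tb and the tb_cm private area share one allocation */
	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
	if (!tb)
		return NULL;

	/* assumed policy: USER when PCIe tunneling is allowed, else NOPCIE */
	tb->security_level = TB_SECURITY_USER;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);	/* assumed initialization */
	INIT_LIST_HEAD(&tcm->dp_resources);

	tb_dbg(tb, "using software connection manager\n");

	return tb;
}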