Lines matching refs: sw — references to the struct tb_switch *sw pointer in the Linux Thunderbolt driver's switch code (drivers/thunderbolt/switch.c). Each match is prefixed with its line number in the source file.

42 static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
47 if (uuid_equal(&st->uuid, sw->uuid))
54 static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
59 st = __nvm_get_auth_status(sw);
65 static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
69 if (WARN_ON(!sw->uuid))
73 st = __nvm_get_auth_status(sw);
80 memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
90 static void nvm_clear_auth_status(const struct tb_switch *sw)
95 st = __nvm_get_auth_status(sw);
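The matches at lines 42-95 outline a small global cache that remembers the last NVM authentication status per router, keyed by UUID; the result has to survive re-enumeration because a router drops off the bus while its flash is authenticated. A minimal reconstruction consistent with the matched lines — the list head and lock names, and the allocation flow, are assumptions not visible in the listing:

	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>
	#include <linux/uuid.h>
	/* struct tb_switch comes from the driver's local tb.h */

	struct nvm_auth_status {
		struct list_head list;
		uuid_t uuid;
		u32 status;
	};

	static LIST_HEAD(nvm_auth_status_cache);	/* assumed name */
	static DEFINE_MUTEX(nvm_auth_status_lock);	/* assumed name */

	static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
	{
		struct nvm_auth_status *st;

		list_for_each_entry(st, &nvm_auth_status_cache, list) {
			if (uuid_equal(&st->uuid, sw->uuid))
				return st;
		}
		return NULL;
	}

	static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
	{
		struct nvm_auth_status *st;

		if (WARN_ON(!sw->uuid))
			return;

		mutex_lock(&nvm_auth_status_lock);
		st = __nvm_get_auth_status(sw);
		if (!st) {
			st = kzalloc(sizeof(*st), GFP_KERNEL);
			if (!st)
				goto unlock;
			memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
			INIT_LIST_HEAD(&st->list);
			list_add_tail(&st->list, &nvm_auth_status_cache);
		}
		st->status = status;
	unlock:
		mutex_unlock(&nvm_auth_status_lock);
	}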
103 static int nvm_validate_and_write(struct tb_switch *sw)
106 const u8 *buf = sw->nvm->buf;
113 image_size = sw->nvm->buf_data_size;
137 if (!sw->safe_mode) {
145 if (device_id != sw->config.device_id)
148 if (sw->generation < 3) {
150 ret = dma_port_flash_write(sw->dma_port,
162 if (tb_switch_is_usb4(sw))
163 ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
165 ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
167 sw->nvm->flushed = true;
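Lines 103-167 are the image validation and write path: the buffered image is sanity-checked, the device ID embedded in it must match the router's config space (line 145), generation < 3 routers get their CSS headers written separately (line 150), and the final write dispatches to USB4 or the DMA port. A condensed sketch; the header parsing and the NVM_* / DMA_PORT_CSS_* constants are assumptions beyond what the listing shows:

	static int nvm_validate_and_write(struct tb_switch *sw)
	{
		unsigned int image_size, hdr_size;
		const u8 *buf = sw->nvm->buf;
		int ret;

		if (!buf)
			return -EINVAL;

		image_size = sw->nvm->buf_data_size;
		if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
			return -EINVAL;

		/* Image header size comes from the first bytes of the image */
		hdr_size = buf[0] | (buf[1] << 8) | (buf[2] << 16);
		if (hdr_size + NVM_DEVID + 2 >= image_size)
			return -EINVAL;

		if (!sw->safe_mode) {
			u16 device_id;

			/* The ID in the image must match the router itself */
			device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
			if (device_id != sw->config.device_id)
				return -EINVAL;

			if (sw->generation < 3) {
				/* Pre-gen3 hardware: write CSS headers separately */
				ret = dma_port_flash_write(sw->dma_port,
						DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
						DMA_PORT_CSS_MAX_SIZE);
				if (ret)
					return ret;
			}

			/* The headers are then skipped from the main write */
			buf += hdr_size;
			image_size -= hdr_size;
		}

		if (tb_switch_is_usb4(sw))
			ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
		else
			ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
		if (!ret)
			sw->nvm->flushed = true;

		return ret;
	}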
171 static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
180 if (!sw->safe_mode) {
183 ret = tb_domain_disconnect_all_paths(sw->tb);
190 ret = dma_port_flash_update_auth(sw->dma_port);
198 tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
199 if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
200 nvm_set_auth_status(sw, status);
207 dma_port_power_cycle(sw->dma_port);
211 static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
215 ret = dma_port_flash_update_auth(sw->dma_port);
236 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
241 tb_sw_warn(sw, "failed to authenticate NVM\n");
242 nvm_set_auth_status(sw, status);
245 tb_sw_info(sw, "power cycling the switch now\n");
246 dma_port_power_cycle(sw->dma_port);
256 static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
266 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
271 static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
275 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
280 static inline bool nvm_readable(struct tb_switch *sw)
282 if (tb_switch_is_usb4(sw)) {
289 return usb4_switch_nvm_sector_size(sw) > 0;
293 return !!sw->dma_port;
296 static inline bool nvm_upgradeable(struct tb_switch *sw)
298 if (sw->no_nvm_upgrade)
300 return nvm_readable(sw);
303 static inline int nvm_read(struct tb_switch *sw, unsigned int address,
306 if (tb_switch_is_usb4(sw))
307 return usb4_switch_nvm_read(sw, address, buf, size);
308 return dma_port_flash_read(sw->dma_port, address, buf, size);
311 static int nvm_authenticate(struct tb_switch *sw)
315 if (tb_switch_is_usb4(sw))
316 return usb4_switch_nvm_authenticate(sw);
318 if (!tb_route(sw)) {
319 nvm_authenticate_start_dma_port(sw);
320 ret = nvm_authenticate_host_dma_port(sw);
322 ret = nvm_authenticate_device_dma_port(sw);
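Lines 280-322 show the dispatch convention this file uses throughout: tb_switch_is_usb4() routes to a usb4_switch_* implementation, everything else falls back to the DMA port, and the host router (empty route, line 318) needs extra handling around authentication. Reconstructed from the matched lines:

	static inline int nvm_read(struct tb_switch *sw, unsigned int address,
				   void *buf, size_t size)
	{
		if (tb_switch_is_usb4(sw))
			return usb4_switch_nvm_read(sw, address, buf, size);
		return dma_port_flash_read(sw->dma_port, address, buf, size);
	}

	static int nvm_authenticate(struct tb_switch *sw)
	{
		int ret;

		/* USB4 routers have a native NVM authenticate operation */
		if (tb_switch_is_usb4(sw))
			return usb4_switch_nvm_authenticate(sw);

		/* An empty route means the host router itself */
		if (!tb_route(sw)) {
			nvm_authenticate_start_dma_port(sw);
			ret = nvm_authenticate_host_dma_port(sw);
		} else {
			ret = nvm_authenticate_device_dma_port(sw);
		}

		return ret;
	}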
332 struct tb_switch *sw = tb_to_switch(nvm->dev);
335 pm_runtime_get_sync(&sw->dev);
337 if (!mutex_trylock(&sw->tb->lock)) {
342 ret = nvm_read(sw, offset, val, bytes);
343 mutex_unlock(&sw->tb->lock);
346 pm_runtime_mark_last_busy(&sw->dev);
347 pm_runtime_put_autosuspend(&sw->dev);
356 struct tb_switch *sw = tb_to_switch(nvm->dev);
359 if (!mutex_trylock(&sw->tb->lock))
369 mutex_unlock(&sw->tb->lock);
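Lines 332-369 give the NVMem access callbacks their shape: wake the device with runtime PM, then take the domain lock without blocking; when the lock is contended the syscall is restarted rather than slept on. A sketch of the read side — the restart_syscall() fallback matches this driver's usual trylock idiom but is not visible in the listing:

	static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
				      size_t bytes)
	{
		struct tb_nvm *nvm = priv;
		struct tb_switch *sw = tb_to_switch(nvm->dev);
		int ret;

		pm_runtime_get_sync(&sw->dev);

		if (!mutex_trylock(&sw->tb->lock)) {
			ret = restart_syscall();	/* assumed fallback */
			goto out;
		}

		ret = nvm_read(sw, offset, val, bytes);
		mutex_unlock(&sw->tb->lock);

	out:
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_put_autosuspend(&sw->dev);

		return ret;
	}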
374 static int tb_switch_nvm_add(struct tb_switch *sw)
380 if (!nvm_readable(sw))
388 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
389 sw->config.vendor_id != 0x8087) {
390 dev_info(&sw->dev,
392 sw->config.vendor_id);
396 nvm = tb_nvm_alloc(&sw->dev);
405 if (!sw->safe_mode) {
408 ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
412 hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
416 ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
428 if (!sw->no_nvm_upgrade) {
435 sw->nvm = nvm;
443 static void tb_switch_nvm_remove(struct tb_switch *sw)
447 nvm = sw->nvm;
448 sw->nvm = NULL;
455 nvm_clear_auth_status(sw);
601 if (credits == 0 || port->sw->is_unplugged)
608 if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
667 if (tb_switch_is_icm(port->sw))
671 if (tb_switch_is_usb4(port->sw))
736 tb_dbg(port->sw->tb, " Port %d: not implemented\n",
762 tb_dump_port(port->sw->tb, &port->config);
845 const struct tb_switch *sw)
848 return (tb_route(parent) & mask) == (tb_route(sw) & mask);
874 if (prev->sw == end->sw) {
880 if (tb_switch_is_reachable(prev->sw, end->sw)) {
881 next = tb_port_at(tb_route(end->sw), prev->sw);
890 next = tb_upstream_port(prev->sw);
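The helper behind line 848 is worth spelling out. A route string stores the downstream port taken at each hop in consecutive bytes, so a parent at depth d shares the low d bytes of its route with every router below it; reachability is a masked comparison:

	/*
	 * Bits 0-7 of a route are the port at depth 1, bits 8-15 the port
	 * at depth 2, and so on. @parent can reach @sw iff their routes
	 * agree on all hops up to @parent's own depth.
	 */
	static bool tb_switch_is_reachable(const struct tb_switch *parent,
					   const struct tb_switch *sw)
	{
		u64 mask = (1ULL << parent->config.depth * 8) - 1;

		return (tb_route(parent) & mask) == (tb_route(sw) & mask);
	}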
1239 static const char *tb_switch_generation_name(const struct tb_switch *sw)
1241 switch (sw->generation) {
1255 static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
1257 const struct tb_regs_switch_header *regs = &sw->config;
1260 tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
1275 * @sw: Switch to reset
1279 int tb_switch_reset(struct tb_switch *sw)
1283 if (sw->generation > 1)
1286 tb_sw_dbg(sw, "resetting switch\n");
1288 res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
1292 res = tb_cfg_reset(sw->tb->ctl, tb_route(sw), TB_CFG_DEFAULT_TIMEOUT);
1305 static int tb_plug_events_active(struct tb_switch *sw, bool active)
1310 if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
1313 sw->config.plug_events_delay = 0xff;
1314 res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
1318 res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
1324 switch (sw->config.device_id) {
1335 return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1336 sw->cap_plug_events + 1, 1);
1343 struct tb_switch *sw = tb_to_switch(dev);
1345 return sprintf(buf, "%u\n", sw->authorized);
1348 static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1352 if (!mutex_trylock(&sw->tb->lock))
1355 if (sw->authorized)
1361 if (sw->key)
1362 ret = tb_domain_approve_switch_key(sw->tb, sw);
1364 ret = tb_domain_approve_switch(sw->tb, sw);
1369 if (sw->key)
1370 ret = tb_domain_challenge_switch_key(sw->tb, sw);
1378 sw->authorized = val;
1380 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
1384 mutex_unlock(&sw->tb->lock);
1392 struct tb_switch *sw = tb_to_switch(dev);
1402 pm_runtime_get_sync(&sw->dev);
1403 ret = tb_switch_set_authorized(sw, val);
1404 pm_runtime_mark_last_busy(&sw->dev);
1405 pm_runtime_put_autosuspend(&sw->dev);
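Lines 1343-1405 are the authorized attribute through which user space approves devices: writing 1 approves the device (using a stored challenge key when present), writing 2 challenges the key. A condensed sketch of the core helper; the exact error codes and the restart_syscall() fallback are assumptions:

	static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
	{
		int ret = -EINVAL;

		if (!mutex_trylock(&sw->tb->lock))
			return restart_syscall();	/* assumed fallback */

		if (sw->authorized)
			goto unlock;

		switch (val) {
		case 1:
			/* Approve, with the stored key if one exists */
			if (sw->key)
				ret = tb_domain_approve_switch_key(sw->tb, sw);
			else
				ret = tb_domain_approve_switch(sw->tb, sw);
			break;
		case 2:
			/* Challenge; only meaningful with a key */
			if (sw->key)
				ret = tb_domain_challenge_switch_key(sw->tb, sw);
			break;
		default:
			break;
		}

		if (!ret) {
			sw->authorized = val;
			/* Notify user space of the change */
			kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
		}

	unlock:
		mutex_unlock(&sw->tb->lock);
		return ret;
	}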
1414 struct tb_switch *sw = tb_to_switch(dev);
1416 return sprintf(buf, "%u\n", sw->boot);
1423 struct tb_switch *sw = tb_to_switch(dev);
1425 return sprintf(buf, "%#x\n", sw->device);
1432 struct tb_switch *sw = tb_to_switch(dev);
1434 return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
1441 struct tb_switch *sw = tb_to_switch(dev);
1443 return sprintf(buf, "%u\n", sw->generation);
1450 struct tb_switch *sw = tb_to_switch(dev);
1453 if (!mutex_trylock(&sw->tb->lock))
1456 if (sw->key)
1457 ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1461 mutex_unlock(&sw->tb->lock);
1468 struct tb_switch *sw = tb_to_switch(dev);
1478 if (!mutex_trylock(&sw->tb->lock))
1481 if (sw->authorized) {
1484 kfree(sw->key);
1486 sw->key = NULL;
1488 sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1489 if (!sw->key)
1494 mutex_unlock(&sw->tb->lock);
1502 struct tb_switch *sw = tb_to_switch(dev);
1504 return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
1517 struct tb_switch *sw = tb_to_switch(dev);
1519 return sprintf(buf, "%u\n", sw->link_width);
1532 struct tb_switch *sw = tb_to_switch(dev);
1535 nvm_get_auth_status(sw, &status);
1542 struct tb_switch *sw = tb_to_switch(dev);
1546 pm_runtime_get_sync(&sw->dev);
1548 if (!mutex_trylock(&sw->tb->lock)) {
1554 if (!sw->nvm) {
1564 nvm_clear_auth_status(sw);
1567 if (!sw->nvm->flushed) {
1568 if (!sw->nvm->buf) {
1573 ret = nvm_validate_and_write(sw);
1579 ret = tb_lc_force_power(sw);
1581 sw->nvm->authenticating = true;
1582 ret = nvm_authenticate(sw);
1588 mutex_unlock(&sw->tb->lock);
1590 pm_runtime_mark_last_busy(&sw->dev);
1591 pm_runtime_put_autosuspend(&sw->dev);
1625 struct tb_switch *sw = tb_to_switch(dev);
1628 if (!mutex_trylock(&sw->tb->lock))
1631 if (sw->safe_mode)
1633 else if (!sw->nvm)
1636 ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
1638 mutex_unlock(&sw->tb->lock);
1647 struct tb_switch *sw = tb_to_switch(dev);
1649 return sprintf(buf, "%#x\n", sw->vendor);
1656 struct tb_switch *sw = tb_to_switch(dev);
1658 return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
1665 struct tb_switch *sw = tb_to_switch(dev);
1667 return sprintf(buf, "%pUb\n", sw->uuid);
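Lines 1414-1667 are a run of near-identical sysfs show callbacks: recover the tb_switch from the struct device and format a single field. The full shape, using the device attribute from line 1425 (the DEVICE_ATTR_RO wiring is assumed rather than shown in the matches):

	static ssize_t device_show(struct device *dev, struct device_attribute *attr,
				   char *buf)
	{
		struct tb_switch *sw = tb_to_switch(dev);

		return sprintf(buf, "%#x\n", sw->device);
	}
	static DEVICE_ATTR_RO(device);	/* assumed wiring */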
1695 struct tb_switch *sw = tb_to_switch(dev);
1698 if (!sw->device)
1701 if (!sw->device_name)
1704 if (!sw->vendor)
1707 if (!sw->vendor_name)
1710 if (tb_route(sw) &&
1711 sw->tb->security_level == TB_SECURITY_SECURE &&
1712 sw->security_level == TB_SECURITY_SECURE)
1719 if (tb_route(sw))
1723 if (nvm_upgradeable(sw))
1727 if (nvm_readable(sw))
1731 if (tb_route(sw))
1735 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
1740 return sw->safe_mode ? 0 : attr->mode;
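Lines 1695-1740 gate those attributes per device with an is_visible callback: for example the key file only appears when both the domain and the router run at the secure security level (lines 1710-1712), and everything collapses to invisible in safe mode (line 1740). Skeleton, with the attribute array name assumed:

	static umode_t switch_attr_is_visible(struct kobject *kobj,
					      struct attribute *attr, int n)
	{
		struct device *dev = kobj_to_dev(kobj);
		struct tb_switch *sw = tb_to_switch(dev);

		if (attr == &dev_attr_key.attr) {
			if (tb_route(sw) &&
			    sw->tb->security_level == TB_SECURITY_SECURE &&
			    sw->security_level == TB_SECURITY_SECURE)
				return attr->mode;
			return 0;
		}
		/* ... similar checks for the other attributes ... */

		return sw->safe_mode ? 0 : attr->mode;
	}

	static const struct attribute_group switch_group = {
		.is_visible = switch_attr_is_visible,
		.attrs = switch_attrs,		/* assumed member list */
	};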
1755 struct tb_switch *sw = tb_to_switch(dev);
1758 dma_port_free(sw->dma_port);
1760 tb_switch_for_each_port(sw, port) {
1765 kfree(sw->uuid);
1766 kfree(sw->device_name);
1767 kfree(sw->vendor_name);
1768 kfree(sw->ports);
1769 kfree(sw->drom);
1770 kfree(sw->key);
1771 kfree(sw);
1780 struct tb_switch *sw = tb_to_switch(dev);
1781 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
1784 return cm_ops->runtime_suspend_switch(sw);
1791 struct tb_switch *sw = tb_to_switch(dev);
1792 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
1795 return cm_ops->runtime_resume_switch(sw);
1810 static int tb_switch_get_generation(struct tb_switch *sw)
1812 switch (sw->config.device_id) {
1841 if (tb_switch_is_usb4(sw))
1848 tb_sw_warn(sw, "unsupported switch device id %#x\n",
1849 sw->config.device_id);
1854 static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
1858 if (tb_switch_is_usb4(sw) ||
1859 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
1884 struct tb_switch *sw;
1903 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1904 if (!sw)
1907 sw->tb = tb;
1908 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
1912 sw->generation = tb_switch_get_generation(sw);
1915 tb_dump_switch(tb, sw);
1918 sw->config.upstream_port_number = upstream_port;
1919 sw->config.depth = depth;
1920 sw->config.route_hi = upper_32_bits(route);
1921 sw->config.route_lo = lower_32_bits(route);
1922 sw->config.enabled = 0;
1925 if (tb_switch_exceeds_max_depth(sw, depth)) {
1931 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
1933 if (!sw->ports) {
1938 for (i = 0; i <= sw->config.max_port_number; i++) {
1940 sw->ports[i].sw = sw;
1941 sw->ports[i].port = i;
1945 ida_init(&sw->ports[i].in_hopids);
1946 ida_init(&sw->ports[i].out_hopids);
1950 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
1952 sw->cap_plug_events = ret;
1954 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
1956 sw->cap_lc = ret;
1960 sw->authorized = true;
1962 device_initialize(&sw->dev);
1963 sw->dev.parent = parent;
1964 sw->dev.bus = &tb_bus_type;
1965 sw->dev.type = &tb_switch_type;
1966 sw->dev.groups = switch_groups;
1967 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
1969 return sw;
1972 kfree(sw->ports);
1973 kfree(sw);
1995 struct tb_switch *sw;
1997 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1998 if (!sw)
2001 sw->tb = tb;
2002 sw->config.depth = tb_route_length(route);
2003 sw->config.route_hi = upper_32_bits(route);
2004 sw->config.route_lo = lower_32_bits(route);
2005 sw->safe_mode = true;
2007 device_initialize(&sw->dev);
2008 sw->dev.parent = parent;
2009 sw->dev.bus = &tb_bus_type;
2010 sw->dev.type = &tb_switch_type;
2011 sw->dev.groups = switch_groups;
2012 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2014 return sw;
2019 * @sw: Switch to configure
2028 int tb_switch_configure(struct tb_switch *sw)
2030 struct tb *tb = sw->tb;
2034 route = tb_route(sw);
2037 sw->config.enabled ? "restoring" : "initializing", route,
2038 tb_route_length(route), sw->config.upstream_port_number);
2040 sw->config.enabled = 1;
2042 if (tb_switch_is_usb4(sw)) {
2048 sw->config.cmuv = USB4_VERSION_1_0;
2049 sw->config.plug_events_delay = 0xa;
2052 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2057 ret = usb4_switch_setup(sw);
2059 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2060 tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2061 sw->config.vendor_id);
2063 if (!sw->cap_plug_events) {
2064 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2069 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2075 return tb_plug_events_active(sw, true);
2078 static int tb_switch_set_uuid(struct tb_switch *sw)
2084 if (sw->uuid)
2087 if (tb_switch_is_usb4(sw)) {
2088 ret = usb4_switch_read_uid(sw, &sw->uid);
2097 ret = tb_lc_read_uuid(sw, uuid);
2112 uuid[0] = sw->uid & 0xffffffff;
2113 uuid[1] = (sw->uid >> 32) & 0xffffffff;
2118 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2119 if (!sw->uuid)
2124 static int tb_switch_add_dma_port(struct tb_switch *sw)
2129 switch (sw->generation) {
2132 if (tb_route(sw))
2137 ret = tb_switch_set_uuid(sw);
2147 if (!sw->safe_mode)
2153 if (!tb_route(sw) && !tb_switch_is_icm(sw))
2156 sw->dma_port = dma_port_alloc(sw);
2157 if (!sw->dma_port)
2160 if (sw->no_nvm_upgrade)
2169 nvm_get_auth_status(sw, &status);
2171 if (!tb_route(sw))
2172 nvm_authenticate_complete_dma_port(sw);
2181 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2186 if (!tb_route(sw))
2187 nvm_authenticate_complete_dma_port(sw);
2190 tb_sw_info(sw, "switch flash authentication failed\n");
2191 nvm_set_auth_status(sw, status);
2194 tb_sw_info(sw, "power cycling the switch now\n");
2195 dma_port_power_cycle(sw->dma_port);
2204 static void tb_switch_default_link_ports(struct tb_switch *sw)
2208 for (i = 1; i <= sw->config.max_port_number; i++) {
2209 struct tb_port *port = &sw->ports[i];
2216 if (i == sw->config.max_port_number ||
2217 !tb_port_is_null(&sw->ports[i + 1]))
2221 subordinate = &sw->ports[i + 1];
2228 tb_sw_dbg(sw, "linked ports %d <-> %d\n",
2234 static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
2236 const struct tb_port *up = tb_upstream_port(sw);
2241 if (tb_switch_is_usb4(sw))
2242 return usb4_switch_lane_bonding_possible(sw);
2243 return tb_lc_lane_bonding_possible(sw);
2246 static int tb_switch_update_link_attributes(struct tb_switch *sw)
2252 if (!tb_route(sw) || tb_switch_is_icm(sw))
2255 up = tb_upstream_port(sw);
2260 if (sw->link_speed != ret)
2262 sw->link_speed = ret;
2267 if (sw->link_width != ret)
2269 sw->link_width = ret;
2272 if (device_is_registered(&sw->dev) && change)
2273 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2280 * @sw: Switch to enable lane bonding
2286 int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2288 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2290 u64 route = tb_route(sw);
2296 if (!tb_switch_lane_bonding_possible(sw))
2299 up = tb_upstream_port(sw);
2326 tb_switch_update_link_attributes(sw);
2328 tb_sw_dbg(sw, "lane bonding enabled\n");
2334 * @sw: Switch whose lane bonding to disable
2336 * Disables lane bonding between @sw and parent. This can be called even
2339 void tb_switch_lane_bonding_disable(struct tb_switch *sw)
2341 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2344 if (!tb_route(sw))
2347 up = tb_upstream_port(sw);
2351 down = tb_port_at(tb_route(sw), parent);
2356 tb_switch_update_link_attributes(sw);
2357 tb_sw_dbg(sw, "lane bonding disabled\n");
2362 * @sw: Switch whose link is configured
2364 * Sets the link upstream from @sw configured (from both ends) so that
2372 int tb_switch_configure_link(struct tb_switch *sw)
2377 if (!tb_route(sw) || tb_switch_is_icm(sw))
2380 up = tb_upstream_port(sw);
2381 if (tb_switch_is_usb4(up->sw))
2389 if (tb_switch_is_usb4(down->sw))
2396 * @sw: Switch whose link is unconfigured
2398 * Sets the link unconfigured so the @sw will be disconnected if the
2401 void tb_switch_unconfigure_link(struct tb_switch *sw)
2405 if (sw->is_unplugged)
2407 if (!tb_route(sw) || tb_switch_is_icm(sw))
2410 up = tb_upstream_port(sw);
2411 if (tb_switch_is_usb4(up->sw))
2417 if (tb_switch_is_usb4(down->sw))
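Lines 2372-2417 configure and unconfigure both ends of the upstream link, splitting once more between USB4 ports and the legacy link controller. A sketch of the configure side; the usb4_port_configure()/tb_lc_configure_port() names follow the file's dispatch convention but are assumptions here:

	int tb_switch_configure_link(struct tb_switch *sw)
	{
		struct tb_port *up, *down;
		int ret;

		if (!tb_route(sw) || tb_switch_is_icm(sw))
			return 0;

		/* Configure the router's own end of the link first */
		up = tb_upstream_port(sw);
		if (tb_switch_is_usb4(up->sw))
			ret = usb4_port_configure(up);
		else
			ret = tb_lc_configure_port(up);
		if (ret)
			return ret;

		/* Then the parent's downstream end */
		down = up->remote;
		if (tb_switch_is_usb4(down->sw))
			return usb4_port_configure(down);
		return tb_lc_configure_port(down);
	}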
2423 static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
2427 if (tb_switch_is_icm(sw))
2430 tb_switch_for_each_port(sw, port) {
2445 * @sw: Switch to add
2455 int tb_switch_add(struct tb_switch *sw)
2466 ret = tb_switch_add_dma_port(sw);
2468 dev_err(&sw->dev, "failed to add DMA port\n");
2472 if (!sw->safe_mode) {
2474 ret = tb_drom_read(sw);
2476 dev_err(&sw->dev, "reading DROM failed\n");
2479 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
2481 ret = tb_switch_set_uuid(sw);
2483 dev_err(&sw->dev, "failed to set UUID\n");
2487 for (i = 0; i <= sw->config.max_port_number; i++) {
2488 if (sw->ports[i].disabled) {
2489 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
2492 ret = tb_init_port(&sw->ports[i]);
2494 dev_err(&sw->dev, "failed to initialize port %d\n", i);
2499 tb_switch_default_link_ports(sw);
2501 ret = tb_switch_update_link_attributes(sw);
2505 ret = tb_switch_tmu_init(sw);
2510 ret = tb_switch_port_hotplug_enable(sw);
2514 ret = device_add(&sw->dev);
2516 dev_err(&sw->dev, "failed to add device: %d\n", ret);
2520 if (tb_route(sw)) {
2521 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
2522 sw->vendor, sw->device);
2523 if (sw->vendor_name && sw->device_name)
2524 dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
2525 sw->device_name);
2528 ret = tb_switch_nvm_add(sw);
2530 dev_err(&sw->dev, "failed to add NVM devices\n");
2531 device_del(&sw->dev);
2540 device_init_wakeup(&sw->dev, true);
2542 pm_runtime_set_active(&sw->dev);
2543 if (sw->rpm) {
2544 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
2545 pm_runtime_use_autosuspend(&sw->dev);
2546 pm_runtime_mark_last_busy(&sw->dev);
2547 pm_runtime_enable(&sw->dev);
2548 pm_request_autosuspend(&sw->dev);
2551 tb_switch_debugfs_init(sw);
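Taken together, lines 1884-2551 define the bring-up lifecycle: tb_switch_alloc() reads the config space and stamps route/depth, tb_switch_configure() writes the config back with enabled set, and tb_switch_add() probes DROM, UUID, ports and NVM before registering the device. A hypothetical caller in the style of a connection manager's hot-plug scan — everything except the three lifecycle calls, tb_downstream_route() and tb_switch_put() is illustrative:

	static void scan_downstream_port(struct tb *tb, struct tb_port *port)
	{
		struct tb_switch *sw;

		/* Allocate and read the router behind @port */
		sw = tb_switch_alloc(tb, &port->sw->dev,
				     tb_downstream_route(port));
		if (IS_ERR(sw))
			return;

		/* Write config back with enabled set, arm plug events */
		if (tb_switch_configure(sw)) {
			tb_switch_put(sw);
			return;
		}

		/* DROM, UUID, ports, NVM, device model registration */
		if (tb_switch_add(sw)) {
			tb_switch_put(sw);
			return;
		}

		/* Wire up the topology links in both directions */
		port->remote = tb_upstream_port(sw);
		tb_upstream_port(sw)->remote = port;
	}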
2557 * @sw: Switch to remove
2563 void tb_switch_remove(struct tb_switch *sw)
2567 tb_switch_debugfs_remove(sw);
2569 if (sw->rpm) {
2570 pm_runtime_get_sync(&sw->dev);
2571 pm_runtime_disable(&sw->dev);
2575 tb_switch_for_each_port(sw, port) {
2577 tb_switch_remove(port->remote->sw);
2588 if (!sw->is_unplugged)
2589 tb_plug_events_active(sw, false);
2591 tb_switch_nvm_remove(sw);
2593 if (tb_route(sw))
2594 dev_info(&sw->dev, "device disconnected\n");
2595 device_unregister(&sw->dev);
2601 void tb_sw_set_unplugged(struct tb_switch *sw)
2605 if (sw == sw->tb->root_switch) {
2606 tb_sw_WARN(sw, "cannot unplug root switch\n");
2609 if (sw->is_unplugged) {
2610 tb_sw_WARN(sw, "is_unplugged already set\n");
2613 sw->is_unplugged = true;
2614 tb_switch_for_each_port(sw, port) {
2616 tb_sw_set_unplugged(port->remote->sw);
2622 static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
2625 tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
2627 tb_sw_dbg(sw, "disabling wakeup\n");
2629 if (tb_switch_is_usb4(sw))
2630 return usb4_switch_set_wake(sw, flags);
2631 return tb_lc_set_wake(sw, flags);
2634 int tb_switch_resume(struct tb_switch *sw)
2639 tb_sw_dbg(sw, "resuming switch\n");
2645 if (tb_route(sw)) {
2653 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
2655 tb_sw_info(sw, "switch not present anymore\n");
2659 if (tb_switch_is_usb4(sw))
2660 err = usb4_switch_read_uid(sw, &uid);
2662 err = tb_drom_read_uid_only(sw, &uid);
2664 tb_sw_warn(sw, "uid read failed\n");
2667 if (sw->uid != uid) {
2668 tb_sw_info(sw,
2670 sw->uid, uid);
2675 err = tb_switch_configure(sw);
2680 tb_switch_set_wake(sw, 0);
2682 err = tb_switch_tmu_init(sw);
2687 tb_switch_for_each_port(sw, port) {
2695 tb_sw_set_unplugged(port->remote->sw);
2705 if (port->remote && tb_switch_resume(port->remote->sw)) {
2708 tb_sw_set_unplugged(port->remote->sw);
2717 * @sw: Switch to suspend
2721 * value of @runtime and then sets sleep bit for the router. If @sw is
2725 void tb_switch_suspend(struct tb_switch *sw, bool runtime)
2731 tb_sw_dbg(sw, "suspending switch\n");
2733 err = tb_plug_events_active(sw, false);
2737 tb_switch_for_each_port(sw, port) {
2739 tb_switch_suspend(port->remote->sw, runtime);
2746 } else if (device_may_wakeup(&sw->dev)) {
2750 tb_switch_set_wake(sw, flags);
2752 if (tb_switch_is_usb4(sw))
2753 usb4_switch_set_sleep(sw);
2755 tb_lc_set_sleep(sw);
2760 * @sw: Switch whose DP resource is queried
2766 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
2768 if (tb_switch_is_usb4(sw))
2769 return usb4_switch_query_dp_resource(sw, in);
2770 return tb_lc_dp_sink_query(sw, in);
2775 * @sw: Switch whose DP resource is allocated
2782 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
2784 if (tb_switch_is_usb4(sw))
2785 return usb4_switch_alloc_dp_resource(sw, in);
2786 return tb_lc_dp_sink_alloc(sw, in);
2791 * @sw: Switch whose DP resource is de-allocated
2797 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
2801 if (tb_switch_is_usb4(sw))
2802 ret = usb4_switch_dealloc_dp_resource(sw, in);
2804 ret = tb_lc_dp_sink_dealloc(sw, in);
2807 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
2821 struct tb_switch *sw = tb_to_switch(dev);
2824 if (!sw)
2826 if (sw->tb != lookup->tb)
2830 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
2833 return sw->config.route_lo == lower_32_bits(lookup->route) &&
2834 sw->config.route_hi == upper_32_bits(lookup->route);
2839 return !sw->depth;
2841 return sw->link == lookup->link && sw->depth == lookup->depth;
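The single match callback at lines 2821-2841 serves three lookup flavors: by UUID, by route, and by link/depth. Callers wrap it in bus_find_device(); a sketch of the UUID flavor, with the tb_sw_lookup fields inferred from the references above:

	struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
	{
		struct tb_sw_lookup lookup;
		struct device *dev;

		memset(&lookup, 0, sizeof(lookup));
		lookup.tb = tb;
		lookup.uuid = uuid;

		/* Takes a device reference when a match is found */
		dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
		if (dev)
			return tb_to_switch(dev);

		return NULL;
	}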
2922 * tb_switch_find_port() - return the first port of @type on @sw or NULL
2923 * @sw: Switch to find the port from
2926 struct tb_port *tb_switch_find_port(struct tb_switch *sw,
2931 tb_switch_for_each_port(sw, port) {
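The listing cuts off inside tb_switch_find_port(); completed here per the iterator pattern used elsewhere in the file (the body past line 2931 is reconstructed, not shown in the matches):

	struct tb_port *tb_switch_find_port(struct tb_switch *sw,
					    enum tb_port_type type)
	{
		struct tb_port *port;

		tb_switch_for_each_port(sw, port) {
			if (port->config.type == type)
				return port;
		}

		return NULL;
	}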