Lines matching references to sw in the Thunderbolt switch driver (Linux kernel, drivers/thunderbolt/switch.c); each entry below is prefixed with its line number in that file.
37 static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
42 if (uuid_equal(&st->uuid, sw->uuid))
49 static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
54 st = __nvm_get_auth_status(sw);
60 static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
64 if (WARN_ON(!sw->uuid))
68 st = __nvm_get_auth_status(sw);
75 memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
85 static void nvm_clear_auth_status(const struct tb_switch *sw)
90 st = __nvm_get_auth_status(sw);
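The three helpers matched above (source lines 37-95) keep a driver-global cache of NVM authentication results keyed by router UUID, so a failed flash authentication can still be reported after the router resets and re-enumerates. A minimal userspace sketch of the same pattern, assuming a plain singly linked list; all names here are illustrative, and the kernel's mutex-protected list and locking are omitted:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct uuid { uint8_t b[16]; };

    struct auth_status {
        struct auth_status *next;
        struct uuid uuid;
        uint32_t status;
    };

    static struct auth_status *auth_list;

    /* Look up the cached entry for a router; NULL when none exists. */
    static struct auth_status *auth_find(const struct uuid *uuid)
    {
        struct auth_status *st;

        for (st = auth_list; st; st = st->next)
            if (!memcmp(&st->uuid, uuid, sizeof(*uuid)))
                return st;
        return NULL;
    }

    /* Record @status, reusing an existing entry when present. */
    static void auth_set(const struct uuid *uuid, uint32_t status)
    {
        struct auth_status *st = auth_find(uuid);

        if (!st) {
            st = calloc(1, sizeof(*st));
            if (!st)
                return;
            memcpy(&st->uuid, uuid, sizeof(st->uuid));
            st->next = auth_list;
            auth_list = st;
        }
        st->status = status;
    }

    /* Drop the cached entry, e.g. once authentication succeeds. */
    static void auth_clear(const struct uuid *uuid)
    {
        struct auth_status **pp;

        for (pp = &auth_list; *pp; pp = &(*pp)->next) {
            if (!memcmp(&(*pp)->uuid, uuid, sizeof(*uuid))) {
                struct auth_status *st = *pp;

                *pp = st->next;
                free(st);
                return;
            }
        }
    }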
98 static int nvm_validate_and_write(struct tb_switch *sw)
104 ret = tb_nvm_validate(sw->nvm);
108 ret = tb_nvm_write_headers(sw->nvm);
112 buf = sw->nvm->buf_data_start;
113 image_size = sw->nvm->buf_data_size;
115 if (tb_switch_is_usb4(sw))
116 ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
118 ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
122 sw->nvm->flushed = true;
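nvm_validate_and_write() (source lines 98-122) validates the uploaded image, writes the headers, and then pushes the image through whichever transport the hardware has: the native USB4 router operation or the legacy DMA port. A hedged sketch of that shape, with the validation and transport supplied as parameters instead of the kernel helpers:

    #include <stdbool.h>
    #include <stddef.h>

    struct nvm_image {
        const void *data;   /* buf_data_start, line 112 */
        size_t size;        /* buf_data_size, line 113 */
        bool flushed;       /* set once written, line 122 */
    };

    typedef int (*write_fn)(unsigned int address, const void *buf, size_t size);

    static int validate_and_write(struct nvm_image *img,
                                  int (*validate)(const struct nvm_image *),
                                  write_fn transport)
    {
        int ret = validate(img);

        if (ret)
            return ret;
        /* Both transports write the image starting at offset 0
         * (lines 116 and 118). */
        ret = transport(0, img->data, img->size);
        if (!ret)
            img->flushed = true;    /* avoid re-writing the same image */
        return ret;
    }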
126 static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
135 if (!sw->safe_mode) {
138 ret = tb_domain_disconnect_all_paths(sw->tb);
145 ret = dma_port_flash_update_auth(sw->dma_port);
153 tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
154 if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
155 nvm_set_auth_status(sw, status);
162 dma_port_power_cycle(sw->dma_port);
166 static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
170 ret = dma_port_flash_update_auth(sw->dma_port);
191 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
196 tb_sw_warn(sw, "failed to authenticate NVM\n");
197 nvm_set_auth_status(sw, status);
200 tb_sw_info(sw, "power cycling the switch now\n");
201 dma_port_power_cycle(sw->dma_port);
211 static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
221 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
226 static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
230 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
235 static inline bool nvm_readable(struct tb_switch *sw)
237 if (tb_switch_is_usb4(sw)) {
244 return usb4_switch_nvm_sector_size(sw) > 0;
248 return !!sw->dma_port;
251 static inline bool nvm_upgradeable(struct tb_switch *sw)
253 if (sw->no_nvm_upgrade)
255 return nvm_readable(sw);
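nvm_readable() and nvm_upgradeable() (source lines 235-255) gate whether NVM devices are exposed at all: USB4 routers report a nonzero sector size only when NVM is present, pre-USB4 routers need a DMA port, and an upgrade can be vetoed separately even when the NVM is readable. A small sketch; the struct and field names are mine:

    #include <stdbool.h>

    struct router {
        bool is_usb4;
        bool has_dma_port;              /* sw->dma_port != NULL, line 248 */
        bool no_nvm_upgrade;            /* upgrade veto, line 253 */
        unsigned int nvm_sector_size;   /* 0 when no NVM, line 244 */
    };

    static bool nvm_readable(const struct router *r)
    {
        if (r->is_usb4)
            return r->nvm_sector_size > 0;
        return r->has_dma_port;
    }

    static bool nvm_upgradeable(const struct router *r)
    {
        /* Readable NVM can still be non-upgradeable. */
        if (r->no_nvm_upgrade)
            return false;
        return nvm_readable(r);
    }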
258 static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
262 if (tb_switch_is_usb4(sw)) {
264 ret = usb4_switch_nvm_set_offset(sw, 0);
268 sw->nvm->authenticating = true;
269 return usb4_switch_nvm_authenticate(sw);
274 sw->nvm->authenticating = true;
275 if (!tb_route(sw)) {
276 nvm_authenticate_start_dma_port(sw);
277 ret = nvm_authenticate_host_dma_port(sw);
279 ret = nvm_authenticate_device_dma_port(sw);
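nvm_authenticate() (source lines 258-279) dispatches on hardware generation: USB4 routers are pointed at the start of the new image and start authentication natively, while older hardware goes through the DMA port, with a separate path for the host router. The sketch below mirrors the visible branches; the auth_only handling on line 258 is elided because its conditionals are not visible in this excerpt, and all helper bodies are stubs:

    #include <stdbool.h>

    struct router {
        bool is_usb4;
        bool is_host;           /* tb_route() == 0, line 275 */
        bool authenticating;
    };

    /* Stubs standing in for the helpers named in the listing. */
    static int usb4_set_offset(struct router *r)   { (void)r; return 0; }
    static int usb4_authenticate(struct router *r) { (void)r; return 0; }
    static int auth_host_dma(struct router *r)     { (void)r; return 0; }
    static int auth_device_dma(struct router *r)   { (void)r; return 0; }

    static int nvm_authenticate(struct router *r)
    {
        int ret;

        if (r->is_usb4) {
            /* Point the router at offset 0 of the image (line 264),
             * then kick off authentication; the result is collected
             * later, after the router resets. */
            ret = usb4_set_offset(r);
            if (ret)
                return ret;
            r->authenticating = true;
            return usb4_authenticate(r);
        }

        /* Pre-USB4: the host router needs special handling because it
         * cannot reset while the domain is still running on it. */
        r->authenticating = true;
        return r->is_host ? auth_host_dma(r) : auth_device_dma(r);
    }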
287 * @sw: Router whose NVM to read
296 int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
299 if (tb_switch_is_usb4(sw))
300 return usb4_switch_nvm_read(sw, address, buf, size);
301 return dma_port_flash_read(sw->dma_port, address, buf, size);
307 struct tb_switch *sw = tb_to_switch(nvm->dev);
310 pm_runtime_get_sync(&sw->dev);
312 if (!mutex_trylock(&sw->tb->lock)) {
317 ret = tb_switch_nvm_read(sw, offset, val, bytes);
318 mutex_unlock(&sw->tb->lock);
321 pm_runtime_mark_last_busy(&sw->dev);
322 pm_runtime_put_autosuspend(&sw->dev);
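The nvmem read callback (source lines 307-322) brackets the actual read with a runtime-PM get/put pair and takes the domain lock with a trylock so a contended lock never blocks the caller; in the kernel the contended case restarts the syscall. A userspace analogue using pthreads; get_device()/put_device() are hypothetical stand-ins for the pm_runtime calls, and this sketch returns -EAGAIN instead of restarting:

    #include <errno.h>
    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t domain_lock = PTHREAD_MUTEX_INITIALIZER;

    static void get_device(void) { }    /* wake the router */
    static void put_device(void) { }    /* allow autosuspend again */

    static int guarded_read(void *buf, size_t len,
                            int (*read_fn)(void *, size_t))
    {
        int ret;

        get_device();
        if (pthread_mutex_trylock(&domain_lock)) {
            /* Contended: back off instead of blocking the caller. */
            put_device();
            return -EAGAIN;
        }
        ret = read_fn(buf, len);
        pthread_mutex_unlock(&domain_lock);
        put_device();
        return ret;
    }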
330 struct tb_switch *sw = tb_to_switch(nvm->dev);
333 if (!mutex_trylock(&sw->tb->lock))
343 mutex_unlock(&sw->tb->lock);
348 static int tb_switch_nvm_add(struct tb_switch *sw)
353 if (!nvm_readable(sw))
356 nvm = tb_nvm_alloc(&sw->dev);
371 if (!sw->safe_mode) {
377 if (!sw->no_nvm_upgrade) {
383 sw->nvm = nvm;
387 tb_sw_dbg(sw, "NVM upgrade disabled\n");
388 sw->no_nvm_upgrade = true;
395 static void tb_switch_nvm_remove(struct tb_switch *sw)
399 nvm = sw->nvm;
400 sw->nvm = NULL;
407 nvm_clear_auth_status(sw);
570 if (credits == 0 || port->sw->is_unplugged)
577 if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
619 if (tb_switch_is_icm(port->sw))
623 if (tb_switch_is_usb4(port->sw))
700 tb_dbg(port->sw->tb, " Port %d: not implemented\n",
745 tb_dump_port(port->sw->tb, port);
825 const struct tb_switch *sw)
828 return (tb_route(parent) & mask) == (tb_route(sw) & mask);
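tb_switch_is_reachable() (source line 828) relies on Thunderbolt route strings packing one adapter number per hop, one byte per hop, into a 64-bit route: a router hangs somewhere below the parent exactly when the parent's route is a byte-prefix of its own. The mask construction below is my reconstruction of what line 828 compares against; only the comparison itself is visible in the excerpt:

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_reachable(uint64_t parent_route, int parent_depth,
                             uint64_t route)
    {
        /* Keep parent_depth bytes; guard the depth >= 8 shift, which
         * would be undefined behaviour on a 64-bit value. */
        uint64_t mask = parent_depth >= 8 ? ~0ULL :
                        ((uint64_t)1 << (parent_depth * 8)) - 1;

        return (parent_route & mask) == (route & mask);
    }

For instance, a router below host port 3 has route 0x3 (depth 1), and a device on that router's port 5 has route 0x503; masking both with 0xff exposes the shared prefix.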
854 if (prev->sw == end->sw) {
860 if (tb_switch_is_reachable(prev->sw, end->sw)) {
861 next = tb_port_at(tb_route(end->sw), prev->sw);
870 next = tb_upstream_port(prev->sw);
1213 if (tb_switch_is_usb4(port->sw))
1388 if (tb_switch_is_usb4(port->sw))
1454 static const char *tb_switch_generation_name(const struct tb_switch *sw)
1456 switch (sw->generation) {
1470 static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
1472 const struct tb_regs_switch_header *regs = &sw->config;
1475 tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
1490 * @sw: Switch to reset
1494 int tb_switch_reset(struct tb_switch *sw)
1498 if (sw->generation > 1)
1501 tb_sw_dbg(sw, "resetting switch\n");
1503 res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
1507 res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
1515 * @sw: Router to read the offset value from
1525 int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
1534 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
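tb_switch_wait_for_bit() (source lines 1525-1534) repeatedly reads a config-space register until a given bit reaches the expected value or a timeout expires; the same helper is reused later, e.g. on line 3472 for the PCIe bridge command. A self-contained userspace version of that poll-with-deadline shape, where read_reg is a caller-supplied stub for the config read:

    #include <errno.h>
    #include <stdint.h>
    #include <time.h>

    static int64_t now_ms(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    /* Wait until (read_reg() & bit) == value, or return -ETIMEDOUT. */
    static int wait_for_bit(uint32_t (*read_reg)(void), uint32_t bit,
                            uint32_t value, int timeout_ms)
    {
        int64_t deadline = now_ms() + timeout_ms;

        do {
            struct timespec delay = { 0, 1000000 };     /* 1 ms */

            if ((read_reg() & bit) == value)
                return 0;
            nanosleep(&delay, NULL);
        } while (now_ms() < deadline);

        return -ETIMEDOUT;
    }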
1554 static int tb_plug_events_active(struct tb_switch *sw, bool active)
1559 if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
1562 sw->config.plug_events_delay = 0xff;
1563 res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
1567 res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
1573 switch (sw->config.device_id) {
1584 if (!tb_switch_is_alpine_ridge(sw))
1590 return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1591 sw->cap_plug_events + 1, 1);
1598 struct tb_switch *sw = tb_to_switch(dev);
1600 return sysfs_emit(buf, "%u\n", sw->authorized);
1606 struct tb_switch *sw;
1608 sw = tb_to_switch(dev);
1609 if (sw && sw->authorized) {
1613 ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
1617 ret = tb_domain_disapprove_switch(sw->tb, sw);
1621 sw->authorized = 0;
1622 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
1628 static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1634 if (!mutex_trylock(&sw->tb->lock))
1637 if (!!sw->authorized == !!val)
1643 if (tb_route(sw)) {
1644 ret = disapprove_switch(&sw->dev, NULL);
1651 if (sw->key)
1652 ret = tb_domain_approve_switch_key(sw->tb, sw);
1654 ret = tb_domain_approve_switch(sw->tb, sw);
1659 if (sw->key)
1660 ret = tb_domain_challenge_switch_key(sw->tb, sw);
1668 sw->authorized = val;
1673 sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
1674 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
1678 mutex_unlock(&sw->tb->lock);
1686 struct tb_switch *sw = tb_to_switch(dev);
1696 pm_runtime_get_sync(&sw->dev);
1697 ret = tb_switch_set_authorized(sw, val);
1698 pm_runtime_mark_last_busy(&sw->dev);
1699 pm_runtime_put_autosuspend(&sw->dev);
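tb_switch_set_authorized() and its sysfs store (source lines 1628-1699) implement the security-level ABI: writing 0 deauthorizes a device (its children are disapproved first), 1 approves with or without a stored key, and 2 runs a key challenge, which requires a key. A condensed sketch of the dispatch; the domain_* functions are stubs for the tb_domain_* calls and the error codes are my reading of the excerpt:

    #include <errno.h>
    #include <stdbool.h>

    struct router {
        bool has_key;           /* sw->key != NULL */
        bool is_device;         /* tb_route(sw) != 0 */
        unsigned int authorized;
    };

    static int domain_approve(struct router *r)       { (void)r; return 0; }
    static int domain_approve_key(struct router *r)   { (void)r; return 0; }
    static int domain_challenge_key(struct router *r) { (void)r; return 0; }
    static int domain_disapprove(struct router *r)    { (void)r; return 0; }

    static int set_authorized(struct router *r, unsigned int val)
    {
        int ret = -EINVAL;

        if (!!r->authorized == !!val)
            return 0;               /* no state change requested */

        switch (val) {
        case 0:                     /* deauthorize: devices only */
            if (r->is_device)
                ret = domain_disapprove(r);
            break;
        case 1:                     /* approve, keyed when possible */
            ret = r->has_key ? domain_approve_key(r) : domain_approve(r);
            break;
        case 2:                     /* challenge needs a stored key */
            if (r->has_key)
                ret = domain_challenge_key(r);
            break;
        }

        if (!ret)
            r->authorized = val;    /* then emit the KOBJ_CHANGE uevent */
        return ret;
    }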
1708 struct tb_switch *sw = tb_to_switch(dev);
1710 return sysfs_emit(buf, "%u\n", sw->boot);
1717 struct tb_switch *sw = tb_to_switch(dev);
1719 return sysfs_emit(buf, "%#x\n", sw->device);
1726 struct tb_switch *sw = tb_to_switch(dev);
1728 return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
1735 struct tb_switch *sw = tb_to_switch(dev);
1737 return sysfs_emit(buf, "%u\n", sw->generation);
1744 struct tb_switch *sw = tb_to_switch(dev);
1747 if (!mutex_trylock(&sw->tb->lock))
1750 if (sw->key)
1751 ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1755 mutex_unlock(&sw->tb->lock);
1762 struct tb_switch *sw = tb_to_switch(dev);
1772 if (!mutex_trylock(&sw->tb->lock))
1775 if (sw->authorized) {
1778 kfree(sw->key);
1780 sw->key = NULL;
1782 sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1783 if (!sw->key)
1788 mutex_unlock(&sw->tb->lock);
1796 struct tb_switch *sw = tb_to_switch(dev);
1798 return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
1811 struct tb_switch *sw = tb_to_switch(dev);
1814 switch (sw->link_width) {
1837 struct tb_switch *sw = tb_to_switch(dev);
1840 switch (sw->link_width) {
1863 struct tb_switch *sw = tb_to_switch(dev);
1866 nvm_get_auth_status(sw, &status);
1873 struct tb_switch *sw = tb_to_switch(dev);
1876 pm_runtime_get_sync(&sw->dev);
1878 if (!mutex_trylock(&sw->tb->lock)) {
1883 if (sw->no_nvm_upgrade) {
1889 if (!sw->nvm) {
1899 nvm_clear_auth_status(sw);
1906 ret = nvm_authenticate(sw, true);
1908 if (!sw->nvm->flushed) {
1909 if (!sw->nvm->buf) {
1914 ret = nvm_validate_and_write(sw);
1920 ret = tb_lc_force_power(sw);
1922 ret = nvm_authenticate(sw, false);
1928 mutex_unlock(&sw->tb->lock);
1930 pm_runtime_mark_last_busy(&sw->dev);
1931 pm_runtime_put_autosuspend(&sw->dev);
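The nvm_authenticate store path (source lines 1873-1931) wakes the router, refuses when upgrade is disabled or the NVM devices were never added, clears any cached failure, and then either authenticates the existing image or flushes a freshly uploaded one first. A condensed control-flow sketch; the request names are mine (the numeric sysfs ABI is not visible in this excerpt) and all helpers are stubs:

    #include <errno.h>
    #include <stdbool.h>

    enum nvm_req { NVM_WRITE_ONLY, NVM_WRITE_AND_AUTH, NVM_AUTH_ONLY };

    struct nvm {
        bool flushed;   /* image already written to the router */
        bool has_buf;   /* userspace uploaded an image */
    };

    static void clear_auth_status(struct nvm *n)      { (void)n; }
    static int validate_and_write(struct nvm *n)      { (void)n; return 0; }
    static int authenticate(struct nvm *n, bool only) { (void)n; (void)only; return 0; }

    static int nvm_request(struct nvm *n, enum nvm_req req)
    {
        int ret;

        clear_auth_status(n);       /* always start clean, line 1899 */

        if (req == NVM_AUTH_ONLY)
            return authenticate(n, true);

        if (!n->flushed) {          /* line 1908 */
            if (!n->has_buf)
                return -EINVAL;     /* nothing was uploaded, line 1909 */
            ret = validate_and_write(n);
            if (ret || req == NVM_WRITE_ONLY)
                return ret;
        }
        return authenticate(n, false);
    }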
1965 struct tb_switch *sw = tb_to_switch(dev);
1968 if (!mutex_trylock(&sw->tb->lock))
1971 if (sw->safe_mode)
1973 else if (!sw->nvm)
1976 ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
1978 mutex_unlock(&sw->tb->lock);
1987 struct tb_switch *sw = tb_to_switch(dev);
1989 return sysfs_emit(buf, "%#x\n", sw->vendor);
1996 struct tb_switch *sw = tb_to_switch(dev);
1998 return sysfs_emit(buf, "%s\n", sw->vendor_name ?: "");
2005 struct tb_switch *sw = tb_to_switch(dev);
2007 return sysfs_emit(buf, "%pUb\n", sw->uuid);
2035 struct tb_switch *sw = tb_to_switch(dev);
2038 if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
2039 sw->tb->security_level == TB_SECURITY_DPONLY)
2042 if (!sw->device)
2045 if (!sw->device_name)
2048 if (!sw->vendor)
2051 if (!sw->vendor_name)
2054 if (tb_route(sw) &&
2055 sw->tb->security_level == TB_SECURITY_SECURE &&
2056 sw->security_level == TB_SECURITY_SECURE)
2063 if (tb_route(sw))
2067 if (nvm_upgradeable(sw))
2071 if (nvm_readable(sw))
2075 if (tb_route(sw))
2079 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
2084 return sw->safe_mode ? 0 : attr->mode;
2099 struct tb_switch *sw = tb_to_switch(dev);
2102 dma_port_free(sw->dma_port);
2104 tb_switch_for_each_port(sw, port) {
2109 kfree(sw->uuid);
2110 kfree(sw->device_name);
2111 kfree(sw->vendor_name);
2112 kfree(sw->ports);
2113 kfree(sw->drom);
2114 kfree(sw->key);
2115 kfree(sw);
2120 const struct tb_switch *sw = tb_to_switch(dev);
2123 if (tb_switch_is_usb4(sw)) {
2125 usb4_switch_version(sw)))
2129 if (!tb_route(sw)) {
2136 tb_switch_for_each_port(sw, port) {
2158 struct tb_switch *sw = tb_to_switch(dev);
2159 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2162 return cm_ops->runtime_suspend_switch(sw);
2169 struct tb_switch *sw = tb_to_switch(dev);
2170 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2173 return cm_ops->runtime_resume_switch(sw);
2189 static int tb_switch_get_generation(struct tb_switch *sw)
2191 if (tb_switch_is_usb4(sw))
2194 if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
2195 switch (sw->config.device_id) {
2229 tb_sw_warn(sw, "unsupported switch device id %#x\n",
2230 sw->config.device_id);
2234 static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
2238 if (tb_switch_is_usb4(sw) ||
2239 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
2264 struct tb_switch *sw;
2283 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2284 if (!sw)
2287 sw->tb = tb;
2288 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
2292 sw->generation = tb_switch_get_generation(sw);
2295 tb_dump_switch(tb, sw);
2298 sw->config.upstream_port_number = upstream_port;
2299 sw->config.depth = depth;
2300 sw->config.route_hi = upper_32_bits(route);
2301 sw->config.route_lo = lower_32_bits(route);
2302 sw->config.enabled = 0;
2305 if (tb_switch_exceeds_max_depth(sw, depth)) {
2311 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
2313 if (!sw->ports) {
2318 for (i = 0; i <= sw->config.max_port_number; i++) {
2320 sw->ports[i].sw = sw;
2321 sw->ports[i].port = i;
2325 ida_init(&sw->ports[i].in_hopids);
2326 ida_init(&sw->ports[i].out_hopids);
2330 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
2332 sw->cap_plug_events = ret;
2334 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
2336 sw->cap_vsec_tmu = ret;
2338 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
2340 sw->cap_lc = ret;
2342 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
2344 sw->cap_lp = ret;
2348 sw->authorized = true;
2350 device_initialize(&sw->dev);
2351 sw->dev.parent = parent;
2352 sw->dev.bus = &tb_bus_type;
2353 sw->dev.type = &tb_switch_type;
2354 sw->dev.groups = switch_groups;
2355 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2357 return sw;
2360 kfree(sw->ports);
2361 kfree(sw);
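tb_switch_alloc() (source lines 2264-2361) reads the router's config header, then sizes the port array as max_port_number + 1 so an adapter number indexes the array directly (adapter 0 is the router's own config space), and gives every port a back-pointer to its router. A sketch of just that port-array setup; the struct names are illustrative:

    #include <stdlib.h>

    struct router;

    struct port {
        struct router *router;  /* back-pointer, line 2320 */
        int index;              /* adapter number, line 2321 */
    };

    struct router {
        int max_port;           /* config.max_port_number */
        struct port *ports;
    };

    static int router_init_ports(struct router *r)
    {
        int i;

        /* One extra slot so ports[adapter] works for 0..max_port. */
        r->ports = calloc(r->max_port + 1, sizeof(*r->ports));
        if (!r->ports)
            return -1;

        for (i = 0; i <= r->max_port; i++) {
            r->ports[i].router = r;
            r->ports[i].index = i;
        }
        return 0;
    }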
2383 struct tb_switch *sw;
2385 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2386 if (!sw)
2389 sw->tb = tb;
2390 sw->config.depth = tb_route_length(route);
2391 sw->config.route_hi = upper_32_bits(route);
2392 sw->config.route_lo = lower_32_bits(route);
2393 sw->safe_mode = true;
2395 device_initialize(&sw->dev);
2396 sw->dev.parent = parent;
2397 sw->dev.bus = &tb_bus_type;
2398 sw->dev.type = &tb_switch_type;
2399 sw->dev.groups = switch_groups;
2400 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2402 return sw;
2407 * @sw: Switch to configure
2416 int tb_switch_configure(struct tb_switch *sw)
2418 struct tb *tb = sw->tb;
2422 route = tb_route(sw);
2425 sw->config.enabled ? "restoring" : "initializing", route,
2426 tb_route_length(route), sw->config.upstream_port_number);
2428 sw->config.enabled = 1;
2430 if (tb_switch_is_usb4(sw)) {
2437 if (usb4_switch_version(sw) < 2)
2438 sw->config.cmuv = ROUTER_CS_4_CMUV_V1;
2440 sw->config.cmuv = ROUTER_CS_4_CMUV_V2;
2441 sw->config.plug_events_delay = 0xa;
2444 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2449 ret = usb4_switch_setup(sw);
2451 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2452 tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2453 sw->config.vendor_id);
2455 if (!sw->cap_plug_events) {
2456 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2461 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2467 return tb_plug_events_active(sw, true);
2472 * @sw: Router to configure
2479 int tb_switch_configuration_valid(struct tb_switch *sw)
2481 if (tb_switch_is_usb4(sw))
2482 return usb4_switch_configuration_valid(sw);
2486 static int tb_switch_set_uuid(struct tb_switch *sw)
2492 if (sw->uuid)
2495 if (tb_switch_is_usb4(sw)) {
2496 ret = usb4_switch_read_uid(sw, &sw->uid);
2505 ret = tb_lc_read_uuid(sw, uuid);
2520 uuid[0] = sw->uid & 0xffffffff;
2521 uuid[1] = (sw->uid >> 32) & 0xffffffff;
2526 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2527 if (!sw->uuid)
2532 static int tb_switch_add_dma_port(struct tb_switch *sw)
2537 switch (sw->generation) {
2540 if (tb_route(sw))
2546 ret = tb_switch_set_uuid(sw);
2556 if (!sw->safe_mode)
2561 if (sw->no_nvm_upgrade)
2564 if (tb_switch_is_usb4(sw)) {
2565 ret = usb4_switch_nvm_authenticate_status(sw, &status);
2570 tb_sw_info(sw, "switch flash authentication failed\n");
2571 nvm_set_auth_status(sw, status);
2578 if (!tb_route(sw) && !tb_switch_is_icm(sw))
2581 sw->dma_port = dma_port_alloc(sw);
2582 if (!sw->dma_port)
2591 nvm_get_auth_status(sw, &status);
2593 if (!tb_route(sw))
2594 nvm_authenticate_complete_dma_port(sw);
2603 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2608 if (!tb_route(sw))
2609 nvm_authenticate_complete_dma_port(sw);
2612 tb_sw_info(sw, "switch flash authentication failed\n");
2613 nvm_set_auth_status(sw, status);
2616 tb_sw_info(sw, "power cycling the switch now\n");
2617 dma_port_power_cycle(sw->dma_port);
2626 static void tb_switch_default_link_ports(struct tb_switch *sw)
2630 for (i = 1; i <= sw->config.max_port_number; i++) {
2631 struct tb_port *port = &sw->ports[i];
2638 if (i == sw->config.max_port_number ||
2639 !tb_port_is_null(&sw->ports[i + 1]))
2643 subordinate = &sw->ports[i + 1];
2650 tb_sw_dbg(sw, "linked ports %d <-> %d\n",
2656 static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
2658 const struct tb_port *up = tb_upstream_port(sw);
2663 if (tb_switch_is_usb4(sw))
2664 return usb4_switch_lane_bonding_possible(sw);
2665 return tb_lc_lane_bonding_possible(sw);
2668 static int tb_switch_update_link_attributes(struct tb_switch *sw)
2674 if (!tb_route(sw) || tb_switch_is_icm(sw))
2677 up = tb_upstream_port(sw);
2682 if (sw->link_speed != ret)
2684 sw->link_speed = ret;
2689 if (sw->link_width != ret)
2691 sw->link_width = ret;
2694 if (device_is_registered(&sw->dev) && change)
2695 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2702 * @sw: Switch to enable lane bonding
2708 int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2711 u64 route = tb_route(sw);
2718 if (!tb_switch_lane_bonding_possible(sw))
2721 up = tb_upstream_port(sw);
2722 down = tb_switch_downstream_port(sw);
2760 tb_switch_update_link_attributes(sw);
2762 tb_sw_dbg(sw, "lane bonding enabled\n");
2768 * @sw: Switch whose lane bonding to disable
2770 * Disables lane bonding between @sw and parent. This can be called even
2773 void tb_switch_lane_bonding_disable(struct tb_switch *sw)
2778 if (!tb_route(sw))
2781 up = tb_upstream_port(sw);
2785 down = tb_switch_downstream_port(sw);
2796 tb_sw_warn(sw, "timeout disabling lane bonding\n");
2800 tb_switch_update_link_attributes(sw);
2802 tb_sw_dbg(sw, "lane bonding disabled\n");
2807 * @sw: Switch whose link is configured
2809 * Sets the link upstream from @sw configured (from both ends) so that
2817 int tb_switch_configure_link(struct tb_switch *sw)
2822 if (!tb_route(sw) || tb_switch_is_icm(sw))
2825 up = tb_upstream_port(sw);
2826 if (tb_switch_is_usb4(up->sw))
2834 if (tb_switch_is_usb4(down->sw))
2841 * @sw: Switch whose link is unconfigured
2843 * Sets the link unconfigured so the @sw will be disconnected if the
2846 void tb_switch_unconfigure_link(struct tb_switch *sw)
2850 if (sw->is_unplugged)
2852 if (!tb_route(sw) || tb_switch_is_icm(sw))
2855 up = tb_upstream_port(sw);
2856 if (tb_switch_is_usb4(up->sw))
2862 if (tb_switch_is_usb4(down->sw))
2868 static void tb_switch_credits_init(struct tb_switch *sw)
2870 if (tb_switch_is_icm(sw))
2872 if (!tb_switch_is_usb4(sw))
2874 if (usb4_switch_credits_init(sw))
2875 tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
2878 static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
2882 if (tb_switch_is_icm(sw))
2885 tb_switch_for_each_port(sw, port) {
2900 * @sw: Switch to add
2910 int tb_switch_add(struct tb_switch *sw)
2921 ret = tb_switch_add_dma_port(sw);
2923 dev_err(&sw->dev, "failed to add DMA port\n");
2927 if (!sw->safe_mode) {
2928 tb_switch_credits_init(sw);
2931 ret = tb_drom_read(sw);
2933 dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
2934 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
2936 ret = tb_switch_set_uuid(sw);
2938 dev_err(&sw->dev, "failed to set UUID\n");
2942 for (i = 0; i <= sw->config.max_port_number; i++) {
2943 if (sw->ports[i].disabled) {
2944 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
2947 ret = tb_init_port(&sw->ports[i]);
2949 dev_err(&sw->dev, "failed to initialize port %d\n", i);
2954 tb_check_quirks(sw);
2956 tb_switch_default_link_ports(sw);
2958 ret = tb_switch_update_link_attributes(sw);
2962 ret = tb_switch_clx_init(sw);
2966 ret = tb_switch_tmu_init(sw);
2971 ret = tb_switch_port_hotplug_enable(sw);
2975 ret = device_add(&sw->dev);
2977 dev_err(&sw->dev, "failed to add device: %d\n", ret);
2981 if (tb_route(sw)) {
2982 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
2983 sw->vendor, sw->device);
2984 if (sw->vendor_name && sw->device_name)
2985 dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
2986 sw->device_name);
2989 ret = usb4_switch_add_ports(sw);
2991 dev_err(&sw->dev, "failed to add USB4 ports\n");
2995 ret = tb_switch_nvm_add(sw);
2997 dev_err(&sw->dev, "failed to add NVM devices\n");
3006 device_init_wakeup(&sw->dev, true);
3008 pm_runtime_set_active(&sw->dev);
3009 if (sw->rpm) {
3010 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
3011 pm_runtime_use_autosuspend(&sw->dev);
3012 pm_runtime_mark_last_busy(&sw->dev);
3013 pm_runtime_enable(&sw->dev);
3014 pm_request_autosuspend(&sw->dev);
3017 tb_switch_debugfs_init(sw);
3021 usb4_switch_remove_ports(sw);
3023 device_del(&sw->dev);
3030 * @sw: Switch to remove
3036 void tb_switch_remove(struct tb_switch *sw)
3040 tb_switch_debugfs_remove(sw);
3042 if (sw->rpm) {
3043 pm_runtime_get_sync(&sw->dev);
3044 pm_runtime_disable(&sw->dev);
3048 tb_switch_for_each_port(sw, port) {
3050 tb_switch_remove(port->remote->sw);
3061 if (!sw->is_unplugged)
3062 tb_plug_events_active(sw, false);
3064 tb_switch_nvm_remove(sw);
3065 usb4_switch_remove_ports(sw);
3067 if (tb_route(sw))
3068 dev_info(&sw->dev, "device disconnected\n");
3069 device_unregister(&sw->dev);
3074 * @sw: Router to mark unplugged
3076 void tb_sw_set_unplugged(struct tb_switch *sw)
3080 if (sw == sw->tb->root_switch) {
3081 tb_sw_WARN(sw, "cannot unplug root switch\n");
3084 if (sw->is_unplugged) {
3085 tb_sw_WARN(sw, "is_unplugged already set\n");
3088 sw->is_unplugged = true;
3089 tb_switch_for_each_port(sw, port) {
3091 tb_sw_set_unplugged(port->remote->sw);
3097 static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
3100 tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
3102 tb_sw_dbg(sw, "disabling wakeup\n");
3104 if (tb_switch_is_usb4(sw))
3105 return usb4_switch_set_wake(sw, flags);
3106 return tb_lc_set_wake(sw, flags);
3109 int tb_switch_resume(struct tb_switch *sw)
3114 tb_sw_dbg(sw, "resuming switch\n");
3120 if (tb_route(sw)) {
3128 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
3130 tb_sw_info(sw, "switch not present anymore\n");
3135 if (!sw->uid)
3138 if (tb_switch_is_usb4(sw))
3139 err = usb4_switch_read_uid(sw, &uid);
3141 err = tb_drom_read_uid_only(sw, &uid);
3143 tb_sw_warn(sw, "uid read failed\n");
3146 if (sw->uid != uid) {
3147 tb_sw_info(sw,
3149 sw->uid, uid);
3154 err = tb_switch_configure(sw);
3159 tb_switch_set_wake(sw, 0);
3161 err = tb_switch_tmu_init(sw);
3166 tb_switch_for_each_port(sw, port) {
3177 tb_sw_set_unplugged(port->remote->sw);
3187 if (port->remote && tb_switch_resume(port->remote->sw)) {
3190 tb_sw_set_unplugged(port->remote->sw);
3199 * @sw: Switch to suspend
3203 * value of @runtime and then sets sleep bit for the router. If @sw is
3207 void tb_switch_suspend(struct tb_switch *sw, bool runtime)
3213 tb_sw_dbg(sw, "suspending switch\n");
3219 tb_switch_clx_disable(sw);
3221 err = tb_plug_events_active(sw, false);
3225 tb_switch_for_each_port(sw, port) {
3227 tb_switch_suspend(port->remote->sw, runtime);
3235 } else if (device_may_wakeup(&sw->dev)) {
3239 tb_switch_set_wake(sw, flags);
3241 if (tb_switch_is_usb4(sw))
3242 usb4_switch_set_sleep(sw);
3244 tb_lc_set_sleep(sw);
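tb_switch_suspend() (source lines 3207-3244) recurses into child routers before touching the current one: once a parent enters sleep its downstream links stop responding, so the children must already be asleep. Wake sources are programmed just before the sleep bit is set. A tree-walk sketch with stubbed hardware calls and an illustrative fixed fan-out:

    #include <stddef.h>

    struct router {
        struct router *children[8];     /* illustrative fixed fan-out */
        size_t nchildren;
    };

    static void set_wake(struct router *r)  { (void)r; }   /* line 3239 */
    static void set_sleep(struct router *r) { (void)r; }   /* 3242/3244 */

    static void suspend_tree(struct router *r)
    {
        size_t i;

        /* Depth-first: children before their parent. */
        for (i = 0; i < r->nchildren; i++)
            suspend_tree(r->children[i]);

        set_wake(r);    /* program wake sources */
        set_sleep(r);   /* USB4 or link-controller variant */
    }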
3249 * @sw: Switch whose DP resource is queried
3255 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
3257 if (tb_switch_is_usb4(sw))
3258 return usb4_switch_query_dp_resource(sw, in);
3259 return tb_lc_dp_sink_query(sw, in);
3264 * @sw: Switch whose DP resource is allocated
3271 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3275 if (tb_switch_is_usb4(sw))
3276 ret = usb4_switch_alloc_dp_resource(sw, in);
3278 ret = tb_lc_dp_sink_alloc(sw, in);
3281 tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
3284 tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);
3291 * @sw: Switch whose DP resource is de-allocated
3297 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3301 if (tb_switch_is_usb4(sw))
3302 ret = usb4_switch_dealloc_dp_resource(sw, in);
3304 ret = tb_lc_dp_sink_dealloc(sw, in);
3307 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
3310 tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
3323 struct tb_switch *sw = tb_to_switch(dev);
3326 if (!sw)
3328 if (sw->tb != lookup->tb)
3332 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
3335 return sw->config.route_lo == lower_32_bits(lookup->route) &&
3336 sw->config.route_hi == upper_32_bits(lookup->route);
3341 return !sw->depth;
3343 return sw->link == lookup->link && sw->depth == lookup->depth;
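The lookup predicate around source lines 3323-3343 supports three mutually exclusive keys with a fixed precedence: UUID, then route (stored split across the route_hi/route_lo config words), then (link, depth), where a zero link selects the host router, the only one at depth 0. A compilable restatement of that precedence; the struct layouts are illustrative:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    struct lookup {
        const uint8_t *uuid;    /* 16-byte UUID, or NULL */
        uint64_t route;         /* 0 means "not searching by route" */
        int link, depth;
    };

    struct router {
        uint8_t uuid[16];
        uint32_t route_hi, route_lo;
        int link, depth;
    };

    static bool router_match(const struct router *r, const struct lookup *l)
    {
        if (l->uuid)
            return !memcmp(r->uuid, l->uuid, 16);

        if (l->route)
            return r->route_lo == (uint32_t)l->route &&
                   r->route_hi == (uint32_t)(l->route >> 32);

        if (!l->link)
            return !r->depth;   /* host router only */

        return r->link == l->link && r->depth == l->depth;
    }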
3424 * tb_switch_find_port() - return the first port of @type on @sw or NULL
3425 * @sw: Switch to find the port from
3428 struct tb_port *tb_switch_find_port(struct tb_switch *sw,
3433 tb_switch_for_each_port(sw, port) {
3445 static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
3451 if (sw->generation != 3)
3454 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
3455 ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
3466 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;
3468 ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
3472 ret = tb_switch_wait_for_bit(sw, offset,
3477 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
3489 * @sw: Router to enable PCIe L1
3496 int tb_switch_pcie_l1_enable(struct tb_switch *sw)
3498 struct tb_switch *parent = tb_switch_parent(sw);
3501 if (!tb_route(sw))
3504 if (!tb_switch_is_titan_ridge(sw))
3512 ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
3517 return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
3522 * @sw: Router whose xHCI to connect
3530 int tb_switch_xhci_connect(struct tb_switch *sw)
3535 if (sw->generation != 3)
3538 port1 = &sw->ports[1];
3539 port3 = &sw->ports[3];
3541 if (tb_switch_is_alpine_ridge(sw)) {
3557 } else if (tb_switch_is_titan_ridge(sw)) {
3569 * @sw: Router whose xHCI to disconnect
3574 void tb_switch_xhci_disconnect(struct tb_switch *sw)
3576 if (sw->generation == 3) {
3577 struct tb_port *port1 = &sw->ports[1];
3578 struct tb_port *port3 = &sw->ports[3];