1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Thunderbolt driver - bus logic (NHI independent)
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2019, Intel Corporation
7 */
8
9#include <linux/slab.h>
10#include <linux/errno.h>
11#include <linux/delay.h>
12#include <linux/pm_runtime.h>
13
14#include "tb.h"
15#include "tb_regs.h"
16#include "tunnel.h"
17
18/**
19 * struct tb_cm - Simple Thunderbolt connection manager
20 * @tunnel_list: List of active tunnels
21 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop processing plug
23 *		    events and exit if this is not set (it needs to
24 *		    acquire the lock one more time). Used to drain wq
25 *		    after cfg has been paused.
26 * @remove_work: Work used to remove any unplugged routers after
27 *		 runtime resume
28 */
29struct tb_cm {
30	struct list_head tunnel_list;
31	struct list_head dp_resources;
32	bool hotplug_active;
33	struct delayed_work remove_work;
34};
35
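/*
 * The connection manager private data is placed right after struct tb
 * (see tb_domain_alloc() and tb_priv()) so the domain pointer can be
 * recovered by walking back sizeof(struct tb) bytes.
 */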
36static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
37{
38	return ((void *)tcm - sizeof(struct tb));
39}
40
41struct tb_hotplug_event {
42	struct work_struct work;
43	struct tb *tb;
44	u64 route;
45	u8 port;
46	bool unplug;
47};
48
49static void tb_handle_hotplug(struct work_struct *work);
50
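/*
 * Queue a hotplug event for processing on tb->wq. The event is freed
 * by tb_handle_hotplug() once it has been handled.
 */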
51static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
52{
53	struct tb_hotplug_event *ev;
54
55	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
56	if (!ev)
57		return;
58
59	ev->tb = tb;
60	ev->route = route;
61	ev->port = port;
62	ev->unplug = unplug;
63	INIT_WORK(&ev->work, tb_handle_hotplug);
64	queue_work(tb->wq, &ev->work);
65}
66
67/* enumeration & hot plug handling */
68
69static void tb_add_dp_resources(struct tb_switch *sw)
70{
71	struct tb_cm *tcm = tb_priv(sw->tb);
72	struct tb_port *port;
73
74	tb_switch_for_each_port(sw, port) {
75		if (!tb_port_is_dpin(port))
76			continue;
77
78		if (!tb_switch_query_dp_resource(sw, port))
79			continue;
80
81		list_add_tail(&port->list, &tcm->dp_resources);
82		tb_port_dbg(port, "DP IN resource available\n");
83	}
84}
85
86static void tb_remove_dp_resources(struct tb_switch *sw)
87{
88	struct tb_cm *tcm = tb_priv(sw->tb);
89	struct tb_port *port, *tmp;
90
91	/* Clear children resources first */
92	tb_switch_for_each_port(sw, port) {
93		if (tb_port_has_remote(port))
94			tb_remove_dp_resources(port->remote->sw);
95	}
96
97	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
98		if (port->sw == sw) {
			tb_port_dbg(port, "DP %s resource unavailable\n",
				    tb_port_is_dpin(port) ? "IN" : "OUT");
100			list_del_init(&port->list);
101		}
102	}
103}
104
105static void tb_discover_tunnels(struct tb_switch *sw)
106{
107	struct tb *tb = sw->tb;
108	struct tb_cm *tcm = tb_priv(tb);
109	struct tb_port *port;
110
111	tb_switch_for_each_port(sw, port) {
112		struct tb_tunnel *tunnel = NULL;
113
114		switch (port->config.type) {
115		case TB_TYPE_DP_HDMI_IN:
116			tunnel = tb_tunnel_discover_dp(tb, port);
117			break;
118
119		case TB_TYPE_PCIE_DOWN:
120			tunnel = tb_tunnel_discover_pci(tb, port);
121			break;
122
123		case TB_TYPE_USB3_DOWN:
124			tunnel = tb_tunnel_discover_usb3(tb, port);
125			break;
126
127		default:
128			break;
129		}
130
131		if (!tunnel)
132			continue;
133
134		if (tb_tunnel_is_pci(tunnel)) {
135			struct tb_switch *parent = tunnel->dst_port->sw;
136
137			while (parent != tunnel->src_port->sw) {
138				parent->boot = true;
139				parent = tb_switch_parent(parent);
140			}
141		} else if (tb_tunnel_is_dp(tunnel)) {
142			/* Keep the domain from powering down */
143			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
144			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
145		}
146
147		list_add_tail(&tunnel->list, &tcm->tunnel_list);
148	}
149
150	tb_switch_for_each_port(sw, port) {
151		if (tb_port_has_remote(port))
152			tb_discover_tunnels(port->remote->sw);
153	}
154}
155
156static int tb_port_configure_xdomain(struct tb_port *port)
157{
158	/*
	 * XDomain paths currently only support a single lane so we must
	 * disable the other lane according to the USB4 spec.
161	 */
162	tb_port_disable(port->dual_link_port);
163
164	if (tb_switch_is_usb4(port->sw))
165		return usb4_port_configure_xdomain(port);
166	return tb_lc_configure_xdomain(port);
167}
168
169static void tb_port_unconfigure_xdomain(struct tb_port *port)
170{
171	if (tb_switch_is_usb4(port->sw))
172		usb4_port_unconfigure_xdomain(port);
173	else
174		tb_lc_unconfigure_xdomain(port);
175
176	tb_port_enable(port->dual_link_port);
177}
178
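/*
 * Called when enumerating the switch behind @port failed. The link may
 * lead to another Thunderbolt domain (host) instead, so create and
 * register an XDomain connection for it unless one already exists.
 */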
179static void tb_scan_xdomain(struct tb_port *port)
180{
181	struct tb_switch *sw = port->sw;
182	struct tb *tb = sw->tb;
183	struct tb_xdomain *xd;
184	u64 route;
185
186	route = tb_downstream_route(port);
187	xd = tb_xdomain_find_by_route(tb, route);
188	if (xd) {
189		tb_xdomain_put(xd);
190		return;
191	}
192
193	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
194			      NULL);
195	if (xd) {
196		tb_port_at(route, sw)->xdomain = xd;
197		tb_port_configure_xdomain(port);
198		tb_xdomain_add(xd);
199	}
200}
201
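/*
 * If the TMU is not already running in the expected mode, disable it
 * first, post the current time to the router and then re-enable it.
 */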
202static int tb_enable_tmu(struct tb_switch *sw)
203{
204	int ret;
205
206	/* If it is already enabled in correct mode, don't touch it */
207	if (tb_switch_tmu_is_enabled(sw))
208		return 0;
209
210	ret = tb_switch_tmu_disable(sw);
211	if (ret)
212		return ret;
213
214	ret = tb_switch_tmu_post_time(sw);
215	if (ret)
216		return ret;
217
218	return tb_switch_tmu_enable(sw);
219}
220
221/**
222 * tb_find_unused_port() - return the first inactive port on @sw
223 * @sw: Switch to find the port on
224 * @type: Port type to look for
225 */
226static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
227					   enum tb_port_type type)
228{
229	struct tb_port *port;
230
231	tb_switch_for_each_port(sw, port) {
232		if (tb_is_upstream_port(port))
233			continue;
234		if (port->config.type != type)
235			continue;
236		if (!port->cap_adap)
237			continue;
238		if (tb_port_is_enabled(port))
239			continue;
240		return port;
241	}
242	return NULL;
243}
244
245static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
246					 const struct tb_port *port)
247{
248	struct tb_port *down;
249
250	down = usb4_switch_map_usb3_down(sw, port);
251	if (down && !tb_usb3_port_is_enabled(down))
252		return down;
253	return NULL;
254}
255
256static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
257					struct tb_port *src_port,
258					struct tb_port *dst_port)
259{
260	struct tb_cm *tcm = tb_priv(tb);
261	struct tb_tunnel *tunnel;
262
263	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
264		if (tunnel->type == type &&
265		    ((src_port && src_port == tunnel->src_port) ||
266		     (dst_port && dst_port == tunnel->dst_port))) {
267			return tunnel;
268		}
269	}
270
271	return NULL;
272}
273
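/*
 * Return the USB3 tunnel whose first hop is the host router USB3
 * downstream port leading towards the branch that contains @src_port
 * and @dst_port, or NULL if USB3 is not tunneled to that branch.
 */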
274static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
275						   struct tb_port *src_port,
276						   struct tb_port *dst_port)
277{
278	struct tb_port *port, *usb3_down;
279	struct tb_switch *sw;
280
281	/* Pick the router that is deepest in the topology */
282	if (dst_port->sw->config.depth > src_port->sw->config.depth)
283		sw = dst_port->sw;
284	else
285		sw = src_port->sw;
286
287	/* Can't be the host router */
288	if (sw == tb->root_switch)
289		return NULL;
290
291	/* Find the downstream USB4 port that leads to this router */
292	port = tb_port_at(tb_route(sw), tb->root_switch);
293	/* Find the corresponding host router USB3 downstream port */
294	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
295	if (!usb3_down)
296		return NULL;
297
298	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
299}
300
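/*
 * Walk the path between @src_port and @dst_port and report in
 * @available_up/@available_down the minimum bandwidth (Mb/s) that is
 * still free on any link once the DP tunnels crossing it and the USB3
 * tunnel of the branch have been subtracted.
 */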
301static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
302	struct tb_port *dst_port, int *available_up, int *available_down)
303{
304	int usb3_consumed_up, usb3_consumed_down, ret;
305	struct tb_cm *tcm = tb_priv(tb);
306	struct tb_tunnel *tunnel;
307	struct tb_port *port;
308
309	tb_port_dbg(dst_port, "calculating available bandwidth\n");
310
311	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
312	if (tunnel) {
313		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
314						   &usb3_consumed_down);
315		if (ret)
316			return ret;
317	} else {
318		usb3_consumed_up = 0;
319		usb3_consumed_down = 0;
320	}
321
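	/* Start from the maximum a single link can carry (40 Gb/s) */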
322	*available_up = *available_down = 40000;
323
324	/* Find the minimum available bandwidth over all links */
325	tb_for_each_port_on_path(src_port, dst_port, port) {
326		int link_speed, link_width, up_bw, down_bw;
327
328		if (!tb_port_is_null(port))
329			continue;
330
331		if (tb_is_upstream_port(port)) {
332			link_speed = port->sw->link_speed;
333		} else {
334			link_speed = tb_port_get_link_speed(port);
335			if (link_speed < 0)
336				return link_speed;
337		}
338
339		link_width = port->bonded ? 2 : 1;
340
341		up_bw = link_speed * link_width * 1000; /* Mb/s */
342		/* Leave 10% guard band */
343		up_bw -= up_bw / 10;
344		down_bw = up_bw;
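		/*
		 * For example a bonded 20 Gb/s link gives 2 * 20 * 1000 =
		 * 40000 Mb/s, which becomes 36000 Mb/s once the 10% guard
		 * band has been subtracted.
		 */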
345
346		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);
347
348		/*
349		 * Find all DP tunnels that cross the port and reduce
350		 * their consumed bandwidth from the available.
351		 */
352		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
353			int dp_consumed_up, dp_consumed_down;
354
355			if (!tb_tunnel_is_dp(tunnel))
356				continue;
357
358			if (!tb_tunnel_port_on_path(tunnel, port))
359				continue;
360
361			ret = tb_tunnel_consumed_bandwidth(tunnel,
362							   &dp_consumed_up,
363							   &dp_consumed_down);
364			if (ret)
365				return ret;
366
367			up_bw -= dp_consumed_up;
368			down_bw -= dp_consumed_down;
369		}
370
371		/*
372		 * If USB3 is tunneled from the host router down to the
373		 * branch leading to port we need to take USB3 consumed
	 * bandwidth into account regardless of whether it actually
375		 * crosses the port.
376		 */
377		up_bw -= usb3_consumed_up;
378		down_bw -= usb3_consumed_down;
379
380		if (up_bw < *available_up)
381			*available_up = up_bw;
382		if (down_bw < *available_down)
383			*available_down = down_bw;
384	}
385
386	if (*available_up < 0)
387		*available_up = 0;
388	if (*available_down < 0)
389		*available_down = 0;
390
391	return 0;
392}
393
394static int tb_release_unused_usb3_bandwidth(struct tb *tb,
395					    struct tb_port *src_port,
396					    struct tb_port *dst_port)
397{
398	struct tb_tunnel *tunnel;
399
400	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
401	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
402}
403
404static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
405				      struct tb_port *dst_port)
406{
407	int ret, available_up, available_down;
408	struct tb_tunnel *tunnel;
409
410	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
411	if (!tunnel)
412		return;
413
414	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
415
416	/*
417	 * Calculate available bandwidth for the first hop USB3 tunnel.
418	 * That determines the whole USB3 bandwidth for this branch.
419	 */
420	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
421				     &available_up, &available_down);
422	if (ret) {
423		tb_warn(tb, "failed to calculate available bandwidth\n");
424		return;
425	}
426
427	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
428	       available_up, available_down);
429
430	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
431}
432
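/*
 * Set up a USB3 tunnel between the USB3 upstream adapter of @sw and
 * the matching USB3 downstream adapter of its parent, using whatever
 * bandwidth is still available on the path. Called both during initial
 * discovery (tb_create_usb3_tunnels()) and when a router is hot plugged.
 */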
433static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
434{
435	struct tb_switch *parent = tb_switch_parent(sw);
436	int ret, available_up, available_down;
437	struct tb_port *up, *down, *port;
438	struct tb_cm *tcm = tb_priv(tb);
439	struct tb_tunnel *tunnel;
440
441	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
442	if (!up)
443		return 0;
444
445	if (!sw->link_usb4)
446		return 0;
447
448	/*
449	 * Look up available down port. Since we are chaining it should
450	 * be found right above this switch.
451	 */
452	port = tb_port_at(tb_route(sw), parent);
453	down = tb_find_usb3_down(parent, port);
454	if (!down)
455		return 0;
456
457	if (tb_route(parent)) {
458		struct tb_port *parent_up;
459		/*
460		 * Check first that the parent switch has its upstream USB3
461		 * port enabled. Otherwise the chain is not complete and
462		 * there is no point setting up a new tunnel.
463		 */
464		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
465		if (!parent_up || !tb_port_is_enabled(parent_up))
466			return 0;
467
468		/* Make all unused bandwidth available for the new tunnel */
469		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
470		if (ret)
471			return ret;
472	}
473
474	ret = tb_available_bandwidth(tb, down, up, &available_up,
475				     &available_down);
476	if (ret)
477		goto err_reclaim;
478
479	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
480		    available_up, available_down);
481
482	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
483				      available_down);
484	if (!tunnel) {
485		ret = -ENOMEM;
486		goto err_reclaim;
487	}
488
489	if (tb_tunnel_activate(tunnel)) {
490		tb_port_info(up,
491			     "USB3 tunnel activation failed, aborting\n");
492		ret = -EIO;
493		goto err_free;
494	}
495
496	list_add_tail(&tunnel->list, &tcm->tunnel_list);
497	if (tb_route(parent))
498		tb_reclaim_usb3_bandwidth(tb, down, up);
499
500	return 0;
501
502err_free:
503	tb_tunnel_free(tunnel);
504err_reclaim:
505	if (tb_route(parent))
506		tb_reclaim_usb3_bandwidth(tb, down, up);
507
508	return ret;
509}
510
511static int tb_create_usb3_tunnels(struct tb_switch *sw)
512{
513	struct tb_port *port;
514	int ret;
515
516	if (tb_route(sw)) {
517		ret = tb_tunnel_usb3(sw->tb, sw);
518		if (ret)
519			return ret;
520	}
521
522	tb_switch_for_each_port(sw, port) {
523		if (!tb_port_has_remote(port))
524			continue;
525		ret = tb_create_usb3_tunnels(port->remote->sw);
526		if (ret)
527			return ret;
528	}
529
530	return 0;
531}
532
533static void tb_scan_port(struct tb_port *port);
534
/**
 * tb_scan_switch() - scan for and initialize downstream switches
 * @sw: Switch to scan
 */
538static void tb_scan_switch(struct tb_switch *sw)
539{
540	struct tb_port *port;
541
542	pm_runtime_get_sync(&sw->dev);
543
544	tb_switch_for_each_port(sw, port)
545		tb_scan_port(port);
546
547	pm_runtime_mark_last_busy(&sw->dev);
548	pm_runtime_put_autosuspend(&sw->dev);
549}
550
/**
 * tb_scan_port() - check for and initialize switches below port
 * @port: Port to scan
 */
554static void tb_scan_port(struct tb_port *port)
555{
556	struct tb_cm *tcm = tb_priv(port->sw->tb);
557	struct tb_port *upstream_port;
558	struct tb_switch *sw;
559
560	if (tb_is_upstream_port(port))
561		return;
562
563	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
564	    !tb_dp_port_is_enabled(port)) {
565		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
566		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
567				 false);
568		return;
569	}
570
571	if (port->config.type != TB_TYPE_PORT)
572		return;
573	if (port->dual_link_port && port->link_nr)
574		return; /*
575			 * Downstream switch is reachable through two ports.
576			 * Only scan on the primary port (link_nr == 0).
577			 */
578	if (tb_wait_for_port(port, false) <= 0)
579		return;
580	if (port->remote) {
581		tb_port_dbg(port, "port already has a remote\n");
582		return;
583	}
584
585	tb_retimer_scan(port);
586
587	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
588			     tb_downstream_route(port));
589	if (IS_ERR(sw)) {
590		/*
591		 * If there is an error accessing the connected switch
592		 * it may be connected to another domain. Also we allow
593		 * the other domain to be connected to a max depth switch.
594		 */
595		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
596			tb_scan_xdomain(port);
597		return;
598	}
599
600	if (tb_switch_configure(sw)) {
601		tb_switch_put(sw);
602		return;
603	}
604
605	/*
	 * If there was previously another domain connected, remove it
	 * first.
608	 */
609	if (port->xdomain) {
610		tb_xdomain_remove(port->xdomain);
611		tb_port_unconfigure_xdomain(port);
612		port->xdomain = NULL;
613	}
614
615	/*
616	 * Do not send uevents until we have discovered all existing
617	 * tunnels and know which switches were authorized already by
618	 * the boot firmware.
619	 */
620	if (!tcm->hotplug_active)
621		dev_set_uevent_suppress(&sw->dev, true);
622
623	/*
	 * At the moment we can support runtime PM only on Thunderbolt 2
	 * and beyond (devices with an LC).
626	 */
627	sw->rpm = sw->generation > 1;
628
629	if (tb_switch_add(sw)) {
630		tb_switch_put(sw);
631		return;
632	}
633
634	/* Link the switches using both links if available */
635	upstream_port = tb_upstream_port(sw);
636	port->remote = upstream_port;
637	upstream_port->remote = port;
638	if (port->dual_link_port && upstream_port->dual_link_port) {
639		port->dual_link_port->remote = upstream_port->dual_link_port;
640		upstream_port->dual_link_port->remote = port->dual_link_port;
641	}
642
643	/* Enable lane bonding if supported */
644	tb_switch_lane_bonding_enable(sw);
645	/* Set the link configured */
646	tb_switch_configure_link(sw);
647
648	if (tb_enable_tmu(sw))
649		tb_sw_warn(sw, "failed to enable TMU\n");
650
651	/* Scan upstream retimers */
652	tb_retimer_scan(upstream_port);
653
654	/*
655	 * Create USB 3.x tunnels only when the switch is plugged to the
656	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new ones.
659	 */
660	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
661		tb_sw_warn(sw, "USB3 tunnel creation failed\n");
662
663	tb_add_dp_resources(sw);
664	tb_scan_switch(sw);
665}
666
667static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
668{
669	struct tb_port *src_port, *dst_port;
670	struct tb *tb;
671
672	if (!tunnel)
673		return;
674
675	tb_tunnel_deactivate(tunnel);
676	list_del(&tunnel->list);
677
678	tb = tunnel->tb;
679	src_port = tunnel->src_port;
680	dst_port = tunnel->dst_port;
681
682	switch (tunnel->type) {
683	case TB_TUNNEL_DP:
684		/*
685		 * In case of DP tunnel make sure the DP IN resource is
686		 * deallocated properly.
687		 */
688		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
689		/* Now we can allow the domain to runtime suspend again */
690		pm_runtime_mark_last_busy(&dst_port->sw->dev);
691		pm_runtime_put_autosuspend(&dst_port->sw->dev);
692		pm_runtime_mark_last_busy(&src_port->sw->dev);
693		pm_runtime_put_autosuspend(&src_port->sw->dev);
694		fallthrough;
695
696	case TB_TUNNEL_USB3:
697		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
698		break;
699
700	default:
701		/*
702		 * PCIe and DMA tunnels do not consume guaranteed
703		 * bandwidth.
704		 */
705		break;
706	}
707
708	tb_tunnel_free(tunnel);
709}
710
/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 * @tb: Domain whose invalid tunnels to free
 */
714static void tb_free_invalid_tunnels(struct tb *tb)
715{
716	struct tb_cm *tcm = tb_priv(tb);
717	struct tb_tunnel *tunnel;
718	struct tb_tunnel *n;
719
720	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
721		if (tb_tunnel_is_invalid(tunnel))
722			tb_deactivate_and_free_tunnel(tunnel);
723	}
724}
725
/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 * @sw: Switch whose unplugged children to free
 */
729static void tb_free_unplugged_children(struct tb_switch *sw)
730{
731	struct tb_port *port;
732
733	tb_switch_for_each_port(sw, port) {
734		if (!tb_port_has_remote(port))
735			continue;
736
737		if (port->remote->sw->is_unplugged) {
738			tb_retimer_remove_all(port);
739			tb_remove_dp_resources(port->remote->sw);
740			tb_switch_unconfigure_link(port->remote->sw);
741			tb_switch_lane_bonding_disable(port->remote->sw);
742			tb_switch_remove(port->remote->sw);
743			port->remote = NULL;
744			if (port->dual_link_port)
745				port->dual_link_port->remote = NULL;
746		} else {
747			tb_free_unplugged_children(port->remote->sw);
748		}
749	}
750}
751
752static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
753					 const struct tb_port *port)
754{
755	struct tb_port *down = NULL;
756
757	/*
758	 * To keep plugging devices consistently in the same PCIe
759	 * hierarchy, do mapping here for switch downstream PCIe ports.
760	 */
761	if (tb_switch_is_usb4(sw)) {
762		down = usb4_switch_map_pcie_down(sw, port);
763	} else if (!tb_route(sw)) {
764		int phy_port = tb_phy_port_from_link(port->port);
765		int index;
766
767		/*
768		 * Hard-coded Thunderbolt port to PCIe down port mapping
769		 * per controller.
770		 */
771		if (tb_switch_is_cactus_ridge(sw) ||
772		    tb_switch_is_alpine_ridge(sw))
773			index = !phy_port ? 6 : 7;
774		else if (tb_switch_is_falcon_ridge(sw))
775			index = !phy_port ? 6 : 8;
776		else if (tb_switch_is_titan_ridge(sw))
777			index = !phy_port ? 8 : 9;
778		else
779			goto out;
780
781		/* Validate the hard-coding */
782		if (WARN_ON(index > sw->config.max_port_number))
783			goto out;
784
785		down = &sw->ports[index];
786	}
787
788	if (down) {
789		if (WARN_ON(!tb_port_is_pcie_down(down)))
790			goto out;
791		if (tb_pci_port_is_enabled(down))
792			goto out;
793
794		return down;
795	}
796
797out:
798	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
799}
800
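/*
 * Pick an inactive DP OUT adapter that can be paired with @in. If both
 * adapters are below the host router, the DP OUT must be reachable
 * through the same host router downstream port as @in so that the
 * tunnel stays within a single branch of the topology.
 */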
801static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
802{
803	struct tb_port *host_port, *port;
804	struct tb_cm *tcm = tb_priv(tb);
805
806	host_port = tb_route(in->sw) ?
807		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
808
809	list_for_each_entry(port, &tcm->dp_resources, list) {
810		if (!tb_port_is_dpout(port))
811			continue;
812
813		if (tb_port_is_enabled(port)) {
814			tb_port_dbg(port, "in use\n");
815			continue;
816		}
817
818		tb_port_dbg(port, "DP OUT available\n");
819
820		/*
821		 * Keep the DP tunnel under the topology starting from
822		 * the same host router downstream port.
823		 */
824		if (host_port && tb_route(port->sw)) {
825			struct tb_port *p;
826
827			p = tb_port_at(tb_route(port->sw), tb->root_switch);
828			if (p != host_port)
829				continue;
830		}
831
832		return port;
833	}
834
835	return NULL;
836}
837
838static void tb_tunnel_dp(struct tb *tb)
839{
840	int available_up, available_down, ret;
841	struct tb_cm *tcm = tb_priv(tb);
842	struct tb_port *port, *in, *out;
843	struct tb_tunnel *tunnel;
844
845	/*
	 * Find a pair of inactive DP IN and DP OUT adapters and then
847	 * establish a DP tunnel between them.
848	 */
849	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
850
851	in = NULL;
852	out = NULL;
853	list_for_each_entry(port, &tcm->dp_resources, list) {
854		if (!tb_port_is_dpin(port))
855			continue;
856
857		if (tb_port_is_enabled(port)) {
858			tb_port_dbg(port, "in use\n");
859			continue;
860		}
861
862		tb_port_dbg(port, "DP IN available\n");
863
864		out = tb_find_dp_out(tb, port);
865		if (out) {
866			in = port;
867			break;
868		}
869	}
870
871	if (!in) {
872		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
873		return;
874	}
875	if (!out) {
876		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
877		return;
878	}
879
880	/*
881	 * DP stream needs the domain to be active so runtime resume
882	 * both ends of the tunnel.
883	 *
	 * This should also bring the routers in the middle of the path
	 * active and keep the domain from runtime suspending while the
	 * DP tunnel is active.
887	 */
888	pm_runtime_get_sync(&in->sw->dev);
889	pm_runtime_get_sync(&out->sw->dev);
890
891	if (tb_switch_alloc_dp_resource(in->sw, in)) {
892		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
893		goto err_rpm_put;
894	}
895
896	/* Make all unused USB3 bandwidth available for the new DP tunnel */
897	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
898	if (ret) {
899		tb_warn(tb, "failed to release unused bandwidth\n");
900		goto err_dealloc_dp;
901	}
902
903	ret = tb_available_bandwidth(tb, in, out, &available_up,
904				     &available_down);
905	if (ret)
906		goto err_reclaim;
907
908	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
909	       available_up, available_down);
910
911	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
912	if (!tunnel) {
913		tb_port_dbg(out, "could not allocate DP tunnel\n");
914		goto err_reclaim;
915	}
916
917	if (tb_tunnel_activate(tunnel)) {
918		tb_port_info(out, "DP tunnel activation failed, aborting\n");
919		goto err_free;
920	}
921
922	list_add_tail(&tunnel->list, &tcm->tunnel_list);
923	tb_reclaim_usb3_bandwidth(tb, in, out);
924	return;
925
926err_free:
927	tb_tunnel_free(tunnel);
928err_reclaim:
929	tb_reclaim_usb3_bandwidth(tb, in, out);
930err_dealloc_dp:
931	tb_switch_dealloc_dp_resource(in->sw, in);
932err_rpm_put:
933	pm_runtime_mark_last_busy(&out->sw->dev);
934	pm_runtime_put_autosuspend(&out->sw->dev);
935	pm_runtime_mark_last_busy(&in->sw->dev);
936	pm_runtime_put_autosuspend(&in->sw->dev);
937}
938
939static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
940{
941	struct tb_port *in, *out;
942	struct tb_tunnel *tunnel;
943
944	if (tb_port_is_dpin(port)) {
945		tb_port_dbg(port, "DP IN resource unavailable\n");
946		in = port;
947		out = NULL;
948	} else {
949		tb_port_dbg(port, "DP OUT resource unavailable\n");
950		in = NULL;
951		out = port;
952	}
953
954	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
955	tb_deactivate_and_free_tunnel(tunnel);
956	list_del_init(&port->list);
957
958	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
961	 */
962	tb_tunnel_dp(tb);
963}
964
965static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
966{
967	struct tb_cm *tcm = tb_priv(tb);
968	struct tb_port *p;
969
970	if (tb_port_is_enabled(port))
971		return;
972
973	list_for_each_entry(p, &tcm->dp_resources, list) {
974		if (p == port)
975			return;
976	}
977
978	tb_port_dbg(port, "DP %s resource available\n",
979		    tb_port_is_dpin(port) ? "IN" : "OUT");
980	list_add_tail(&port->list, &tcm->dp_resources);
981
982	/* Look for suitable DP IN <-> DP OUT pairs now */
983	tb_tunnel_dp(tb);
984}
985
986static void tb_disconnect_and_release_dp(struct tb *tb)
987{
988	struct tb_cm *tcm = tb_priv(tb);
989	struct tb_tunnel *tunnel, *n;
990
991	/*
992	 * Tear down all DP tunnels and release their resources. They
993	 * will be re-established after resume based on plug events.
994	 */
995	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
996		if (tb_tunnel_is_dp(tunnel))
997			tb_deactivate_and_free_tunnel(tunnel);
998	}
999
1000	while (!list_empty(&tcm->dp_resources)) {
1001		struct tb_port *port;
1002
1003		port = list_first_entry(&tcm->dp_resources,
1004					struct tb_port, list);
1005		list_del_init(&port->list);
1006	}
1007}
1008
1009static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
1010{
1011	struct tb_port *up, *down, *port;
1012	struct tb_cm *tcm = tb_priv(tb);
1013	struct tb_switch *parent_sw;
1014	struct tb_tunnel *tunnel;
1015
1016	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1017	if (!up)
1018		return 0;
1019
1020	/*
1021	 * Look up available down port. Since we are chaining it should
1022	 * be found right above this switch.
1023	 */
1024	parent_sw = tb_to_switch(sw->dev.parent);
1025	port = tb_port_at(tb_route(sw), parent_sw);
1026	down = tb_find_pcie_down(parent_sw, port);
1027	if (!down)
1028		return 0;
1029
1030	tunnel = tb_tunnel_alloc_pci(tb, up, down);
1031	if (!tunnel)
1032		return -ENOMEM;
1033
1034	if (tb_tunnel_activate(tunnel)) {
1035		tb_port_info(up,
1036			     "PCIe tunnel activation failed, aborting\n");
1037		tb_tunnel_free(tunnel);
1038		return -EIO;
1039	}
1040
1041	list_add_tail(&tunnel->list, &tcm->tunnel_list);
1042	return 0;
1043}
1044
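/*
 * Set up a DMA tunnel between the host NHI adapter and the port that
 * leads to the other domain, using the transmit/receive rings and
 * paths stored in @xd.
 */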
1045static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
1046{
1047	struct tb_cm *tcm = tb_priv(tb);
1048	struct tb_port *nhi_port, *dst_port;
1049	struct tb_tunnel *tunnel;
1050	struct tb_switch *sw;
1051
1052	sw = tb_to_switch(xd->dev.parent);
1053	dst_port = tb_port_at(xd->route, sw);
1054	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1055
1056	mutex_lock(&tb->lock);
1057	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
1058				     xd->transmit_path, xd->receive_ring,
1059				     xd->receive_path);
1060	if (!tunnel) {
1061		mutex_unlock(&tb->lock);
1062		return -ENOMEM;
1063	}
1064
1065	if (tb_tunnel_activate(tunnel)) {
1066		tb_port_info(nhi_port,
1067			     "DMA tunnel activation failed, aborting\n");
1068		tb_tunnel_free(tunnel);
1069		mutex_unlock(&tb->lock);
1070		return -EIO;
1071	}
1072
1073	list_add_tail(&tunnel->list, &tcm->tunnel_list);
1074	mutex_unlock(&tb->lock);
1075	return 0;
1076}
1077
1078static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
1079{
1080	struct tb_port *dst_port;
1081	struct tb_tunnel *tunnel;
1082	struct tb_switch *sw;
1083
1084	sw = tb_to_switch(xd->dev.parent);
1085	dst_port = tb_port_at(xd->route, sw);
1086
1087	/*
	 * It is possible that the tunnel was already torn down (in
1089	 * case of cable disconnect) so it is fine if we cannot find it
1090	 * here anymore.
1091	 */
1092	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
1093	tb_deactivate_and_free_tunnel(tunnel);
1094}
1095
1096static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
1097{
1098	if (!xd->is_unplugged) {
1099		mutex_lock(&tb->lock);
1100		__tb_disconnect_xdomain_paths(tb, xd);
1101		mutex_unlock(&tb->lock);
1102	}
1103	return 0;
1104}
1105
1106/* hotplug handling */
1107
/**
 * tb_handle_hotplug() - handle hotplug event
 * @work: Work containing the hotplug event to handle
 *
 * Executes on tb->wq.
 */
1113static void tb_handle_hotplug(struct work_struct *work)
1114{
1115	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1116	struct tb *tb = ev->tb;
1117	struct tb_cm *tcm = tb_priv(tb);
1118	struct tb_switch *sw;
1119	struct tb_port *port;
1120
1121	/* Bring the domain back from sleep if it was suspended */
1122	pm_runtime_get_sync(&tb->dev);
1123
1124	mutex_lock(&tb->lock);
1125	if (!tcm->hotplug_active)
1126		goto out; /* during init, suspend or shutdown */
1127
1128	sw = tb_switch_find_by_route(tb, ev->route);
1129	if (!sw) {
1130		tb_warn(tb,
			"hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
1132			ev->route, ev->port, ev->unplug);
1133		goto out;
1134	}
1135	if (ev->port > sw->config.max_port_number) {
1136		tb_warn(tb,
			"hotplug event from non-existent port %llx:%x (unplug: %d)\n",
1138			ev->route, ev->port, ev->unplug);
1139		goto put_sw;
1140	}
1141	port = &sw->ports[ev->port];
1142	if (tb_is_upstream_port(port)) {
1143		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1144		       ev->route, ev->port, ev->unplug);
1145		goto put_sw;
1146	}
1147
1148	pm_runtime_get_sync(&sw->dev);
1149
1150	if (ev->unplug) {
1151		tb_retimer_remove_all(port);
1152
1153		if (tb_port_has_remote(port)) {
1154			tb_port_dbg(port, "switch unplugged\n");
1155			tb_sw_set_unplugged(port->remote->sw);
1156			tb_free_invalid_tunnels(tb);
1157			tb_remove_dp_resources(port->remote->sw);
1158			tb_switch_tmu_disable(port->remote->sw);
1159			tb_switch_unconfigure_link(port->remote->sw);
1160			tb_switch_lane_bonding_disable(port->remote->sw);
1161			tb_switch_remove(port->remote->sw);
1162			port->remote = NULL;
1163			if (port->dual_link_port)
1164				port->dual_link_port->remote = NULL;
1165			/* Maybe we can create another DP tunnel */
1166			tb_tunnel_dp(tb);
1167		} else if (port->xdomain) {
1168			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
1169
1170			tb_port_dbg(port, "xdomain unplugged\n");
1171			/*
1172			 * Service drivers are unbound during
1173			 * tb_xdomain_remove() so setting XDomain as
1174			 * unplugged here prevents deadlock if they call
1175			 * tb_xdomain_disable_paths(). We will tear down
1176			 * the path below.
1177			 */
1178			xd->is_unplugged = true;
1179			tb_xdomain_remove(xd);
1180			port->xdomain = NULL;
1181			__tb_disconnect_xdomain_paths(tb, xd);
1182			tb_xdomain_put(xd);
1183			tb_port_unconfigure_xdomain(port);
1184		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1185			tb_dp_resource_unavailable(tb, port);
1186		} else {
1187			tb_port_dbg(port,
1188				   "got unplug event for disconnected port, ignoring\n");
1189		}
1190	} else if (port->remote) {
1191		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
1192	} else {
1193		if (tb_port_is_null(port)) {
1194			tb_port_dbg(port, "hotplug: scanning\n");
1195			tb_scan_port(port);
1196			if (!port->remote)
1197				tb_port_dbg(port, "hotplug: no switch found\n");
1198		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1199			tb_dp_resource_available(tb, port);
1200		}
1201	}
1202
1203	pm_runtime_mark_last_busy(&sw->dev);
1204	pm_runtime_put_autosuspend(&sw->dev);
1205
1206put_sw:
1207	tb_switch_put(sw);
1208out:
1209	mutex_unlock(&tb->lock);
1210
1211	pm_runtime_mark_last_busy(&tb->dev);
1212	pm_runtime_put_autosuspend(&tb->dev);
1213
1214	kfree(ev);
1215}
1216
1217/**
 * tb_handle_event() - callback function for the control channel
1219 *
1220 * Delegates to tb_handle_hotplug.
1221 */
1222static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
1223			    const void *buf, size_t size)
1224{
1225	const struct cfg_event_pkg *pkg = buf;
1226	u64 route;
1227
1228	if (type != TB_CFG_PKG_EVENT) {
1229		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
1230		return;
1231	}
1232
1233	route = tb_cfg_get_route(&pkg->header);
1234
1235	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
1236		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
1237			pkg->port);
1238	}
1239
1240	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
1241}
1242
1243static void tb_stop(struct tb *tb)
1244{
1245	struct tb_cm *tcm = tb_priv(tb);
1246	struct tb_tunnel *tunnel;
1247	struct tb_tunnel *n;
1248
1249	cancel_delayed_work(&tcm->remove_work);
1250	/* tunnels are only present after everything has been initialized */
1251	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1252		/*
1253		 * DMA tunnels require the driver to be functional so we
1254		 * tear them down. Other protocol tunnels can be left
1255		 * intact.
1256		 */
1257		if (tb_tunnel_is_dma(tunnel))
1258			tb_tunnel_deactivate(tunnel);
1259		tb_tunnel_free(tunnel);
1260	}
1261	tb_switch_remove(tb->root_switch);
1262	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1263}
1264
1265static int tb_scan_finalize_switch(struct device *dev, void *data)
1266{
1267	if (tb_is_switch(dev)) {
1268		struct tb_switch *sw = tb_to_switch(dev);
1269
1270		/*
1271		 * If we found that the switch was already setup by the
1272		 * boot firmware, mark it as authorized now before we
1273		 * send uevent to userspace.
1274		 */
1275		if (sw->boot)
1276			sw->authorized = 1;
1277
1278		dev_set_uevent_suppress(dev, false);
1279		kobject_uevent(&dev->kobj, KOBJ_ADD);
1280		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
1281	}
1282
1283	return 0;
1284}
1285
1286static int tb_start(struct tb *tb)
1287{
1288	struct tb_cm *tcm = tb_priv(tb);
1289	int ret;
1290
1291	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
1292	if (IS_ERR(tb->root_switch))
1293		return PTR_ERR(tb->root_switch);
1294
1295	/*
	 * ICM firmware upgrade needs running firmware, which is not
	 * available in native mode, so disable firmware upgrade of the
	 * root switch.
1299	 */
1300	tb->root_switch->no_nvm_upgrade = true;
1301	/* All USB4 routers support runtime PM */
1302	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
1303
1304	ret = tb_switch_configure(tb->root_switch);
1305	if (ret) {
1306		tb_switch_put(tb->root_switch);
1307		return ret;
1308	}
1309
1310	/* Announce the switch to the world */
1311	ret = tb_switch_add(tb->root_switch);
1312	if (ret) {
1313		tb_switch_put(tb->root_switch);
1314		return ret;
1315	}
1316
1317	/* Enable TMU if it is off */
1318	tb_switch_tmu_enable(tb->root_switch);
1319	/* Full scan to discover devices added before the driver was loaded. */
1320	tb_scan_switch(tb->root_switch);
1321	/* Find out tunnels created by the boot firmware */
1322	tb_discover_tunnels(tb->root_switch);
1323	/*
	 * If the boot firmware did not create USB 3.x tunnels, create them
	 * now for the whole topology.
1326	 */
1327	tb_create_usb3_tunnels(tb->root_switch);
1328	/* Add DP IN resources for the root switch */
1329	tb_add_dp_resources(tb->root_switch);
1330	/* Make the discovered switches available to the userspace */
1331	device_for_each_child(&tb->root_switch->dev, NULL,
1332			      tb_scan_finalize_switch);
1333
	/* Allow tb_handle_hotplug to process events */
1335	tcm->hotplug_active = true;
1336	return 0;
1337}
1338
1339static int tb_suspend_noirq(struct tb *tb)
1340{
1341	struct tb_cm *tcm = tb_priv(tb);
1342
1343	tb_dbg(tb, "suspending...\n");
1344	tb_disconnect_and_release_dp(tb);
1345	tb_switch_suspend(tb->root_switch, false);
1346	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1347	tb_dbg(tb, "suspend finished\n");
1348
1349	return 0;
1350}
1351
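/*
 * Re-establish TMU configuration, lane bonding and link configuration
 * for every router that is still plugged in after resume, and
 * reconfigure any XDomain connected ports.
 */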
1352static void tb_restore_children(struct tb_switch *sw)
1353{
1354	struct tb_port *port;
1355
1356	/* No need to restore if the router is already unplugged */
1357	if (sw->is_unplugged)
1358		return;
1359
1360	if (tb_enable_tmu(sw))
1361		tb_sw_warn(sw, "failed to restore TMU configuration\n");
1362
1363	tb_switch_for_each_port(sw, port) {
1364		if (!tb_port_has_remote(port) && !port->xdomain)
1365			continue;
1366
1367		if (port->remote) {
1368			tb_switch_lane_bonding_enable(port->remote->sw);
1369			tb_switch_configure_link(port->remote->sw);
1370
1371			tb_restore_children(port->remote->sw);
1372		} else if (port->xdomain) {
1373			tb_port_configure_xdomain(port);
1374		}
1375	}
1376}
1377
1378static int tb_resume_noirq(struct tb *tb)
1379{
1380	struct tb_cm *tcm = tb_priv(tb);
1381	struct tb_tunnel *tunnel, *n;
1382
1383	tb_dbg(tb, "resuming...\n");
1384
	/* Remove any PCIe devices the firmware might have set up */
1386	tb_switch_reset(tb->root_switch);
1387
1388	tb_switch_resume(tb->root_switch);
1389	tb_free_invalid_tunnels(tb);
1390	tb_free_unplugged_children(tb->root_switch);
1391	tb_restore_children(tb->root_switch);
1392	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
1393		tb_tunnel_restart(tunnel);
1394	if (!list_empty(&tcm->tunnel_list)) {
1395		/*
1396		 * the pcie links need some time to get going.
1397		 * 100ms works for me...
1398		 */
1399		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
1400		msleep(100);
1401	}
	/* Allow tb_handle_hotplug to process events */
1403	tcm->hotplug_active = true;
1404	tb_dbg(tb, "resume finished\n");
1405
1406	return 0;
1407}
1408
1409static int tb_free_unplugged_xdomains(struct tb_switch *sw)
1410{
1411	struct tb_port *port;
1412	int ret = 0;
1413
1414	tb_switch_for_each_port(sw, port) {
1415		if (tb_is_upstream_port(port))
1416			continue;
1417		if (port->xdomain && port->xdomain->is_unplugged) {
1418			tb_retimer_remove_all(port);
1419			tb_xdomain_remove(port->xdomain);
1420			tb_port_unconfigure_xdomain(port);
1421			port->xdomain = NULL;
1422			ret++;
1423		} else if (port->remote) {
1424			ret += tb_free_unplugged_xdomains(port->remote->sw);
1425		}
1426	}
1427
1428	return ret;
1429}
1430
1431static int tb_freeze_noirq(struct tb *tb)
1432{
1433	struct tb_cm *tcm = tb_priv(tb);
1434
1435	tcm->hotplug_active = false;
1436	return 0;
1437}
1438
1439static int tb_thaw_noirq(struct tb *tb)
1440{
1441	struct tb_cm *tcm = tb_priv(tb);
1442
1443	tcm->hotplug_active = true;
1444	return 0;
1445}
1446
1447static void tb_complete(struct tb *tb)
1448{
1449	/*
	 * Release any unplugged XDomains and if another domain was
	 * swapped in place of an unplugged XDomain we need to run
	 * another rescan.
1453	 */
1454	mutex_lock(&tb->lock);
1455	if (tb_free_unplugged_xdomains(tb->root_switch))
1456		tb_scan_switch(tb->root_switch);
1457	mutex_unlock(&tb->lock);
1458}
1459
1460static int tb_runtime_suspend(struct tb *tb)
1461{
1462	struct tb_cm *tcm = tb_priv(tb);
1463
1464	mutex_lock(&tb->lock);
1465	tb_switch_suspend(tb->root_switch, true);
1466	tcm->hotplug_active = false;
1467	mutex_unlock(&tb->lock);
1468
1469	return 0;
1470}
1471
1472static void tb_remove_work(struct work_struct *work)
1473{
1474	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
1475	struct tb *tb = tcm_to_tb(tcm);
1476
1477	mutex_lock(&tb->lock);
1478	if (tb->root_switch) {
1479		tb_free_unplugged_children(tb->root_switch);
1480		tb_free_unplugged_xdomains(tb->root_switch);
1481	}
1482	mutex_unlock(&tb->lock);
1483}
1484
1485static int tb_runtime_resume(struct tb *tb)
1486{
1487	struct tb_cm *tcm = tb_priv(tb);
1488	struct tb_tunnel *tunnel, *n;
1489
1490	mutex_lock(&tb->lock);
1491	tb_switch_resume(tb->root_switch);
1492	tb_free_invalid_tunnels(tb);
1493	tb_restore_children(tb->root_switch);
1494	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
1495		tb_tunnel_restart(tunnel);
1496	tcm->hotplug_active = true;
1497	mutex_unlock(&tb->lock);
1498
1499	/*
1500	 * Schedule cleanup of any unplugged devices. Run this in a
1501	 * separate thread to avoid possible deadlock if the device
1502	 * removal runtime resumes the unplugged device.
1503	 */
1504	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
1505	return 0;
1506}
1507
1508static const struct tb_cm_ops tb_cm_ops = {
1509	.start = tb_start,
1510	.stop = tb_stop,
1511	.suspend_noirq = tb_suspend_noirq,
1512	.resume_noirq = tb_resume_noirq,
1513	.freeze_noirq = tb_freeze_noirq,
1514	.thaw_noirq = tb_thaw_noirq,
1515	.complete = tb_complete,
1516	.runtime_suspend = tb_runtime_suspend,
1517	.runtime_resume = tb_runtime_resume,
1518	.handle_event = tb_handle_event,
1519	.approve_switch = tb_tunnel_pci,
1520	.approve_xdomain_paths = tb_approve_xdomain_paths,
1521	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
1522};
1523
1524struct tb *tb_probe(struct tb_nhi *nhi)
1525{
1526	struct tb_cm *tcm;
1527	struct tb *tb;
1528
1529	tb = tb_domain_alloc(nhi, sizeof(*tcm));
1530	if (!tb)
1531		return NULL;
1532
1533	tb->security_level = TB_SECURITY_USER;
1534	tb->cm_ops = &tb_cm_ops;
1535
1536	tcm = tb_priv(tb);
1537	INIT_LIST_HEAD(&tcm->tunnel_list);
1538	INIT_LIST_HEAD(&tcm->dp_resources);
1539	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
1540
1541	return tb;
1542}
1543