1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Thunderbolt driver - switch/port utility functions
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2018, Intel Corporation
7 */
8
9#include <linux/delay.h>
10#include <linux/idr.h>
11#include <linux/nvmem-provider.h>
12#include <linux/pm_runtime.h>
13#include <linux/sched/signal.h>
14#include <linux/sizes.h>
15#include <linux/slab.h>
16
17#include "tb.h"
18
19/* Switch NVM support */
20
21#define NVM_CSS			0x10
22
23struct nvm_auth_status {
24	struct list_head list;
25	uuid_t uuid;
26	u32 status;
27};
28
29enum nvm_write_ops {
30	WRITE_AND_AUTHENTICATE = 1,
31	WRITE_ONLY = 2,
32};
33
34/*
35 * Hold NVM authentication failure status per switch This information
36 * needs to stay around even when the switch gets power cycled so we
37 * keep it separately.
38 */
39static LIST_HEAD(nvm_auth_status_cache);
40static DEFINE_MUTEX(nvm_auth_status_lock);
41
42static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
43{
44	struct nvm_auth_status *st;
45
46	list_for_each_entry(st, &nvm_auth_status_cache, list) {
47		if (uuid_equal(&st->uuid, sw->uuid))
48			return st;
49	}
50
51	return NULL;
52}
53
54static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
55{
56	struct nvm_auth_status *st;
57
58	mutex_lock(&nvm_auth_status_lock);
59	st = __nvm_get_auth_status(sw);
60	mutex_unlock(&nvm_auth_status_lock);
61
62	*status = st ? st->status : 0;
63}
64
65static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
66{
67	struct nvm_auth_status *st;
68
69	if (WARN_ON(!sw->uuid))
70		return;
71
72	mutex_lock(&nvm_auth_status_lock);
73	st = __nvm_get_auth_status(sw);
74
75	if (!st) {
76		st = kzalloc(sizeof(*st), GFP_KERNEL);
77		if (!st)
78			goto unlock;
79
80		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
81		INIT_LIST_HEAD(&st->list);
82		list_add_tail(&st->list, &nvm_auth_status_cache);
83	}
84
85	st->status = status;
86unlock:
87	mutex_unlock(&nvm_auth_status_lock);
88}
89
90static void nvm_clear_auth_status(const struct tb_switch *sw)
91{
92	struct nvm_auth_status *st;
93
94	mutex_lock(&nvm_auth_status_lock);
95	st = __nvm_get_auth_status(sw);
96	if (st) {
97		list_del(&st->list);
98		kfree(st);
99	}
100	mutex_unlock(&nvm_auth_status_lock);
101}
102
103static int nvm_validate_and_write(struct tb_switch *sw)
104{
105	unsigned int image_size, hdr_size;
106	const u8 *buf = sw->nvm->buf;
107	u16 ds_size;
108	int ret;
109
110	if (!buf)
111		return -EINVAL;
112
113	image_size = sw->nvm->buf_data_size;
114	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
115		return -EINVAL;
116
117	/*
118	 * FARB pointer must point inside the image and must at least
119	 * contain parts of the digital section we will be reading here.
120	 */
121	hdr_size = (*(u32 *)buf) & 0xffffff;
122	if (hdr_size + NVM_DEVID + 2 >= image_size)
123		return -EINVAL;
124
125	/* Digital section start should be aligned to 4k page */
126	if (!IS_ALIGNED(hdr_size, SZ_4K))
127		return -EINVAL;
128
129	/*
130	 * Read digital section size and check that it also fits inside
131	 * the image.
132	 */
133	ds_size = *(u16 *)(buf + hdr_size);
134	if (ds_size >= image_size)
135		return -EINVAL;
136
137	if (!sw->safe_mode) {
138		u16 device_id;
139
140		/*
141		 * Make sure the device ID in the image matches the one
142		 * we read from the switch config space.
143		 */
144		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
145		if (device_id != sw->config.device_id)
146			return -EINVAL;
147
148		if (sw->generation < 3) {
149			/* Write CSS headers first */
150			ret = dma_port_flash_write(sw->dma_port,
151				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
152				DMA_PORT_CSS_MAX_SIZE);
153			if (ret)
154				return ret;
155		}
156
157		/* Skip headers in the image */
158		buf += hdr_size;
159		image_size -= hdr_size;
160	}
161
162	if (tb_switch_is_usb4(sw))
163		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
164	else
165		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
166	if (!ret)
167		sw->nvm->flushed = true;
168	return ret;
169}
170
171static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
172{
173	int ret = 0;
174
175	/*
176	 * Root switch NVM upgrade requires that we disconnect the
177	 * existing paths first (in case it is not in safe mode
178	 * already).
179	 */
180	if (!sw->safe_mode) {
181		u32 status;
182
183		ret = tb_domain_disconnect_all_paths(sw->tb);
184		if (ret)
185			return ret;
186		/*
187		 * The host controller goes away pretty soon after this if
188		 * everything goes well so getting timeout is expected.
189		 */
190		ret = dma_port_flash_update_auth(sw->dma_port);
191		if (!ret || ret == -ETIMEDOUT)
192			return 0;
193
194		/*
195		 * Any error from update auth operation requires power
196		 * cycling of the host router.
197		 */
198		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
199		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
200			nvm_set_auth_status(sw, status);
201	}
202
203	/*
204	 * From safe mode we can get out by just power cycling the
205	 * switch.
206	 */
207	dma_port_power_cycle(sw->dma_port);
208	return ret;
209}
210
211static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
212{
213	int ret, retries = 10;
214
215	ret = dma_port_flash_update_auth(sw->dma_port);
216	switch (ret) {
217	case 0:
218	case -ETIMEDOUT:
219	case -EACCES:
220	case -EINVAL:
221		/* Power cycle is required */
222		break;
223	default:
224		return ret;
225	}
226
227	/*
228	 * Poll here for the authentication status. It takes some time
229	 * for the device to respond (we get timeout for a while). Once
230	 * we get response the device needs to be power cycled in order
231	 * to the new NVM to be taken into use.
232	 */
233	do {
234		u32 status;
235
236		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
237		if (ret < 0 && ret != -ETIMEDOUT)
238			return ret;
239		if (ret > 0) {
240			if (status) {
241				tb_sw_warn(sw, "failed to authenticate NVM\n");
242				nvm_set_auth_status(sw, status);
243			}
244
245			tb_sw_info(sw, "power cycling the switch now\n");
246			dma_port_power_cycle(sw->dma_port);
247			return 0;
248		}
249
250		msleep(500);
251	} while (--retries);
252
253	return -ETIMEDOUT;
254}
255
256static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
257{
258	struct pci_dev *root_port;
259
260	/*
261	 * During host router NVM upgrade we should not allow root port to
262	 * go into D3cold because some root ports cannot trigger PME
263	 * itself. To be on the safe side keep the root port in D0 during
264	 * the whole upgrade process.
265	 */
266	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
267	if (root_port)
268		pm_runtime_get_noresume(&root_port->dev);
269}
270
271static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
272{
273	struct pci_dev *root_port;
274
275	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
276	if (root_port)
277		pm_runtime_put(&root_port->dev);
278}
279
280static inline bool nvm_readable(struct tb_switch *sw)
281{
282	if (tb_switch_is_usb4(sw)) {
283		/*
284		 * USB4 devices must support NVM operations but it is
285		 * optional for hosts. Therefore we query the NVM sector
286		 * size here and if it is supported assume NVM
287		 * operations are implemented.
288		 */
289		return usb4_switch_nvm_sector_size(sw) > 0;
290	}
291
292	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
293	return !!sw->dma_port;
294}
295
296static inline bool nvm_upgradeable(struct tb_switch *sw)
297{
298	if (sw->no_nvm_upgrade)
299		return false;
300	return nvm_readable(sw);
301}
302
303static inline int nvm_read(struct tb_switch *sw, unsigned int address,
304			   void *buf, size_t size)
305{
306	if (tb_switch_is_usb4(sw))
307		return usb4_switch_nvm_read(sw, address, buf, size);
308	return dma_port_flash_read(sw->dma_port, address, buf, size);
309}
310
311static int nvm_authenticate(struct tb_switch *sw)
312{
313	int ret;
314
315	if (tb_switch_is_usb4(sw))
316		return usb4_switch_nvm_authenticate(sw);
317
318	if (!tb_route(sw)) {
319		nvm_authenticate_start_dma_port(sw);
320		ret = nvm_authenticate_host_dma_port(sw);
321	} else {
322		ret = nvm_authenticate_device_dma_port(sw);
323	}
324
325	return ret;
326}
327
328static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
329			      size_t bytes)
330{
331	struct tb_nvm *nvm = priv;
332	struct tb_switch *sw = tb_to_switch(nvm->dev);
333	int ret;
334
335	pm_runtime_get_sync(&sw->dev);
336
337	if (!mutex_trylock(&sw->tb->lock)) {
338		ret = restart_syscall();
339		goto out;
340	}
341
342	ret = nvm_read(sw, offset, val, bytes);
343	mutex_unlock(&sw->tb->lock);
344
345out:
346	pm_runtime_mark_last_busy(&sw->dev);
347	pm_runtime_put_autosuspend(&sw->dev);
348
349	return ret;
350}
351
352static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
353			       size_t bytes)
354{
355	struct tb_nvm *nvm = priv;
356	struct tb_switch *sw = tb_to_switch(nvm->dev);
357	int ret;
358
359	if (!mutex_trylock(&sw->tb->lock))
360		return restart_syscall();
361
362	/*
363	 * Since writing the NVM image might require some special steps,
364	 * for example when CSS headers are written, we cache the image
365	 * locally here and handle the special cases when the user asks
366	 * us to authenticate the image.
367	 */
368	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
369	mutex_unlock(&sw->tb->lock);
370
371	return ret;
372}
373
374static int tb_switch_nvm_add(struct tb_switch *sw)
375{
376	struct tb_nvm *nvm;
377	u32 val;
378	int ret;
379
380	if (!nvm_readable(sw))
381		return 0;
382
383	/*
384	 * The NVM format of non-Intel hardware is not known so
385	 * currently restrict NVM upgrade for Intel hardware. We may
386	 * relax this in the future when we learn other NVM formats.
387	 */
388	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
389	    sw->config.vendor_id != 0x8087) {
390		dev_info(&sw->dev,
391			 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
392			 sw->config.vendor_id);
393		return 0;
394	}
395
396	nvm = tb_nvm_alloc(&sw->dev);
397	if (IS_ERR(nvm))
398		return PTR_ERR(nvm);
399
400	/*
401	 * If the switch is in safe-mode the only accessible portion of
402	 * the NVM is the non-active one where userspace is expected to
403	 * write new functional NVM.
404	 */
405	if (!sw->safe_mode) {
406		u32 nvm_size, hdr_size;
407
408		ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
409		if (ret)
410			goto err_nvm;
411
412		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
413		nvm_size = (SZ_1M << (val & 7)) / 8;
414		nvm_size = (nvm_size - hdr_size) / 2;
415
416		ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
417		if (ret)
418			goto err_nvm;
419
420		nvm->major = val >> 16;
421		nvm->minor = val >> 8;
422
423		ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
424		if (ret)
425			goto err_nvm;
426	}
427
428	if (!sw->no_nvm_upgrade) {
429		ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
430					    tb_switch_nvm_write);
431		if (ret)
432			goto err_nvm;
433	}
434
435	sw->nvm = nvm;
436	return 0;
437
438err_nvm:
439	tb_nvm_free(nvm);
440	return ret;
441}
442
443static void tb_switch_nvm_remove(struct tb_switch *sw)
444{
445	struct tb_nvm *nvm;
446
447	nvm = sw->nvm;
448	sw->nvm = NULL;
449
450	if (!nvm)
451		return;
452
453	/* Remove authentication status in case the switch is unplugged */
454	if (!nvm->authenticating)
455		nvm_clear_auth_status(sw);
456
457	tb_nvm_free(nvm);
458}
459
460/* port utility functions */
461
462static const char *tb_port_type(struct tb_regs_port_header *port)
463{
464	switch (port->type >> 16) {
465	case 0:
466		switch ((u8) port->type) {
467		case 0:
468			return "Inactive";
469		case 1:
470			return "Port";
471		case 2:
472			return "NHI";
473		default:
474			return "unknown";
475		}
476	case 0x2:
477		return "Ethernet";
478	case 0x8:
479		return "SATA";
480	case 0xe:
481		return "DP/HDMI";
482	case 0x10:
483		return "PCIe";
484	case 0x20:
485		return "USB";
486	default:
487		return "unknown";
488	}
489}
490
491static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
492{
493	tb_dbg(tb,
494	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
495	       port->port_number, port->vendor_id, port->device_id,
496	       port->revision, port->thunderbolt_version, tb_port_type(port),
497	       port->type);
498	tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
499	       port->max_in_hop_id, port->max_out_hop_id);
500	tb_dbg(tb, "  Max counters: %d\n", port->max_counters);
501	tb_dbg(tb, "  NFC Credits: %#x\n", port->nfc_credits);
502}
503
504/**
505 * tb_port_state() - get connectedness state of a port
506 *
507 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
508 *
509 * Return: Returns an enum tb_port_state on success or an error code on failure.
510 */
511static int tb_port_state(struct tb_port *port)
512{
513	struct tb_cap_phy phy;
514	int res;
515	if (port->cap_phy == 0) {
516		tb_port_WARN(port, "does not have a PHY\n");
517		return -EINVAL;
518	}
519	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
520	if (res)
521		return res;
522	return phy.state;
523}
524
525/**
526 * tb_wait_for_port() - wait for a port to become ready
527 *
528 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
529 * wait_if_unplugged is set then we also wait if the port is in state
530 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
531 * switch resume). Otherwise we only wait if a device is registered but the link
532 * has not yet been established.
533 *
534 * Return: Returns an error code on failure. Returns 0 if the port is not
535 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
536 * if the port is connected and in state TB_PORT_UP.
537 */
538int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
539{
540	int retries = 10;
541	int state;
542	if (!port->cap_phy) {
543		tb_port_WARN(port, "does not have PHY\n");
544		return -EINVAL;
545	}
546	if (tb_is_upstream_port(port)) {
547		tb_port_WARN(port, "is the upstream port\n");
548		return -EINVAL;
549	}
550
551	while (retries--) {
552		state = tb_port_state(port);
553		if (state < 0)
554			return state;
555		if (state == TB_PORT_DISABLED) {
556			tb_port_dbg(port, "is disabled (state: 0)\n");
557			return 0;
558		}
559		if (state == TB_PORT_UNPLUGGED) {
560			if (wait_if_unplugged) {
561				/* used during resume */
562				tb_port_dbg(port,
563					    "is unplugged (state: 7), retrying...\n");
564				msleep(100);
565				continue;
566			}
567			tb_port_dbg(port, "is unplugged (state: 7)\n");
568			return 0;
569		}
570		if (state == TB_PORT_UP) {
571			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
572			return 1;
573		}
574
575		/*
576		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
577		 * time.
578		 */
579		tb_port_dbg(port,
580			    "is connected, link is not up (state: %d), retrying...\n",
581			    state);
582		msleep(100);
583	}
584	tb_port_warn(port,
585		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
586	return 0;
587}
588
589/**
590 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
591 *
592 * Change the number of NFC credits allocated to @port by @credits. To remove
593 * NFC credits pass a negative amount of credits.
594 *
595 * Return: Returns 0 on success or an error code on failure.
596 */
597int tb_port_add_nfc_credits(struct tb_port *port, int credits)
598{
599	u32 nfc_credits;
600
601	if (credits == 0 || port->sw->is_unplugged)
602		return 0;
603
604	/*
605	 * USB4 restricts programming NFC buffers to lane adapters only
606	 * so skip other ports.
607	 */
608	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
609		return 0;
610
611	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
612	nfc_credits += credits;
613
614	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
615		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
616
617	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
618	port->config.nfc_credits |= nfc_credits;
619
620	return tb_port_write(port, &port->config.nfc_credits,
621			     TB_CFG_PORT, ADP_CS_4, 1);
622}
623
624/**
625 * tb_port_set_initial_credits() - Set initial port link credits allocated
626 * @port: Port to set the initial credits
627 * @credits: Number of credits to to allocate
628 *
629 * Set initial credits value to be used for ingress shared buffering.
630 */
631int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
632{
633	u32 data;
634	int ret;
635
636	ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
637	if (ret)
638		return ret;
639
640	data &= ~ADP_CS_5_LCA_MASK;
641	data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;
642
643	return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
644}
645
646/**
647 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
648 *
649 * Return: Returns 0 on success or an error code on failure.
650 */
651int tb_port_clear_counter(struct tb_port *port, int counter)
652{
653	u32 zero[3] = { 0, 0, 0 };
654	tb_port_dbg(port, "clearing counter %d\n", counter);
655	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
656}
657
658/**
659 * tb_port_unlock() - Unlock downstream port
660 * @port: Port to unlock
661 *
662 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
663 * downstream router accessible for CM.
664 */
665int tb_port_unlock(struct tb_port *port)
666{
667	if (tb_switch_is_icm(port->sw))
668		return 0;
669	if (!tb_port_is_null(port))
670		return -EINVAL;
671	if (tb_switch_is_usb4(port->sw))
672		return usb4_port_unlock(port);
673	return 0;
674}
675
676static int __tb_port_enable(struct tb_port *port, bool enable)
677{
678	int ret;
679	u32 phy;
680
681	if (!tb_port_is_null(port))
682		return -EINVAL;
683
684	ret = tb_port_read(port, &phy, TB_CFG_PORT,
685			   port->cap_phy + LANE_ADP_CS_1, 1);
686	if (ret)
687		return ret;
688
689	if (enable)
690		phy &= ~LANE_ADP_CS_1_LD;
691	else
692		phy |= LANE_ADP_CS_1_LD;
693
694	return tb_port_write(port, &phy, TB_CFG_PORT,
695			     port->cap_phy + LANE_ADP_CS_1, 1);
696}
697
698/**
699 * tb_port_enable() - Enable lane adapter
700 * @port: Port to enable (can be %NULL)
701 *
702 * This is used for lane 0 and 1 adapters to enable it.
703 */
704int tb_port_enable(struct tb_port *port)
705{
706	return __tb_port_enable(port, true);
707}
708
709/**
710 * tb_port_disable() - Disable lane adapter
711 * @port: Port to disable (can be %NULL)
712 *
713 * This is used for lane 0 and 1 adapters to disable it.
714 */
715int tb_port_disable(struct tb_port *port)
716{
717	return __tb_port_enable(port, false);
718}
719
720/**
721 * tb_init_port() - initialize a port
722 *
723 * This is a helper method for tb_switch_alloc. Does not check or initialize
724 * any downstream switches.
725 *
726 * Return: Returns 0 on success or an error code on failure.
727 */
728static int tb_init_port(struct tb_port *port)
729{
730	int res;
731	int cap;
732
733	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
734	if (res) {
735		if (res == -ENODEV) {
736			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
737			       port->port);
738			port->disabled = true;
739			return 0;
740		}
741		return res;
742	}
743
744	/* Port 0 is the switch itself and has no PHY. */
745	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
746		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
747
748		if (cap > 0)
749			port->cap_phy = cap;
750		else
751			tb_port_WARN(port, "non switch port without a PHY\n");
752
753		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
754		if (cap > 0)
755			port->cap_usb4 = cap;
756	} else if (port->port != 0) {
757		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
758		if (cap > 0)
759			port->cap_adap = cap;
760	}
761
762	tb_dump_port(port->sw->tb, &port->config);
763
764	INIT_LIST_HEAD(&port->list);
765	return 0;
766
767}
768
769static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
770			       int max_hopid)
771{
772	int port_max_hopid;
773	struct ida *ida;
774
775	if (in) {
776		port_max_hopid = port->config.max_in_hop_id;
777		ida = &port->in_hopids;
778	} else {
779		port_max_hopid = port->config.max_out_hop_id;
780		ida = &port->out_hopids;
781	}
782
783	/*
784	 * NHI can use HopIDs 1-max for other adapters HopIDs 0-7 are
785	 * reserved.
786	 */
787	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
788		min_hopid = TB_PATH_MIN_HOPID;
789
790	if (max_hopid < 0 || max_hopid > port_max_hopid)
791		max_hopid = port_max_hopid;
792
793	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
794}
795
796/**
797 * tb_port_alloc_in_hopid() - Allocate input HopID from port
798 * @port: Port to allocate HopID for
799 * @min_hopid: Minimum acceptable input HopID
800 * @max_hopid: Maximum acceptable input HopID
801 *
802 * Return: HopID between @min_hopid and @max_hopid or negative errno in
803 * case of error.
804 */
805int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
806{
807	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
808}
809
810/**
811 * tb_port_alloc_out_hopid() - Allocate output HopID from port
812 * @port: Port to allocate HopID for
813 * @min_hopid: Minimum acceptable output HopID
814 * @max_hopid: Maximum acceptable output HopID
815 *
816 * Return: HopID between @min_hopid and @max_hopid or negative errno in
817 * case of error.
818 */
819int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
820{
821	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
822}
823
824/**
825 * tb_port_release_in_hopid() - Release allocated input HopID from port
826 * @port: Port whose HopID to release
827 * @hopid: HopID to release
828 */
829void tb_port_release_in_hopid(struct tb_port *port, int hopid)
830{
831	ida_simple_remove(&port->in_hopids, hopid);
832}
833
834/**
835 * tb_port_release_out_hopid() - Release allocated output HopID from port
836 * @port: Port whose HopID to release
837 * @hopid: HopID to release
838 */
839void tb_port_release_out_hopid(struct tb_port *port, int hopid)
840{
841	ida_simple_remove(&port->out_hopids, hopid);
842}
843
844static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
845					  const struct tb_switch *sw)
846{
847	u64 mask = (1ULL << parent->config.depth * 8) - 1;
848	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
849}
850
851/**
852 * tb_next_port_on_path() - Return next port for given port on a path
853 * @start: Start port of the walk
854 * @end: End port of the walk
855 * @prev: Previous port (%NULL if this is the first)
856 *
857 * This function can be used to walk from one port to another if they
858 * are connected through zero or more switches. If the @prev is dual
859 * link port, the function follows that link and returns another end on
860 * that same link.
861 *
862 * If the @end port has been reached, return %NULL.
863 *
864 * Domain tb->lock must be held when this function is called.
865 */
866struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
867				     struct tb_port *prev)
868{
869	struct tb_port *next;
870
871	if (!prev)
872		return start;
873
874	if (prev->sw == end->sw) {
875		if (prev == end)
876			return NULL;
877		return end;
878	}
879
880	if (tb_switch_is_reachable(prev->sw, end->sw)) {
881		next = tb_port_at(tb_route(end->sw), prev->sw);
882		/* Walk down the topology if next == prev */
883		if (prev->remote &&
884		    (next == prev || next->dual_link_port == prev))
885			next = prev->remote;
886	} else {
887		if (tb_is_upstream_port(prev)) {
888			next = prev->remote;
889		} else {
890			next = tb_upstream_port(prev->sw);
891			/*
892			 * Keep the same link if prev and next are both
893			 * dual link ports.
894			 */
895			if (next->dual_link_port &&
896			    next->link_nr != prev->link_nr) {
897				next = next->dual_link_port;
898			}
899		}
900	}
901
902	return next != prev ? next : NULL;
903}
904
905/**
906 * tb_port_get_link_speed() - Get current link speed
907 * @port: Port to check (USB4 or CIO)
908 *
909 * Returns link speed in Gb/s or negative errno in case of failure.
910 */
911int tb_port_get_link_speed(struct tb_port *port)
912{
913	u32 val, speed;
914	int ret;
915
916	if (!port->cap_phy)
917		return -EINVAL;
918
919	ret = tb_port_read(port, &val, TB_CFG_PORT,
920			   port->cap_phy + LANE_ADP_CS_1, 1);
921	if (ret)
922		return ret;
923
924	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
925		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
926	return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
927}
928
929static int tb_port_get_link_width(struct tb_port *port)
930{
931	u32 val;
932	int ret;
933
934	if (!port->cap_phy)
935		return -EINVAL;
936
937	ret = tb_port_read(port, &val, TB_CFG_PORT,
938			   port->cap_phy + LANE_ADP_CS_1, 1);
939	if (ret)
940		return ret;
941
942	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
943		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
944}
945
946static bool tb_port_is_width_supported(struct tb_port *port, int width)
947{
948	u32 phy, widths;
949	int ret;
950
951	if (!port->cap_phy)
952		return false;
953
954	ret = tb_port_read(port, &phy, TB_CFG_PORT,
955			   port->cap_phy + LANE_ADP_CS_0, 1);
956	if (ret)
957		return false;
958
959	widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
960		LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
961
962	return !!(widths & width);
963}
964
965static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
966{
967	u32 val;
968	int ret;
969
970	if (!port->cap_phy)
971		return -EINVAL;
972
973	ret = tb_port_read(port, &val, TB_CFG_PORT,
974			   port->cap_phy + LANE_ADP_CS_1, 1);
975	if (ret)
976		return ret;
977
978	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
979	switch (width) {
980	case 1:
981		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
982			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
983		break;
984	case 2:
985		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
986			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
987		break;
988	default:
989		return -EINVAL;
990	}
991
992	val |= LANE_ADP_CS_1_LB;
993
994	return tb_port_write(port, &val, TB_CFG_PORT,
995			     port->cap_phy + LANE_ADP_CS_1, 1);
996}
997
998static int tb_port_lane_bonding_enable(struct tb_port *port)
999{
1000	int ret;
1001
1002	/*
1003	 * Enable lane bonding for both links if not already enabled by
1004	 * for example the boot firmware.
1005	 */
1006	ret = tb_port_get_link_width(port);
1007	if (ret == 1) {
1008		ret = tb_port_set_link_width(port, 2);
1009		if (ret)
1010			return ret;
1011	}
1012
1013	ret = tb_port_get_link_width(port->dual_link_port);
1014	if (ret == 1) {
1015		ret = tb_port_set_link_width(port->dual_link_port, 2);
1016		if (ret) {
1017			tb_port_set_link_width(port, 1);
1018			return ret;
1019		}
1020	}
1021
1022	port->bonded = true;
1023	port->dual_link_port->bonded = true;
1024
1025	return 0;
1026}
1027
1028static void tb_port_lane_bonding_disable(struct tb_port *port)
1029{
1030	port->dual_link_port->bonded = false;
1031	port->bonded = false;
1032
1033	tb_port_set_link_width(port->dual_link_port, 1);
1034	tb_port_set_link_width(port, 1);
1035}
1036
1037/**
1038 * tb_port_is_enabled() - Is the adapter port enabled
1039 * @port: Port to check
1040 */
1041bool tb_port_is_enabled(struct tb_port *port)
1042{
1043	switch (port->config.type) {
1044	case TB_TYPE_PCIE_UP:
1045	case TB_TYPE_PCIE_DOWN:
1046		return tb_pci_port_is_enabled(port);
1047
1048	case TB_TYPE_DP_HDMI_IN:
1049	case TB_TYPE_DP_HDMI_OUT:
1050		return tb_dp_port_is_enabled(port);
1051
1052	case TB_TYPE_USB3_UP:
1053	case TB_TYPE_USB3_DOWN:
1054		return tb_usb3_port_is_enabled(port);
1055
1056	default:
1057		return false;
1058	}
1059}
1060
1061/**
1062 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
1063 * @port: USB3 adapter port to check
1064 */
1065bool tb_usb3_port_is_enabled(struct tb_port *port)
1066{
1067	u32 data;
1068
1069	if (tb_port_read(port, &data, TB_CFG_PORT,
1070			 port->cap_adap + ADP_USB3_CS_0, 1))
1071		return false;
1072
1073	return !!(data & ADP_USB3_CS_0_PE);
1074}
1075
1076/**
1077 * tb_usb3_port_enable() - Enable USB3 adapter port
1078 * @port: USB3 adapter port to enable
1079 * @enable: Enable/disable the USB3 adapter
1080 */
1081int tb_usb3_port_enable(struct tb_port *port, bool enable)
1082{
1083	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
1084			  : ADP_USB3_CS_0_V;
1085
1086	if (!port->cap_adap)
1087		return -ENXIO;
1088	return tb_port_write(port, &word, TB_CFG_PORT,
1089			     port->cap_adap + ADP_USB3_CS_0, 1);
1090}
1091
1092/**
1093 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
1094 * @port: PCIe port to check
1095 */
1096bool tb_pci_port_is_enabled(struct tb_port *port)
1097{
1098	u32 data;
1099
1100	if (tb_port_read(port, &data, TB_CFG_PORT,
1101			 port->cap_adap + ADP_PCIE_CS_0, 1))
1102		return false;
1103
1104	return !!(data & ADP_PCIE_CS_0_PE);
1105}
1106
1107/**
1108 * tb_pci_port_enable() - Enable PCIe adapter port
1109 * @port: PCIe port to enable
1110 * @enable: Enable/disable the PCIe adapter
1111 */
1112int tb_pci_port_enable(struct tb_port *port, bool enable)
1113{
1114	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
1115	if (!port->cap_adap)
1116		return -ENXIO;
1117	return tb_port_write(port, &word, TB_CFG_PORT,
1118			     port->cap_adap + ADP_PCIE_CS_0, 1);
1119}
1120
1121/**
1122 * tb_dp_port_hpd_is_active() - Is HPD already active
1123 * @port: DP out port to check
1124 *
1125 * Checks if the DP OUT adapter port has HDP bit already set.
1126 */
1127int tb_dp_port_hpd_is_active(struct tb_port *port)
1128{
1129	u32 data;
1130	int ret;
1131
1132	ret = tb_port_read(port, &data, TB_CFG_PORT,
1133			   port->cap_adap + ADP_DP_CS_2, 1);
1134	if (ret)
1135		return ret;
1136
1137	return !!(data & ADP_DP_CS_2_HDP);
1138}
1139
1140/**
1141 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
1142 * @port: Port to clear HPD
1143 *
1144 * If the DP IN port has HDP set, this function can be used to clear it.
1145 */
1146int tb_dp_port_hpd_clear(struct tb_port *port)
1147{
1148	u32 data;
1149	int ret;
1150
1151	ret = tb_port_read(port, &data, TB_CFG_PORT,
1152			   port->cap_adap + ADP_DP_CS_3, 1);
1153	if (ret)
1154		return ret;
1155
1156	data |= ADP_DP_CS_3_HDPC;
1157	return tb_port_write(port, &data, TB_CFG_PORT,
1158			     port->cap_adap + ADP_DP_CS_3, 1);
1159}
1160
1161/**
1162 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
1163 * @port: DP IN/OUT port to set hops
1164 * @video: Video Hop ID
1165 * @aux_tx: AUX TX Hop ID
1166 * @aux_rx: AUX RX Hop ID
1167 *
1168 * Programs specified Hop IDs for DP IN/OUT port.
1169 */
1170int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
1171			unsigned int aux_tx, unsigned int aux_rx)
1172{
1173	u32 data[2];
1174	int ret;
1175
1176	ret = tb_port_read(port, data, TB_CFG_PORT,
1177			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1178	if (ret)
1179		return ret;
1180
1181	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
1182	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1183	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1184
1185	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
1186		ADP_DP_CS_0_VIDEO_HOPID_MASK;
1187	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1188	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
1189		ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1190
1191	return tb_port_write(port, data, TB_CFG_PORT,
1192			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1193}
1194
1195/**
1196 * tb_dp_port_is_enabled() - Is DP adapter port enabled
1197 * @port: DP adapter port to check
1198 */
1199bool tb_dp_port_is_enabled(struct tb_port *port)
1200{
1201	u32 data[2];
1202
1203	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
1204			 ARRAY_SIZE(data)))
1205		return false;
1206
1207	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
1208}
1209
1210/**
1211 * tb_dp_port_enable() - Enables/disables DP paths of a port
1212 * @port: DP IN/OUT port
1213 * @enable: Enable/disable DP path
1214 *
1215 * Once Hop IDs are programmed DP paths can be enabled or disabled by
1216 * calling this function.
1217 */
1218int tb_dp_port_enable(struct tb_port *port, bool enable)
1219{
1220	u32 data[2];
1221	int ret;
1222
1223	ret = tb_port_read(port, data, TB_CFG_PORT,
1224			  port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1225	if (ret)
1226		return ret;
1227
1228	if (enable)
1229		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
1230	else
1231		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
1232
1233	return tb_port_write(port, data, TB_CFG_PORT,
1234			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1235}
1236
1237/* switch utility functions */
1238
1239static const char *tb_switch_generation_name(const struct tb_switch *sw)
1240{
1241	switch (sw->generation) {
1242	case 1:
1243		return "Thunderbolt 1";
1244	case 2:
1245		return "Thunderbolt 2";
1246	case 3:
1247		return "Thunderbolt 3";
1248	case 4:
1249		return "USB4";
1250	default:
1251		return "Unknown";
1252	}
1253}
1254
1255static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
1256{
1257	const struct tb_regs_switch_header *regs = &sw->config;
1258
1259	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1260	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
1261	       regs->revision, regs->thunderbolt_version);
1262	tb_dbg(tb, "  Max Port Number: %d\n", regs->max_port_number);
1263	tb_dbg(tb, "  Config:\n");
1264	tb_dbg(tb,
1265		"   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
1266	       regs->upstream_port_number, regs->depth,
1267	       (((u64) regs->route_hi) << 32) | regs->route_lo,
1268	       regs->enabled, regs->plug_events_delay);
1269	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
1270	       regs->__unknown1, regs->__unknown4);
1271}
1272
1273/**
1274 * reset_switch() - reconfigure route, enable and send TB_CFG_PKG_RESET
1275 * @sw: Switch to reset
1276 *
1277 * Return: Returns 0 on success or an error code on failure.
1278 */
1279int tb_switch_reset(struct tb_switch *sw)
1280{
1281	struct tb_cfg_result res;
1282
1283	if (sw->generation > 1)
1284		return 0;
1285
1286	tb_sw_dbg(sw, "resetting switch\n");
1287
1288	res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
1289			      TB_CFG_SWITCH, 2, 2);
1290	if (res.err)
1291		return res.err;
1292	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw), TB_CFG_DEFAULT_TIMEOUT);
1293	if (res.err > 0)
1294		return -EIO;
1295	return res.err;
1296}
1297
1298/**
1299 * tb_plug_events_active() - enable/disable plug events on a switch
1300 *
1301 * Also configures a sane plug_events_delay of 255ms.
1302 *
1303 * Return: Returns 0 on success or an error code on failure.
1304 */
1305static int tb_plug_events_active(struct tb_switch *sw, bool active)
1306{
1307	u32 data;
1308	int res;
1309
1310	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
1311		return 0;
1312
1313	sw->config.plug_events_delay = 0xff;
1314	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
1315	if (res)
1316		return res;
1317
1318	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
1319	if (res)
1320		return res;
1321
1322	if (active) {
1323		data = data & 0xFFFFFF83;
1324		switch (sw->config.device_id) {
1325		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1326		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1327		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1328			break;
1329		default:
1330			data |= 4;
1331		}
1332	} else {
1333		data = data | 0x7c;
1334	}
1335	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1336			   sw->cap_plug_events + 1, 1);
1337}
1338
1339static ssize_t authorized_show(struct device *dev,
1340			       struct device_attribute *attr,
1341			       char *buf)
1342{
1343	struct tb_switch *sw = tb_to_switch(dev);
1344
1345	return sprintf(buf, "%u\n", sw->authorized);
1346}
1347
1348static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1349{
1350	int ret = -EINVAL;
1351
1352	if (!mutex_trylock(&sw->tb->lock))
1353		return restart_syscall();
1354
1355	if (sw->authorized)
1356		goto unlock;
1357
1358	switch (val) {
1359	/* Approve switch */
1360	case 1:
1361		if (sw->key)
1362			ret = tb_domain_approve_switch_key(sw->tb, sw);
1363		else
1364			ret = tb_domain_approve_switch(sw->tb, sw);
1365		break;
1366
1367	/* Challenge switch */
1368	case 2:
1369		if (sw->key)
1370			ret = tb_domain_challenge_switch_key(sw->tb, sw);
1371		break;
1372
1373	default:
1374		break;
1375	}
1376
1377	if (!ret) {
1378		sw->authorized = val;
1379		/* Notify status change to the userspace */
1380		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
1381	}
1382
1383unlock:
1384	mutex_unlock(&sw->tb->lock);
1385	return ret;
1386}
1387
1388static ssize_t authorized_store(struct device *dev,
1389				struct device_attribute *attr,
1390				const char *buf, size_t count)
1391{
1392	struct tb_switch *sw = tb_to_switch(dev);
1393	unsigned int val;
1394	ssize_t ret;
1395
1396	ret = kstrtouint(buf, 0, &val);
1397	if (ret)
1398		return ret;
1399	if (val > 2)
1400		return -EINVAL;
1401
1402	pm_runtime_get_sync(&sw->dev);
1403	ret = tb_switch_set_authorized(sw, val);
1404	pm_runtime_mark_last_busy(&sw->dev);
1405	pm_runtime_put_autosuspend(&sw->dev);
1406
1407	return ret ? ret : count;
1408}
1409static DEVICE_ATTR_RW(authorized);
1410
1411static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
1412			 char *buf)
1413{
1414	struct tb_switch *sw = tb_to_switch(dev);
1415
1416	return sprintf(buf, "%u\n", sw->boot);
1417}
1418static DEVICE_ATTR_RO(boot);
1419
1420static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1421			   char *buf)
1422{
1423	struct tb_switch *sw = tb_to_switch(dev);
1424
1425	return sprintf(buf, "%#x\n", sw->device);
1426}
1427static DEVICE_ATTR_RO(device);
1428
1429static ssize_t
1430device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1431{
1432	struct tb_switch *sw = tb_to_switch(dev);
1433
1434	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
1435}
1436static DEVICE_ATTR_RO(device_name);
1437
1438static ssize_t
1439generation_show(struct device *dev, struct device_attribute *attr, char *buf)
1440{
1441	struct tb_switch *sw = tb_to_switch(dev);
1442
1443	return sprintf(buf, "%u\n", sw->generation);
1444}
1445static DEVICE_ATTR_RO(generation);
1446
1447static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1448			char *buf)
1449{
1450	struct tb_switch *sw = tb_to_switch(dev);
1451	ssize_t ret;
1452
1453	if (!mutex_trylock(&sw->tb->lock))
1454		return restart_syscall();
1455
1456	if (sw->key)
1457		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1458	else
1459		ret = sprintf(buf, "\n");
1460
1461	mutex_unlock(&sw->tb->lock);
1462	return ret;
1463}
1464
1465static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1466			 const char *buf, size_t count)
1467{
1468	struct tb_switch *sw = tb_to_switch(dev);
1469	u8 key[TB_SWITCH_KEY_SIZE];
1470	ssize_t ret = count;
1471	bool clear = false;
1472
1473	if (!strcmp(buf, "\n"))
1474		clear = true;
1475	else if (hex2bin(key, buf, sizeof(key)))
1476		return -EINVAL;
1477
1478	if (!mutex_trylock(&sw->tb->lock))
1479		return restart_syscall();
1480
1481	if (sw->authorized) {
1482		ret = -EBUSY;
1483	} else {
1484		kfree(sw->key);
1485		if (clear) {
1486			sw->key = NULL;
1487		} else {
1488			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1489			if (!sw->key)
1490				ret = -ENOMEM;
1491		}
1492	}
1493
1494	mutex_unlock(&sw->tb->lock);
1495	return ret;
1496}
1497static DEVICE_ATTR(key, 0600, key_show, key_store);
1498
1499static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1500			  char *buf)
1501{
1502	struct tb_switch *sw = tb_to_switch(dev);
1503
1504	return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
1505}
1506
1507/*
1508 * Currently all lanes must run at the same speed but we expose here
1509 * both directions to allow possible asymmetric links in the future.
1510 */
1511static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1512static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1513
1514static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
1515			  char *buf)
1516{
1517	struct tb_switch *sw = tb_to_switch(dev);
1518
1519	return sprintf(buf, "%u\n", sw->link_width);
1520}
1521
1522/*
1523 * Currently link has same amount of lanes both directions (1 or 2) but
1524 * expose them separately to allow possible asymmetric links in the future.
1525 */
1526static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
1527static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
1528
1529static ssize_t nvm_authenticate_show(struct device *dev,
1530	struct device_attribute *attr, char *buf)
1531{
1532	struct tb_switch *sw = tb_to_switch(dev);
1533	u32 status;
1534
1535	nvm_get_auth_status(sw, &status);
1536	return sprintf(buf, "%#x\n", status);
1537}
1538
1539static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
1540				      bool disconnect)
1541{
1542	struct tb_switch *sw = tb_to_switch(dev);
1543	int val;
1544	int ret;
1545
1546	pm_runtime_get_sync(&sw->dev);
1547
1548	if (!mutex_trylock(&sw->tb->lock)) {
1549		ret = restart_syscall();
1550		goto exit_rpm;
1551	}
1552
1553	/* If NVMem devices are not yet added */
1554	if (!sw->nvm) {
1555		ret = -EAGAIN;
1556		goto exit_unlock;
1557	}
1558
1559	ret = kstrtoint(buf, 10, &val);
1560	if (ret)
1561		goto exit_unlock;
1562
1563	/* Always clear the authentication status */
1564	nvm_clear_auth_status(sw);
1565
1566	if (val > 0) {
1567		if (!sw->nvm->flushed) {
1568			if (!sw->nvm->buf) {
1569				ret = -EINVAL;
1570				goto exit_unlock;
1571			}
1572
1573			ret = nvm_validate_and_write(sw);
1574			if (ret || val == WRITE_ONLY)
1575				goto exit_unlock;
1576		}
1577		if (val == WRITE_AND_AUTHENTICATE) {
1578			if (disconnect) {
1579				ret = tb_lc_force_power(sw);
1580			} else {
1581				sw->nvm->authenticating = true;
1582				ret = nvm_authenticate(sw);
1583			}
1584		}
1585	}
1586
1587exit_unlock:
1588	mutex_unlock(&sw->tb->lock);
1589exit_rpm:
1590	pm_runtime_mark_last_busy(&sw->dev);
1591	pm_runtime_put_autosuspend(&sw->dev);
1592
1593	return ret;
1594}
1595
1596static ssize_t nvm_authenticate_store(struct device *dev,
1597	struct device_attribute *attr, const char *buf, size_t count)
1598{
1599	int ret = nvm_authenticate_sysfs(dev, buf, false);
1600	if (ret)
1601		return ret;
1602	return count;
1603}
1604static DEVICE_ATTR_RW(nvm_authenticate);
1605
1606static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
1607	struct device_attribute *attr, char *buf)
1608{
1609	return nvm_authenticate_show(dev, attr, buf);
1610}
1611
1612static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
1613	struct device_attribute *attr, const char *buf, size_t count)
1614{
1615	int ret;
1616
1617	ret = nvm_authenticate_sysfs(dev, buf, true);
1618	return ret ? ret : count;
1619}
1620static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
1621
1622static ssize_t nvm_version_show(struct device *dev,
1623				struct device_attribute *attr, char *buf)
1624{
1625	struct tb_switch *sw = tb_to_switch(dev);
1626	int ret;
1627
1628	if (!mutex_trylock(&sw->tb->lock))
1629		return restart_syscall();
1630
1631	if (sw->safe_mode)
1632		ret = -ENODATA;
1633	else if (!sw->nvm)
1634		ret = -EAGAIN;
1635	else
1636		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
1637
1638	mutex_unlock(&sw->tb->lock);
1639
1640	return ret;
1641}
1642static DEVICE_ATTR_RO(nvm_version);
1643
1644static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
1645			   char *buf)
1646{
1647	struct tb_switch *sw = tb_to_switch(dev);
1648
1649	return sprintf(buf, "%#x\n", sw->vendor);
1650}
1651static DEVICE_ATTR_RO(vendor);
1652
1653static ssize_t
1654vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1655{
1656	struct tb_switch *sw = tb_to_switch(dev);
1657
1658	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
1659}
1660static DEVICE_ATTR_RO(vendor_name);
1661
1662static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1663			      char *buf)
1664{
1665	struct tb_switch *sw = tb_to_switch(dev);
1666
1667	return sprintf(buf, "%pUb\n", sw->uuid);
1668}
1669static DEVICE_ATTR_RO(unique_id);
1670
1671static struct attribute *switch_attrs[] = {
1672	&dev_attr_authorized.attr,
1673	&dev_attr_boot.attr,
1674	&dev_attr_device.attr,
1675	&dev_attr_device_name.attr,
1676	&dev_attr_generation.attr,
1677	&dev_attr_key.attr,
1678	&dev_attr_nvm_authenticate.attr,
1679	&dev_attr_nvm_authenticate_on_disconnect.attr,
1680	&dev_attr_nvm_version.attr,
1681	&dev_attr_rx_speed.attr,
1682	&dev_attr_rx_lanes.attr,
1683	&dev_attr_tx_speed.attr,
1684	&dev_attr_tx_lanes.attr,
1685	&dev_attr_vendor.attr,
1686	&dev_attr_vendor_name.attr,
1687	&dev_attr_unique_id.attr,
1688	NULL,
1689};
1690
1691static umode_t switch_attr_is_visible(struct kobject *kobj,
1692				      struct attribute *attr, int n)
1693{
1694	struct device *dev = kobj_to_dev(kobj);
1695	struct tb_switch *sw = tb_to_switch(dev);
1696
1697	if (attr == &dev_attr_device.attr) {
1698		if (!sw->device)
1699			return 0;
1700	} else if (attr == &dev_attr_device_name.attr) {
1701		if (!sw->device_name)
1702			return 0;
1703	} else if (attr == &dev_attr_vendor.attr)  {
1704		if (!sw->vendor)
1705			return 0;
1706	} else if (attr == &dev_attr_vendor_name.attr)  {
1707		if (!sw->vendor_name)
1708			return 0;
1709	} else if (attr == &dev_attr_key.attr) {
1710		if (tb_route(sw) &&
1711		    sw->tb->security_level == TB_SECURITY_SECURE &&
1712		    sw->security_level == TB_SECURITY_SECURE)
1713			return attr->mode;
1714		return 0;
1715	} else if (attr == &dev_attr_rx_speed.attr ||
1716		   attr == &dev_attr_rx_lanes.attr ||
1717		   attr == &dev_attr_tx_speed.attr ||
1718		   attr == &dev_attr_tx_lanes.attr) {
1719		if (tb_route(sw))
1720			return attr->mode;
1721		return 0;
1722	} else if (attr == &dev_attr_nvm_authenticate.attr) {
1723		if (nvm_upgradeable(sw))
1724			return attr->mode;
1725		return 0;
1726	} else if (attr == &dev_attr_nvm_version.attr) {
1727		if (nvm_readable(sw))
1728			return attr->mode;
1729		return 0;
1730	} else if (attr == &dev_attr_boot.attr) {
1731		if (tb_route(sw))
1732			return attr->mode;
1733		return 0;
1734	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
1735		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
1736			return attr->mode;
1737		return 0;
1738	}
1739
1740	return sw->safe_mode ? 0 : attr->mode;
1741}
1742
1743static struct attribute_group switch_group = {
1744	.is_visible = switch_attr_is_visible,
1745	.attrs = switch_attrs,
1746};
1747
1748static const struct attribute_group *switch_groups[] = {
1749	&switch_group,
1750	NULL,
1751};
1752
1753static void tb_switch_release(struct device *dev)
1754{
1755	struct tb_switch *sw = tb_to_switch(dev);
1756	struct tb_port *port;
1757
1758	dma_port_free(sw->dma_port);
1759
1760	tb_switch_for_each_port(sw, port) {
1761		ida_destroy(&port->in_hopids);
1762		ida_destroy(&port->out_hopids);
1763	}
1764
1765	kfree(sw->uuid);
1766	kfree(sw->device_name);
1767	kfree(sw->vendor_name);
1768	kfree(sw->ports);
1769	kfree(sw->drom);
1770	kfree(sw->key);
1771	kfree(sw);
1772}
1773
1774/*
1775 * Currently only need to provide the callbacks. Everything else is handled
1776 * in the connection manager.
1777 */
1778static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
1779{
1780	struct tb_switch *sw = tb_to_switch(dev);
1781	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
1782
1783	if (cm_ops->runtime_suspend_switch)
1784		return cm_ops->runtime_suspend_switch(sw);
1785
1786	return 0;
1787}
1788
1789static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
1790{
1791	struct tb_switch *sw = tb_to_switch(dev);
1792	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
1793
1794	if (cm_ops->runtime_resume_switch)
1795		return cm_ops->runtime_resume_switch(sw);
1796	return 0;
1797}
1798
1799static const struct dev_pm_ops tb_switch_pm_ops = {
1800	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
1801			   NULL)
1802};
1803
1804struct device_type tb_switch_type = {
1805	.name = "thunderbolt_device",
1806	.release = tb_switch_release,
1807	.pm = &tb_switch_pm_ops,
1808};
1809
1810static int tb_switch_get_generation(struct tb_switch *sw)
1811{
1812	switch (sw->config.device_id) {
1813	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1814	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1815	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
1816	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
1817	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
1818	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1819	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
1820	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
1821		return 1;
1822
1823	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
1824	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
1825	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
1826		return 2;
1827
1828	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
1829	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
1830	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
1831	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
1832	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
1833	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
1834	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
1835	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
1836	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
1837	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
1838		return 3;
1839
1840	default:
1841		if (tb_switch_is_usb4(sw))
1842			return 4;
1843
1844		/*
1845		 * For unknown switches assume generation to be 1 to be
1846		 * on the safe side.
1847		 */
1848		tb_sw_warn(sw, "unsupported switch device id %#x\n",
1849			   sw->config.device_id);
1850		return 1;
1851	}
1852}
1853
1854static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
1855{
1856	int max_depth;
1857
1858	if (tb_switch_is_usb4(sw) ||
1859	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
1860		max_depth = USB4_SWITCH_MAX_DEPTH;
1861	else
1862		max_depth = TB_SWITCH_MAX_DEPTH;
1863
1864	return depth > max_depth;
1865}
1866
1867/**
1868 * tb_switch_alloc() - allocate a switch
1869 * @tb: Pointer to the owning domain
1870 * @parent: Parent device for this switch
1871 * @route: Route string for this switch
1872 *
1873 * Allocates and initializes a switch. Will not upload configuration to
1874 * the switch. For that you need to call tb_switch_configure()
1875 * separately. The returned switch should be released by calling
1876 * tb_switch_put().
1877 *
1878 * Return: Pointer to the allocated switch or ERR_PTR() in case of
1879 * failure.
1880 */
1881struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
1882				  u64 route)
1883{
1884	struct tb_switch *sw;
1885	int upstream_port;
1886	int i, ret, depth;
1887
1888	/* Unlock the downstream port so we can access the switch below */
1889	if (route) {
1890		struct tb_switch *parent_sw = tb_to_switch(parent);
1891		struct tb_port *down;
1892
1893		down = tb_port_at(route, parent_sw);
1894		tb_port_unlock(down);
1895	}
1896
1897	depth = tb_route_length(route);
1898
1899	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
1900	if (upstream_port < 0)
1901		return ERR_PTR(upstream_port);
1902
1903	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1904	if (!sw)
1905		return ERR_PTR(-ENOMEM);
1906
1907	sw->tb = tb;
1908	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
1909	if (ret)
1910		goto err_free_sw_ports;
1911
1912	sw->generation = tb_switch_get_generation(sw);
1913
1914	tb_dbg(tb, "current switch config:\n");
1915	tb_dump_switch(tb, sw);
1916
1917	/* configure switch */
1918	sw->config.upstream_port_number = upstream_port;
1919	sw->config.depth = depth;
1920	sw->config.route_hi = upper_32_bits(route);
1921	sw->config.route_lo = lower_32_bits(route);
1922	sw->config.enabled = 0;
1923
1924	/* Make sure we do not exceed maximum topology limit */
1925	if (tb_switch_exceeds_max_depth(sw, depth)) {
1926		ret = -EADDRNOTAVAIL;
1927		goto err_free_sw_ports;
1928	}
1929
1930	/* initialize ports */
1931	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
1932				GFP_KERNEL);
1933	if (!sw->ports) {
1934		ret = -ENOMEM;
1935		goto err_free_sw_ports;
1936	}
1937
1938	for (i = 0; i <= sw->config.max_port_number; i++) {
1939		/* minimum setup for tb_find_cap and tb_drom_read to work */
1940		sw->ports[i].sw = sw;
1941		sw->ports[i].port = i;
1942
1943		/* Control port does not need HopID allocation */
1944		if (i) {
1945			ida_init(&sw->ports[i].in_hopids);
1946			ida_init(&sw->ports[i].out_hopids);
1947		}
1948	}
1949
1950	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
1951	if (ret > 0)
1952		sw->cap_plug_events = ret;
1953
1954	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
1955	if (ret > 0)
1956		sw->cap_lc = ret;
1957
1958	/* Root switch is always authorized */
1959	if (!route)
1960		sw->authorized = true;
1961
1962	device_initialize(&sw->dev);
1963	sw->dev.parent = parent;
1964	sw->dev.bus = &tb_bus_type;
1965	sw->dev.type = &tb_switch_type;
1966	sw->dev.groups = switch_groups;
1967	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
1968
1969	return sw;
1970
1971err_free_sw_ports:
1972	kfree(sw->ports);
1973	kfree(sw);
1974
1975	return ERR_PTR(ret);
1976}
1977
1978/**
1979 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
1980 * @tb: Pointer to the owning domain
1981 * @parent: Parent device for this switch
1982 * @route: Route string for this switch
1983 *
1984 * This creates a switch in safe mode. This means the switch pretty much
1985 * lacks all capabilities except DMA configuration port before it is
1986 * flashed with a valid NVM firmware.
1987 *
1988 * The returned switch must be released by calling tb_switch_put().
1989 *
1990 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
1991 */
1992struct tb_switch *
1993tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
1994{
1995	struct tb_switch *sw;
1996
1997	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1998	if (!sw)
1999		return ERR_PTR(-ENOMEM);
2000
2001	sw->tb = tb;
2002	sw->config.depth = tb_route_length(route);
2003	sw->config.route_hi = upper_32_bits(route);
2004	sw->config.route_lo = lower_32_bits(route);
2005	sw->safe_mode = true;
2006
2007	device_initialize(&sw->dev);
2008	sw->dev.parent = parent;
2009	sw->dev.bus = &tb_bus_type;
2010	sw->dev.type = &tb_switch_type;
2011	sw->dev.groups = switch_groups;
2012	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2013
2014	return sw;
2015}
2016
2017/**
2018 * tb_switch_configure() - Uploads configuration to the switch
2019 * @sw: Switch to configure
2020 *
2021 * Call this function before the switch is added to the system. It will
2022 * upload configuration to the switch and makes it available for the
2023 * connection manager to use. Can be called to the switch again after
2024 * resume from low power states to re-initialize it.
2025 *
2026 * Return: %0 in case of success and negative errno in case of failure
2027 */
2028int tb_switch_configure(struct tb_switch *sw)
2029{
2030	struct tb *tb = sw->tb;
2031	u64 route;
2032	int ret;
2033
2034	route = tb_route(sw);
2035
2036	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
2037	       sw->config.enabled ? "restoring" : "initializing", route,
2038	       tb_route_length(route), sw->config.upstream_port_number);
2039
2040	sw->config.enabled = 1;
2041
2042	if (tb_switch_is_usb4(sw)) {
2043		/*
2044		 * For USB4 devices, we need to program the CM version
2045		 * accordingly so that it knows to expose all the
2046		 * additional capabilities.
2047		 */
2048		sw->config.cmuv = USB4_VERSION_1_0;
2049		sw->config.plug_events_delay = 0xa;
2050
2051		/* Enumerate the switch */
2052		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2053				  ROUTER_CS_1, 4);
2054		if (ret)
2055			return ret;
2056
2057		ret = usb4_switch_setup(sw);
2058	} else {
2059		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2060			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2061				   sw->config.vendor_id);
2062
2063		if (!sw->cap_plug_events) {
2064			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2065			return -ENODEV;
2066		}
2067
2068		/* Enumerate the switch */
2069		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2070				  ROUTER_CS_1, 3);
2071	}
2072	if (ret)
2073		return ret;
2074
2075	return tb_plug_events_active(sw, true);
2076}
2077
2078static int tb_switch_set_uuid(struct tb_switch *sw)
2079{
2080	bool uid = false;
2081	u32 uuid[4];
2082	int ret;
2083
2084	if (sw->uuid)
2085		return 0;
2086
2087	if (tb_switch_is_usb4(sw)) {
2088		ret = usb4_switch_read_uid(sw, &sw->uid);
2089		if (ret)
2090			return ret;
2091		uid = true;
2092	} else {
2093		/*
2094		 * The newer controllers include fused UUID as part of
2095		 * link controller specific registers
2096		 */
2097		ret = tb_lc_read_uuid(sw, uuid);
2098		if (ret) {
2099			if (ret != -EINVAL)
2100				return ret;
2101			uid = true;
2102		}
2103	}
2104
2105	if (uid) {
2106		/*
2107		 * ICM generates UUID based on UID and fills the upper
2108		 * two words with ones. This is not strictly following
2109		 * UUID format but we want to be compatible with it so
2110		 * we do the same here.
2111		 */
2112		uuid[0] = sw->uid & 0xffffffff;
2113		uuid[1] = (sw->uid >> 32) & 0xffffffff;
2114		uuid[2] = 0xffffffff;
2115		uuid[3] = 0xffffffff;
2116	}
2117
2118	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2119	if (!sw->uuid)
2120		return -ENOMEM;
2121	return 0;
2122}
2123
2124static int tb_switch_add_dma_port(struct tb_switch *sw)
2125{
2126	u32 status;
2127	int ret;
2128
2129	switch (sw->generation) {
2130	case 2:
2131		/* Only root switch can be upgraded */
2132		if (tb_route(sw))
2133			return 0;
2134
2135		fallthrough;
2136	case 3:
2137		ret = tb_switch_set_uuid(sw);
2138		if (ret)
2139			return ret;
2140		break;
2141
2142	default:
2143		/*
2144		 * DMA port is the only thing available when the switch
2145		 * is in safe mode.
2146		 */
2147		if (!sw->safe_mode)
2148			return 0;
2149		break;
2150	}
2151
2152	/* Root switch DMA port requires running firmware */
2153	if (!tb_route(sw) && !tb_switch_is_icm(sw))
2154		return 0;
2155
2156	sw->dma_port = dma_port_alloc(sw);
2157	if (!sw->dma_port)
2158		return 0;
2159
2160	if (sw->no_nvm_upgrade)
2161		return 0;
2162
2163	/*
2164	 * If there is status already set then authentication failed
2165	 * when the dma_port_flash_update_auth() returned. Power cycling
2166	 * is not needed (it was done already) so only thing we do here
2167	 * is to unblock runtime PM of the root port.
2168	 */
2169	nvm_get_auth_status(sw, &status);
2170	if (status) {
2171		if (!tb_route(sw))
2172			nvm_authenticate_complete_dma_port(sw);
2173		return 0;
2174	}
2175
2176	/*
2177	 * Check status of the previous flash authentication. If there
2178	 * is one we need to power cycle the switch in any case to make
2179	 * it functional again.
2180	 */
2181	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2182	if (ret <= 0)
2183		return ret;
2184
2185	/* Now we can allow root port to suspend again */
2186	if (!tb_route(sw))
2187		nvm_authenticate_complete_dma_port(sw);
2188
2189	if (status) {
2190		tb_sw_info(sw, "switch flash authentication failed\n");
2191		nvm_set_auth_status(sw, status);
2192	}
2193
2194	tb_sw_info(sw, "power cycling the switch now\n");
2195	dma_port_power_cycle(sw->dma_port);
2196
2197	/*
2198	 * We return error here which causes the switch adding failure.
2199	 * It should appear back after power cycle is complete.
2200	 */
2201	return -ESHUTDOWN;
2202}
2203
2204static void tb_switch_default_link_ports(struct tb_switch *sw)
2205{
2206	int i;
2207
2208	for (i = 1; i <= sw->config.max_port_number; i++) {
2209		struct tb_port *port = &sw->ports[i];
2210		struct tb_port *subordinate;
2211
2212		if (!tb_port_is_null(port))
2213			continue;
2214
2215		/* Check for the subordinate port */
2216		if (i == sw->config.max_port_number ||
2217		    !tb_port_is_null(&sw->ports[i + 1]))
2218			continue;
2219
2220		/* Link them if not already done so (by DROM) */
2221		subordinate = &sw->ports[i + 1];
2222		if (!port->dual_link_port && !subordinate->dual_link_port) {
2223			port->link_nr = 0;
2224			port->dual_link_port = subordinate;
2225			subordinate->link_nr = 1;
2226			subordinate->dual_link_port = port;
2227
2228			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
2229				  port->port, subordinate->port);
2230		}
2231	}
2232}
2233
2234static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
2235{
2236	const struct tb_port *up = tb_upstream_port(sw);
2237
2238	if (!up->dual_link_port || !up->dual_link_port->remote)
2239		return false;
2240
2241	if (tb_switch_is_usb4(sw))
2242		return usb4_switch_lane_bonding_possible(sw);
2243	return tb_lc_lane_bonding_possible(sw);
2244}
2245
2246static int tb_switch_update_link_attributes(struct tb_switch *sw)
2247{
2248	struct tb_port *up;
2249	bool change = false;
2250	int ret;
2251
2252	if (!tb_route(sw) || tb_switch_is_icm(sw))
2253		return 0;
2254
2255	up = tb_upstream_port(sw);
2256
2257	ret = tb_port_get_link_speed(up);
2258	if (ret < 0)
2259		return ret;
2260	if (sw->link_speed != ret)
2261		change = true;
2262	sw->link_speed = ret;
2263
2264	ret = tb_port_get_link_width(up);
2265	if (ret < 0)
2266		return ret;
2267	if (sw->link_width != ret)
2268		change = true;
2269	sw->link_width = ret;
2270
2271	/* Notify userspace that there is possible link attribute change */
2272	if (device_is_registered(&sw->dev) && change)
2273		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2274
2275	return 0;
2276}
2277
2278/**
2279 * tb_switch_lane_bonding_enable() - Enable lane bonding
2280 * @sw: Switch to enable lane bonding
2281 *
2282 * Connection manager can call this function to enable lane bonding of a
2283 * switch. If conditions are correct and both switches support the feature,
2284 * lanes are bonded. It is safe to call this to any switch.
2285 */
2286int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2287{
2288	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2289	struct tb_port *up, *down;
2290	u64 route = tb_route(sw);
2291	int ret;
2292
2293	if (!route)
2294		return 0;
2295
2296	if (!tb_switch_lane_bonding_possible(sw))
2297		return 0;
2298
2299	up = tb_upstream_port(sw);
2300	down = tb_port_at(route, parent);
2301
2302	if (!tb_port_is_width_supported(up, 2) ||
2303	    !tb_port_is_width_supported(down, 2))
2304		return 0;
2305
2306	/*
2307	 * Both lanes need to be in CL0. Here we assume lane 0 already be in
2308	 * CL0 and check just for lane 1.
2309	 */
2310	if (tb_wait_for_port(down->dual_link_port, false) <= 0)
2311		return -ENOTCONN;
2312
2313	ret = tb_port_lane_bonding_enable(up);
2314	if (ret) {
2315		tb_port_warn(up, "failed to enable lane bonding\n");
2316		return ret;
2317	}
2318
2319	ret = tb_port_lane_bonding_enable(down);
2320	if (ret) {
2321		tb_port_warn(down, "failed to enable lane bonding\n");
2322		tb_port_lane_bonding_disable(up);
2323		return ret;
2324	}
2325
2326	tb_switch_update_link_attributes(sw);
2327
2328	tb_sw_dbg(sw, "lane bonding enabled\n");
2329	return ret;
2330}
2331
2332/**
2333 * tb_switch_lane_bonding_disable() - Disable lane bonding
2334 * @sw: Switch whose lane bonding to disable
2335 *
2336 * Disables lane bonding between @sw and parent. This can be called even
2337 * if lanes were not bonded originally.
2338 */
2339void tb_switch_lane_bonding_disable(struct tb_switch *sw)
2340{
2341	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2342	struct tb_port *up, *down;
2343
2344	if (!tb_route(sw))
2345		return;
2346
2347	up = tb_upstream_port(sw);
2348	if (!up->bonded)
2349		return;
2350
2351	down = tb_port_at(tb_route(sw), parent);
2352
2353	tb_port_lane_bonding_disable(up);
2354	tb_port_lane_bonding_disable(down);
2355
2356	tb_switch_update_link_attributes(sw);
2357	tb_sw_dbg(sw, "lane bonding disabled\n");
2358}
2359
2360/**
2361 * tb_switch_configure_link() - Set link configured
2362 * @sw: Switch whose link is configured
2363 *
2364 * Sets the link upstream from @sw configured (from both ends) so that
2365 * it will not be disconnected when the domain exits sleep. Can be
2366 * called for any switch.
2367 *
2368 * It is recommended that this is called after lane bonding is enabled.
2369 *
2370 * Returns %0 on success and negative errno in case of error.
2371 */
2372int tb_switch_configure_link(struct tb_switch *sw)
2373{
2374	struct tb_port *up, *down;
2375	int ret;
2376
2377	if (!tb_route(sw) || tb_switch_is_icm(sw))
2378		return 0;
2379
2380	up = tb_upstream_port(sw);
2381	if (tb_switch_is_usb4(up->sw))
2382		ret = usb4_port_configure(up);
2383	else
2384		ret = tb_lc_configure_port(up);
2385	if (ret)
2386		return ret;
2387
2388	down = up->remote;
2389	if (tb_switch_is_usb4(down->sw))
2390		return usb4_port_configure(down);
2391	return tb_lc_configure_port(down);
2392}
2393
2394/**
2395 * tb_switch_unconfigure_link() - Unconfigure link
2396 * @sw: Switch whose link is unconfigured
2397 *
2398 * Sets the link unconfigured so the @sw will be disconnected if the
2399 * domain exists sleep.
2400 */
2401void tb_switch_unconfigure_link(struct tb_switch *sw)
2402{
2403	struct tb_port *up, *down;
2404
2405	if (sw->is_unplugged)
2406		return;
2407	if (!tb_route(sw) || tb_switch_is_icm(sw))
2408		return;
2409
2410	up = tb_upstream_port(sw);
2411	if (tb_switch_is_usb4(up->sw))
2412		usb4_port_unconfigure(up);
2413	else
2414		tb_lc_unconfigure_port(up);
2415
2416	down = up->remote;
2417	if (tb_switch_is_usb4(down->sw))
2418		usb4_port_unconfigure(down);
2419	else
2420		tb_lc_unconfigure_port(down);
2421}
2422
2423static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
2424{
2425	struct tb_port *port;
2426
2427	if (tb_switch_is_icm(sw))
2428		return 0;
2429
2430	tb_switch_for_each_port(sw, port) {
2431		int res;
2432
2433		if (!port->cap_usb4)
2434			continue;
2435
2436		res = usb4_port_hotplug_enable(port);
2437		if (res)
2438			return res;
2439	}
2440	return 0;
2441}
2442
2443/**
2444 * tb_switch_add() - Add a switch to the domain
2445 * @sw: Switch to add
2446 *
2447 * This is the last step in adding switch to the domain. It will read
2448 * identification information from DROM and initializes ports so that
2449 * they can be used to connect other switches. The switch will be
2450 * exposed to the userspace when this function successfully returns. To
2451 * remove and release the switch, call tb_switch_remove().
2452 *
2453 * Return: %0 in case of success and negative errno in case of failure
2454 */
2455int tb_switch_add(struct tb_switch *sw)
2456{
2457	int i, ret;
2458
2459	/*
2460	 * Initialize DMA control port now before we read DROM. Recent
2461	 * host controllers have more complete DROM on NVM that includes
2462	 * vendor and model identification strings which we then expose
2463	 * to the userspace. NVM can be accessed through DMA
2464	 * configuration based mailbox.
2465	 */
2466	ret = tb_switch_add_dma_port(sw);
2467	if (ret) {
2468		dev_err(&sw->dev, "failed to add DMA port\n");
2469		return ret;
2470	}
2471
2472	if (!sw->safe_mode) {
2473		/* read drom */
2474		ret = tb_drom_read(sw);
2475		if (ret) {
2476			dev_err(&sw->dev, "reading DROM failed\n");
2477			return ret;
2478		}
2479		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
2480
2481		ret = tb_switch_set_uuid(sw);
2482		if (ret) {
2483			dev_err(&sw->dev, "failed to set UUID\n");
2484			return ret;
2485		}
2486
2487		for (i = 0; i <= sw->config.max_port_number; i++) {
2488			if (sw->ports[i].disabled) {
2489				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
2490				continue;
2491			}
2492			ret = tb_init_port(&sw->ports[i]);
2493			if (ret) {
2494				dev_err(&sw->dev, "failed to initialize port %d\n", i);
2495				return ret;
2496			}
2497		}
2498
2499		tb_switch_default_link_ports(sw);
2500
2501		ret = tb_switch_update_link_attributes(sw);
2502		if (ret)
2503			return ret;
2504
2505		ret = tb_switch_tmu_init(sw);
2506		if (ret)
2507			return ret;
2508	}
2509
2510	ret = tb_switch_port_hotplug_enable(sw);
2511	if (ret)
2512		return ret;
2513
2514	ret = device_add(&sw->dev);
2515	if (ret) {
2516		dev_err(&sw->dev, "failed to add device: %d\n", ret);
2517		return ret;
2518	}
2519
2520	if (tb_route(sw)) {
2521		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
2522			 sw->vendor, sw->device);
2523		if (sw->vendor_name && sw->device_name)
2524			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
2525				 sw->device_name);
2526	}
2527
2528	ret = tb_switch_nvm_add(sw);
2529	if (ret) {
2530		dev_err(&sw->dev, "failed to add NVM devices\n");
2531		device_del(&sw->dev);
2532		return ret;
2533	}
2534
2535	/*
2536	 * Thunderbolt routers do not generate wakeups themselves but
2537	 * they forward wakeups from tunneled protocols, so enable it
2538	 * here.
2539	 */
2540	device_init_wakeup(&sw->dev, true);
2541
2542	pm_runtime_set_active(&sw->dev);
2543	if (sw->rpm) {
2544		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
2545		pm_runtime_use_autosuspend(&sw->dev);
2546		pm_runtime_mark_last_busy(&sw->dev);
2547		pm_runtime_enable(&sw->dev);
2548		pm_request_autosuspend(&sw->dev);
2549	}
2550
2551	tb_switch_debugfs_init(sw);
2552	return 0;
2553}
2554
2555/**
2556 * tb_switch_remove() - Remove and release a switch
2557 * @sw: Switch to remove
2558 *
2559 * This will remove the switch from the domain and release it after last
2560 * reference count drops to zero. If there are switches connected below
2561 * this switch, they will be removed as well.
2562 */
2563void tb_switch_remove(struct tb_switch *sw)
2564{
2565	struct tb_port *port;
2566
2567	tb_switch_debugfs_remove(sw);
2568
2569	if (sw->rpm) {
2570		pm_runtime_get_sync(&sw->dev);
2571		pm_runtime_disable(&sw->dev);
2572	}
2573
2574	/* port 0 is the switch itself and never has a remote */
2575	tb_switch_for_each_port(sw, port) {
2576		if (tb_port_has_remote(port)) {
2577			tb_switch_remove(port->remote->sw);
2578			port->remote = NULL;
2579		} else if (port->xdomain) {
2580			tb_xdomain_remove(port->xdomain);
2581			port->xdomain = NULL;
2582		}
2583
2584		/* Remove any downstream retimers */
2585		tb_retimer_remove_all(port);
2586	}
2587
2588	if (!sw->is_unplugged)
2589		tb_plug_events_active(sw, false);
2590
2591	tb_switch_nvm_remove(sw);
2592
2593	if (tb_route(sw))
2594		dev_info(&sw->dev, "device disconnected\n");
2595	device_unregister(&sw->dev);
2596}
2597
2598/**
2599 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
2600 */
2601void tb_sw_set_unplugged(struct tb_switch *sw)
2602{
2603	struct tb_port *port;
2604
2605	if (sw == sw->tb->root_switch) {
2606		tb_sw_WARN(sw, "cannot unplug root switch\n");
2607		return;
2608	}
2609	if (sw->is_unplugged) {
2610		tb_sw_WARN(sw, "is_unplugged already set\n");
2611		return;
2612	}
2613	sw->is_unplugged = true;
2614	tb_switch_for_each_port(sw, port) {
2615		if (tb_port_has_remote(port))
2616			tb_sw_set_unplugged(port->remote->sw);
2617		else if (port->xdomain)
2618			port->xdomain->is_unplugged = true;
2619	}
2620}
2621
2622static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
2623{
2624	if (flags)
2625		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
2626	else
2627		tb_sw_dbg(sw, "disabling wakeup\n");
2628
2629	if (tb_switch_is_usb4(sw))
2630		return usb4_switch_set_wake(sw, flags);
2631	return tb_lc_set_wake(sw, flags);
2632}
2633
2634int tb_switch_resume(struct tb_switch *sw)
2635{
2636	struct tb_port *port;
2637	int err;
2638
2639	tb_sw_dbg(sw, "resuming switch\n");
2640
2641	/*
2642	 * Check for UID of the connected switches except for root
2643	 * switch which we assume cannot be removed.
2644	 */
2645	if (tb_route(sw)) {
2646		u64 uid;
2647
2648		/*
2649		 * Check first that we can still read the switch config
2650		 * space. It may be that there is now another domain
2651		 * connected.
2652		 */
2653		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
2654		if (err < 0) {
2655			tb_sw_info(sw, "switch not present anymore\n");
2656			return err;
2657		}
2658
2659		if (tb_switch_is_usb4(sw))
2660			err = usb4_switch_read_uid(sw, &uid);
2661		else
2662			err = tb_drom_read_uid_only(sw, &uid);
2663		if (err) {
2664			tb_sw_warn(sw, "uid read failed\n");
2665			return err;
2666		}
2667		if (sw->uid != uid) {
2668			tb_sw_info(sw,
2669				"changed while suspended (uid %#llx -> %#llx)\n",
2670				sw->uid, uid);
2671			return -ENODEV;
2672		}
2673	}
2674
2675	err = tb_switch_configure(sw);
2676	if (err)
2677		return err;
2678
2679	/* Disable wakes */
2680	tb_switch_set_wake(sw, 0);
2681
2682	err = tb_switch_tmu_init(sw);
2683	if (err)
2684		return err;
2685
2686	/* check for surviving downstream switches */
2687	tb_switch_for_each_port(sw, port) {
2688		if (!tb_port_has_remote(port) && !port->xdomain)
2689			continue;
2690
2691		if (tb_wait_for_port(port, true) <= 0) {
2692			tb_port_warn(port,
2693				     "lost during suspend, disconnecting\n");
2694			if (tb_port_has_remote(port))
2695				tb_sw_set_unplugged(port->remote->sw);
2696			else if (port->xdomain)
2697				port->xdomain->is_unplugged = true;
2698		} else if (tb_port_has_remote(port) || port->xdomain) {
2699			/*
2700			 * Always unlock the port so the downstream
2701			 * switch/domain is accessible.
2702			 */
2703			if (tb_port_unlock(port))
2704				tb_port_warn(port, "failed to unlock port\n");
2705			if (port->remote && tb_switch_resume(port->remote->sw)) {
2706				tb_port_warn(port,
2707					     "lost during suspend, disconnecting\n");
2708				tb_sw_set_unplugged(port->remote->sw);
2709			}
2710		}
2711	}
2712	return 0;
2713}
2714
2715/**
2716 * tb_switch_suspend() - Put a switch to sleep
2717 * @sw: Switch to suspend
2718 * @runtime: Is this runtime suspend or system sleep
2719 *
2720 * Suspends router and all its children. Enables wakes according to
2721 * value of @runtime and then sets sleep bit for the router. If @sw is
2722 * host router the domain is ready to go to sleep once this function
2723 * returns.
2724 */
2725void tb_switch_suspend(struct tb_switch *sw, bool runtime)
2726{
2727	unsigned int flags = 0;
2728	struct tb_port *port;
2729	int err;
2730
2731	tb_sw_dbg(sw, "suspending switch\n");
2732
2733	err = tb_plug_events_active(sw, false);
2734	if (err)
2735		return;
2736
2737	tb_switch_for_each_port(sw, port) {
2738		if (tb_port_has_remote(port))
2739			tb_switch_suspend(port->remote->sw, runtime);
2740	}
2741
2742	if (runtime) {
2743		/* Trigger wake when something is plugged in/out */
2744		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
2745		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
2746	} else if (device_may_wakeup(&sw->dev)) {
2747		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
2748	}
2749
2750	tb_switch_set_wake(sw, flags);
2751
2752	if (tb_switch_is_usb4(sw))
2753		usb4_switch_set_sleep(sw);
2754	else
2755		tb_lc_set_sleep(sw);
2756}
2757
2758/**
2759 * tb_switch_query_dp_resource() - Query availability of DP resource
2760 * @sw: Switch whose DP resource is queried
2761 * @in: DP IN port
2762 *
2763 * Queries availability of DP resource for DP tunneling using switch
2764 * specific means. Returns %true if resource is available.
2765 */
2766bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
2767{
2768	if (tb_switch_is_usb4(sw))
2769		return usb4_switch_query_dp_resource(sw, in);
2770	return tb_lc_dp_sink_query(sw, in);
2771}
2772
2773/**
2774 * tb_switch_alloc_dp_resource() - Allocate available DP resource
2775 * @sw: Switch whose DP resource is allocated
2776 * @in: DP IN port
2777 *
2778 * Allocates DP resource for DP tunneling. The resource must be
2779 * available for this to succeed (see tb_switch_query_dp_resource()).
2780 * Returns %0 in success and negative errno otherwise.
2781 */
2782int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
2783{
2784	if (tb_switch_is_usb4(sw))
2785		return usb4_switch_alloc_dp_resource(sw, in);
2786	return tb_lc_dp_sink_alloc(sw, in);
2787}
2788
2789/**
2790 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
2791 * @sw: Switch whose DP resource is de-allocated
2792 * @in: DP IN port
2793 *
2794 * De-allocates DP resource that was previously allocated for DP
2795 * tunneling.
2796 */
2797void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
2798{
2799	int ret;
2800
2801	if (tb_switch_is_usb4(sw))
2802		ret = usb4_switch_dealloc_dp_resource(sw, in);
2803	else
2804		ret = tb_lc_dp_sink_dealloc(sw, in);
2805
2806	if (ret)
2807		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
2808			   in->port);
2809}
2810
2811struct tb_sw_lookup {
2812	struct tb *tb;
2813	u8 link;
2814	u8 depth;
2815	const uuid_t *uuid;
2816	u64 route;
2817};
2818
2819static int tb_switch_match(struct device *dev, const void *data)
2820{
2821	struct tb_switch *sw = tb_to_switch(dev);
2822	const struct tb_sw_lookup *lookup = data;
2823
2824	if (!sw)
2825		return 0;
2826	if (sw->tb != lookup->tb)
2827		return 0;
2828
2829	if (lookup->uuid)
2830		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
2831
2832	if (lookup->route) {
2833		return sw->config.route_lo == lower_32_bits(lookup->route) &&
2834		       sw->config.route_hi == upper_32_bits(lookup->route);
2835	}
2836
2837	/* Root switch is matched only by depth */
2838	if (!lookup->depth)
2839		return !sw->depth;
2840
2841	return sw->link == lookup->link && sw->depth == lookup->depth;
2842}
2843
2844/**
2845 * tb_switch_find_by_link_depth() - Find switch by link and depth
2846 * @tb: Domain the switch belongs
2847 * @link: Link number the switch is connected
2848 * @depth: Depth of the switch in link
2849 *
2850 * Returned switch has reference count increased so the caller needs to
2851 * call tb_switch_put() when done with the switch.
2852 */
2853struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
2854{
2855	struct tb_sw_lookup lookup;
2856	struct device *dev;
2857
2858	memset(&lookup, 0, sizeof(lookup));
2859	lookup.tb = tb;
2860	lookup.link = link;
2861	lookup.depth = depth;
2862
2863	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
2864	if (dev)
2865		return tb_to_switch(dev);
2866
2867	return NULL;
2868}
2869
2870/**
2871 * tb_switch_find_by_uuid() - Find switch by UUID
2872 * @tb: Domain the switch belongs
2873 * @uuid: UUID to look for
2874 *
2875 * Returned switch has reference count increased so the caller needs to
2876 * call tb_switch_put() when done with the switch.
2877 */
2878struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
2879{
2880	struct tb_sw_lookup lookup;
2881	struct device *dev;
2882
2883	memset(&lookup, 0, sizeof(lookup));
2884	lookup.tb = tb;
2885	lookup.uuid = uuid;
2886
2887	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
2888	if (dev)
2889		return tb_to_switch(dev);
2890
2891	return NULL;
2892}
2893
2894/**
2895 * tb_switch_find_by_route() - Find switch by route string
2896 * @tb: Domain the switch belongs
2897 * @route: Route string to look for
2898 *
2899 * Returned switch has reference count increased so the caller needs to
2900 * call tb_switch_put() when done with the switch.
2901 */
2902struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
2903{
2904	struct tb_sw_lookup lookup;
2905	struct device *dev;
2906
2907	if (!route)
2908		return tb_switch_get(tb->root_switch);
2909
2910	memset(&lookup, 0, sizeof(lookup));
2911	lookup.tb = tb;
2912	lookup.route = route;
2913
2914	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
2915	if (dev)
2916		return tb_to_switch(dev);
2917
2918	return NULL;
2919}
2920
2921/**
2922 * tb_switch_find_port() - return the first port of @type on @sw or NULL
2923 * @sw: Switch to find the port from
2924 * @type: Port type to look for
2925 */
2926struct tb_port *tb_switch_find_port(struct tb_switch *sw,
2927				    enum tb_port_type type)
2928{
2929	struct tb_port *port;
2930
2931	tb_switch_for_each_port(sw, port) {
2932		if (port->config.type == type)
2933			return port;
2934	}
2935
2936	return NULL;
2937}
2938