// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt bus support
 *
 * Copyright (C) 2017, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/dmar.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>

#include "tb.h"

static DEFINE_IDA(tb_domain_ida);

static bool match_service_id(const struct tb_service_id *id,
			     const struct tb_service *svc)
{
	if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
		if (strcmp(id->protocol_key, svc->key))
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
		if (id->protocol_id != svc->prtcid)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
		if (id->protocol_version != svc->prtcvers)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
		if (id->protocol_revision != svc->prtcrevs)
			return false;
	}

	return true;
}

static const struct tb_service_id *__tb_service_match(struct device *dev,
						      struct device_driver *drv)
{
	struct tb_service_driver *driver;
	const struct tb_service_id *ids;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return NULL;

	driver = container_of(drv, struct tb_service_driver, driver);
	if (!driver->id_table)
		return NULL;

	for (ids = driver->id_table; ids->match_flags != 0; ids++) {
		if (match_service_id(ids, svc))
			return ids;
	}

	return NULL;
}

static int tb_service_match(struct device *dev, struct device_driver *drv)
{
	return !!__tb_service_match(dev, drv);
}
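
/*
 * Service drivers bind through their id_table. A minimal sketch of a
 * driver using this matching (the "network" key, the protocol ID and
 * the my_* names are illustrative, not part of this file):
 *
 *	static const struct tb_service_id my_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, my_ids);
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver.name = "my-service",
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_ids,
 *	};
 *
 * registered with tb_register_service_driver(&my_driver). TB_SERVICE()
 * sets TBSVC_MATCH_PROTOCOL_KEY and TBSVC_MATCH_PROTOCOL_ID, so
 * match_service_id() above compares both the protocol key and the ID.
 */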

static int tb_service_probe(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;
	const struct tb_service_id *id;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	id = __tb_service_match(dev, &driver->driver);

	return driver->probe(svc, id);
}

static int tb_service_remove(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->remove)
		driver->remove(svc);

	return 0;
}

static void tb_service_shutdown(struct device *dev)
{
	struct tb_service_driver *driver;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc || !dev->driver)
		return;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->shutdown)
		driver->shutdown(svc);
}

static const char * const tb_security_names[] = {
	[TB_SECURITY_NONE] = "none",
	[TB_SECURITY_USER] = "user",
	[TB_SECURITY_SECURE] = "secure",
	[TB_SECURITY_DPONLY] = "dponly",
	[TB_SECURITY_USBONLY] = "usbonly",
};

static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	uuid_t *uuids;
	ssize_t ret;
	int i;

	uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!uuids)
		return -ENOMEM;

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}
	ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
	if (ret) {
		mutex_unlock(&tb->lock);
		goto out;
	}
	mutex_unlock(&tb->lock);

	for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
		if (!uuid_is_null(&uuids[i]))
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
					&uuids[i]);

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s",
			       i < tb->nboot_acl - 1 ? "," : "\n");
	}

out:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
	kfree(uuids);

	return ret;
}

static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	char *str, *s, *uuid_str;
	ssize_t ret = 0;
	uuid_t *acl;
	int i = 0;

	/*
	 * Make sure the value is not bigger than tb->nboot_acl * UUID
	 * length + commas and optional "\n". Also the smallest allowable
	 * string is tb->nboot_acl * ",".
	 */
	if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
		return -EINVAL;
	if (count < tb->nboot_acl - 1)
		return -EINVAL;

	str = kstrdup(buf, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!acl) {
		ret = -ENOMEM;
		goto err_free_str;
	}

	uuid_str = strim(str);
	while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
		size_t len = strlen(s);

		if (len) {
			if (len != UUID_STRING_LEN) {
				ret = -EINVAL;
				goto err_free_acl;
			}
			ret = uuid_parse(s, &acl[i]);
			if (ret)
				goto err_free_acl;
		}

		i++;
	}

	if (s || i < tb->nboot_acl) {
		ret = -EINVAL;
		goto err_free_acl;
	}

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto err_rpm_put;
	}
	ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
	if (!ret) {
		/* Notify userspace about the change */
		kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
	}
	mutex_unlock(&tb->lock);

err_rpm_put:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
	kfree(acl);
err_free_str:
	kfree(str);

	return ret ?: count;
}
static DEVICE_ATTR_RW(boot_acl);
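
/*
 * Illustrative userspace usage (the domain number and UUID are examples
 * only): boot_acl holds tb->nboot_acl comma-separated slots, each either
 * empty or one full 36-character UUID string. On a controller exposing
 * four slots,
 *
 *	# echo "01234567-89ab-cdef-0123-456789abcdef,,," \
 *		> /sys/bus/thunderbolt/devices/domain0/boot_acl
 *
 * puts one UUID in the first slot and clears the remaining three; the
 * number of fields written must match the number of slots exactly.
 */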

static ssize_t iommu_dma_protection_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	/*
	 * Kernel DMA protection is a feature where Thunderbolt security is
	 * handled natively using IOMMU. It is enabled when IOMMU is
	 * enabled and ACPI DMAR table has DMAR_PLATFORM_OPT_IN set.
	 */
	return sprintf(buf, "%d\n",
		       iommu_present(&pci_bus_type) && dmar_platform_optin());
}
static DEVICE_ATTR_RO(iommu_dma_protection);

static ssize_t security_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	const char *name = "unknown";

	if (tb->security_level < ARRAY_SIZE(tb_security_names))
		name = tb_security_names[tb->security_level];

	return sprintf(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);

static struct attribute *domain_attrs[] = {
	&dev_attr_boot_acl.attr,
	&dev_attr_iommu_dma_protection.attr,
	&dev_attr_security.attr,
	NULL,
};

static umode_t domain_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb *tb = container_of(dev, struct tb, dev);

	if (attr == &dev_attr_boot_acl.attr) {
		if (tb->nboot_acl &&
		    tb->cm_ops->get_boot_acl &&
		    tb->cm_ops->set_boot_acl)
			return attr->mode;
		return 0;
	}

	return attr->mode;
}

static struct attribute_group domain_attr_group = {
	.is_visible = domain_attr_is_visible,
	.attrs = domain_attrs,
};

static const struct attribute_group *domain_attr_groups[] = {
	&domain_attr_group,
	NULL,
};

struct bus_type tb_bus_type = {
	.name = "thunderbolt",
	.match = tb_service_match,
	.probe = tb_service_probe,
	.remove = tb_service_remove,
	.shutdown = tb_service_shutdown,
};

static void tb_domain_release(struct device *dev)
{
	struct tb *tb = container_of(dev, struct tb, dev);

	tb_ctl_free(tb->ctl);
	destroy_workqueue(tb->wq);
	ida_simple_remove(&tb_domain_ida, tb->index);
	mutex_destroy(&tb->lock);
	kfree(tb);
}

struct device_type tb_domain_type = {
	.name = "thunderbolt_domain",
	.release = tb_domain_release,
};

/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
{
	struct tb *tb;

	/*
	 * Make sure the structure sizes match what the hardware expects
	 * because bit-fields are being used.
	 */
	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
	if (!tb)
		return NULL;

	tb->nhi = nhi;
	mutex_init(&tb->lock);

	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
	if (tb->index < 0)
		goto err_free;

	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
	if (!tb->wq)
		goto err_remove_ida;

	tb->dev.parent = &nhi->pdev->dev;
	tb->dev.bus = &tb_bus_type;
	tb->dev.type = &tb_domain_type;
	tb->dev.groups = domain_attr_groups;
	dev_set_name(&tb->dev, "domain%d", tb->index);
	device_initialize(&tb->dev);

	return tb;

err_remove_ida:
	ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
	kfree(tb);

	return NULL;
}
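
/*
 * A hedged sketch of how a connection manager is expected to use this
 * (struct my_cm and my_cm_ops are hypothetical placeholders):
 *
 *	struct tb *tb;
 *
 *	tb = tb_domain_alloc(nhi, sizeof(struct my_cm));
 *	if (!tb)
 *		return NULL;
 *
 *	tb->cm_ops = &my_cm_ops;
 *	if (tb_domain_add(tb)) {
 *		tb_domain_put(tb);
 *		return NULL;
 *	}
 *
 * On teardown a single tb_domain_remove() call both stops the domain
 * and drops the final reference so the release callback above runs.
 */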

static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	struct tb *tb = data;

	if (!tb->cm_ops->handle_event) {
		tb_warn(tb, "domain does not have event handler\n");
		return true;
	}

	switch (type) {
	case TB_CFG_PKG_XDOMAIN_REQ:
	case TB_CFG_PKG_XDOMAIN_RESP:
		return tb_xdomain_handle_request(tb, type, buf, size);

	default:
		tb->cm_ops->handle_event(tb, type, buf, size);
	}

	return true;
}

/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 *
 * Starts the domain and adds it to the system. Hotplugging devices
 * will work after this function returns successfully. In order to
 * remove and release the domain after this function has been called,
 * call tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb)
{
	int ret;

	if (WARN_ON(!tb->cm_ops))
		return -EINVAL;

	mutex_lock(&tb->lock);

	tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
	if (!tb->ctl) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	/*
	 * tb_schedule_hotplug_handler may be called as soon as the config
	 * channel is started. That's why we have to hold the lock here.
	 */
	tb_ctl_start(tb->ctl);

	if (tb->cm_ops->driver_ready) {
		ret = tb->cm_ops->driver_ready(tb);
		if (ret)
			goto err_ctl_stop;
	}

	ret = device_add(&tb->dev);
	if (ret)
		goto err_ctl_stop;

	/* Start the domain */
	if (tb->cm_ops->start) {
		ret = tb->cm_ops->start(tb);
		if (ret)
			goto err_domain_del;
	}

	/* This starts event processing */
	mutex_unlock(&tb->lock);

	device_init_wakeup(&tb->dev, true);

	pm_runtime_no_callbacks(&tb->dev);
	pm_runtime_set_active(&tb->dev);
	pm_runtime_enable(&tb->dev);
	pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_use_autosuspend(&tb->dev);

	return 0;

err_domain_del:
	device_del(&tb->dev);
err_ctl_stop:
	tb_ctl_stop(tb->ctl);
err_unlock:
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
	mutex_lock(&tb->lock);
	if (tb->cm_ops->stop)
		tb->cm_ops->stop(tb);
	/* Stop the domain control traffic */
	tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	flush_workqueue(tb->wq);
	device_unregister(&tb->dev);
}

/**
 * tb_domain_suspend_noirq() - Suspend a domain
 * @tb: Domain to suspend
 *
 * Suspends all devices in the domain and stops the control channel.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_suspend_noirq(struct tb *tb)
{
	int ret = 0;

	/*
	 * The control channel interrupt is left enabled during suspend
	 * and taking the lock here prevents any events happening before
	 * we actually have stopped the domain and the control channel.
	 */
	mutex_lock(&tb->lock);
	if (tb->cm_ops->suspend_noirq)
		ret = tb->cm_ops->suspend_noirq(tb);
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Restarts the control channel and resumes all devices connected to
 * the domain.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_resume_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->resume_noirq)
		ret = tb->cm_ops->resume_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}

int tb_domain_suspend(struct tb *tb)
{
	return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
}

int tb_domain_freeze_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	if (tb->cm_ops->freeze_noirq)
		ret = tb->cm_ops->freeze_noirq(tb);
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

int tb_domain_thaw_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->thaw_noirq)
		ret = tb->cm_ops->thaw_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}

void tb_domain_complete(struct tb *tb)
{
	if (tb->cm_ops->complete)
		tb->cm_ops->complete(tb);
}

int tb_domain_runtime_suspend(struct tb *tb)
{
	if (tb->cm_ops->runtime_suspend) {
		int ret = tb->cm_ops->runtime_suspend(tb);
		if (ret)
			return ret;
	}
	tb_ctl_stop(tb->ctl);
	return 0;
}

int tb_domain_runtime_resume(struct tb *tb)
{
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->runtime_resume) {
		int ret = tb->cm_ops->runtime_resume(tb);
		if (ret)
			return ret;
	}
	return 0;
}

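/*
 * The suspend/resume helpers above are intended to be called from the
 * NHI driver's dev_pm_ops. A simplified sketch (the nhi_* wrappers are
 * illustrative; the real driver does additional work):
 *
 *	static int nhi_suspend_noirq(struct device *dev)
 *	{
 *		struct tb *tb = pci_get_drvdata(to_pci_dev(dev));
 *
 *		return tb_domain_suspend_noirq(tb);
 *	}
 *
 *	static const struct dev_pm_ops nhi_pm_ops = {
 *		.suspend_noirq = nhi_suspend_noirq,
 *		.resume_noirq = nhi_resume_noirq,
 *		.runtime_suspend = nhi_runtime_suspend,
 *		.runtime_resume = nhi_runtime_resume,
 *	};
 *
 * Note that the runtime helpers stop and start the control channel
 * around the cm_ops calls so it is never running while the NHI is
 * runtime suspended.
 */
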
/**
 * tb_domain_approve_switch() - Approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * This will approve the switch by connection manager specific means.
 * In case of success the connection manager will create tunnels for
 * all supported protocols.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	if (!tb->cm_ops->approve_switch)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_approve_switch_key() - Approve switch and add key
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function first adds
 * the key to the switch NVM using connection manager specific means.
 * If adding the key is successful, the switch is approved and
 * connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	ret = tb->cm_ops->add_switch_key(tb, sw);
	if (ret)
		return ret;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates a
 * random challenge and sends it to the switch. The switch responds to
 * this and if the response matches our random challenge, the switch is
 * approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
	u8 challenge[TB_SWITCH_KEY_SIZE];
	u8 response[TB_SWITCH_KEY_SIZE];
	u8 hmac[TB_SWITCH_KEY_SIZE];
	struct tb_switch *parent_sw;
	struct crypto_shash *tfm;
	struct shash_desc *shash;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	get_random_bytes(challenge, sizeof(challenge));
	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
	if (ret)
		return ret;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
	if (ret)
		goto err_free_tfm;

	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!shash) {
		ret = -ENOMEM;
		goto err_free_tfm;
	}

	shash->tfm = tfm;

	memset(hmac, 0, sizeof(hmac));
	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
	if (ret)
		goto err_free_shash;

	/* The returned HMAC must match the one we calculated */
	if (memcmp(response, hmac, sizeof(hmac))) {
		ret = -EKEYREJECTED;
		goto err_free_shash;
	}

	crypto_free_shash(tfm);
	kfree(shash);

	return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
	kfree(shash);
err_free_tfm:
	crypto_free_shash(tfm);

	return ret;
}
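
/*
 * For reference, this challenge flow is normally driven from userspace
 * through the switch's sysfs attributes (paths and the key value here
 * are examples; see Documentation/admin-guide/thunderbolt.rst):
 *
 *	# echo <32 bytes of key in hex> > /sys/bus/thunderbolt/devices/<dev>/key
 *	# echo 2 > /sys/bus/thunderbolt/devices/<dev>/authorized
 *
 * Writing 1 approves using the stored key (tb_domain_approve_switch_key()
 * above); writing 2 requests this challenge/response verification.
 */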

/**
 * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
 * @tb: Domain whose PCIe paths to disconnect
 *
 * This needs to be called in preparation for NVM upgrade of the host
 * controller. Makes sure all PCIe paths are disconnected.
 *
 * Return: %0 on success and negative errno in case of error.
 */
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
	if (!tb->cm_ops->disconnect_pcie_paths)
		return -EPERM;

	return tb->cm_ops->disconnect_pcie_paths(tb);
}

/**
 * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
 * @tb: Domain enabling the DMA paths
 * @xd: XDomain DMA paths are created to
 *
 * Calls connection manager specific method to enable DMA paths to the
 * XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!tb->cm_ops->approve_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->approve_xdomain_paths(tb, xd);
}

/**
 * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
 * @tb: Domain disabling the DMA paths
 * @xd: XDomain whose DMA paths are disconnected
 *
 * Calls connection manager specific method to disconnect DMA paths to
 * the XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!tb->cm_ops->disconnect_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
}

static int disconnect_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;
	struct tb *tb = data;
	int ret = 0;

	xd = tb_to_xdomain(dev);
	if (xd && xd->tb == tb)
		ret = tb_xdomain_disable_paths(xd);

	return ret;
}

/**
 * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
 * @tb: Domain whose paths are disconnected
 *
 * This function can be used to disconnect all paths (PCIe, XDomain) for
 * example in preparation for host NVM firmware upgrade. After this is
 * called the paths cannot be established without resetting the switch.
 *
 * Return: %0 in case of success and negative errno otherwise.
 */
int tb_domain_disconnect_all_paths(struct tb *tb)
{
	int ret;

	ret = tb_domain_disconnect_pcie_paths(tb);
	if (ret)
		return ret;

	return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
}
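
/*
 * As an illustration of where this is used (simplified; the actual NVM
 * upgrade flow lives elsewhere and has more error handling): the host
 * router NVM authentication path first calls
 *
 *	ret = tb_domain_disconnect_all_paths(tb);
 *	if (ret)
 *		return ret;
 *
 * and only then triggers the image authentication, which resets the
 * host router; the tunnels are re-created when it comes back up.
 */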

int tb_domain_init(void)
{
	int ret;

	tb_test_init();

	tb_debugfs_init();
	ret = tb_xdomain_init();
	if (ret)
		goto err_debugfs;
	ret = bus_register(&tb_bus_type);
	if (ret)
		goto err_xdomain;

	return 0;

err_xdomain:
	tb_xdomain_exit();
err_debugfs:
	tb_debugfs_exit();
	tb_test_exit();

	return ret;
}

void tb_domain_exit(void)
{
	bus_unregister(&tb_bus_type);
	ida_destroy(&tb_domain_ida);
	tb_nvm_exit();
	tb_xdomain_exit();
	tb_debugfs_exit();
	tb_test_exit();
}