Lines matching refs: tb (drivers/thunderbolt/domain.c)
19 #include "tb.h"
126 struct tb *tb = container_of(dev, struct tb, dev);
131 uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
135 pm_runtime_get_sync(&tb->dev);
137 if (mutex_lock_interruptible(&tb->lock)) {
141 ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
143 mutex_unlock(&tb->lock);
146 mutex_unlock(&tb->lock);
148 for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
154 i < tb->nboot_acl - 1 ? "," : "\n");
158 pm_runtime_mark_last_busy(&tb->dev);
159 pm_runtime_put_autosuspend(&tb->dev);
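The matches from file lines 126-159 all belong to one sysfs show handler for the boot_acl attribute. A minimal sketch of how they fit together, reconstructed around the matched lines; the out: label, the kfree() of the temporary buffer, and the scnprintf() formatting are assumptions, and the two matched unlock sites (143/146, error vs. success path) are collapsed into one here:

/* Sketch of the show-side pattern: wake the device, take the domain
 * lock interruptibly, query the connection manager, format the result. */
static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb *tb = container_of(dev, struct tb, dev);
        uuid_t *uuids;
        ssize_t ret;
        int i;

        uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
        if (!uuids)
                return -ENOMEM;

        /* Resume the domain before touching controller state. */
        pm_runtime_get_sync(&tb->dev);

        if (mutex_lock_interruptible(&tb->lock)) {
                ret = -ERESTARTSYS;
                goto out;
        }
        ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
        mutex_unlock(&tb->lock);
        if (ret)
                goto out;

        /* Emit a comma-separated UUID list; "%pUb" prints a uuid_t and
         * null (unused) slots stay empty. ret doubles as the offset. */
        for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
                if (!uuid_is_null(&uuids[i]))
                        ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
                                         &uuids[i]);
                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s",
                                 i < tb->nboot_acl - 1 ? "," : "\n");
        }

out:
        pm_runtime_mark_last_busy(&tb->dev);
        pm_runtime_put_autosuspend(&tb->dev);
        kfree(uuids);
        return ret;
}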
168 struct tb *tb = container_of(dev, struct tb, dev);
175 * Make sure the value is not bigger than tb->nboot_acl * UUID
176 * length + commas and optional "\n". Also the smallest allowable
177 * string is tb->nboot_acl * ",".
179 if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
181 if (count < tb->nboot_acl - 1)
188 acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
195 while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
211 if (s || i < tb->nboot_acl) {
216 pm_runtime_get_sync(&tb->dev);
218 if (mutex_lock_interruptible(&tb->lock)) {
222 ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
225 kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
227 mutex_unlock(&tb->lock);
230 pm_runtime_mark_last_busy(&tb->dev);
231 pm_runtime_put_autosuspend(&tb->dev);
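The store side first bounds the input: a UUID string is UUID_STRING_LEN (36) characters, each slot may be followed by a comma, and a trailing newline is allowed, giving the upper bound at line 179; the shortest valid input (all slots empty) is nboot_acl - 1 bare commas, the lower bound at line 181. The loop at 195-211 then walks a writable copy of the buffer with strsep(). A self-contained sketch of that loop as a helper; the helper name and the simplified per-token length handling are assumptions:

/* Parse a comma-separated UUID list in place. uuid_str must be writable
 * (e.g. a kstrdup() of the sysfs buffer) since strsep() modifies it.
 * Empty tokens leave the corresponding ACL slot as a null uuid_t. */
static int parse_boot_acl(char *uuid_str, uuid_t *acl, size_t nboot_acl)
{
        size_t i = 0;
        char *s;

        while ((s = strsep(&uuid_str, ",")) != NULL && i < nboot_acl) {
                if (*s) {
                        int ret = uuid_parse(s, &acl[i]);

                        if (ret)
                                return ret;
                }
                i++;
        }

        /* Leftover tokens (s != NULL) or too few entries are rejected,
         * mirroring the check matched at line 211. */
        if (s || i < nboot_acl)
                return -EINVAL;

        return 0;
}

On success the caller pushes the list to the connection manager under the lock (line 222) and emits KOBJ_CHANGE (line 225) so userspace knows to re-read the attribute.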
258 struct tb *tb = container_of(dev, struct tb, dev);
261 if (tb->security_level < ARRAY_SIZE(tb_security_names))
262 name = tb_security_names[tb->security_level];
279 struct tb *tb = container_of(dev, struct tb, dev);
282 if (tb->nboot_acl &&
283 tb->cm_ops->get_boot_acl &&
284 tb->cm_ops->set_boot_acl)
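Lines 282-284 belong to the domain's attribute-visibility callback: boot_acl only appears in sysfs when the domain has ACL slots and the connection manager implements both accessors. A sketch of that check; the callback shape follows the standard attribute_group is_visible convention and is assumed here:

static umode_t domain_attr_is_visible(struct kobject *kobj,
                                      struct attribute *attr, int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct tb *tb = container_of(dev, struct tb, dev);

        if (attr == &dev_attr_boot_acl.attr) {
                /* Hide boot_acl unless it can be both read and written. */
                if (tb->nboot_acl &&
                    tb->cm_ops->get_boot_acl &&
                    tb->cm_ops->set_boot_acl)
                        return attr->mode;
                return 0;
        }

        return attr->mode;
}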
312 struct tb *tb = container_of(dev, struct tb, dev);
314 tb_ctl_free(tb->ctl);
315 destroy_workqueue(tb->wq);
316 ida_simple_remove(&tb_domain_ida, tb->index);
317 mutex_destroy(&tb->lock);
318 kfree(tb);
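Lines 312-318 form the device release callback, invoked when the last reference to the domain is dropped; everything is freed in reverse order of allocation. The matched lines again, with descriptive comments added:

static void tb_domain_release(struct device *dev)
{
        struct tb *tb = container_of(dev, struct tb, dev);

        tb_ctl_free(tb->ctl);           /* control channel              */
        destroy_workqueue(tb->wq);      /* ordered domain workqueue     */
        ida_simple_remove(&tb_domain_ida, tb->index); /* domain number  */
        mutex_destroy(&tb->lock);
        kfree(tb);                      /* struct tb + CM private data  */
}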
340 struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
342 struct tb *tb;
352 tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
353 if (!tb)
356 tb->nhi = nhi;
357 mutex_init(&tb->lock);
359 tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
360 if (tb->index < 0)
363 tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
364 if (!tb->wq)
367 tb->dev.parent = &nhi->pdev->dev;
368 tb->dev.bus = &tb_bus_type;
369 tb->dev.type = &tb_domain_type;
370 tb->dev.groups = domain_attr_groups;
371 dev_set_name(&tb->dev, "domain%d", tb->index);
372 device_initialize(&tb->dev);
374 return tb;
377 ida_simple_remove(&tb_domain_ida, tb->index);
379 kfree(tb);
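tb_domain_alloc() makes one kzalloc() that covers struct tb plus privsize bytes of connection-manager private data (reachable via tb_priv()), then unwinds through goto labels on failure; the matches at 377/379 are that unwind. A sketch with the label names assumed:

struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
{
        struct tb *tb;

        tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
        if (!tb)
                return NULL;

        tb->nhi = nhi;
        mutex_init(&tb->lock);

        tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
        if (tb->index < 0)
                goto err_free;

        /* Ordered workqueue: domain work items run strictly one at a
         * time, in submission order. */
        tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
        if (!tb->wq)
                goto err_remove_ida;

        tb->dev.parent = &nhi->pdev->dev;
        tb->dev.bus = &tb_bus_type;
        tb->dev.type = &tb_domain_type;
        tb->dev.groups = domain_attr_groups;
        dev_set_name(&tb->dev, "domain%d", tb->index);
        device_initialize(&tb->dev);

        return tb;

err_remove_ida:
        ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
        kfree(tb);
        return NULL;
}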
387 struct tb *tb = data;
389 if (!tb->cm_ops->handle_event) {
390 tb_warn(tb, "domain does not have event handler\n");
397 return tb_xdomain_handle_request(tb, type, buf, size);
400 tb->cm_ops->handle_event(tb, type, buf, size);
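Lines 387-400 are the control-channel event callback: XDomain protocol packets are routed to tb_xdomain_handle_request(), everything else goes to the connection manager. A sketch; the exact case labels for the XDomain packet types are an assumption:

static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
                               const void *buf, size_t size)
{
        struct tb *tb = data;

        if (!tb->cm_ops->handle_event) {
                tb_warn(tb, "domain does not have event handler\n");
                return true;
        }

        switch (type) {
        case TB_CFG_PKG_XDOMAIN_REQ:
        case TB_CFG_PKG_XDOMAIN_RESP:
                /* XDomain (host-to-host) traffic bypasses the CM. */
                return tb_xdomain_handle_request(tb, type, buf, size);
        default:
                tb->cm_ops->handle_event(tb, type, buf, size);
        }

        return true;
}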
408 * @tb: Domain to add
417 int tb_domain_add(struct tb *tb)
421 if (WARN_ON(!tb->cm_ops))
424 mutex_lock(&tb->lock);
426 tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
427 if (!tb->ctl) {
436 tb_ctl_start(tb->ctl);
438 if (tb->cm_ops->driver_ready) {
439 ret = tb->cm_ops->driver_ready(tb);
444 ret = device_add(&tb->dev);
449 if (tb->cm_ops->start) {
450 ret = tb->cm_ops->start(tb);
456 mutex_unlock(&tb->lock);
458 device_init_wakeup(&tb->dev, true);
460 pm_runtime_no_callbacks(&tb->dev);
461 pm_runtime_set_active(&tb->dev);
462 pm_runtime_enable(&tb->dev);
463 pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
464 pm_runtime_mark_last_busy(&tb->dev);
465 pm_runtime_use_autosuspend(&tb->dev);
470 device_del(&tb->dev);
472 tb_ctl_stop(tb->ctl);
474 mutex_unlock(&tb->lock);
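tb_domain_add() brings the domain up in a fixed order, and the matched error-path lines (470/472) undo those steps in reverse. A condensed sketch with the error labels assumed:

int tb_domain_add(struct tb *tb)
{
        int ret;

        if (WARN_ON(!tb->cm_ops))
                return -EINVAL;

        mutex_lock(&tb->lock);

        /* 1. Control channel first: everything else talks through it. */
        tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
        if (!tb->ctl) {
                ret = -ENOMEM;
                goto err_unlock;
        }
        tb_ctl_start(tb->ctl);

        /* 2. Let the connection manager do its readiness handshake. */
        if (tb->cm_ops->driver_ready) {
                ret = tb->cm_ops->driver_ready(tb);
                if (ret)
                        goto err_ctl_stop;
        }

        /* 3. Make the domain visible in sysfs. */
        ret = device_add(&tb->dev);
        if (ret)
                goto err_ctl_stop;

        /* 4. Start the connection manager proper. */
        if (tb->cm_ops->start) {
                ret = tb->cm_ops->start(tb);
                if (ret)
                        goto err_domain_del;
        }

        mutex_unlock(&tb->lock);

        /* 5. Runtime PM last; callbacks are serviced by the parent NHI. */
        device_init_wakeup(&tb->dev, true);
        pm_runtime_no_callbacks(&tb->dev);
        pm_runtime_set_active(&tb->dev);
        pm_runtime_enable(&tb->dev);
        pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
        pm_runtime_mark_last_busy(&tb->dev);
        pm_runtime_use_autosuspend(&tb->dev);

        return 0;

err_domain_del:
        device_del(&tb->dev);
err_ctl_stop:
        tb_ctl_stop(tb->ctl);
err_unlock:
        mutex_unlock(&tb->lock);
        return ret;
}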
481 * @tb: Domain to remove
486 void tb_domain_remove(struct tb *tb)
488 mutex_lock(&tb->lock);
489 if (tb->cm_ops->stop)
490 tb->cm_ops->stop(tb);
492 tb_ctl_stop(tb->ctl);
493 mutex_unlock(&tb->lock);
495 flush_workqueue(tb->wq);
496 device_unregister(&tb->dev);
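Teardown mirrors bring-up: stop the connection manager, stop the control channel so no new events arrive, drain queued work, then unregister the device; the final reference drop invokes the release callback above. The matched lines with comments:

void tb_domain_remove(struct tb *tb)
{
        mutex_lock(&tb->lock);
        if (tb->cm_ops->stop)
                tb->cm_ops->stop(tb);
        /* No new events can be delivered past this point. */
        tb_ctl_stop(tb->ctl);
        mutex_unlock(&tb->lock);

        /* Finish any work already queued before the device goes away. */
        flush_workqueue(tb->wq);
        device_unregister(&tb->dev);
}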
501 * @tb: Domain to suspend
505 int tb_domain_suspend_noirq(struct tb *tb)
514 mutex_lock(&tb->lock);
515 if (tb->cm_ops->suspend_noirq)
516 ret = tb->cm_ops->suspend_noirq(tb);
518 tb_ctl_stop(tb->ctl);
519 mutex_unlock(&tb->lock);
526 * @tb: Domain to resume
531 int tb_domain_resume_noirq(struct tb *tb)
535 mutex_lock(&tb->lock);
536 tb_ctl_start(tb->ctl);
537 if (tb->cm_ops->resume_noirq)
538 ret = tb->cm_ops->resume_noirq(tb);
539 mutex_unlock(&tb->lock);
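The noirq hooks are a mirrored pair: the control channel is the last thing stopped on suspend and the first thing restarted on resume, with the domain lock held so events cannot race the transition. A sketch filling in the unmatched ret/return glue:

int tb_domain_suspend_noirq(struct tb *tb)
{
        int ret = 0;

        mutex_lock(&tb->lock);
        if (tb->cm_ops->suspend_noirq)
                ret = tb->cm_ops->suspend_noirq(tb);
        if (!ret)
                tb_ctl_stop(tb->ctl);
        mutex_unlock(&tb->lock);

        return ret;
}

int tb_domain_resume_noirq(struct tb *tb)
{
        int ret = 0;

        mutex_lock(&tb->lock);
        tb_ctl_start(tb->ctl);
        if (tb->cm_ops->resume_noirq)
                ret = tb->cm_ops->resume_noirq(tb);
        mutex_unlock(&tb->lock);

        return ret;
}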
544 int tb_domain_suspend(struct tb *tb)
546 return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
549 int tb_domain_freeze_noirq(struct tb *tb)
553 mutex_lock(&tb->lock);
554 if (tb->cm_ops->freeze_noirq)
555 ret = tb->cm_ops->freeze_noirq(tb);
557 tb_ctl_stop(tb->ctl);
558 mutex_unlock(&tb->lock);
563 int tb_domain_thaw_noirq(struct tb *tb)
567 mutex_lock(&tb->lock);
568 tb_ctl_start(tb->ctl);
569 if (tb->cm_ops->thaw_noirq)
570 ret = tb->cm_ops->thaw_noirq(tb);
571 mutex_unlock(&tb->lock);
576 void tb_domain_complete(struct tb *tb)
578 if (tb->cm_ops->complete)
579 tb->cm_ops->complete(tb);
582 int tb_domain_runtime_suspend(struct tb *tb)
584 if (tb->cm_ops->runtime_suspend) {
585 int ret = tb->cm_ops->runtime_suspend(tb);
589 tb_ctl_stop(tb->ctl);
593 int tb_domain_runtime_resume(struct tb *tb)
595 tb_ctl_start(tb->ctl);
596 if (tb->cm_ops->runtime_resume) {
597 int ret = tb->cm_ops->runtime_resume(tb);
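The runtime PM hooks follow the same mirroring, this time without the lock; the error-return glue between the matched lines is assumed:

int tb_domain_runtime_suspend(struct tb *tb)
{
        if (tb->cm_ops->runtime_suspend) {
                int ret = tb->cm_ops->runtime_suspend(tb);

                if (ret)
                        return ret;
        }
        /* Quiesce the control channel only once the CM agreed. */
        tb_ctl_stop(tb->ctl);
        return 0;
}

int tb_domain_runtime_resume(struct tb *tb)
{
        /* Mirror image: control channel first, then the CM hook. */
        tb_ctl_start(tb->ctl);
        if (tb->cm_ops->runtime_resume) {
                int ret = tb->cm_ops->runtime_resume(tb);

                if (ret)
                        return ret;
        }
        return 0;
}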
606 * @tb: Domain the switch belongs to
613 int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
617 if (!tb->cm_ops->approve_switch)
625 return tb->cm_ops->approve_switch(tb, sw);
630 * @tb: Domain the switch belongs to
639 int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
644 if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
652 ret = tb->cm_ops->add_switch_key(tb, sw);
656 return tb->cm_ops->approve_switch(tb, sw);
661 * @tb: Domain the switch belongs to
671 int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
681 if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
690 ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
725 return tb->cm_ops->approve_switch(tb, sw);
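Lines 671-725 implement challenge/response approval: a random challenge goes to the switch via challenge_switch_key (line 690), and the response must equal HMAC-SHA256 of the challenge keyed with the key previously stored through add_switch_key. A self-contained sketch of the verification step; the helper name and buffer handling are illustrative rather than verbatim source, and it needs <crypto/hash.h>:

static int verify_challenge_response(const u8 *key, size_t key_len,
                                     const u8 *challenge, size_t len,
                                     const u8 *response)
{
        struct crypto_shash *tfm;
        struct shash_desc *shash;
        u8 hmac[32];    /* SHA-256 digest size */
        int ret;

        tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_shash_setkey(tfm, key, key_len);
        if (ret)
                goto out_free_tfm;

        /* shash_desc carries per-request state; its size depends on tfm. */
        shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
                        GFP_KERNEL);
        if (!shash) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }
        shash->tfm = tfm;

        ret = crypto_shash_digest(shash, challenge, len, hmac);
        if (!ret && memcmp(response, hmac, sizeof(hmac)))
                ret = -EKEYREJECTED;    /* device failed the challenge */

        kfree(shash);
out_free_tfm:
        crypto_free_shash(tfm);
        return ret;
}

Only when the digests match does the code fall through to approve_switch (line 725), the same call used by the simpler approval paths above.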
737 * @tb: Domain whose PCIe paths to disconnect
744 int tb_domain_disconnect_pcie_paths(struct tb *tb)
746 if (!tb->cm_ops->disconnect_pcie_paths)
749 return tb->cm_ops->disconnect_pcie_paths(tb);
754 * @tb: Domain enabling the DMA paths
764 int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
766 if (!tb->cm_ops->approve_xdomain_paths)
769 return tb->cm_ops->approve_xdomain_paths(tb, xd);
774 * @tb: Domain disabling the DMA paths
784 int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
786 if (!tb->cm_ops->disconnect_xdomain_paths)
789 return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
795 struct tb *tb = data;
799 if (xd && xd->tb == tb)
807 * @tb: Domain whose paths are disconnected
815 int tb_domain_disconnect_all_paths(struct tb *tb)
819 ret = tb_domain_disconnect_pcie_paths(tb);
823 return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
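Lines 795-823 show the bus-iteration idiom: tb_domain_disconnect_all_paths() first tears down the PCIe tunnels, then walks every device on tb_bus_type with a per-device helper that acts only on XDomain connections belonging to this domain. A sketch of that helper; the tb_xdomain_disable_paths() call fills the unmatched line and is assumed:

static int disconnect_xdomain(struct device *dev, void *data)
{
        struct tb *tb = data;
        struct tb_xdomain *xd;
        int ret = 0;

        /* tb_to_xdomain() returns NULL for anything else on the bus
         * (switches, services), so those devices are skipped. */
        xd = tb_to_xdomain(dev);
        if (xd && xd->tb == tb)
                ret = tb_xdomain_disable_paths(xd);

        return ret;
}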