Lines matching refs: tb (drivers/thunderbolt/domain.c)
17 #include "tb.h"
123 struct tb *tb = container_of(dev, struct tb, dev);
128 uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
132 pm_runtime_get_sync(&tb->dev);
134 if (mutex_lock_interruptible(&tb->lock)) {
138 ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
140 mutex_unlock(&tb->lock);
143 mutex_unlock(&tb->lock);
145 for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
149 ret += sysfs_emit_at(buf, ret, "%s", i < tb->nboot_acl - 1 ? "," : "\n");
153 pm_runtime_mark_last_busy(&tb->dev);
154 pm_runtime_put_autosuspend(&tb->dev);
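boot_acl_show() above builds one comma-separated line by letting sysfs_emit_at() append at a running offset that it returns incrementally. A minimal userspace sketch of that offset-accumulation pattern, with NBOOT_ACL and the uuids array as illustrative stand-ins for the kernel's data:

```c
/*
 * Userspace sketch (not the kernel code) of the offset-accumulation
 * pattern boot_acl_show() uses with sysfs_emit_at(): each write appends
 * at the current offset and returns the number of bytes added.
 */
#include <stdio.h>

#define NBOOT_ACL 3	/* stands in for tb->nboot_acl */

int main(void)
{
	const char *uuids[NBOOT_ACL] = {
		"aabbccdd-0000-0000-0000-000000000000",
		"",	/* unused slot, printed as an empty field */
		"11223344-0000-0000-0000-000000000000",
	};
	char buf[4096];
	int ret = 0;

	for (int i = 0; i < NBOOT_ACL; i++) {
		ret += snprintf(buf + ret, sizeof(buf) - ret, "%s", uuids[i]);
		/* comma between entries, newline after the last one */
		ret += snprintf(buf + ret, sizeof(buf) - ret, "%s",
				i < NBOOT_ACL - 1 ? "," : "\n");
	}
	fputs(buf, stdout);
	return 0;
}
```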
163 struct tb *tb = container_of(dev, struct tb, dev);
170 * Make sure the value is not bigger than tb->nboot_acl * UUID
171 * length + commas and optional "\n". Also the smallest allowable
172 * string is tb->nboot_acl * ",".
174 if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
176 if (count < tb->nboot_acl - 1)
183 acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
190 while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
206 if (s || i < tb->nboot_acl) {
211 pm_runtime_get_sync(&tb->dev);
213 if (mutex_lock_interruptible(&tb->lock)) {
217 ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
220 kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
222 mutex_unlock(&tb->lock);
225 pm_runtime_mark_last_busy(&tb->dev);
226 pm_runtime_put_autosuspend(&tb->dev);
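The store path brackets the controller call with pm_runtime_get_sync()/pm_runtime_put_autosuspend() and splits the written string with strsep(). A userspace sketch of just the parsing loop, where NBOOT_ACL stands in for tb->nboot_acl; the kernel parses non-empty fields with uuid_parse() into a uuid_t array instead of printing them:

```c
/*
 * Sketch of the strsep() loop boot_acl_store() uses: empty fields leave
 * the corresponding ACL slot untouched, and both leftover tokens and too
 * few fields are rejected (-EINVAL in the kernel).
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NBOOT_ACL 3

int main(void)
{
	char *uuid_str = strdup("aabbccdd-0000-0000-0000-000000000000,,");
	char *orig = uuid_str;
	char *s;
	int i = 0;

	while ((s = strsep(&uuid_str, ",")) != NULL && i < NBOOT_ACL) {
		if (*s)
			printf("slot %d: parse UUID '%s'\n", i, s);
		else
			printf("slot %d: left unchanged\n", i);
		i++;
	}

	/* Leftover tokens or too few fields: wrong number of entries */
	if (s || i < NBOOT_ACL)
		fprintf(stderr, "wrong number of entries\n");

	free(orig);
	return 0;
}
```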
240 const struct tb *tb = container_of(dev, struct tb, dev);
244 if (tb->security_level == TB_SECURITY_USER ||
245 tb->security_level == TB_SECURITY_SECURE)
246 deauthorization = !!tb->cm_ops->disapprove_switch;
256 struct tb *tb = container_of(dev, struct tb, dev);
258 return sysfs_emit(buf, "%d\n", tb->nhi->iommu_dma_protection);
265 struct tb *tb = container_of(dev, struct tb, dev);
268 if (tb->security_level < ARRAY_SIZE(tb_security_names))
269 name = tb_security_names[tb->security_level];
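These attributes surface as ordinary sysfs files under the domain device, so reading them needs no special API. A minimal reader for the security level, assuming a domain0 device is present on the system:

```c
/* Read the current security level of the first Thunderbolt domain. */
#include <stdio.h>

int main(void)
{
	char level[32];
	FILE *f = fopen("/sys/bus/thunderbolt/devices/domain0/security", "r");

	if (!f) {
		perror("open security attribute");
		return 1;
	}
	if (fgets(level, sizeof(level), f))
		printf("security level: %s", level);	/* e.g. "user\n" */
	fclose(f);
	return 0;
}
```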
287 struct tb *tb = container_of(dev, struct tb, dev);
290 if (tb->nboot_acl &&
291 tb->cm_ops->get_boot_acl &&
292 tb->cm_ops->set_boot_acl)
320 struct tb *tb = container_of(dev, struct tb, dev);
322 tb_ctl_free(tb->ctl);
323 destroy_workqueue(tb->wq);
324 ida_simple_remove(&tb_domain_ida, tb->index);
325 mutex_destroy(&tb->lock);
326 kfree(tb);
337 struct tb *tb = data;
339 if (!tb->cm_ops->handle_event) {
340 tb_warn(tb, "domain does not have event handler\n");
348 return tb_xdomain_handle_request(tb, type, buf, size);
352 tb->cm_ops->handle_event(tb, type, buf, size);
373 struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize)
375 struct tb *tb;
385 tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
386 if (!tb)
389 tb->nhi = nhi;
390 mutex_init(&tb->lock);
392 tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
393 if (tb->index < 0)
396 tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
397 if (!tb->wq)
400 tb->ctl = tb_ctl_alloc(nhi, timeout_msec, tb_domain_event_cb, tb);
401 if (!tb->ctl)
404 tb->dev.parent = &nhi->pdev->dev;
405 tb->dev.bus = &tb_bus_type;
406 tb->dev.type = &tb_domain_type;
407 tb->dev.groups = domain_attr_groups;
408 dev_set_name(&tb->dev, "domain%d", tb->index);
409 device_initialize(&tb->dev);
411 return tb;
414 destroy_workqueue(tb->wq);
416 ida_simple_remove(&tb_domain_ida, tb->index);
418 kfree(tb);
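tb_domain_alloc() follows the kernel's goto-unwind idiom: each error label (the destroy_workqueue/ida_simple_remove/kfree lines above) releases exactly what succeeded before the failure, in reverse order, and tb_domain_release() mirrors the same order at end of life. A compilable sketch with stand-in resources:

```c
/*
 * Sketch of the goto-unwind idiom; the resources here are illustrative
 * stand-ins for the workqueue, control channel and IDA index.
 */
#include <stdlib.h>

struct domain {
	void *wq;	/* stands in for alloc_ordered_workqueue() */
	void *ctl;	/* stands in for tb_ctl_alloc() */
};

static struct domain *domain_alloc(void)
{
	struct domain *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;

	d->wq = malloc(64);
	if (!d->wq)
		goto err_free;

	d->ctl = malloc(64);
	if (!d->ctl)
		goto err_destroy_wq;

	return d;

err_destroy_wq:
	free(d->wq);
err_free:
	free(d);
	return NULL;
}

int main(void)
{
	struct domain *d = domain_alloc();

	if (d) {	/* release mirrors the unwind order */
		free(d->ctl);
		free(d->wq);
		free(d);
	}
	return 0;
}
```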
425 * @tb: Domain to add
434 int tb_domain_add(struct tb *tb)
438 if (WARN_ON(!tb->cm_ops))
441 mutex_lock(&tb->lock);
446 tb_ctl_start(tb->ctl);
448 if (tb->cm_ops->driver_ready) {
449 ret = tb->cm_ops->driver_ready(tb);
454 tb_dbg(tb, "security level set to %s\n",
455 tb_security_names[tb->security_level]);
457 ret = device_add(&tb->dev);
462 if (tb->cm_ops->start) {
463 ret = tb->cm_ops->start(tb);
469 mutex_unlock(&tb->lock);
471 device_init_wakeup(&tb->dev, true);
473 pm_runtime_no_callbacks(&tb->dev);
474 pm_runtime_set_active(&tb->dev);
475 pm_runtime_enable(&tb->dev);
476 pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
477 pm_runtime_mark_last_busy(&tb->dev);
478 pm_runtime_use_autosuspend(&tb->dev);
483 device_del(&tb->dev);
485 tb_ctl_stop(tb->ctl);
486 mutex_unlock(&tb->lock);
493 * @tb: Domain to remove
498 void tb_domain_remove(struct tb *tb)
500 mutex_lock(&tb->lock);
501 if (tb->cm_ops->stop)
502 tb->cm_ops->stop(tb);
504 tb_ctl_stop(tb->ctl);
505 mutex_unlock(&tb->lock);
507 flush_workqueue(tb->wq);
508 device_unregister(&tb->dev);
513 * @tb: Domain to suspend
517 int tb_domain_suspend_noirq(struct tb *tb)
526 mutex_lock(&tb->lock);
527 if (tb->cm_ops->suspend_noirq)
528 ret = tb->cm_ops->suspend_noirq(tb);
530 tb_ctl_stop(tb->ctl);
531 mutex_unlock(&tb->lock);
538 * @tb: Domain to resume
543 int tb_domain_resume_noirq(struct tb *tb)
547 mutex_lock(&tb->lock);
548 tb_ctl_start(tb->ctl);
549 if (tb->cm_ops->resume_noirq)
550 ret = tb->cm_ops->resume_noirq(tb);
551 mutex_unlock(&tb->lock);
556 int tb_domain_suspend(struct tb *tb)
558 return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
561 int tb_domain_freeze_noirq(struct tb *tb)
565 mutex_lock(&tb->lock);
566 if (tb->cm_ops->freeze_noirq)
567 ret = tb->cm_ops->freeze_noirq(tb);
569 tb_ctl_stop(tb->ctl);
570 mutex_unlock(&tb->lock);
575 int tb_domain_thaw_noirq(struct tb *tb)
579 mutex_lock(&tb->lock);
580 tb_ctl_start(tb->ctl);
581 if (tb->cm_ops->thaw_noirq)
582 ret = tb->cm_ops->thaw_noirq(tb);
583 mutex_unlock(&tb->lock);
588 void tb_domain_complete(struct tb *tb)
590 if (tb->cm_ops->complete)
591 tb->cm_ops->complete(tb);
594 int tb_domain_runtime_suspend(struct tb *tb)
596 if (tb->cm_ops->runtime_suspend) {
597 int ret = tb->cm_ops->runtime_suspend(tb);
601 tb_ctl_stop(tb->ctl);
605 int tb_domain_runtime_resume(struct tb *tb)
607 tb_ctl_start(tb->ctl);
608 if (tb->cm_ops->runtime_resume) {
609 int ret = tb->cm_ops->runtime_resume(tb);
618 * @tb: Domain the switch belongs to
625 int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw)
627 if (!tb->cm_ops->disapprove_switch)
630 return tb->cm_ops->disapprove_switch(tb, sw);
635 * @tb: Domain the switch belongs to
642 int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
646 if (!tb->cm_ops->approve_switch)
654 return tb->cm_ops->approve_switch(tb, sw);
659 * @tb: Domain the switch belongs to
668 int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
673 if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
681 ret = tb->cm_ops->add_switch_key(tb, sw);
685 return tb->cm_ops->approve_switch(tb, sw);
690 * @tb: Domain the switch belongs to
700 int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
710 if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
719 ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
754 return tb->cm_ops->approve_switch(tb, sw);
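tb_domain_challenge_switch_key() sends a random challenge and approves the switch only if its response equals HMAC-SHA256(key, challenge); the kernel computes the expected value with the crypto_shash "hmac(sha256)" API. A userspace sketch of that comparison, using OpenSSL purely for illustration and 32-byte buffers matching TB_SWITCH_KEY_SIZE:

```c
/*
 * Verify a challenge response: the expected value is
 * HMAC-SHA256(key, challenge) and must match byte for byte.
 */
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <string.h>

#define TB_SWITCH_KEY_SIZE 32	/* key, challenge and response size */

static int verify_response(const unsigned char *key,
			   const unsigned char *challenge,
			   const unsigned char *response)
{
	unsigned char expected[TB_SWITCH_KEY_SIZE];
	unsigned int len = sizeof(expected);

	if (!HMAC(EVP_sha256(), key, TB_SWITCH_KEY_SIZE,
		  challenge, TB_SWITCH_KEY_SIZE, expected, &len))
		return -1;

	/* a mismatch means the device does not hold the expected key */
	return memcmp(expected, response, sizeof(expected)) == 0 ? 0 : -1;
}

int main(void)
{
	unsigned char key[TB_SWITCH_KEY_SIZE] = { 1 };
	unsigned char challenge[TB_SWITCH_KEY_SIZE] = { 2 };
	unsigned char response[TB_SWITCH_KEY_SIZE];
	unsigned int len = sizeof(response);

	/* simulate a device that does hold the key */
	HMAC(EVP_sha256(), key, sizeof(key), challenge, sizeof(challenge),
	     response, &len);
	return verify_response(key, challenge, response) ? 1 : 0;
}
```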
766 * @tb: Domain whose PCIe paths to disconnect
773 int tb_domain_disconnect_pcie_paths(struct tb *tb)
775 if (!tb->cm_ops->disconnect_pcie_paths)
778 return tb->cm_ops->disconnect_pcie_paths(tb);
783 * @tb: Domain enabling the DMA paths
797 int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
801 if (!tb->cm_ops->approve_xdomain_paths)
804 return tb->cm_ops->approve_xdomain_paths(tb, xd, transmit_path,
810 * @tb: Domain disabling the DMA paths
824 int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
828 if (!tb->cm_ops->disconnect_xdomain_paths)
831 return tb->cm_ops->disconnect_xdomain_paths(tb, xd, transmit_path,
838 struct tb *tb = data;
842 if (xd && xd->tb == tb)
850 * @tb: Domain whose paths are disconnected
858 int tb_domain_disconnect_all_paths(struct tb *tb)
862 ret = tb_domain_disconnect_pcie_paths(tb);
866 return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
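tb_domain_disconnect_all_paths() first drops the PCIe paths, then walks every device on the Thunderbolt bus with bus_for_each_dev(), passing the domain as an opaque cookie so disconnect_xdomain() can skip XDomains owned by other domains (the xd->tb == tb check above). A userspace analog of that callback-with-cookie walk, all types illustrative:

```c
/*
 * Visit every device on a "bus", letting the callback filter on
 * ownership via the opaque data pointer.
 */
#include <stdio.h>

struct domain { int index; };
struct device { struct domain *owner; const char *name; };

static int disconnect_if_mine(struct device *dev, void *data)
{
	struct domain *d = data;

	if (dev->owner == d)	/* mirrors the xd->tb == tb check */
		printf("disconnecting %s\n", dev->name);
	return 0;	/* non-zero would abort the walk */
}

static int bus_for_each(struct device *devs, int n, void *data,
			int (*fn)(struct device *, void *))
{
	for (int i = 0; i < n; i++) {
		int ret = fn(&devs[i], data);

		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	struct domain d0 = { 0 }, d1 = { 1 };
	struct device devs[] = {
		{ &d0, "xdomain0" }, { &d1, "xdomain1" }, { &d0, "xdomain2" },
	};

	return bus_for_each(devs, 3, &d0, disconnect_if_mine);
}
```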