Lines matching refs: tcm
42 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
44 return ((void *)tcm - sizeof(struct tb));
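The first hit, tcm_to_tb() at lines 42-44, recovers the owning struct tb from the connection-manager private data by stepping back sizeof(struct tb). A minimal sketch of the layout this relies on, assuming tb_domain_alloc() (line 2439) places the private area returned by tb_priv() (line 2450) directly after struct tb in the same allocation; the diagram is an illustration, not taken from the source.

/*
 * Assumed layout behind tcm_to_tb():
 *
 *   tb  ->  +----------------+
 *           |  struct tb     |
 *   tcm ->  +----------------+   tcm == tb_priv(tb)
 *           |  struct tb_cm  |
 *           +----------------+
 *
 * so subtracting sizeof(struct tb) from tcm yields tb again:
 */
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
        return ((void *)tcm - sizeof(struct tb));
}
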
55 static void tb_init_bandwidth_groups(struct tb_cm *tcm)
59 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
60 struct tb_bandwidth_group *group = &tcm->groups[i];
62 group->tb = tcm_to_tb(tcm);
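Lines 55-62 show tb_init_bandwidth_groups() walking the fixed groups[] array and pointing each group back at the domain. A sketch of how the loop plausibly reads in full; only the quoted lines are confirmed by the listing, the 1-based index and the empty member-port list are assumptions.

static void tb_init_bandwidth_groups(struct tb_cm *tcm)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
                struct tb_bandwidth_group *group = &tcm->groups[i];

                /* Back-pointer from the group to the owning domain (line 62) */
                group->tb = tcm_to_tb(tcm);
                /* Assumed: 1-based group index and an empty list of member ports */
                group->index = i + 1;
                INIT_LIST_HEAD(&group->ports);
        }
}
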
80 static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
84 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
85 struct tb_bandwidth_group *group = &tcm->groups[i];
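Lines 80-85 show tb_find_free_bandwidth_group() scanning the same array. A sketch of a plausible body, assuming a group with no attached ports counts as free; the list_empty() test is not in the listing.

static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
                struct tb_bandwidth_group *group = &tcm->groups[i];

                /* Assumed: a group with no member ports is free for reuse */
                if (list_empty(&group->ports))
                        return group;
        }

        return NULL;
}

In the listing, tb_attach_bandwidth_group() (lines 95-122) walks tcm->tunnel_list first (line 107) and only falls back to this helper at line 122, which suggests existing groups are reused before a fresh one is claimed.
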
95 tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
107 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
122 group = tb_find_free_bandwidth_group(tcm);
131 static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
138 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
139 if (tcm->groups[i].index == index) {
140 tb_bandwidth_group_attach_port(&tcm->groups[i], in);
146 tb_attach_bandwidth_group(tcm, in, out);
183 struct tb_cm *tcm = tb_priv(sw->tb);
193 list_add_tail(&port->list, &tcm->dp_resources);
200 struct tb_cm *tcm = tb_priv(sw->tb);
209 list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
219 struct tb_cm *tcm = tb_priv(tb);
222 list_for_each_entry(p, &tcm->dp_resources, list) {
229 list_add_tail(&port->list, &tcm->dp_resources);
234 struct tb_cm *tcm = tb_priv(tb);
237 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
246 struct tb_cm *tcm = tb_priv(sw->tb);
271 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
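Lines 183-271 track DP IN adapters on tcm->dp_resources: ports are appended with list_add_tail() (lines 193, 229), duplicates are skipped by walking the list first (line 222), and removal uses the _safe iterator because entries are unlinked mid-walk (line 209). A sketch of that pattern; the helper names below are hypothetical, only the list operations come from the listing.

/* Hypothetical helpers illustrating the dp_resources bookkeeping */
static void example_add_dp_resource(struct tb *tb, struct tb_port *port)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *p;

        /* Skip ports that are already tracked (cf. line 222) */
        list_for_each_entry(p, &tcm->dp_resources, list) {
                if (p == port)
                        return;
        }

        list_add_tail(&port->list, &tcm->dp_resources);     /* cf. line 229 */
}

static void example_remove_dp_resources(struct tb_cm *tcm, struct tb_switch *sw)
{
        struct tb_port *port, *tmp;

        /* _safe variant because entries are deleted while iterating (cf. line 209) */
        list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
                if (port->sw == sw)
                        list_del_init(&port->list);
        }
}
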
426 struct tb_cm *tcm = tb_priv(tb);
429 tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
431 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
447 tb_discover_bandwidth_group(tcm, in, out);
534 struct tb_cm *tcm = tb_priv(tb);
537 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
579 struct tb_cm *tcm = tb_priv(tb);
658 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
756 struct tb_cm *tcm = tb_priv(tb);
819 list_add_tail(&tunnel->list, &tcm->tunnel_list);
882 struct tb_cm *tcm = tb_priv(port->sw->tb);
951 if (!tcm->hotplug_active) {
1007 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
1070 struct tb_cm *tcm = tb_priv(tb);
1074 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1233 struct tb_cm *tcm = tb_priv(tb);
1238 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1239 struct tb_bandwidth_group *group = &tcm->groups[i];
1251 struct tb_cm *tcm = tb_priv(tb);
1256 list_for_each_entry(port, &tcm->dp_resources, list) {
1288 struct tb_cm *tcm = tb_priv(tb);
1305 list_for_each_entry(port, &tcm->dp_resources, list) {
1339 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1362 if (!tb_attach_bandwidth_group(tcm, in, out))
1391 list_add_tail(&tunnel->list, &tcm->tunnel_list);
1448 struct tb_cm *tcm = tb_priv(tb);
1454 list_for_each_entry(p, &tcm->dp_resources, list) {
1461 list_add_tail(&port->list, &tcm->dp_resources);
1469 struct tb_cm *tcm = tb_priv(tb);
1476 list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
1481 while (!list_empty(&tcm->dp_resources)) {
1484 port = list_first_entry(&tcm->dp_resources,
1514 struct tb_cm *tcm = tb_priv(tb);
1551 list_add_tail(&tunnel->list, &tcm->tunnel_list);
1559 struct tb_cm *tcm = tb_priv(tb);
1591 list_add_tail(&tunnel->list, &tcm->tunnel_list);
1608 struct tb_cm *tcm = tb_priv(tb);
1617 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1661 struct tb_cm *tcm = tb_priv(tb);
1669 if (!tcm->hotplug_active)
1891 struct tb_cm *tcm = tb_priv(tb);
1897 if (!tcm->hotplug_active)
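The hotplug_active flag is checked at the top of the event handlers (lines 951, 1007, 1669, 1897) and toggled around stop, suspend and resume (lines 2063, 2146, 2157, 2244 and following). A sketch of the guard as it appears to be used; the handler body is hypothetical and serialization against the flag writers is assumed to be handled by the caller.

/* Hypothetical handler showing the guard against late hotplug events */
static void example_handle_event(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);

        /*
         * Once hotplug_active has been cleared ("signal tb_handle_hotplug
         * to quit", lines 2063 and 2157), pending events are ignored.
         */
        if (!tcm->hotplug_active)
                return;

        /* ... process the hotplug or bandwidth event ... */
}
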
2046 struct tb_cm *tcm = tb_priv(tb);
2050 cancel_delayed_work(&tcm->remove_work);
2052 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2063 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2089 struct tb_cm *tcm = tb_priv(tb);
2146 tcm->hotplug_active = true;
2152 struct tb_cm *tcm = tb_priv(tb);
2157 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2196 struct tb_cm *tcm = tb_priv(tb);
2226 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2235 if (!list_empty(&tcm->tunnel_list)) {
2244 tcm->hotplug_active = true;
2274 struct tb_cm *tcm = tb_priv(tb);
2276 tcm->hotplug_active = false;
2282 struct tb_cm *tcm = tb_priv(tb);
2284 tcm->hotplug_active = true;
2303 struct tb_cm *tcm = tb_priv(tb);
2307 tcm->hotplug_active = false;
2315 struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
2316 struct tb *tb = tcm_to_tb(tcm);
2328 struct tb_cm *tcm = tb_priv(tb);
2335 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
2337 tcm->hotplug_active = true;
2345 queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
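The remove_work round trip is also visible here: the delayed work is initialized at line 2453, queued with a 50 ms delay at line 2345, cancelled on stop at line 2050, and the worker recovers its context with container_of() at line 2315. A sketch of the worker entry, assuming the standard workqueue API; the body of the worker is elided.

static void tb_remove_work(struct work_struct *work)
{
        /* Walk back from the work item to the embedding tb_cm (line 2315) ... */
        struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
        /* ... and from there to the owning domain (line 2316) */
        struct tb *tb = tcm_to_tb(tcm);

        /* ... deferred removal handling using tb elided ... */
}

/* Queued from the hotplug path with a 50 ms delay (line 2345): */
/* queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50)); */
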
2436 struct tb_cm *tcm;
2439 tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
2450 tcm = tb_priv(tb);
2451 INIT_LIST_HEAD(&tcm->tunnel_list);
2452 INIT_LIST_HEAD(&tcm->dp_resources);
2453 INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
2454 tb_init_bandwidth_groups(tcm);
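Lines 2436-2454 put the pieces together: tb_domain_alloc() is asked for sizeof(*tcm) bytes of private space, tb_priv() then hands that area back, and the lists, delayed work and bandwidth groups are set up. A sketch assembling those lines into one flow; the enclosing function name and the elided connection-manager-ops wiring are assumptions, everything else is quoted from the listing.

/* Sketch of the probe path; the function name here is an assumption */
struct tb *example_tb_probe(struct tb_nhi *nhi)
{
        struct tb_cm *tcm;
        struct tb *tb;

        /* Reserve room for struct tb_cm right behind struct tb (line 2439) */
        tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
        if (!tb)
                return NULL;

        /* ... connection-manager ops wiring elided ... */

        tcm = tb_priv(tb);
        INIT_LIST_HEAD(&tcm->tunnel_list);                      /* line 2451 */
        INIT_LIST_HEAD(&tcm->dp_resources);                     /* line 2452 */
        INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);   /* line 2453 */
        tb_init_bandwidth_groups(tcm);                          /* line 2454 */

        return tb;
}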