Lines Matching defs:port
40 struct team_port *port = rtnl_dereference(dev->rx_handler_data);
42 return netif_is_team_port(dev) ? port : NULL;
46 * Since the ability to change device address for an open port device is tested in
59 static int team_port_set_orig_dev_addr(struct team_port *port)
61 return __set_port_dev_addr(port->dev, port->orig.dev_addr);
65 struct team_port *port)
67 return __set_port_dev_addr(port->dev, team->dev->dev_addr);
70 int team_modeop_port_enter(struct team *team, struct team_port *port)
72 return team_port_set_team_dev_addr(team, port);
77 struct team_port *port)
79 team_port_set_team_dev_addr(team, port);
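Taken together, the fragments above show the MAC handling around mode enter/leave: the port's own address is saved into port->orig.dev_addr when the port is added (see the memcpy at line 1206 further down), team_port_set_team_dev_addr() stamps the team device's address onto the port, and team_port_set_orig_dev_addr() puts the saved one back. Below is a minimal userspace sketch of that save/apply/restore cycle, using plain arrays instead of net_device; the names are illustrative only, not the driver's API.

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct fake_port {
	unsigned char dev_addr[ETH_ALEN];   /* current MAC of the port */
	unsigned char orig_addr[ETH_ALEN];  /* MAC saved when the port was added */
};

/* Save the port's own MAC before the team takes it over. */
static void port_save_orig(struct fake_port *p)
{
	memcpy(p->orig_addr, p->dev_addr, ETH_ALEN);
}

/* Mode enter: the port inherits the team device's MAC. */
static void port_set_team_addr(struct fake_port *p, const unsigned char *team_addr)
{
	memcpy(p->dev_addr, team_addr, ETH_ALEN);
}

/* Mode exit / port removal: give the port its original MAC back. */
static void port_restore_orig(struct fake_port *p)
{
	memcpy(p->dev_addr, p->orig_addr, ETH_ALEN);
}

int main(void)
{
	unsigned char team_addr[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct fake_port p = { .dev_addr = { 0x02, 0xaa, 0xbb, 0xcc, 0xdd, 0xee } };

	port_save_orig(&p);
	port_set_team_addr(&p, team_addr);
	printf("while enslaved: %02x:..:%02x\n", p.dev_addr[0], p.dev_addr[5]);
	port_restore_orig(&p);
	printf("after removal:  %02x:..:%02x\n", p.dev_addr[0], p.dev_addr[5]);
	return 0;
}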
83 static void team_lower_state_changed(struct team_port *port)
87 info.link_up = port->linkup;
88 info.tx_enabled = team_port_enabled(port);
89 netdev_lower_state_changed(port->dev, &info);
92 static void team_refresh_port_linkup(struct team_port *port)
94 bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
95 port->state.linkup;
97 if (port->linkup != new_linkup) {
98 port->linkup = new_linkup;
99 team_lower_state_changed(port);
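team_refresh_port_linkup() above computes the effective link state: when the user has enabled the per-port override (user.linkup_enabled), the user-supplied value wins; otherwise the state the device itself reports is used, and any change is propagated via team_lower_state_changed(). A small standalone sketch of just that precedence rule, with simplified stand-in fields rather than the real struct team_port:

#include <stdbool.h>
#include <stdio.h>

struct fake_port_state {
	bool user_linkup_enabled; /* user asked to override link detection */
	bool user_linkup;         /* value the user forced */
	bool state_linkup;        /* what the device itself reports */
	bool linkup;              /* effective value the team logic uses */
};

/* Returns true if the effective link state changed. */
static bool refresh_linkup(struct fake_port_state *p)
{
	bool new_linkup = p->user_linkup_enabled ? p->user_linkup : p->state_linkup;

	if (p->linkup == new_linkup)
		return false;
	p->linkup = new_linkup;
	return true; /* the driver would notify lower-state watchers here */
}

int main(void)
{
	struct fake_port_state p = { .state_linkup = true };

	refresh_linkup(&p);           /* follows the device state: link up */
	p.user_linkup_enabled = true; /* user forces the port down ... */
	p.user_linkup = false;
	printf("changed=%d linkup=%d\n", refresh_linkup(&p), p.linkup);
	return 0;
}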
147 struct team_port *port)
162 opt_inst->info.port = port;
205 struct team_port *port)
211 opt_inst->info.port == port)
217 struct team_port *port)
225 err = __team_option_inst_add(team, option, port);
232 __team_option_inst_del_port(team, port);
237 struct team_port *port)
242 if (opt_inst->info.port == port) {
506 struct team_port *port,
548 * We can benefit from the fact that it's ensured no port is present
724 struct team_port *port;
734 port = team_port_get_rcu(skb->dev);
735 team = port->team;
736 if (!team_port_enabled(port)) {
745 res = team->ops.receive(team, port, skb);
770 * Multiqueue Tx port select override
807 struct team_port *port;
812 list_for_each_entry_rcu(port, qom_list, qom_list) {
813 if (!team_dev_queue_xmit(team, port, skb))
820 struct team_port *port)
822 if (!port->queue_id)
824 list_del_rcu(&port->qom_list);
827 static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
830 if (port->priority < cur->priority)
832 if (port->priority > cur->priority)
834 if (port->index < cur->index)
840 struct team_port *port)
846 if (!port->queue_id)
848 qom_list = __team_get_qom_list(team, port->queue_id);
851 if (team_queue_override_port_has_gt_prio_than(port, cur))
855 list_add_tail_rcu(&port->qom_list, node);
860 struct team_port *port;
863 list_for_each_entry(port, &team->port_list, list) {
864 if (port->queue_id) {
877 struct team_port *port)
879 if (!port->queue_id || team_port_enabled(port))
881 __team_queue_override_port_del(team, port);
882 __team_queue_override_port_add(team, port);
887 struct team_port *port,
890 if (team_port_enabled(port)) {
891 __team_queue_override_port_del(team, port);
892 port->queue_id = new_queue_id;
893 __team_queue_override_port_add(team, port);
896 port->queue_id = new_queue_id;
901 struct team_port *port)
903 __team_queue_override_port_add(team, port);
908 struct team_port *port)
910 __team_queue_override_port_del(team, port);
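The queue-override fragments above keep, for each overridden queue id, a list of candidate ports; transmit walks that list in order (lines 812-813) and the first port that accepts the skb handles it, while __team_queue_override_port_add() places a port at its sorted position using team_queue_override_port_has_gt_prio_than(). The sketch below models that sorted insert in plain C; the ordering shown (higher priority value first, lower index breaking ties) is an assumption for illustration, the kernel comparator above being authoritative.

#include <stdio.h>

/* Simplified stand-in for a team port entry on a per-queue override list. */
struct qom_port {
	int priority;           /* assumed: numerically higher wins */
	int index;              /* assumed tie-break: lower index first */
	struct qom_port *next;
};

/* True if 'a' should sit in front of 'b' on the list. */
static int goes_before(const struct qom_port *a, const struct qom_port *b)
{
	if (a->priority != b->priority)
		return a->priority > b->priority;
	return a->index < b->index;
}

/* Sorted insert: advance past every entry we do not outrank, then link in. */
static void qom_insert(struct qom_port **head, struct qom_port *p)
{
	struct qom_port **pos = head;

	while (*pos && !goes_before(p, *pos))
		pos = &(*pos)->next;
	p->next = *pos;
	*pos = p;
}

int main(void)
{
	struct qom_port a = { .priority = 10, .index = 0 };
	struct qom_port b = { .priority = 20, .index = 1 };
	struct qom_port c = { .priority = 10, .index = 2 };
	struct qom_port *head = NULL;

	qom_insert(&head, &a);
	qom_insert(&head, &b);
	qom_insert(&head, &c);
	for (struct qom_port *it = head; it; it = it->next)
		printf("prio=%d index=%d\n", it->priority, it->index);
	return 0;
}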
920 const struct team_port *port)
925 if (cur == port)
931 * Enable/disable port by adding to enabled port hashlist and setting
932 * port->index (Might be racy so reader could see incorrect ifindex when
937 struct team_port *port)
939 if (team_port_enabled(port))
941 port->index = team->en_port_count++;
942 hlist_add_head_rcu(&port->hlist,
943 team_port_index_hash(team, port->index));
945 team_queue_override_port_add(team, port);
947 team->ops.port_enabled(team, port);
950 team_lower_state_changed(port);
956 struct team_port *port;
959 port = team_get_port_by_index(team, i);
960 hlist_del_rcu(&port->hlist);
961 port->index--;
962 hlist_add_head_rcu(&port->hlist,
963 team_port_index_hash(team, port->index));
968 struct team_port *port)
970 if (!team_port_enabled(port))
973 team->ops.port_disabled(team, port);
974 hlist_del_rcu(&port->hlist);
975 __reconstruct_port_hlist(team, port->index);
976 port->index = -1;
978 team_queue_override_port_del(team, port);
980 team_lower_state_changed(port);
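team_port_enable() hands out dense indices (port->index = team->en_port_count++) and hashes each enabled port by that index; team_port_disable() removes the port and __reconstruct_port_hlist() shifts every later port's index down by one and rehashes it, so the indices stay contiguous and a mode can pick a port with something like hash modulo en_port_count. A toy array model of that compaction follows; the real code rehashes each shifted port in the index hashlist instead of moving array slots.

#include <stdio.h>

#define MAX_PORTS 8

/* Toy model: enabled ports occupy dense indices 0..count-1. */
struct en_ports {
	int ids[MAX_PORTS]; /* stand-in for struct team_port pointers */
	int count;
};

static void enable_port(struct en_ports *ep, int id)
{
	ep->ids[ep->count++] = id; /* new port takes the next free index */
}

/* Remove the port at rm_index and shift everything after it down one slot,
 * mirroring what __reconstruct_port_hlist() does with the index hash. */
static void disable_port(struct en_ports *ep, int rm_index)
{
	for (int i = rm_index + 1; i < ep->count; i++)
		ep->ids[i - 1] = ep->ids[i];
	ep->count--;
}

int main(void)
{
	struct en_ports ep = { .count = 0 };

	enable_port(&ep, 100);
	enable_port(&ep, 101);
	enable_port(&ep, 102);
	disable_port(&ep, 1);            /* drop the middle port */
	for (int i = 0; i < ep.count; i++)
		printf("index %d -> port %d\n", i, ep.ids[i]);
	return 0;
}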
992 struct team_port *port;
1001 list_for_each_entry_rcu(port, &team->port_list, list) {
1003 port->dev->vlan_features,
1007 port->dev->hw_enc_features,
1011 dst_release_flag &= port->dev->priv_flags;
1012 if (port->dev->hard_header_len > max_hard_header_len)
1013 max_hard_header_len = port->dev->hard_header_len;
1034 static int team_port_enter(struct team *team, struct team_port *port)
1040 err = team->ops.port_enter(team, port);
1043 port->dev->name);
1056 static void team_port_leave(struct team *team, struct team_port *port)
1059 team->ops.port_leave(team, port);
1064 static int __team_port_enable_netpoll(struct team_port *port)
1073 err = __netpoll_setup(np, port->dev);
1078 port->np = np;
1082 static int team_port_enable_netpoll(struct team_port *port)
1084 if (!port->team->dev->npinfo)
1087 return __team_port_enable_netpoll(port);
1090 static void team_port_disable_netpoll(struct team_port *port)
1092 struct netpoll *np = port->np;
1096 port->np = NULL;
1101 static int team_port_enable_netpoll(struct team_port *port)
1105 static void team_port_disable_netpoll(struct team_port *port)
1110 static int team_upper_dev_link(struct team *team, struct team_port *port,
1118 err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
1122 port->dev->priv_flags |= IFF_TEAM_PORT;
1126 static void team_upper_dev_unlink(struct team *team, struct team_port *port)
1128 netdev_upper_dev_unlink(port->dev, team->dev);
1129 port->dev->priv_flags &= ~IFF_TEAM_PORT;
1132 static void __team_port_change_port_added(struct team_port *port, bool linkup);
1140 struct team_port *port;
1145 NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port");
1146 netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
1152 NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
1153 netdev_err(dev, "Device %s is already a port "
1184 NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
1185 netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
1190 port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
1192 if (!port)
1195 port->dev = port_dev;
1196 port->team = team;
1197 INIT_LIST_HEAD(&port->qom_list);
1199 port->orig.mtu = port_dev->mtu;
1206 memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
1208 err = team_port_enter(team, port);
1229 err = team_port_enable_netpoll(port);
1240 port);
1247 err = team_upper_dev_link(team, port, extack);
1254 err = __team_option_inst_add_port(team, port);
1256 netdev_err(dev, "Device %s failed to add per-port options\n",
1285 port->index = -1;
1286 list_add_tail_rcu(&port->list, &team->port_list);
1287 team_port_enable(team, port);
1289 __team_port_change_port_added(port, !!netif_oper_up(port_dev));
1297 __team_option_inst_del_port(team, port);
1300 team_upper_dev_unlink(team, port);
1306 team_port_disable_netpoll(port);
1315 team_port_leave(team, port);
1316 team_port_set_orig_dev_addr(port);
1319 dev_set_mtu(port_dev, port->orig.mtu);
1322 kfree(port);
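The team_port_add() fragments from line 1190 onward build the port up step by step (allocate, record the original MTU and MAC, mode port_enter, netpoll, rx handler, upper-device link, per-port options), and the tail from line 1297 on unwinds those steps in reverse when any of them fails, ending in kfree(port). Below is a compact sketch of that cascading-goto cleanup idiom with hypothetical step/undo helpers; these are not the driver's functions, only the shape of the error path.

#include <stdio.h>

/* Hypothetical resources standing in for the real setup steps. */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }  /* pretend the third step fails */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

/* Each setup step that can fail jumps to a label that unwinds everything
 * done so far, in reverse order. */
static int add_thing(void)
{
	int err;

	err = step_a();
	if (err)
		goto err_a;
	err = step_b();
	if (err)
		goto err_b;
	err = step_c();
	if (err)
		goto err_c;
	return 0;

err_c:
	undo_b();
err_b:
	undo_a();
err_a:
	return err;
}

int main(void)
{
	printf("add_thing() = %d\n", add_thing());
	return 0;
}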
1327 static void __team_port_change_port_removed(struct team_port *port);
1332 struct team_port *port;
1335 port = team_port_get_rtnl(port_dev);
1336 if (!port || !team_port_find(team, port)) {
1337 netdev_err(dev, "Device %s does not act as a port of this team\n",
1342 team_port_disable(team, port);
1343 list_del_rcu(&port->list);
1350 team_upper_dev_unlink(team, port);
1352 team_port_disable_netpoll(port);
1359 team_port_leave(team, port);
1361 __team_option_inst_mark_removed_port(team, port);
1363 __team_option_inst_del_port(team, port);
1364 __team_port_change_port_removed(port);
1366 team_port_set_orig_dev_addr(port);
1367 dev_set_mtu(port_dev, port->orig.mtu);
1368 kfree_rcu(port, rcu);
1445 struct team_port *port = ctx->info->port;
1447 ctx->data.bool_val = team_port_enabled(port);
1453 struct team_port *port = ctx->info->port;
1456 team_port_enable(team, port);
1458 team_port_disable(team, port);
1465 struct team_port *port = ctx->info->port;
1467 ctx->data.bool_val = port->user.linkup;
1475 struct team_port *port = ctx->info->port;
1477 port->user.linkup = ctx->data.bool_val;
1478 team_refresh_port_linkup(port);
1479 __team_carrier_check(port->team);
1486 struct team_port *port = ctx->info->port;
1488 ctx->data.bool_val = port->user.linkup_enabled;
1494 struct team_port *port = ctx->info->port;
1496 port->user.linkup_enabled = ctx->data.bool_val;
1497 team_refresh_port_linkup(port);
1498 __team_carrier_check(port->team);
1505 struct team_port *port = ctx->info->port;
1507 ctx->data.s32_val = port->priority;
1513 struct team_port *port = ctx->info->port;
1516 if (port->priority == priority)
1518 port->priority = priority;
1519 team_queue_override_port_prio_changed(team, port);
1526 struct team_port *port = ctx->info->port;
1528 ctx->data.u32_val = port->queue_id;
1534 struct team_port *port = ctx->info->port;
1537 if (port->queue_id == new_queue_id)
1541 team_queue_override_port_change_queue_id(team, port, new_queue_id);
1667 struct team_port *port;
1671 list_for_each_entry_safe(port, tmp, &team->port_list, list)
1672 team_port_del(team, port->dev);
1699 struct team_port *port;
1701 list_for_each_entry(port, &team->port_list, list) {
1702 dev_uc_unsync(port->dev, dev);
1703 dev_mc_unsync(port->dev, dev);
1763 struct team_port *port;
1767 list_for_each_entry_rcu(port, &team->port_list, list) {
1770 dev_set_promiscuity(port->dev, inc);
1774 dev_set_allmulti(port->dev, inc);
1783 struct team_port *port;
1786 list_for_each_entry_rcu(port, &team->port_list, list) {
1787 dev_uc_sync_multiple(port->dev, dev);
1788 dev_mc_sync_multiple(port->dev, dev);
1797 struct team_port *port;
1803 list_for_each_entry(port, &team->port_list, list)
1805 team->ops.port_change_dev_addr(team, port);
1813 struct team_port *port;
1822 list_for_each_entry(port, &team->port_list, list) {
1823 err = dev_set_mtu(port->dev, new_mtu);
1826 port->dev->name);
1838 list_for_each_entry_continue_reverse(port, &team->port_list, list)
1839 dev_set_mtu(port->dev, dev->mtu);
1888 struct team_port *port;
1896 list_for_each_entry(port, &team->port_list, list) {
1897 err = vlan_vid_add(port->dev, proto, vid);
1906 list_for_each_entry_continue_reverse(port, &team->port_list, list)
1907 vlan_vid_del(port->dev, proto, vid);
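team_change_mtu() and team_vlan_rx_add_vid() above share the same shape: apply the change to every port and, if one port rejects it, walk back over the ports already touched with list_for_each_entry_continue_reverse() and restore the previous value. A standalone sketch of that apply-then-rollback pattern, using an array index in place of the list cursor; the port count and values here are made up for illustration.

#include <stdio.h>

static int apply(int port, int value)
{
	if (port == 2)
		return -1;              /* pretend the third port rejects it */
	printf("port %d: applied %d\n", port, value);
	return 0;
}

static void revert(int port, int value)
{
	printf("port %d: reverted to %d\n", port, value);
}

/* Apply new_value to every port; on failure undo the ports already changed. */
static int set_all(int nports, int new_value, int old_value)
{
	int i, err = 0;

	for (i = 0; i < nports; i++) {
		err = apply(i, new_value);
		if (err)
			break;
	}
	if (err)
		for (i--; i >= 0; i--)  /* walk back over the changed ports */
			revert(i, old_value);
	return err;
}

int main(void)
{
	return set_all(4, 9000, 1500) ? 1 : 0;
}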
1916 struct team_port *port;
1919 list_for_each_entry(port, &team->port_list, list)
1920 vlan_vid_del(port->dev, proto, vid);
1933 struct team_port *port;
1935 list_for_each_entry(port, &team->port_list, list)
1936 team_port_disable_netpoll(port);
1952 struct team_port *port;
1956 list_for_each_entry(port, &team->port_list, list) {
1957 err = __team_port_enable_netpoll(port);
2009 struct team_port *port;
2018 list_for_each_entry_rcu(port, &team->port_list, list) {
2020 port->dev->features,
2085 struct team_port *port;
2088 cmd->base.port = PORT_OTHER;
2091 list_for_each_entry_rcu(port, &team->port_list, list) {
2092 if (team_port_txable(port)) {
2093 if (port->state.speed != SPEED_UNKNOWN)
2094 speed += port->state.speed;
2096 port->state.duplex != DUPLEX_UNKNOWN)
2097 cmd->base.duplex = port->state.duplex;
2367 if (opt_inst_info->port &&
2369 opt_inst_info->port->dev->ifindex))
2570 int opt_port_ifindex = 0; /* != 0 for per-port options */
2637 tmp_ifindex = opt_inst_info->port ?
2638 opt_inst_info->port->dev->ifindex : 0;
2695 struct team_port *port)
2702 if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
2704 if (port->changed) {
2707 port->changed = false;
2709 if ((port->removed &&
2711 (port->state.linkup &&
2713 nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
2714 nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
2731 struct team_port *port;
2737 port = list_first_entry_or_null(&team->port_list,
2761 /* If one port is selected, caller wants to send port list containing
2762 * only this port. Otherwise go through all listed ports and send all
2768 } else if (port) {
2769 list_for_each_entry_from(port, &team->port_list, list) {
2770 err = team_nl_fill_one_port_get(skb, port);
2884 struct team_port *port)
2887 port);
2923 static void __team_port_change_send(struct team_port *port, bool linkup)
2927 port->changed = true;
2928 port->state.linkup = linkup;
2929 team_refresh_port_linkup(port);
2933 err = __ethtool_get_link_ksettings(port->dev, &ecmd);
2935 port->state.speed = ecmd.base.speed;
2936 port->state.duplex = ecmd.base.duplex;
2940 port->state.speed = 0;
2941 port->state.duplex = 0;
2944 err = team_nl_send_event_port_get(port->team, port);
2946 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
2947 port->dev->name, err);
2953 struct team_port *port;
2960 list_for_each_entry(port, &team->port_list, list) {
2961 if (port->linkup) {
2973 static void __team_port_change_check(struct team_port *port, bool linkup)
2975 if (port->state.linkup != linkup)
2976 __team_port_change_send(port, linkup);
2977 __team_carrier_check(port->team);
2980 static void __team_port_change_port_added(struct team_port *port, bool linkup)
2982 __team_port_change_send(port, linkup);
2983 __team_carrier_check(port->team);
2986 static void __team_port_change_port_removed(struct team_port *port)
2988 port->removed = true;
2989 __team_port_change_send(port, false);
2990 __team_carrier_check(port->team);
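__team_port_change_send() records the new per-port link state, refreshes speed/duplex via __ethtool_get_link_ksettings() and reports the change over netlink, while __team_carrier_check() (lines 2953-2961) turns the team device's carrier on as soon as any port reports linkup and off otherwise. The sketch below models only that any-port-up reduction, with a plain bool array standing in for the port list.

#include <stdbool.h>
#include <stdio.h>

/* Toy reduction mirroring __team_carrier_check(): the aggregate device has
 * carrier as long as at least one member port reports linkup. */
static bool team_has_carrier(const bool *port_linkup, int nports)
{
	for (int i = 0; i < nports; i++)
		if (port_linkup[i])
			return true;
	return false;
}

int main(void)
{
	bool ports[3] = { false, true, false };

	printf("carrier=%d\n", team_has_carrier(ports, 3));
	ports[1] = false;                 /* last linked port goes down */
	printf("carrier=%d\n", team_has_carrier(ports, 3));
	return 0;
}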
2993 static void team_port_change_check(struct team_port *port, bool linkup)
2995 struct team *team = port->team;
2998 __team_port_change_check(port, linkup);
3011 struct team_port *port;
3013 port = team_port_get_rtnl(dev);
3014 if (!port)
3020 team_port_change_check(port, true);
3023 team_port_change_check(port, false);
3026 if (netif_running(port->dev))
3027 team_port_change_check(port,
3028 !!netif_oper_up(port->dev));
3031 team_del_slave(port->team->dev, dev);
3034 if (!port->team->notifier_ctx) {
3035 port->team->notifier_ctx = true;
3036 team_compute_features(port->team);
3037 port->team->notifier_ctx = false;
3042 if (!port->team->port_mtu_change_allowed)
3050 call_netdevice_notifiers(event, port->team->dev);