Lines Matching defs:dev
5 * Derived from the non-IP parts of dev.c 1.0.19
19 * D.J. Barrow : Fixed bug where dev->refcnt gets set
66 * indefinitely on dev->refcnt
157 #include "dev.h"
166 struct net_device *dev,
254 static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
263 name_node->dev = dev;
269 netdev_name_node_head_alloc(struct net_device *dev)
273 name_node = netdev_name_node_alloc(dev, dev->name);
327 int netdev_name_node_alt_create(struct net_device *dev, const char *name)
330 struct net *net = dev_net(dev);
335 name_node = netdev_name_node_alloc(dev, name);
339 /* The node that holds dev->name acts as the head of the per-device list. */
340 list_add_tail(&name_node->list, &dev->name_node->list);
352 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
355 struct net *net = dev_net(dev);
363 if (name_node == dev->name_node || name_node->dev != dev)
373 static void netdev_name_node_alt_flush(struct net_device *dev)
377 list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
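
The alternative-name helpers above chain extra lookup names onto dev->name_node. A minimal sketch of their use (these are net-core-internal helpers, RTNL must be held, and the name "uplink0" is illustrative):

	/* under rtnl_lock(): add and later remove an altname for dev */
	err = netdev_name_node_alt_create(dev, "uplink0");
	if (!err)
		err = netdev_name_node_alt_destroy(dev, "uplink0");
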
382 static void list_netdevice(struct net_device *dev)
385 struct net *net = dev_net(dev);
390 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
391 netdev_name_node_add(net, dev->name_node);
392 hlist_add_head_rcu(&dev->index_hlist,
393 dev_index_hash(net, dev->ifindex));
396 netdev_for_each_altname(dev, name_node)
400 WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));
406 * caller must respect an RCU grace period before freeing/reusing dev
408 static void unlist_netdevice(struct net_device *dev, bool lock)
411 struct net *net = dev_net(dev);
415 xa_erase(&net->dev_by_index, dev->ifindex);
417 netdev_for_each_altname(dev, name_node)
420 /* Unlink dev from the device chain */
423 list_del_rcu(&dev->dev_list);
424 netdev_name_node_del(dev->name_node);
425 hlist_del_rcu(&dev->index_hlist);
429 dev_base_seq_inc(dev_net(dev));
449 * according to dev->type
509 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
513 i = netdev_lock_pos(dev->type);
514 lockdep_set_class_and_name(&dev->addr_list_lock,
524 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
555 return pt->dev ? &pt->dev->ptype_all : &ptype_all;
557 return pt->dev ? &pt->dev->ptype_specific :
646 * @dev: targeted interface
652 int dev_get_iflink(const struct net_device *dev)
654 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
655 return dev->netdev_ops->ndo_get_iflink(dev);
657 return dev->ifindex;
663 * @dev: targeted interface
670 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
674 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
683 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
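
dev_get_iflink() resolves the ifindex of the device "below" a stacked interface, falling back to dev->ifindex when no ndo_get_iflink is provided. A minimal sketch:

	/* for e.g. a VLAN device this is the real device's ifindex;
	 * for a plain NIC it is simply dev->ifindex
	 */
	int iflink = dev_get_iflink(dev);
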
697 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
702 .dev = dev,
709 while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
710 last_dev = ctx.dev;
716 ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
720 if (WARN_ON_ONCE(last_dev == ctx.dev))
724 if (!ctx.dev)
731 path->dev = ctx.dev;
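
dev_fill_forward_path() walks ndo_fill_forward_path hop by hop until a device without the callback is reached, recording each step. A minimal sketch, assuming struct net_device_path_stack as defined in netdevice.h:

	struct net_device_path_stack stack;
	int i, err;

	err = dev_fill_forward_path(dev, daddr, &stack);
	if (!err)
		for (i = 0; i < stack.num_paths; i++)
			pr_info("hop %d: %s\n", i, stack.path[i].dev->name);
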
754 return node_name ? node_name->dev : NULL;
775 return node_name ? node_name->dev : NULL;
782 struct net_device *dev;
785 dev = dev_get_by_name_rcu(net, name);
786 dev_hold(dev);
788 return dev;
808 struct net_device *dev;
810 dev = dev_get_by_name(net, name);
811 if (dev)
812 netdev_tracker_alloc(dev, tracker, gfp);
813 return dev;
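
The two lookup flavours above differ only in lifetime rules: the _rcu variant takes no reference and is valid only inside the RCU read-side section, while dev_get_by_name() returns with a reference held. A minimal sketch (device name illustrative):

	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, "eth0");	/* no ref taken */
	if (dev)
		pr_info("ifindex %d\n", dev->ifindex);
	rcu_read_unlock();

	dev = dev_get_by_name(net, "eth0");	/* ref held on success */
	if (dev) {
		/* ... use dev ... */
		dev_put(dev);
	}
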
831 struct net_device *dev;
834 hlist_for_each_entry(dev, head, index_hlist)
835 if (dev->ifindex == ifindex)
836 return dev;
855 struct net_device *dev;
858 hlist_for_each_entry_rcu(dev, head, index_hlist)
859 if (dev->ifindex == ifindex)
860 return dev;
869 struct net_device *dev;
872 dev = dev_get_by_index_rcu(net, ifindex);
873 dev_hold(dev);
875 return dev;
894 struct net_device *dev;
896 dev = dev_get_by_index(net, ifindex);
897 if (dev)
898 netdev_tracker_alloc(dev, tracker, gfp);
899 return dev;
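
The ifindex lookups mirror the by-name ones, and the tracker-taking wrapper pairs the reference with a netdevice_tracker for ref-leak debugging. A minimal sketch, assuming a kernel that has the tracked netdev_get_by_index()/netdev_put() API:

	netdevice_tracker tracker;
	struct net_device *dev;

	dev = netdev_get_by_index(net, ifindex, &tracker, GFP_KERNEL);
	if (dev) {
		/* ... use dev ... */
		netdev_put(dev, &tracker);
	}
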
924 return napi ? napi->dev : NULL;
936 struct net_device *dev;
942 dev = dev_get_by_index_rcu(net, ifindex);
943 if (!dev) {
948 strcpy(name, dev->name);
974 struct net_device *dev;
976 for_each_netdev_rcu(net, dev)
977 if (dev->type == type &&
978 !memcmp(dev->dev_addr, ha, dev->addr_len))
979 return dev;
987 struct net_device *dev, *ret = NULL;
990 for_each_netdev_rcu(net, dev)
991 if (dev->type == type) {
992 dev_hold(dev);
993 ret = dev;
1015 struct net_device *dev, *ret;
1020 for_each_netdev(net, dev) {
1021 if (((dev->flags ^ if_flags) & mask) == 0) {
1022 ret = dev;
1137 static int dev_prep_valid_name(struct net *net, struct net_device *dev,
1158 struct net_device *dev,
1167 strscpy(dev->name, buf, IFNAMSIZ);
1173 * @dev: device
1185 int dev_alloc_name(struct net_device *dev, const char *name)
1187 return dev_alloc_name_ns(dev_net(dev), dev, name);
1191 static int dev_get_valid_name(struct net *net, struct net_device *dev,
1197 ret = dev_prep_valid_name(net, dev, name, buf);
1199 strscpy(dev->name, buf, IFNAMSIZ);
1205 * @dev: device
1211 int dev_change_name(struct net_device *dev, const char *newname)
1220 BUG_ON(!dev_net(dev));
1222 net = dev_net(dev);
1226 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1231 memcpy(oldname, dev->name, IFNAMSIZ);
1233 err = dev_get_valid_name(net, dev, newname);
1240 netdev_info(dev, "renamed from %s%s\n", oldname,
1241 dev->flags & IFF_UP ? " (while UP)" : "");
1243 old_assign_type = dev->name_assign_type;
1244 dev->name_assign_type = NET_NAME_RENAMED;
1247 ret = device_rename(&dev->dev, dev->name);
1249 memcpy(dev->name, oldname, IFNAMSIZ);
1250 dev->name_assign_type = old_assign_type;
1257 netdev_adjacent_rename_links(dev, oldname);
1260 netdev_name_node_del(dev->name_node);
1266 netdev_name_node_add(net, dev->name_node);
1269 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1277 memcpy(dev->name, oldname, IFNAMSIZ);
1279 dev->name_assign_type = old_assign_type;
1283 netdev_err(dev, "name change rollback failed: %d\n",
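
dev_alloc_name() expands a "%d" template to the first free unit number, which is how most drivers pick their name before register_netdevice(). A minimal sketch (the "swp%d" template is illustrative; RTNL must be held):

	err = dev_alloc_name(dev, "swp%d");
	if (err < 0)		/* on success err is the unit number */
		return err;
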
1293 * @dev: device
1299 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1316 new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
1329 * @dev: device
1333 * get ifalias for a device. Caller must make sure dev cannot go
1336 int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1342 alias = rcu_dereference(dev->ifalias);
1352 * @dev: device to cause notification
1356 void netdev_features_change(struct net_device *dev)
1358 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1364 * @dev: device to cause notification
1370 void netdev_state_change(struct net_device *dev)
1372 if (dev->flags & IFF_UP) {
1374 .info.dev = dev,
1379 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
1385 * __netdev_notify_peers - notify network peers about existence of @dev,
1387 * @dev: network device
1390 * @dev, such as by generating a gratuitous ARP. This may be used when
1395 void __netdev_notify_peers(struct net_device *dev)
1398 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1399 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1404 * netdev_notify_peers - notify network peers about existence of @dev
1405 * @dev: network device
1408 * @dev, such as by generating a gratuitous ARP. This may be used when
1413 void netdev_notify_peers(struct net_device *dev)
1416 __netdev_notify_peers(dev);
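
A minimal sketch of the notify-peers helpers, used e.g. after a VM live migration or a failover so that peers relearn our address (gratuitous ARP, resent IGMP reports):

	netdev_notify_peers(dev);	/* takes RTNL internally */
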
1432 n->dev->name, n->napi_id);
1442 static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1444 const struct net_device_ops *ops = dev->netdev_ops;
1448 dev_addr_check(dev);
1450 if (!netif_device_present(dev)) {
1452 if (dev->dev.parent)
1453 pm_runtime_resume(dev->dev.parent);
1454 if (!netif_device_present(dev))
1462 netpoll_poll_disable(dev);
1464 ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
1469 set_bit(__LINK_STATE_START, &dev->state);
1472 ret = ops->ndo_validate_addr(dev);
1475 ret = ops->ndo_open(dev);
1477 netpoll_poll_enable(dev);
1480 clear_bit(__LINK_STATE_START, &dev->state);
1482 dev->flags |= IFF_UP;
1483 dev_set_rx_mode(dev);
1484 dev_activate(dev);
1485 add_device_randomness(dev->dev_addr, dev->addr_len);
1493 * @dev: device to open
1504 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1508 if (dev->flags & IFF_UP)
1511 ret = __dev_open(dev, extack);
1515 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
1516 call_netdevice_notifiers(NETDEV_UP, dev);
1524 struct net_device *dev;
1529 list_for_each_entry(dev, head, close_list) {
1531 netpoll_poll_disable(dev);
1533 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1535 clear_bit(__LINK_STATE_START, &dev->state);
1540 * dev->stop() will invoke napi_disable() on all of its
1548 list_for_each_entry(dev, head, close_list) {
1549 const struct net_device_ops *ops = dev->netdev_ops;
1559 ops->ndo_stop(dev);
1561 dev->flags &= ~IFF_UP;
1562 netpoll_poll_enable(dev);
1566 static void __dev_close(struct net_device *dev)
1570 list_add(&dev->close_list, &single);
1577 struct net_device *dev, *tmp;
1580 list_for_each_entry_safe(dev, tmp, head, close_list)
1581 if (!(dev->flags & IFF_UP))
1582 list_del_init(&dev->close_list);
1586 list_for_each_entry_safe(dev, tmp, head, close_list) {
1587 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
1588 call_netdevice_notifiers(NETDEV_DOWN, dev);
1590 list_del_init(&dev->close_list);
1597 * @dev: device to shutdown
1604 void dev_close(struct net_device *dev)
1606 if (dev->flags & IFF_UP) {
1609 list_add(&dev->close_list, &single);
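
A minimal open/close sketch around the helpers above (both require RTNL; a NULL extack is fine when no netlink error reporting is wanted):

	int err;

	rtnl_lock();
	err = dev_open(dev, NULL);
	if (!err) {
		/* ... device is IFF_UP here ... */
		dev_close(dev);
	}
	rtnl_unlock();
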
1619 * @dev: device
1625 void dev_disable_lro(struct net_device *dev)
1630 dev->wanted_features &= ~NETIF_F_LRO;
1631 netdev_update_features(dev);
1633 if (unlikely(dev->features & NETIF_F_LRO))
1634 netdev_WARN(dev, "failed to disable LRO!\n");
1636 netdev_for_each_lower_dev(dev, lower_dev, iter)
1643 * @dev: device
1649 static void dev_disable_gro_hw(struct net_device *dev)
1651 dev->wanted_features &= ~NETIF_F_GRO_HW;
1652 netdev_update_features(dev);
1654 if (unlikely(dev->features & NETIF_F_GRO_HW))
1655 netdev_WARN(dev, "failed to disable GRO_HW!\n");
1683 struct net_device *dev)
1686 .dev = dev,
1693 struct net_device *dev)
1697 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1702 if (!(dev->flags & IFF_UP))
1705 call_netdevice_notifier(nb, NETDEV_UP, dev);
1710 struct net_device *dev)
1712 if (dev->flags & IFF_UP) {
1714 dev);
1715 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1717 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1723 struct net_device *dev;
1726 for_each_netdev(net, dev) {
1727 err = call_netdevice_register_notifiers(nb, dev);
1734 for_each_netdev_continue_reverse(net, dev)
1735 call_netdevice_unregister_notifiers(nb, dev);
1742 struct net_device *dev;
1744 for_each_netdev(net, dev)
1745 call_netdevice_unregister_notifiers(nb, dev);
1931 int register_netdevice_notifier_dev_net(struct net_device *dev,
1938 err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
1941 list_add(&nn->list, &dev->net_notifier_list);
1948 int unregister_netdevice_notifier_dev_net(struct net_device *dev,
1956 err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
1962 static void move_netdevice_notifiers_dev_net(struct net_device *dev,
1967 list_for_each_entry(nn, &dev->net_notifier_list, list)
1968 __move_netdevice_notifier_net(dev_net(dev), net, nn->nb);
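
The helpers above replay NETDEV_REGISTER/NETDEV_UP for devices that already exist when a notifier registers. A minimal sketch using the classic global entry point, register_netdevice_notifier():

	static int my_netdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);

		switch (event) {
		case NETDEV_REGISTER:
			pr_info("%s registered\n", dev->name);
			break;
		case NETDEV_UNREGISTER:
			pr_info("%s going away\n", dev->name);
			break;
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block my_nb = {
		.notifier_call = my_netdev_event,
	};

	/* module init/exit */
	err = register_netdevice_notifier(&my_nb);
	unregister_netdevice_notifier(&my_nb);
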
1983 struct net *net = dev_net(info->dev);
2016 struct net *net = dev_net(info->dev);
2025 struct net_device *dev,
2029 .dev = dev,
2039 * @dev: net_device pointer passed unmodified to notifier function
2045 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
2047 return call_netdevice_notifiers_extack(val, dev, NULL);
2054 * @dev: net_device pointer passed unmodified to notifier function
2061 struct net_device *dev, u32 arg)
2064 .info.dev = dev,
2172 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2174 return __is_skb_forwardable(dev, skb, true);
2178 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
2181 int ret = ____dev_forward_skb(dev, skb, check_mtu);
2184 skb->protocol = eth_type_trans(skb, dev);
2191 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2193 return __dev_forward_skb2(dev, skb, true);
2200 * @dev: destination network device
2215 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2217 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
2221 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
2223 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
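
dev_forward_skb() consumes the skb and injects it into the destination device's RX path, returning NET_RX_SUCCESS or NET_RX_DROP (e.g. on an MTU violation). A veth-style sketch (my_get_peer() is hypothetical):

	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct net_device *peer = my_get_peer(dev);	/* hypothetical */

		dev_forward_skb(peer, skb);
		return NETDEV_TX_OK;
	}
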
2233 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2270 * @dev: network device to check for the presence of taps
2272 bool dev_nit_active(struct net_device *dev)
2274 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
2283 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2303 deliver_skb(skb2, pt_prev, skb->dev);
2323 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2325 dev->name);
2335 ptype_list = &dev->ptype_all;
2341 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2351 * @dev: Network device
2362 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2365 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2369 netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2370 dev->num_tc = 0;
2376 int q = netdev_get_prio_tc_map(dev, i);
2378 tc = &dev->tc_to_txq[q];
2380 netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2382 netdev_set_prio_tc_map(dev, i, 0);
2387 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2389 if (dev->num_tc) {
2390 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2443 static bool remove_xps_queue_cpu(struct net_device *dev,
2465 static void reset_xps_maps(struct net_device *dev,
2473 RCU_INIT_POINTER(dev->xps_maps[type], NULL);
2478 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
2485 dev_maps = xmap_dereference(dev->xps_maps[type]);
2490 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
2492 reset_xps_maps(dev, dev_maps, type);
2497 netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
2501 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2511 clean_xps_maps(dev, XPS_RXQS, offset, count);
2513 clean_xps_maps(dev, XPS_CPUS, offset, count);
2519 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2521 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2584 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2595 WARN_ON_ONCE(index >= dev->num_tx_queues);
2597 if (dev->num_tc) {
2599 num_tc = dev->num_tc;
2603 /* If queue belongs to subordinate dev use its map */
2604 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2606 tc = netdev_txq_to_tc(dev, index);
2613 dev_maps = xmap_dereference(dev->xps_maps[type]);
2615 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2616 nr_ids = dev->num_rx_queues;
2628 * setting up now, as dev->num_tc or nr_ids could have been updated in
2702 rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);
2734 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2762 reset_xps_maps(dev, dev_maps, type);
2788 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2794 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
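
A minimal XPS sketch pinning transmit queue 0 to CPU 0 (queue/CPU choice illustrative):

	err = netif_set_xps_queue(dev, cpumask_of(0), 0);
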
2802 static void netdev_unbind_all_sb_channels(struct net_device *dev)
2804 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2807 while (txq-- != &dev->_tx[0]) {
2809 netdev_unbind_sb_channel(dev, txq->sb_dev);
2813 void netdev_reset_tc(struct net_device *dev)
2816 netif_reset_xps_queues_gt(dev, 0);
2818 netdev_unbind_all_sb_channels(dev);
2821 dev->num_tc = 0;
2822 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2823 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2827 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2829 if (tc >= dev->num_tc)
2833 netif_reset_xps_queues(dev, offset, count);
2835 dev->tc_to_txq[tc].count = count;
2836 dev->tc_to_txq[tc].offset = offset;
2841 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2847 netif_reset_xps_queues_gt(dev, 0);
2849 netdev_unbind_all_sb_channels(dev);
2851 dev->num_tc = num_tc;
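
A minimal sketch of carving TX queues into traffic classes with the helpers above, e.g. two classes over eight queues:

	netdev_set_num_tc(dev, 2);
	netdev_set_tc_queue(dev, 0, 4, 0);	/* tc0: queues 0-3 */
	netdev_set_tc_queue(dev, 1, 4, 4);	/* tc1: queues 4-7 */
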
2856 void netdev_unbind_sb_channel(struct net_device *dev,
2859 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2867 while (txq-- != &dev->_tx[0]) {
2874 int netdev_bind_sb_channel_queue(struct net_device *dev,
2878 /* Make certain the sb_dev and dev are already configured */
2879 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2883 if ((offset + count) > dev->real_num_tx_queues)
2894 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2900 int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2903 if (netif_is_multiqueue(dev))
2914 dev->num_tc = -channel;
2924 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2929 disabling = txq < dev->real_num_tx_queues;
2931 if (txq < 1 || txq > dev->num_tx_queues)
2934 if (dev->reg_state == NETREG_REGISTERED ||
2935 dev->reg_state == NETREG_UNREGISTERING) {
2938 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2943 if (dev->num_tc)
2944 netif_setup_tc(dev, txq);
2946 dev_qdisc_change_real_num_tx(dev, txq);
2948 dev->real_num_tx_queues = txq;
2952 qdisc_reset_all_tx_gt(dev, txq);
2954 netif_reset_xps_queues_gt(dev, txq);
2958 dev->real_num_tx_queues = txq;
2968 * @dev: Network device
2976 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2980 if (rxq < 1 || rxq > dev->num_rx_queues)
2983 if (dev->reg_state == NETREG_REGISTERED) {
2986 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2992 dev->real_num_rx_queues = rxq;
3000 * @dev: Network device
3007 int netif_set_real_num_queues(struct net_device *dev,
3010 unsigned int old_rxq = dev->real_num_rx_queues;
3013 if (txq < 1 || txq > dev->num_tx_queues ||
3014 rxq < 1 || rxq > dev->num_rx_queues)
3020 if (rxq > dev->real_num_rx_queues) {
3021 err = netif_set_real_num_rx_queues(dev, rxq);
3025 if (txq > dev->real_num_tx_queues) {
3026 err = netif_set_real_num_tx_queues(dev, txq);
3030 if (rxq < dev->real_num_rx_queues)
3031 WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
3032 if (txq < dev->real_num_tx_queues)
3033 WARN_ON(netif_set_real_num_tx_queues(dev, txq));
3037 WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq));
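
A minimal sketch of resizing the active queue set, e.g. from an ethtool set_channels handler (RTNL held; new_txq/new_rxq are illustrative):

	err = netif_set_real_num_queues(dev, new_txq, new_rxq);
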
3044 * @dev: netdev to update
3051 void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
3053 dev->tso_max_size = min(GSO_MAX_SIZE, size);
3054 if (size < READ_ONCE(dev->gso_max_size))
3055 netif_set_gso_max_size(dev, size);
3056 if (size < READ_ONCE(dev->gso_ipv4_max_size))
3057 netif_set_gso_ipv4_max_size(dev, size);
3063 * @dev: netdev to update
3070 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs)
3072 dev->tso_max_segs = segs;
3073 if (segs < READ_ONCE(dev->gso_max_segs))
3074 netif_set_gso_max_segs(dev, segs);
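
A minimal probe-time sketch of capping TSO to what the hardware can actually take (the values are illustrative):

	netif_set_tso_max_size(dev, 65536);
	netif_set_tso_max_segs(dev, 64);
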
3204 * @dev: network device
3208 void netif_device_detach(struct net_device *dev)
3210 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3211 netif_running(dev)) {
3212 netif_tx_stop_all_queues(dev);
3219 * @dev: network device
3223 void netif_device_attach(struct net_device *dev)
3225 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3226 netif_running(dev)) {
3227 netif_tx_wake_all_queues(dev);
3228 __netdev_watchdog_up(dev);
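
A minimal suspend/resume pairing for the detach/attach helpers above:

	static int my_suspend(struct device *d)
	{
		struct net_device *ndev = dev_get_drvdata(d);

		netif_device_detach(ndev);	/* stops queues if running */
		return 0;
	}

	static int my_resume(struct device *d)
	{
		struct net_device *ndev = dev_get_drvdata(d);

		netif_device_attach(ndev);	/* wakes queues, watchdog */
		return 0;
	}
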
3237 static u16 skb_tx_hash(const struct net_device *dev,
3243 u16 qcount = dev->real_num_tx_queues;
3245 if (dev->num_tc) {
3246 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3254 qcount = dev->real_num_tx_queues;
3274 struct net_device *dev = skb->dev;
3280 if (dev) {
3281 if (dev->dev.parent)
3282 name = dev_driver_string(dev->dev.parent);
3284 name = netdev_name(dev);
3288 name, dev ? &dev->features : &null_features,
3407 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3409 netdev_err(dev, "hw csum failure\n");
3414 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3416 DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
3422 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3427 if (!(dev->features & NETIF_F_HIGHDMA)) {
3448 features &= skb->dev->mpls_features;
3473 if (illegal_highdma(skb->dev, skb))
3480 struct net_device *dev,
3488 struct net_device *dev,
3495 struct net_device *dev,
3500 if (gso_segs > READ_ONCE(dev->gso_max_segs))
3503 if (unlikely(skb->len >= READ_ONCE(dev->gso_max_size)))
3518 features &= ~dev->gso_partial_features;
3536 struct net_device *dev = skb->dev;
3537 netdev_features_t features = dev->features;
3540 features = gso_features_check(skb, dev, features);
3547 features &= dev->hw_enc_features;
3551 dev->vlan_features |
3555 if (dev->netdev_ops->ndo_features_check)
3556 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3559 features &= dflt_features_check(skb, dev, features);
3565 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3571 if (dev_nit_active(dev))
3572 dev_queue_xmit_nit(skb, dev);
3575 trace_net_dev_start_xmit(skb, dev);
3576 rc = netdev_start_xmit(skb, dev, txq, more);
3577 trace_net_dev_xmit(skb, rc, dev, len);
3582 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3592 rc = xmit_one(skb, dev, txq, next != NULL);
3641 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3650 skb = sk_validate_xmit_skb(skb, dev);
3692 dev_core_stats_tx_dropped_inc(dev);
3696 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3707 skb = validate_xmit_skb(skb, dev, again);
3778 struct net_device *dev,
3803 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
3849 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3887 map = rcu_dereference_bh(skb->dev->priomap);
3925 netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
3929 return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
4011 struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress);
4064 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4066 struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress);
4114 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4121 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
4124 int tc = netdev_get_prio_tc_map(dev, skb->priority);
4141 if (unlikely(queue_index >= dev->real_num_tx_queues))
4148 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
4168 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4178 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4190 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
4197 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
4200 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
4204 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
4210 sb_dev = sb_dev ? : dev;
4213 queue_index >= dev->real_num_tx_queues) {
4214 int new_index = get_xps_queue(dev, sb_dev, skb);
4217 new_index = skb_tx_hash(dev, sb_dev, skb);
4231 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
4244 if (dev->real_num_tx_queues != 1) {
4245 const struct net_device_ops *ops = dev->netdev_ops;
4248 queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4250 queue_index = netdev_pick_tx(dev, skb, sb_dev);
4252 queue_index = netdev_cap_txqueue(dev, queue_index);
4256 return netdev_get_tx_queue(dev, queue_index);
4282 struct net_device *dev = skb->dev;
4306 skb = nf_hook_egress(skb, &rc, dev);
4314 skb = sch_handle_egress(skb, &rc, dev);
4320 txq = netdev_tx_queue_mapping(dev, skb);
4326 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4332 txq = netdev_core_pick_tx(dev, skb, sb_dev);
4338 rc = __dev_xmit_skb(skb, q, dev, txq);
4354 if (dev->flags & IFF_UP) {
4364 skb = validate_xmit_skb(skb, dev, &again);
4368 HARD_TX_LOCK(dev, txq, cpu);
4372 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4375 HARD_TX_UNLOCK(dev, txq);
4379 HARD_TX_UNLOCK(dev, txq);
4381 dev->name);
4388 dev->name);
4395 dev_core_stats_tx_dropped_inc(dev);
4406 struct net_device *dev = skb->dev;
4412 if (unlikely(!netif_running(dev) ||
4413 !netif_carrier_ok(dev)))
4416 skb = validate_xmit_skb_list(skb, dev, &again);
4421 txq = skb_get_tx_queue(dev, skb);
4426 HARD_TX_LOCK(dev, txq, smp_processor_id());
4428 ret = netdev_start_xmit(skb, dev, txq, false);
4429 HARD_TX_UNLOCK(dev, txq);
4435 dev_core_stats_tx_dropped_inc(dev);
4511 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4524 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4525 !(dev->features & NETIF_F_NTUPLE))
4527 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4531 rxqueue = dev->_rx + rxq_index;
4536 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4560 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4564 struct netdev_rx_queue *rxqueue = dev->_rx;
4574 if (unlikely(index >= dev->real_num_rx_queues)) {
4575 WARN_ONCE(dev->real_num_rx_queues > 1,
4578 dev->name, index, dev->real_num_rx_queues);
4633 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4661 * @dev: Device on which the filter was set
4670 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4673 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4803 if (!netif_running(skb->dev))
4828 dev_core_stats_rx_dropped_inc(skb->dev);
4835 struct net_device *dev = skb->dev;
4838 rxqueue = dev->_rx;
4843 if (unlikely(index >= dev->real_num_rx_queues)) {
4844 WARN_ONCE(dev->real_num_rx_queues > 1,
4847 dev->name, index, dev->real_num_rx_queues);
4886 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
4915 skb->dev->dev_addr)) ||
4919 skb->protocol = eth_type_trans(skb, skb->dev);
4983 bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
4986 trace_xdp_exception(skb->dev, xdp_prog, act);
5005 struct net_device *dev = skb->dev;
5010 txq = netdev_core_pick_tx(dev, skb, NULL);
5012 HARD_TX_LOCK(dev, txq, cpu);
5014 rc = netdev_start_xmit(skb, dev, txq, 0);
5018 HARD_TX_UNLOCK(dev, txq);
5020 trace_xdp_exception(dev, xdp_prog, XDP_TX);
5021 dev_core_stats_tx_dropped_inc(dev);
5039 err = xdp_do_generic_redirect(skb->dev, skb,
5073 cpu = get_rps_cpu(skb->dev, skb, &rflow);
5231 int (*br_fdb_test_addr_hook)(struct net_device *dev,
5238 * @dev: device to check
5245 bool netdev_is_rx_handler_busy(struct net_device *dev)
5248 return dev && rtnl_dereference(dev->rx_handler);
5254 * @dev: device to register a handler for
5266 int netdev_rx_handler_register(struct net_device *dev,
5270 if (netdev_is_rx_handler_busy(dev))
5273 if (dev->priv_flags & IFF_NO_RX_HANDLER)
5277 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
5278 rcu_assign_pointer(dev->rx_handler, rx_handler);
5286 * @dev: device to unregister a handler from
5292 void netdev_rx_handler_unregister(struct net_device *dev)
5296 RCU_INIT_POINTER(dev->rx_handler, NULL);
5302 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
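
A minimal rx_handler sketch in the style of bridge/bonding (my_wants()/my_enqueue()/my_priv are hypothetical; registration needs RTNL and fails with -EBUSY if a handler is already set):

	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
	{
		struct sk_buff *skb = *pskb;

		if (!my_wants(skb))
			return RX_HANDLER_PASS;	/* normal delivery */

		my_enqueue(skb);
		return RX_HANDLER_CONSUMED;
	}

	err = netdev_rx_handler_register(dev, my_handle_frame, my_priv);
	/* ... */
	netdev_rx_handler_unregister(dev);
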
5358 orig_dev = skb->dev;
5368 skb->skb_iif = skb->dev->ifindex;
5376 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5403 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5443 rx_handler = rcu_dereference(skb->dev->rx_handler);
5465 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
5475 * not find vlan dev for vlan id 0.
5483 * vlan dev is found for inner header.
5490 * But could not find vlan dev.
5514 if (unlikely(skb->dev != orig_dev)) {
5516 &skb->dev->ptype_specific);
5526 dev_core_stats_rx_dropped_inc(skb->dev);
5528 dev_core_stats_rx_nohandler_inc(skb->dev);
5549 struct net_device *orig_dev = skb->dev;
5556 skb->dev, pt_prev, orig_dev);
5603 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5627 struct net_device *orig_dev = skb->dev;
5704 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5706 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5712 rcu_assign_pointer(dev->xdp_prog, new);
5720 dev_disable_lro(dev);
5721 dev_disable_gro_hw(dev);
5746 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5779 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5859 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5868 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
6112 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6113 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
6117 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6223 napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
6224 timeout = READ_ONCE(napi->dev->gro_flush_timeout);
6297 __NET_ADD_STATS(dev_net(napi->dev),
6389 int dev_set_threaded(struct net_device *dev, bool threaded)
6394 if (dev->threaded == threaded)
6398 list_for_each_entry(napi, &dev->napi_list, dev_list) {
6409 dev->threaded = threaded;
6422 list_for_each_entry(napi, &dev->napi_list, dev_list)
6429 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
6445 netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6448 napi->dev = dev;
6455 list_add_rcu(&napi->dev_list, &dev->napi_list);
6458 /* Create kthread for this napi if dev->threaded is set.
6459 * Clear dev->threaded if kthread creation failed so that
6462 if (dev->threaded && napi_kthread_create(napi))
6463 dev->threaded = 0;
6506 if (n->dev->threaded && n->thread)
6564 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
6607 n->dev ? n->dev->name : "backlog");
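
A minimal NAPI sketch for the add/weight API above (my_clean_rx() and priv are hypothetical):

	static int my_poll(struct napi_struct *napi, int budget)
	{
		int work = my_clean_rx(napi, budget);	/* hypothetical */

		if (work < budget)
			napi_complete_done(napi, work);
		return work;
	}

	netif_napi_add_weight(dev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);
	dev_set_threaded(dev, true);	/* optional: per-NAPI kthreads */
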
6790 struct net_device *dev;
6815 if (adj->dev == adj_dev)
6824 struct net_device *dev = (struct net_device *)priv->data;
6826 return upper_dev == dev;
6831 * @dev: device
6838 bool netdev_has_upper_dev(struct net_device *dev,
6847 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6854 * @dev: device
6862 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
6869 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6876 * @dev: device
6881 bool netdev_has_any_upper_dev(struct net_device *dev)
6885 return !list_empty(&dev->adj_list.upper);
6891 * @dev: device
6896 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
6902 if (list_empty(&dev->adj_list.upper))
6905 upper = list_first_entry(&dev->adj_list.upper,
6908 return upper->dev;
6913 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
6919 if (list_empty(&dev->adj_list.upper))
6922 upper = list_first_entry(&dev->adj_list.upper,
6925 return upper->dev;
6931 * @dev: device
6936 static bool netdev_has_any_lower_dev(struct net_device *dev)
6940 return !list_empty(&dev->adj_list.lower);
6954 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
6955 * @dev: device
6958 * Gets the next device from the dev's upper list, starting from iter
6961 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
6970 if (&upper->list == &dev->adj_list.upper)
6975 return upper->dev;
6979 static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
6987 if (&upper->list == &dev->adj_list.upper)
6993 return upper->dev;
6996 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
7005 if (&upper->list == &dev->adj_list.upper)
7010 return upper->dev;
7013 static int __netdev_walk_all_upper_dev(struct net_device *dev,
7014 int (*fn)(struct net_device *dev,
7023 now = dev;
7024 iter = &dev->adj_list.upper;
7027 if (now != dev) {
7062 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
7063 int (*fn)(struct net_device *dev,
7071 now = dev;
7072 iter = &dev->adj_list.upper;
7075 if (now != dev) {
7109 static bool __netdev_has_upper_dev(struct net_device *dev,
7119 return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
7126 * @dev: device
7129 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7134 void *netdev_lower_get_next_private(struct net_device *dev,
7141 if (&lower->list == &dev->adj_list.lower)
7154 * @dev: device
7157 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7160 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
7169 if (&lower->list == &dev->adj_list.lower)
7181 * @dev: device
7184 * Gets the next netdev_adjacent from the dev's lower neighbour
7189 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
7195 if (&lower->list == &dev->adj_list.lower)
7200 return lower->dev;
7204 static struct net_device *netdev_next_lower_dev(struct net_device *dev,
7211 if (&lower->list == &dev->adj_list.lower)
7216 return lower->dev;
7219 static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
7227 if (&lower->list == &dev->adj_list.lower)
7233 return lower->dev;
7236 int netdev_walk_all_lower_dev(struct net_device *dev,
7237 int (*fn)(struct net_device *dev,
7245 now = dev;
7246 iter = &dev->adj_list.lower;
7249 if (now != dev) {
7283 static int __netdev_walk_all_lower_dev(struct net_device *dev,
7284 int (*fn)(struct net_device *dev,
7293 now = dev;
7294 iter = &dev->adj_list.lower;
7297 if (now != dev) {
7332 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
7338 if (&lower->list == &dev->adj_list.lower)
7343 return lower->dev;
7347 static u8 __netdev_upper_depth(struct net_device *dev)
7354 for (iter = &dev->adj_list.upper,
7355 udev = __netdev_next_upper_dev(dev, &iter, &ignore);
7357 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
7367 static u8 __netdev_lower_depth(struct net_device *dev)
7374 for (iter = &dev->adj_list.lower,
7375 ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
7377 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
7387 static int __netdev_update_upper_level(struct net_device *dev,
7390 dev->upper_level = __netdev_upper_depth(dev) + 1;
7397 static void net_unlink_todo(struct net_device *dev)
7399 if (list_empty(&dev->unlink_list))
7400 list_add_tail(&dev->unlink_list, &net_unlink_list);
7404 static int __netdev_update_lower_level(struct net_device *dev,
7407 dev->lower_level = __netdev_lower_depth(dev) + 1;
7414 dev->nested_level = dev->lower_level - 1;
7416 net_unlink_todo(dev);
7421 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
7422 int (*fn)(struct net_device *dev,
7430 now = dev;
7431 iter = &dev->adj_list.lower;
7434 if (now != dev) {
7472 * @dev: device
7474 * Gets the first netdev_adjacent->private from the dev's lower neighbour
7477 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
7481 lower = list_first_or_null_rcu(&dev->adj_list.lower,
7491 * @dev: device
7496 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
7500 upper = list_first_or_null_rcu(&dev->adj_list.upper,
7503 return upper->dev;
7508 static int netdev_adjacent_sysfs_add(struct net_device *dev,
7514 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7516 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
7519 static void netdev_adjacent_sysfs_del(struct net_device *dev,
7525 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7527 sysfs_remove_link(&(dev->dev.kobj), linkname);
7530 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
7534 return (dev_list == &dev->adj_list.upper ||
7535 dev_list == &dev->adj_list.lower) &&
7536 net_eq(dev_net(dev), dev_net(adj_dev));
7539 static int __netdev_adjacent_dev_insert(struct net_device *dev,
7551 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7552 dev->name, adj_dev->name, adj->ref_nr);
7561 adj->dev = adj_dev;
7568 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7569 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
7571 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
7572 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
7579 ret = sysfs_create_link(&(dev->dev.kobj),
7580 &(adj_dev->dev.kobj), "master");
7592 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7593 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7601 static void __netdev_adjacent_dev_remove(struct net_device *dev,
7608 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
7609 dev->name, adj_dev->name, ref_nr);
7615 dev->name, adj_dev->name);
7622 dev->name, adj_dev->name, ref_nr,
7629 sysfs_remove_link(&(dev->dev.kobj), "master");
7631 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7632 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7636 adj_dev->name, dev->name, adj_dev->name);
7641 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
7649 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
7654 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
7657 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
7664 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
7670 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
7671 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
7674 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
7678 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
7679 &dev->adj_list.upper,
7684 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
7687 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
7688 &dev->adj_list.upper,
7692 static int __netdev_upper_dev_link(struct net_device *dev,
7700 .dev = dev,
7713 if (dev == upper_dev)
7716 /* To prevent loops, check if dev is not upper device to upper_dev. */
7717 if (__netdev_has_upper_dev(upper_dev, dev))
7720 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
7724 if (__netdev_has_upper_dev(dev, upper_dev))
7727 master_dev = __netdev_master_upper_dev_get(dev);
7738 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
7749 __netdev_update_upper_level(dev, NULL);
7750 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7759 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7766 * @dev: device
7775 int netdev_upper_dev_link(struct net_device *dev,
7784 return __netdev_upper_dev_link(dev, upper_dev, false,
7791 * @dev: device
7803 int netdev_master_upper_dev_link(struct net_device *dev,
7813 return __netdev_upper_dev_link(dev, upper_dev, true,
7818 static void __netdev_upper_dev_unlink(struct net_device *dev,
7824 .dev = dev,
7832 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
7837 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7842 __netdev_update_upper_level(dev, NULL);
7843 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7852 * @dev: device
7858 void netdev_upper_dev_unlink(struct net_device *dev,
7866 __netdev_upper_dev_unlink(dev, upper_dev, &priv);
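
A minimal sketch of enslaving a lower device under a master, as bonding/bridge do with the link helpers above (RTNL held; the NULLs stand for unused upper_priv/upper_info/extack):

	err = netdev_master_upper_dev_link(lower, master, NULL, NULL, NULL);
	if (!err) {
		/* ... later ... */
		netdev_upper_dev_unlink(lower, master);
	}
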
7899 struct net_device *dev,
7912 netdev_adjacent_dev_disable(dev, old_dev);
7913 err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
7917 netdev_adjacent_dev_enable(dev, old_dev);
7927 struct net_device *dev)
7940 netdev_adjacent_dev_enable(dev, old_dev);
7941 __netdev_upper_dev_unlink(old_dev, dev, &priv);
7947 struct net_device *dev)
7958 netdev_adjacent_dev_enable(dev, old_dev);
7960 __netdev_upper_dev_unlink(new_dev, dev, &priv);
7966 * @dev: device
7972 void netdev_bonding_info_change(struct net_device *dev,
7976 .info.dev = dev,
7986 static int netdev_offload_xstats_enable_l3(struct net_device *dev,
7990 .info.dev = dev,
7997 dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3),
7999 if (!dev->offload_xstats_l3)
8012 kfree(dev->offload_xstats_l3);
8013 dev->offload_xstats_l3 = NULL;
8017 int netdev_offload_xstats_enable(struct net_device *dev,
8023 if (netdev_offload_xstats_enabled(dev, type))
8028 return netdev_offload_xstats_enable_l3(dev, extack);
8036 static void netdev_offload_xstats_disable_l3(struct net_device *dev)
8039 .info.dev = dev,
8045 kfree(dev->offload_xstats_l3);
8046 dev->offload_xstats_l3 = NULL;
8049 int netdev_offload_xstats_disable(struct net_device *dev,
8054 if (!netdev_offload_xstats_enabled(dev, type))
8059 netdev_offload_xstats_disable_l3(dev);
8068 static void netdev_offload_xstats_disable_all(struct net_device *dev)
8070 netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
8074 netdev_offload_xstats_get_ptr(const struct net_device *dev,
8079 return dev->offload_xstats_l3;
8086 bool netdev_offload_xstats_enabled(const struct net_device *dev,
8091 return netdev_offload_xstats_get_ptr(dev, type);
8118 static int netdev_offload_xstats_get_used(struct net_device *dev,
8125 .info.dev = dev,
8132 WARN_ON(!netdev_offload_xstats_enabled(dev, type));
8139 static int netdev_offload_xstats_get_stats(struct net_device *dev,
8147 .info.dev = dev,
8155 stats = netdev_offload_xstats_get_ptr(dev, type);
8174 int netdev_offload_xstats_get(struct net_device *dev,
8182 return netdev_offload_xstats_get_stats(dev, type, p_stats,
8185 return netdev_offload_xstats_get_used(dev, type, p_used,
8206 void netdev_offload_xstats_push_delta(struct net_device *dev,
8214 stats = netdev_offload_xstats_get_ptr(dev, type);
8224 * @dev: device
8233 struct net_device *netdev_get_xmit_slave(struct net_device *dev,
8237 const struct net_device_ops *ops = dev->netdev_ops;
8241 return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
8245 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
8248 const struct net_device_ops *ops = dev->netdev_ops;
8252 return ops->ndo_sk_get_lower_dev(dev, sk);
8257 * @dev: device
8263 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
8268 lower = netdev_sk_get_lower_dev(dev, sk);
8270 dev = lower;
8271 lower = netdev_sk_get_lower_dev(dev, sk);
8274 return dev;
8278 static void netdev_adjacent_add_links(struct net_device *dev)
8282 struct net *net = dev_net(dev);
8284 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8285 if (!net_eq(net, dev_net(iter->dev)))
8287 netdev_adjacent_sysfs_add(iter->dev, dev,
8288 &iter->dev->adj_list.lower);
8289 netdev_adjacent_sysfs_add(dev, iter->dev,
8290 &dev->adj_list.upper);
8293 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8294 if (!net_eq(net, dev_net(iter->dev)))
8296 netdev_adjacent_sysfs_add(iter->dev, dev,
8297 &iter->dev->adj_list.upper);
8298 netdev_adjacent_sysfs_add(dev, iter->dev,
8299 &dev->adj_list.lower);
8303 static void netdev_adjacent_del_links(struct net_device *dev)
8307 struct net *net = dev_net(dev);
8309 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8310 if (!net_eq(net, dev_net(iter->dev)))
8312 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8313 &iter->dev->adj_list.lower);
8314 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8315 &dev->adj_list.upper);
8318 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8319 if (!net_eq(net, dev_net(iter->dev)))
8321 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8322 &iter->dev->adj_list.upper);
8323 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8324 &dev->adj_list.lower);
8328 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
8332 struct net *net = dev_net(dev);
8334 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8335 if (!net_eq(net, dev_net(iter->dev)))
8337 netdev_adjacent_sysfs_del(iter->dev, oldname,
8338 &iter->dev->adj_list.lower);
8339 netdev_adjacent_sysfs_add(iter->dev, dev,
8340 &iter->dev->adj_list.lower);
8343 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8344 if (!net_eq(net, dev_net(iter->dev)))
8346 netdev_adjacent_sysfs_del(iter->dev, oldname,
8347 &iter->dev->adj_list.upper);
8348 netdev_adjacent_sysfs_add(iter->dev, dev,
8349 &iter->dev->adj_list.upper);
8353 void *netdev_lower_dev_get_private(struct net_device *dev,
8360 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
8381 .info.dev = lower_dev,
8391 static void dev_change_rx_flags(struct net_device *dev, int flags)
8393 const struct net_device_ops *ops = dev->netdev_ops;
8396 ops->ndo_change_rx_flags(dev, flags);
8399 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
8401 unsigned int old_flags = dev->flags;
8407 dev->flags |= IFF_PROMISC;
8408 dev->promiscuity += inc;
8409 if (dev->promiscuity == 0) {
8415 dev->flags &= ~IFF_PROMISC;
8417 dev->promiscuity -= inc;
8418 netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
8422 if (dev->flags != old_flags) {
8423 netdev_info(dev, "%s promiscuous mode\n",
8424 dev->flags & IFF_PROMISC ? "entered" : "left");
8429 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
8430 dev->name, (dev->flags & IFF_PROMISC),
8438 dev_change_rx_flags(dev, IFF_PROMISC);
8441 __dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL);
8447 * @dev: device
8456 int dev_set_promiscuity(struct net_device *dev, int inc)
8458 unsigned int old_flags = dev->flags;
8461 err = __dev_set_promiscuity(dev, inc, true);
8464 if (dev->flags != old_flags)
8465 dev_set_rx_mode(dev);
8470 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
8472 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
8476 dev->flags |= IFF_ALLMULTI;
8477 dev->allmulti += inc;
8478 if (dev->allmulti == 0) {
8484 dev->flags &= ~IFF_ALLMULTI;
8486 dev->allmulti -= inc;
8487 netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
8491 if (dev->flags ^ old_flags) {
8492 netdev_info(dev, "%s allmulticast mode\n",
8493 dev->flags & IFF_ALLMULTI ? "entered" : "left");
8494 dev_change_rx_flags(dev, IFF_ALLMULTI);
8495 dev_set_rx_mode(dev);
8497 __dev_notify_flags(dev, old_flags,
8498 dev->gflags ^ old_gflags, 0, NULL);
8505 * @dev: device
8516 int dev_set_allmulti(struct net_device *dev, int inc)
8518 return __dev_set_allmulti(dev, inc, true);
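
A minimal sketch of the reference-counted promiscuity/allmulti toggles (RTNL held; each +1 must eventually be balanced by a -1):

	err = dev_set_promiscuity(dev, 1);
	/* ... later ... */
	dev_set_promiscuity(dev, -1);
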
8528 void __dev_set_rx_mode(struct net_device *dev)
8530 const struct net_device_ops *ops = dev->netdev_ops;
8533 if (!(dev->flags&IFF_UP))
8536 if (!netif_device_present(dev))
8539 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
8543 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
8544 __dev_set_promiscuity(dev, 1, false);
8545 dev->uc_promisc = true;
8546 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
8547 __dev_set_promiscuity(dev, -1, false);
8548 dev->uc_promisc = false;
8553 ops->ndo_set_rx_mode(dev);
8556 void dev_set_rx_mode(struct net_device *dev)
8558 netif_addr_lock_bh(dev);
8559 __dev_set_rx_mode(dev);
8560 netif_addr_unlock_bh(dev);
8565 * @dev: device
8569 unsigned int dev_get_flags(const struct net_device *dev)
8573 flags = (dev->flags & ~(IFF_PROMISC |
8578 (dev->gflags & (IFF_PROMISC |
8581 if (netif_running(dev)) {
8582 if (netif_oper_up(dev))
8584 if (netif_carrier_ok(dev))
8586 if (netif_dormant(dev))
8594 int __dev_change_flags(struct net_device *dev, unsigned int flags,
8597 unsigned int old_flags = dev->flags;
8606 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8609 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8617 dev_change_rx_flags(dev, IFF_MULTICAST);
8619 dev_set_rx_mode(dev);
8630 __dev_close(dev);
8632 ret = __dev_open(dev, extack);
8635 if ((flags ^ dev->gflags) & IFF_PROMISC) {
8637 unsigned int old_flags = dev->flags;
8639 dev->gflags ^= IFF_PROMISC;
8641 if (__dev_set_promiscuity(dev, inc, false) >= 0)
8642 if (dev->flags != old_flags)
8643 dev_set_rx_mode(dev);
8650 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
8653 dev->gflags ^= IFF_ALLMULTI;
8654 __dev_set_allmulti(dev, inc, false);
8660 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
8664 unsigned int changes = dev->flags ^ old_flags;
8667 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh);
8670 if (dev->flags & IFF_UP)
8671 call_netdevice_notifiers(NETDEV_UP, dev);
8673 call_netdevice_notifiers(NETDEV_DOWN, dev);
8676 if (dev->flags & IFF_UP &&
8680 .dev = dev,
8691 * @dev: device
8698 int dev_change_flags(struct net_device *dev, unsigned int flags,
8702 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
8704 ret = __dev_change_flags(dev, flags, extack);
8708 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
8709 __dev_notify_flags(dev, old_flags, changes, 0, NULL);
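
A minimal flags sketch, setting IFF_UP via the flags interface instead of dev_open() (RTNL held):

	err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
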
8714 int __dev_set_mtu(struct net_device *dev, int new_mtu)
8716 const struct net_device_ops *ops = dev->netdev_ops;
8719 return ops->ndo_change_mtu(dev, new_mtu);
8721 /* Pairs with all the lockless reads of dev->mtu in the stack */
8722 WRITE_ONCE(dev->mtu, new_mtu);
8727 int dev_validate_mtu(struct net_device *dev, int new_mtu,
8731 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
8736 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
8745 * @dev: device
8751 int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
8756 if (new_mtu == dev->mtu)
8759 err = dev_validate_mtu(dev, new_mtu, extack);
8763 if (!netif_device_present(dev))
8766 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
8771 orig_mtu = dev->mtu;
8772 err = __dev_set_mtu(dev, new_mtu);
8775 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8782 __dev_set_mtu(dev, orig_mtu);
8783 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8790 int dev_set_mtu(struct net_device *dev, int new_mtu)
8796 err = dev_set_mtu_ext(dev, new_mtu, &extack);
8798 net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
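
A minimal MTU-change sketch (RTNL held; 9000 is illustrative and is rejected if outside the device's min_mtu..max_mtu window):

	err = dev_set_mtu(dev, 9000);
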
8805 * @dev: device
8808 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
8810 unsigned int orig_len = dev->tx_queue_len;
8817 dev->tx_queue_len = new_len;
8818 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
8822 res = dev_qdisc_change_tx_queue_len(dev);
8830 netdev_err(dev, "refused to change device tx_queue_len\n");
8831 dev->tx_queue_len = orig_len;
8837 * @dev: device
8840 void dev_set_group(struct net_device *dev, int new_group)
8842 dev->group = new_group;
8847 * @dev: device
8851 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
8855 .info.dev = dev,
8868 * @dev: device
8874 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
8877 const struct net_device_ops *ops = dev->netdev_ops;
8882 if (sa->sa_family != dev->type)
8884 if (!netif_device_present(dev))
8886 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
8889 if (memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) {
8890 err = ops->ndo_set_mac_address(dev, sa);
8894 dev->addr_assign_type = NET_ADDR_SET;
8895 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
8896 add_device_randomness(dev->dev_addr, dev->addr_len);
8903 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
8909 ret = dev_set_mac_address(dev, sa, extack);
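
A minimal MAC-change sketch (RTNL held; new_mac is a hypothetical u8[ETH_ALEN]):

	struct sockaddr sa;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, new_mac, ETH_ALEN);
	err = dev_set_mac_address(dev, &sa, NULL);
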
8918 struct net_device *dev;
8924 dev = dev_get_by_name_rcu(net, dev_name);
8925 if (!dev) {
8929 if (!dev->addr_len)
8932 memcpy(sa->sa_data, dev->dev_addr,
8933 min_t(size_t, size, dev->addr_len));
8934 sa->sa_family = dev->type;
8945 * @dev: device
8950 int dev_change_carrier(struct net_device *dev, bool new_carrier)
8952 const struct net_device_ops *ops = dev->netdev_ops;
8956 if (!netif_device_present(dev))
8958 return ops->ndo_change_carrier(dev, new_carrier);
8963 * @dev: device
8968 int dev_get_phys_port_id(struct net_device *dev,
8971 const struct net_device_ops *ops = dev->netdev_ops;
8975 return ops->ndo_get_phys_port_id(dev, ppid);
8980 * @dev: device
8986 int dev_get_phys_port_name(struct net_device *dev,
8989 const struct net_device_ops *ops = dev->netdev_ops;
8993 err = ops->ndo_get_phys_port_name(dev, name, len);
8997 return devlink_compat_phys_port_name_get(dev, name, len);
9002 * @dev: network device
9008 int dev_get_port_parent_id(struct net_device *dev,
9012 const struct net_device_ops *ops = dev->netdev_ops;
9019 err = ops->ndo_get_port_parent_id(dev, ppid);
9024 err = devlink_compat_switch_id_get(dev, ppid);
9028 netdev_for_each_lower_dev(dev, lower_dev, iter) {
9064 * @dev: device
9067 int dev_change_proto_down(struct net_device *dev, bool proto_down)
9069 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
9071 if (!netif_device_present(dev))
9074 netif_carrier_off(dev);
9076 netif_carrier_on(dev);
9077 dev->proto_down = proto_down;
9084 * @dev: device
9088 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
9094 dev->proto_down_reason = value;
9098 dev->proto_down_reason |= BIT(b);
9100 dev->proto_down_reason &= ~BIT(b);
9107 struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
9111 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
9119 return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
9122 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
9129 return dev->netdev_ops->ndo_bpf;
9135 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
9138 return dev->xdp_state[mode].link;
9141 static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
9144 struct bpf_xdp_link *link = dev_xdp_link(dev, mode);
9148 return dev->xdp_state[mode].prog;
9151 u8 dev_xdp_prog_count(struct net_device *dev)
9157 if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
9163 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
9165 struct bpf_prog *prog = dev_xdp_prog(dev, mode);
9170 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
9173 dev->xdp_state[mode].link = link;
9174 dev->xdp_state[mode].prog = NULL;
9177 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
9180 dev->xdp_state[mode].link = NULL;
9181 dev->xdp_state[mode].prog = prog;
9184 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
9205 err = bpf_op(dev, &xdp);
9213 bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);
9218 static void dev_xdp_uninstall(struct net_device *dev)
9228 prog = dev_xdp_prog(dev, mode);
9232 bpf_op = dev_xdp_bpf_op(dev, mode);
9236 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9239 link = dev_xdp_link(dev, mode);
9241 link->dev = NULL;
9245 dev_xdp_set_link(dev, mode, NULL);
9249 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
9277 if (!num_modes && dev_xdp_prog_count(dev) > 1) {
9288 mode = dev_xdp_mode(dev, flags);
9290 if (dev_xdp_link(dev, mode)) {
9296 netdev_for_each_upper_dev_rcu(dev, upper, iter) {
9303 cur_prog = dev_xdp_prog(dev, mode);
9327 if (!offload && dev_xdp_prog(dev, other_mode)) {
9335 if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) {
9351 bpf_op = dev_xdp_bpf_op(dev, mode);
9357 err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
9363 dev_xdp_set_link(dev, mode, link);
9365 dev_xdp_set_prog(dev, mode, new_prog);
9372 static int dev_xdp_attach_link(struct net_device *dev,
9376 return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
9379 static int dev_xdp_detach_link(struct net_device *dev,
9388 mode = dev_xdp_mode(dev, link->flags);
9389 if (dev_xdp_link(dev, mode) != link)
9392 bpf_op = dev_xdp_bpf_op(dev, mode);
9393 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9394 dev_xdp_set_link(dev, mode, NULL);
9404 /* if racing with net_device's tear down, xdp_link->dev might be
9407 if (xdp_link->dev) {
9408 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
9409 xdp_link->dev = NULL;
9435 if (xdp_link->dev)
9436 ifindex = xdp_link->dev->ifindex;
9449 if (xdp_link->dev)
9450 ifindex = xdp_link->dev->ifindex;
9468 if (!xdp_link->dev) {
9490 mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
9491 bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
9492 err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
9520 struct net_device *dev;
9524 dev = dev_get_by_index(net, attr->link_create.target_ifindex);
9525 if (!dev) {
9537 link->dev = dev;
9546 err = dev_xdp_attach_link(dev, &extack, link);
9550 link->dev = NULL;
9557 /* link itself doesn't hold dev's refcnt to not complicate shutdown */
9558 dev_put(dev);
9565 dev_put(dev);
9571 * @dev: device
9579 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
9582 enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
9605 err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);
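
On the driver side, dev_xdp_install() above lands in ndo_bpf. A minimal sketch of the driver hook (my_xdp_setup() is hypothetical):

	static int my_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
	{
		switch (bpf->command) {
		case XDP_SETUP_PROG:
			return my_xdp_setup(dev, bpf->prog, bpf->extack);
		default:
			return -EINVAL;
		}
	}
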
9656 static void net_set_todo(struct net_device *dev)
9658 list_add_tail(&dev->todo_list, &net_todo_list);
9659 atomic_inc(&dev_net(dev)->dev_unreg_count);
9673 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
9692 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
9706 static netdev_features_t netdev_fix_features(struct net_device *dev,
9712 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
9718 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
9724 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
9731 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
9745 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
9750 if ((features & dev->gso_partial_features) &&
9752 netdev_dbg(dev,
9754 features &= ~dev->gso_partial_features;
9764 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
9772 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
9777 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
9783 netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n");
9793 netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
9799 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
9806 int __netdev_update_features(struct net_device *dev)
9815 features = netdev_get_wanted_features(dev);
9817 if (dev->netdev_ops->ndo_fix_features)
9818 features = dev->netdev_ops->ndo_fix_features(dev, features);
9821 features = netdev_fix_features(dev, features);
9824 netdev_for_each_upper_dev_rcu(dev, upper, iter)
9825 features = netdev_sync_upper_features(dev, upper, features);
9827 if (dev->features == features)
9830 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
9831 &dev->features, &features);
9833 if (dev->netdev_ops->ndo_set_features)
9834 err = dev->netdev_ops->ndo_set_features(dev, features);
9839 netdev_err(dev,
9840 	"set_features() failed (%d); wanted %pNF, left %pNF\n",
9841 	err, &features, &dev->features);
9852 netdev_for_each_lower_dev(dev, lower, iter)
9853 netdev_sync_lower_features(dev, lower, features);
9856 netdev_features_t diff = features ^ dev->features;
9862 * Thus we need to update dev->features
9863 * *before* calling udp_tunnel_get_rx_info,
9864 * but *after* calling udp_tunnel_drop_rx_info.
9867 dev->features = features;
9868 udp_tunnel_get_rx_info(dev);
9870 udp_tunnel_drop_rx_info(dev);
9876 dev->features = features;
9877 err |= vlan_get_rx_ctag_filter_info(dev);
9879 vlan_drop_rx_ctag_filter_info(dev);
9885 dev->features = features;
9886 err |= vlan_get_rx_stag_filter_info(dev);
9888 vlan_drop_rx_stag_filter_info(dev);
9892 dev->features = features;
9900 * @dev: the device to check
9902 * Recalculate dev->features set and send notifications if it
9903 * has changed. Should be called after driver or hardware dependent
9904 * conditions might have changed that influence the features.
9906 void netdev_update_features(struct net_device *dev)
9908 if (__netdev_update_features(dev))
9909 netdev_features_change(dev);
9915 * @dev: the device to check
9917 * Recalculate dev->features set and send notifications even
9918 * though it hasn't changed. Should be called instead of
9919 * netdev_update_features() if also dev->vlan_features might
9920 * have changed to allow the changes to be propagated to stacked
9921 * VLAN devices.
9923 void netdev_change_features(struct net_device *dev)
9925 __netdev_update_features(dev);
9926 netdev_features_change(dev);
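For the driver side of this pipeline, a minimal sketch (hypothetical "foo" driver; foo_hw_set_rx_csum is an assumed hardware helper) of the two hooks __netdev_update_features() consults: ndo_fix_features() masks out combinations the hardware cannot honour, ndo_set_features() commits the accepted set:

static void foo_hw_set_rx_csum(struct net_device *dev, bool on);	/* assumed */

static netdev_features_t foo_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	/* e.g. this hardware cannot do TSO without checksum offload */
	if (!(features & NETIF_F_HW_CSUM))
		features &= ~NETIF_F_ALL_TSO;
	return features;
}

static int foo_set_features(struct net_device *dev,
			    netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM)
		foo_hw_set_rx_csum(dev, !!(features & NETIF_F_RXCSUM));
	return 0;
}

A driver whose constraints change at runtime re-runs this pipeline by calling netdev_update_features() under RTNL.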
9933 * @dev: the device to transfer operstate to
9940 struct net_device *dev)
9943 netif_dormant_on(dev);
9945 netif_dormant_off(dev);
9948 netif_testing_on(dev);
9950 netif_testing_off(dev);
9953 netif_carrier_on(dev);
9955 netif_carrier_off(dev);
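The carrier and dormant helpers used above are the same ones a driver's link handler flips; a hedged sketch (foo_link_change is hypothetical, typically invoked from a link-status interrupt), whose carrier transitions feed the operstate machinery via linkwatch events:

static void foo_link_change(struct net_device *dev, bool link_up)
{
	if (link_up) {
		netif_carrier_on(dev);
		netif_tx_wake_all_queues(dev);
	} else {
		netif_carrier_off(dev);
		netif_tx_stop_all_queues(dev);
	}
}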
9959 static int netif_alloc_rx_queues(struct net_device *dev)
9961 unsigned int i, count = dev->num_rx_queues;
9972 dev->_rx = rx;
9975 rx[i].dev = dev;
9978 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
9988 kvfree(dev->_rx);
9989 dev->_rx = NULL;
9993 static void netif_free_rx_queues(struct net_device *dev)
9995 unsigned int i, count = dev->num_rx_queues;
9998 if (!dev->_rx)
10002 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
10004 kvfree(dev->_rx);
10007 static void netdev_init_one_queue(struct net_device *dev,
10012 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
10015 queue->dev = dev;
10021 static void netif_free_tx_queues(struct net_device *dev)
10023 kvfree(dev->_tx);
10026 static int netif_alloc_netdev_queues(struct net_device *dev)
10028 unsigned int count = dev->num_tx_queues;
10039 dev->_tx = tx;
10041 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
10042 spin_lock_init(&dev->tx_global_lock);
10047 void netif_tx_stop_all_queues(struct net_device *dev)
10051 for (i = 0; i < dev->num_tx_queues; i++) {
10052 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
10059 static int netdev_do_alloc_pcpu_stats(struct net_device *dev)
10063 /* Drivers implementing ndo_get_peer_dev must support tstat
10064  * accounting, so that skb_do_redirect() can bump the dev's
10065  * RX stats upon network namespace switch.
10066  */
10067 if (dev->netdev_ops->ndo_get_peer_dev &&
10068 dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS)
10071 switch (dev->pcpu_stat_type) {
10075 v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
10078 v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
10081 v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
10090 static void netdev_do_free_pcpu_stats(struct net_device *dev)
10092 switch (dev->pcpu_stat_type) {
10096 free_percpu(dev->lstats);
10099 free_percpu(dev->tstats);
10102 free_percpu(dev->dstats);
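A driver opts in to this core-managed allocation by setting pcpu_stat_type before registration; a sketch under assumed names (foo_setup/foo_count_rx are illustrative). register_netdevice() then allocates dev->tstats via netdev_do_alloc_pcpu_stats() and the unregister path frees it, leaving the driver only the counter updates:

static void foo_setup(struct net_device *dev)
{
	/* ask the core to allocate and free dev->tstats for us */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}

static void foo_count_rx(struct net_device *dev, unsigned int len)
{
	dev_sw_netstats_rx_add(dev, len);	/* bumps this CPU's pcpu_sw_netstats */
}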
10109 * @dev: device to register
10116 int register_netdevice(struct net_device *dev)
10119 struct net *net = dev_net(dev);
10129 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
10132 ret = ethtool_check_ops(dev->ethtool_ops);
10136 spin_lock_init(&dev->addr_list_lock);
10137 netdev_set_addr_lockdep_class(dev);
10139 ret = dev_get_valid_name(net, dev, dev->name);
10144 dev->name_node = netdev_name_node_head_alloc(dev);
10145 if (!dev->name_node)
10149 if (dev->netdev_ops->ndo_init) {
10150 ret = dev->netdev_ops->ndo_init(dev);
10158 if (((dev->hw_features | dev->features) &
10160 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
10161 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
10162 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
10167 ret = netdev_do_alloc_pcpu_stats(dev);
10171 ret = dev_index_reserve(net, dev->ifindex);
10174 dev->ifindex = ret;
10179 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
10180 dev->features |= NETIF_F_SOFT_FEATURES;
10182 if (dev->udp_tunnel_nic_info) {
10183 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10184 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10187 dev->wanted_features = dev->features & dev->hw_features;
10189 if (!(dev->flags & IFF_LOOPBACK))
10190 dev->hw_features |= NETIF_F_NOCACHE_COPY;
10197 if (dev->hw_features & NETIF_F_TSO)
10198 dev->hw_features |= NETIF_F_TSO_MANGLEID;
10199 if (dev->vlan_features & NETIF_F_TSO)
10200 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
10201 if (dev->mpls_features & NETIF_F_TSO)
10202 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
10203 if (dev->hw_enc_features & NETIF_F_TSO)
10204 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
10208 dev->vlan_features |= NETIF_F_HIGHDMA;
10212 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
10216 dev->mpls_features |= NETIF_F_SG;
10218 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
10223 ret = netdev_register_kobject(dev);
10225 dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED;
10230 __netdev_update_features(dev);
10237 set_bit(__LINK_STATE_PRESENT, &dev->state);
10239 linkwatch_init_dev(dev);
10241 dev_init_scheduler(dev);
10243 netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL);
10244 list_netdevice(dev);
10246 add_device_randomness(dev->dev_addr, dev->addr_len);
10252 if (dev->addr_assign_type == NET_ADDR_PERM)
10253 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10256 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
10260 dev->needs_free_netdev = false;
10261 unregister_netdevice_queue(dev, NULL);
10268 if (!dev->rtnl_link_ops ||
10269 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10270 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
10276 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
10278 dev_index_release(net, dev->ifindex);
10280 netdev_do_free_pcpu_stats(dev);
10282 if (dev->netdev_ops->ndo_uninit)
10283 dev->netdev_ops->ndo_uninit(dev);
10284 if (dev->priv_destructor)
10285 dev->priv_destructor(dev);
10287 netdev_name_node_free(dev->name_node);
10294 * @dev: device to init
10302 int init_dummy_netdev(struct net_device *dev)
10309 memset(dev, 0, sizeof(struct net_device));
10314 dev->reg_state = NETREG_DUMMY;
10317 INIT_LIST_HEAD(&dev->napi_list);
10320 set_bit(__LINK_STATE_PRESENT, &dev->state);
10321 set_bit(__LINK_STATE_START, &dev->state);
10324 dev_net_set(dev, &init_net);
10338 * @dev: device to register
10349 int register_netdev(struct net_device *dev)
10355 err = register_netdevice(dev);
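Putting the registration pieces together, a minimal probe sketch (hypothetical "foo" driver; foo_priv and foo_netdev_ops are assumed). register_netdev() is the RTNL-taking wrapper around register_netdevice() above:

#include <linux/etherdevice.h>

struct foo_priv { int placeholder; };		/* assumed private struct */

static int foo_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct foo_priv));
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &foo_netdev_ops;	/* assumed ops table */
	eth_hw_addr_random(dev);		/* random locally administered MAC */

	err = register_netdev(dev);		/* rtnl_lock + register_netdevice() */
	if (err) {
		free_netdev(dev);		/* standard cleanup on failed registration */
		return err;
	}
	return 0;
}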
10361 int netdev_refcnt_read(const struct net_device *dev)
10367 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
10370 return refcount_read(&dev->dev_refcnt);
10394 struct net_device *dev;
10399 list_for_each_entry(dev, list, todo_list)
10400 if (netdev_refcnt_read(dev) == 1)
10401 return dev;
10408 list_for_each_entry(dev, list, todo_list)
10409 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10415 list_for_each_entry(dev, list, todo_list)
10417 &dev->state)) {
10441 list_for_each_entry(dev, list, todo_list)
10442 if (netdev_refcnt_read(dev) == 1)
10443 return dev;
10447 list_for_each_entry(dev, list, todo_list) {
10448 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
10449 	dev->name, netdev_refcnt_read(dev));
10450 ref_tracker_dir_print(&dev->refcnt_tracker, 10);
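The references being waited on here are normally taken through the tracked helpers, so a leaked holder shows up in the ref_tracker dump above. A sketch with an assumed private struct:

#include <linux/netdevice.h>

struct foo_ctx {				/* assumed holder of a device reference */
	struct net_device *dev;
	netdevice_tracker tracker;
};

static void foo_ctx_bind(struct foo_ctx *ctx, struct net_device *dev)
{
	ctx->dev = dev;
	netdev_hold(dev, &ctx->tracker, GFP_KERNEL);	/* accounted in refcnt_tracker */
}

static void foo_ctx_unbind(struct foo_ctx *ctx)
{
	netdev_put(ctx->dev, &ctx->tracker);	/* a missing put is printed above */
}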
10484 struct net_device *dev, *tmp;
10492 struct net_device *dev = list_first_entry(&unlink_list,
10495 list_del_init(&dev->unlink_list);
10496 dev->nested_level = dev->lower_level - 1;
10509 list_for_each_entry_safe(dev, tmp, &list, todo_list) {
10510 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
10511 netdev_WARN(dev, "run_todo but not unregistering\n");
10512 list_del(&dev->todo_list);
10517 dev->reg_state = NETREG_UNREGISTERED;
10519 linkwatch_forget_dev(dev);
10523 dev = netdev_wait_allrefs_any(&list);
10524 list_del(&dev->todo_list);
10527 BUG_ON(netdev_refcnt_read(dev) != 1);
10528 BUG_ON(!list_empty(&dev->ptype_all));
10529 BUG_ON(!list_empty(&dev->ptype_specific));
10530 WARN_ON(rcu_access_pointer(dev->ip_ptr));
10531 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
10533 netdev_do_free_pcpu_stats(dev);
10534 if (dev->priv_destructor)
10535 dev->priv_destructor(dev);
10536 if (dev->needs_free_netdev)
10537 free_netdev(dev);
10539 if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count))
10543 kobject_put(&dev->dev.kobj);
10568 struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev)
10575 if (p && cmpxchg(&dev->core_stats, NULL, p))
10579 return READ_ONCE(dev->core_stats);
10585 * @dev: device to get statistics from
10590 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
10593 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
10596 const struct net_device_ops *ops = dev->netdev_ops;
10601 ops->ndo_get_stats64(dev, storage);
10603 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
10605 netdev_stats_to_stats64(storage, &dev->stats);
10609 p = READ_ONCE(dev->core_stats);
10662 * @dev: device to get statistics from
10665 * Populate @s from dev->stats and dev->tstats. Can be used as
10666 * ndo_get_stats64() callback.
10668 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
10670 netdev_stats_to_stats64(s, &dev->stats);
10671 dev_fetch_sw_netstats(s, dev->tstats);
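As its kernel-doc says, dev_get_tstats64() is meant to be plugged in directly; a sketch of the typical wiring (foo_start_xmit and the ops table are assumed):

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev);	/* assumed */

static const struct net_device_ops foo_netdev_ops = {
	.ndo_start_xmit	 = foo_start_xmit,
	.ndo_get_stats64 = dev_get_tstats64,	/* core fills from dev->stats + dev->tstats */
};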
10675 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
10677 struct netdev_queue *queue = dev_ingress_queue(dev);
10685 netdev_init_one_queue(dev, queue, NULL);
10688 rcu_assign_pointer(dev->ingress_queue, queue);
10695 void netdev_set_default_ethtool_ops(struct net_device *dev,
10698 if (dev->ethtool_ops == &default_ethtool_ops)
10699 dev->ethtool_ops = ops;
10705 * @dev: netdev to enable the IRQ coalescing on
10710 void netdev_sw_irq_coalesce_default_on(struct net_device *dev)
10712 WARN_ON(dev->reg_state == NETREG_REGISTERED);
10715 dev->gro_flush_timeout = 20000;
10716 dev->napi_defer_hard_irqs = 1;
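Because of the WARN_ON above, the natural call site is the probe path, before registration; a one-line usage sketch (foo_init_coalesce is hypothetical):

static void foo_init_coalesce(struct net_device *dev)
{
	/* defaults: 20us gro_flush_timeout, one deferred hard IRQ per NAPI poll */
	netdev_sw_irq_coalesce_default_on(dev);
}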
10721 void netdev_freemem(struct net_device *dev)
10723 char *addr = (char *)dev - dev->padded;
10746 struct net_device *dev;
10750 BUG_ON(strlen(name) >= sizeof(dev->name));
10775 dev = PTR_ALIGN(p, NETDEV_ALIGN);
10776 dev->padded = (char *)dev - (char *)p;
10778 ref_tracker_dir_init(&dev->refcnt_tracker, 128, name);
10780 dev->pcpu_refcnt = alloc_percpu(int);
10781 if (!dev->pcpu_refcnt)
10783 __dev_hold(dev);
10785 refcount_set(&dev->dev_refcnt, 1);
10788 if (dev_addr_init(dev))
10791 dev_mc_init(dev);
10792 dev_uc_init(dev);
10794 dev_net_set(dev, &init_net);
10796 dev->gso_max_size = GSO_LEGACY_MAX_SIZE;
10797 dev->xdp_zc_max_segs = 1;
10798 dev->gso_max_segs = GSO_MAX_SEGS;
10799 dev->gro_max_size = GRO_LEGACY_MAX_SIZE;
10800 dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE;
10801 dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE;
10802 dev->tso_max_size = TSO_LEGACY_MAX_SIZE;
10803 dev->tso_max_segs = TSO_MAX_SEGS;
10804 dev->upper_level = 1;
10805 dev->lower_level = 1;
10807 dev->nested_level = 0;
10808 INIT_LIST_HEAD(&dev->unlink_list);
10811 INIT_LIST_HEAD(&dev->napi_list);
10812 INIT_LIST_HEAD(&dev->unreg_list);
10813 INIT_LIST_HEAD(&dev->close_list);
10814 INIT_LIST_HEAD(&dev->link_watch_list);
10815 INIT_LIST_HEAD(&dev->adj_list.upper);
10816 INIT_LIST_HEAD(&dev->adj_list.lower);
10817 INIT_LIST_HEAD(&dev->ptype_all);
10818 INIT_LIST_HEAD(&dev->ptype_specific);
10819 INIT_LIST_HEAD(&dev->net_notifier_list);
10821 hash_init(dev->qdisc_hash);
10823 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
10824 setup(dev);
10826 if (!dev->tx_queue_len) {
10827 dev->priv_flags |= IFF_NO_QUEUE;
10828 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
10831 dev->num_tx_queues = txqs;
10832 dev->real_num_tx_queues = txqs;
10833 if (netif_alloc_netdev_queues(dev))
10836 dev->num_rx_queues = rxqs;
10837 dev->real_num_rx_queues = rxqs;
10838 if (netif_alloc_rx_queues(dev))
10841 strcpy(dev->name, name);
10842 dev->name_assign_type = name_assign_type;
10843 dev->group = INIT_NETDEV_GROUP;
10844 if (!dev->ethtool_ops)
10845 dev->ethtool_ops = &default_ethtool_ops;
10847 nf_hook_netdev_init(dev);
10849 return dev;
10852 free_netdev(dev);
10857 free_percpu(dev->pcpu_refcnt);
10860 netdev_freemem(dev);
10867 * @dev: device
10874 void free_netdev(struct net_device *dev)
10884 if (dev->reg_state == NETREG_UNREGISTERING) {
10886 dev->needs_free_netdev = true;
10890 netif_free_tx_queues(dev);
10891 netif_free_rx_queues(dev);
10893 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
10896 dev_addr_flush(dev);
10898 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
10901 ref_tracker_dir_exit(&dev->refcnt_tracker);
10903 free_percpu(dev->pcpu_refcnt);
10904 dev->pcpu_refcnt = NULL;
10906 free_percpu(dev->core_stats);
10907 dev->core_stats = NULL;
10908 free_percpu(dev->xdp_bulkq);
10909 dev->xdp_bulkq = NULL;
10912 if (dev->reg_state == NETREG_UNINITIALIZED) {
10913 netdev_freemem(dev);
10917 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
10918 dev->reg_state = NETREG_RELEASED;
10921 put_device(&dev->dev);
10943 * @dev: device
10954 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
10959 list_move_tail(&dev->unreg_list, head);
10963 list_add(&dev->unreg_list, &single);
10972 struct net_device *dev, *tmp;
10981 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
10986 if (dev->reg_state == NETREG_UNINITIALIZED) {
10987 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
10988 	dev->name, dev);
10991 list_del(&dev->unreg_list);
10994 dev->dismantle = true;
10995 BUG_ON(dev->reg_state != NETREG_REGISTERED);
10999 list_for_each_entry(dev, head, unreg_list)
11000 list_add_tail(&dev->close_list, &close_head);
11003 list_for_each_entry(dev, head, unreg_list) {
11006 unlist_netdevice(dev, false);
11007 dev->reg_state = NETREG_UNREGISTERING;
11014 list_for_each_entry(dev, head, unreg_list) {
11018 dev_shutdown(dev);
11019 dev_tcx_uninstall(dev);
11020 dev_xdp_uninstall(dev);
11021 bpf_dev_bound_netdev_unregister(dev);
11023 netdev_offload_xstats_disable_all(dev);
11028 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11030 if (!dev->rtnl_link_ops ||
11031 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
11032 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
11039 dev_uc_flush(dev);
11040 dev_mc_flush(dev);
11042 netdev_name_node_alt_flush(dev);
11043 netdev_name_node_free(dev->name_node);
11045 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
11047 if (dev->netdev_ops->ndo_uninit)
11048 dev->netdev_ops->ndo_uninit(dev);
11051 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh);
11054 WARN_ON(netdev_has_any_upper_dev(dev));
11055 WARN_ON(netdev_has_any_lower_dev(dev));
11058 netdev_unregister_kobject(dev);
11061 netif_reset_xps_queues_gt(dev, 0);
11067 list_for_each_entry(dev, head, unreg_list) {
11068 netdev_put(dev, &dev->dev_registered_tracker);
11069 net_set_todo(dev);
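unregister_netdevice_many() exists so callers tearing down several devices pay the notifier and RCU synchronization cost once; a batching sketch under assumed driver types (foo_priv and the foo_devs list are illustrative):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

struct foo_priv {				/* assumed per-device driver state */
	struct net_device *dev;
	struct list_head list;
};

static void foo_remove_all(struct list_head *foo_devs)
{
	struct foo_priv *p;
	LIST_HEAD(kill_list);

	rtnl_lock();
	list_for_each_entry(p, foo_devs, list)
		unregister_netdevice_queue(p->dev, &kill_list);
	unregister_netdevice_many(&kill_list);	/* one pass over the whole batch */
	rtnl_unlock();
}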
11090 * @dev: device
11099 void unregister_netdev(struct net_device *dev)
11102 unregister_netdevice(dev);
11109 * @dev: device
11123 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
11127 struct net *net_old = dev_net(dev);
11135 if (dev->features & NETIF_F_NETNS_LOCAL)
11139 if (dev->reg_state != NETREG_REGISTERED)
11151 if (netdev_name_in_use(net, dev->name)) {
11155 err = dev_prep_valid_name(net, dev, pat, new_name);
11161 netdev_for_each_altname(dev, name_node)
11172 err = dev_index_reserve(net, dev->ifindex);
11185 dev_close(dev);
11188 unlist_netdevice(dev, true);
11193 dev_shutdown(dev);
11198 * Note that dev->reg_state stays at NETREG_REGISTERED.
11202 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11205 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
11207 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
11213 dev_uc_flush(dev);
11214 dev_mc_flush(dev);
11217 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
11218 netdev_adjacent_del_links(dev);
11221 move_netdevice_notifiers_dev_net(dev, net);
11224 dev_net_set(dev, net);
11225 dev->ifindex = new_ifindex;
11228 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
11229 netdev_adjacent_add_links(dev);
11232 strscpy(dev->name, new_name, IFNAMSIZ);
11235 err = device_rename(&dev->dev, dev->name);
11241 err = netdev_change_owner(dev, net_old, net);
11245 list_netdevice(dev);
11248 call_netdevice_notifiers(NETDEV_REGISTER, dev);
11254 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
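Kernel callers reach the sequence above through the RTNL-held wrapper dev_change_net_namespace(); a sketch (foo_move_to_ns is hypothetical; "eth%d" is the rename pattern applied when the name collides in the target netns):

static int foo_move_to_ns(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "eth%d");
	rtnl_unlock();
	return err;
}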
11400 * @dev: network device
11404 const char *netdev_drivername(const struct net_device *dev)
11410 parent = dev->dev.parent;
11420 static void __netdev_printk(const char *level, const struct net_device *dev,
11423 if (dev && dev->dev.parent) {
11425 dev->dev.parent,
11427 dev_driver_string(dev->dev.parent),
11428 dev_name(dev->dev.parent),
11429 netdev_name(dev), netdev_reg_state(dev),
11431 } else if (dev) {
11433 level, netdev_name(dev), netdev_reg_state(dev), vaf);
11439 void netdev_printk(const char *level, const struct net_device *dev,
11450 __netdev_printk(level, dev, &vaf);
11457 void func(const struct net_device *dev, const char *fmt, ...) \
11467 __netdev_printk(level, dev, &vaf); \
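Usage of the resulting netdev_<level>() wrappers, which prefix driver name, bus id and interface name automatically; foo_report and its messages are illustrative:

static void foo_report(struct net_device *dev, int err)
{
	if (err)
		netdev_err(dev, "DMA ring init failed: %d\n", err);
	else
		netdev_info(dev, "link is up\n");
}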
11498 struct net_device *dev, *aux;
11504 for_each_netdev_safe(net, dev, aux) {
11509 if (dev->features & NETIF_F_NETNS_LOCAL)
11513 if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
11517 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
11519 snprintf(fb_name, IFNAMSIZ, "dev%%d");
11521 netdev_for_each_altname_safe(dev, name_node, tmp)
11528 err = dev_change_net_namespace(dev, &init_net, fb_name);
11529 if (err) {
11530 	pr_emerg("%s: failed to move %s to init_net: %d\n",
11531 		__func__, dev->name, err);
11544 struct net_device *dev;
11555 for_each_netdev_reverse(net, dev) {
11556 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
11557 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
11559 unregister_netdevice_queue(dev, &dev_kill_list);
11649 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",