Lines Matching defs:dev
61 #include "dev.h"
457 struct net_device *dev;
460 for_each_netdev(net, dev) {
461 if (dev->rtnl_link_ops == ops)
462 ops->dellink(dev, &list_kill);
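The loop at 460-462 (this listing appears to come from net/core/rtnetlink.c) is what makes unloading a virtual-link driver safe: unregistering its rtnl_link_ops walks every netdev in each namespace and calls ->dellink() on the devices it owns. A minimal sketch of the driver side, using a hypothetical "foo" kind:

    #include <linux/module.h>
    #include <linux/netdevice.h>
    #include <net/rtnetlink.h>

    static void foo_dellink(struct net_device *dev, struct list_head *head)
    {
            /* queue rather than unregister directly, so the caller can
             * commit the whole kill list in one batch */
            unregister_netdevice_queue(dev, head);
    }

    static struct rtnl_link_ops foo_link_ops __read_mostly = {
            .kind    = "foo",
            .dellink = foo_dellink,
    };
    /* rtnl_link_unregister(&foo_link_ops) drives the loop shown above */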
532 static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
540 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
549 ops->get_slave_size(master_dev, dev);
556 static size_t rtnl_link_get_size(const struct net_device *dev)
558 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
570 ops->get_size(dev);
574 size += nla_total_size(ops->get_xstats_size(dev));
576 size += rtnl_link_get_slave_info_data_size(dev);
625 static size_t rtnl_link_get_af_size(const struct net_device *dev,
639 af_ops->get_link_af_size(dev, ext_filter_mask);
647 static bool rtnl_have_link_slave_info(const struct net_device *dev)
654 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
662 const struct net_device *dev)
669 master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
681 err = ops->fill_slave_info(skb, master_dev, dev);
694 const struct net_device *dev)
696 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
705 err = ops->fill_xstats(skb, dev);
713 err = ops->fill_info(skb, dev);
725 static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
734 err = rtnl_link_info_fill(skb, dev);
738 err = rtnl_link_slave_info_fill(skb, dev);
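Lines 532-576 and 694-738 show the two-phase pattern used throughout this file: a get_size() pass sums nla_total_size() upper bounds so the skb can be allocated, then a fill pass emits the attributes and fails with -EMSGSIZE if the estimate was wrong. A hedged sketch of a driver implementing the pair (the attribute enum and priv layout are hypothetical):

    #include <linux/netdevice.h>
    #include <net/netlink.h>
    #include <net/rtnetlink.h>

    enum {
            IFLA_FOO_UNSPEC,
            IFLA_FOO_SOMETHING,     /* u32, hypothetical */
            __IFLA_FOO_MAX,
    };

    struct foo_priv {
            u32 something;
    };

    static size_t foo_get_size(const struct net_device *dev)
    {
            return nla_total_size(sizeof(u32));     /* IFLA_FOO_SOMETHING */
    }

    static int foo_fill_info(struct sk_buff *skb, const struct net_device *dev)
    {
            const struct foo_priv *priv = netdev_priv(dev);

            if (nla_put_u32(skb, IFLA_FOO_SOMETHING, priv->something))
                    return -EMSGSIZE;       /* must stay in step with get_size() */
            return 0;
    }

Both callbacks hang off the same rtnl_link_ops (.get_size/.fill_info), which is how rtnl_link_get_size() at 556 and the ops->fill_info() call at 713 reach them.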
859 static void set_operstate(struct net_device *dev, unsigned char transition)
861 unsigned char operstate = dev->operstate;
868 !netif_dormant(dev) && !netif_testing(dev))
873 if (netif_oper_up(dev))
878 if (netif_oper_up(dev))
883 if (dev->operstate != operstate) {
885 dev->operstate = operstate;
887 netdev_state_change(dev);
891 static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
893 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
894 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
897 static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
905 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
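rtnl_dev_get_flags() and rtnl_dev_combine_flags() (891-905) implement the RTM_SETLINK flag rule: ifi_change selects which bits are taken from the request, every other bit keeps the device's current value, with gflags standing in for IFF_PROMISC/IFF_ALLMULTI as userspace last set them. A runnable userspace illustration of just the mask arithmetic:

    #include <stdio.h>
    #include <net/if.h>     /* IFF_UP, IFF_PROMISC */

    /* new = (requested & change_mask) | (current & ~change_mask) */
    static unsigned int combine(unsigned int cur, unsigned int req,
                                unsigned int change)
    {
            return (req & change) | (cur & ~change);
    }

    int main(void)
    {
            unsigned int cur = IFF_PROMISC;         /* promisc, link down */

            /* request IFF_UP while only claiming to change IFF_UP:
             * promiscuous mode survives untouched */
            printf("%#x\n", combine(cur, IFF_UP, IFF_UP));
            return 0;
    }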
945 static inline int rtnl_vfinfo_size(const struct net_device *dev,
948 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
949 int num_vfs = dev_num_vf(dev->dev.parent);
990 static size_t rtnl_port_size(const struct net_device *dev,
1005 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1008 if (dev_num_vf(dev->dev.parent))
1010 vf_port_size * dev_num_vf(dev->dev.parent);
1025 static size_t rtnl_prop_list_size(const struct net_device *dev)
1030 if (list_empty(&dev->name_node->list))
1033 list_for_each_entry(name_node, &dev->name_node->list, list)
1038 static size_t rtnl_proto_down_size(const struct net_device *dev)
1042 if (dev->proto_down_reason)
1048 static size_t rtnl_devlink_port_size(const struct net_device *dev)
1052 if (dev->devlink_port)
1053 size += devlink_nl_port_handle_size(dev->devlink_port);
1058 static noinline size_t if_nlmsg_size(const struct net_device *dev,
1094 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
1095 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
1096 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
1097 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
1105 + rtnl_proto_down_size(dev) /* proto down */
1111 + rtnl_prop_list_size(dev)
1113 + rtnl_devlink_port_size(dev)
1117 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
1128 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
1134 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
1153 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
1162 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
1173 static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
1178 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1182 err = rtnl_port_self_fill(skb, dev);
1186 if (dev_num_vf(dev->dev.parent)) {
1187 err = rtnl_vf_ports_fill(skb, dev);
1195 static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1200 err = dev_get_phys_port_id(dev, &ppid);
1213 static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1218 err = dev_get_phys_port_name(dev, name, sizeof(name));
1231 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1236 err = dev_get_port_parent_id(dev, &ppid, false);
1250 struct net_device *dev)
1261 dev_get_stats(dev, sp);
1274 struct net_device *dev,
1310 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1330 memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
1364 if (dev->netdev_ops->ndo_get_vf_guid &&
1365 !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
1384 if (dev->netdev_ops->ndo_get_vf_stats)
1385 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1420 struct net_device *dev,
1426 if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
1429 num_vfs = dev_num_vf(dev->dev.parent);
1433 if (!dev->netdev_ops->ndo_get_vf_config)
1441 if (rtnl_fill_vfinfo(skb, dev, i, ext_filter_mask)) {
1451 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1456 map.mem_start = dev->mem_start;
1457 map.mem_end = dev->mem_end;
1458 map.base_addr = dev->base_addr;
1459 map.irq = dev->irq;
1460 map.dma = dev->dma;
1461 map.port = dev->if_port;
1469 static u32 rtnl_xdp_prog_skb(struct net_device *dev)
1475 generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
1481 static u32 rtnl_xdp_prog_drv(struct net_device *dev)
1483 return dev_xdp_prog_id(dev, XDP_MODE_DRV);
1486 static u32 rtnl_xdp_prog_hw(struct net_device *dev)
1488 return dev_xdp_prog_id(dev, XDP_MODE_HW);
1491 static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
1493 u32 (*get_prog_id)(struct net_device *dev))
1498 curr_id = get_prog_id(dev);
1515 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1528 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
1532 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
1536 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
1589 static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
1596 upper_dev = netdev_master_upper_dev_get_rcu(dev);
1604 static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
1607 int ifindex = dev_get_iflink(dev);
1609 if (force || dev->ifindex != ifindex)
1616 struct net_device *dev)
1621 ret = dev_get_alias(dev, buf, sizeof(buf));
1626 const struct net_device *dev,
1631 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
1632 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1634 if (!net_eq(dev_net(dev), link_net)) {
1644 return nla_put_iflink(skb, dev, put_iflink);
1648 const struct net_device *dev,
1669 err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1689 const struct net_device *dev)
1694 list_for_each_entry(name_node, &dev->name_node->list, list) {
1703 const struct net_device *dev)
1712 ret = rtnl_fill_alt_ifnames(skb, dev);
1725 const struct net_device *dev)
1730 if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
1733 preason = dev->proto_down_reason;
1754 const struct net_device *dev)
1763 if (dev->devlink_port) {
1764 ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
1778 struct net_device *dev, struct net *src_net,
1796 ifm->ifi_type = dev->type;
1797 ifm->ifi_index = dev->ifindex;
1798 ifm->ifi_flags = dev_get_flags(dev);
1804 qdisc = rtnl_dereference(dev->qdisc);
1805 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
1806 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
1808 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
1809 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
1810 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
1811 nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
1812 nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
1813 nla_put_u32(skb, IFLA_GROUP, dev->group) ||
1814 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
1815 nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) ||
1816 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
1817 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
1818 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
1819 nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
1820 nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE, dev->gso_ipv4_max_size) ||
1821 nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE, dev->gro_ipv4_max_size) ||
1822 nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
1823 nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
1825 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
1827 put_master_ifindex(skb, dev) ||
1828 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
1831 nla_put_ifalias(skb, dev) ||
1833 atomic_read(&dev->carrier_up_count) +
1834 atomic_read(&dev->carrier_down_count)) ||
1836 atomic_read(&dev->carrier_up_count)) ||
1838 atomic_read(&dev->carrier_down_count)))
1841 if (rtnl_fill_proto_down(skb, dev))
1849 if (rtnl_fill_link_ifmap(skb, dev))
1852 if (dev->addr_len) {
1853 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
1854 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
1858 if (rtnl_phys_port_id_fill(skb, dev))
1861 if (rtnl_phys_port_name_fill(skb, dev))
1864 if (rtnl_phys_switch_id_fill(skb, dev))
1867 if (rtnl_fill_stats(skb, dev))
1870 if (rtnl_fill_vf(skb, dev, ext_filter_mask))
1873 if (rtnl_port_fill(skb, dev, ext_filter_mask))
1876 if (rtnl_xdp_fill(skb, dev))
1879 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
1880 if (rtnl_link_fill(skb, dev) < 0)
1884 if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
1894 if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
1895 nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
1899 if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
1903 if (rtnl_fill_prop_list(skb, dev))
1906 if (dev->dev.parent &&
1908 dev_name(dev->dev.parent)))
1911 if (dev->dev.parent && dev->dev.parent->bus &&
1913 dev->dev.parent->bus->name))
1916 if (rtnl_fill_devlink_port(skb, dev))
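The block from 1778 to 1916 is rtnl_fill_ifinfo(), the single producer of RTM_NEWLINK payloads for gets, dumps, and notifications alike. The quickest way to see its output is a unicast RTM_GETLINK from userspace; a minimal sketch with error handling omitted (ifindex 1 is assumed to be loopback):

    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>

    int main(void)
    {
            struct {
                    struct nlmsghdr nh;
                    struct ifinfomsg ifm;
            } req = {
                    .nh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
                    .nh.nlmsg_type  = RTM_GETLINK,
                    .nh.nlmsg_flags = NLM_F_REQUEST,
                    .ifm.ifi_family = AF_UNSPEC,
                    .ifm.ifi_index  = 1,
            };
            char buf[8192];
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

            send(fd, &req, req.nh.nlmsg_len, 0);
            /* the reply is one RTM_NEWLINK message; walk its attributes
             * (IFLA_IFNAME, IFLA_MTU, IFLA_ADDRESS, ...) with the RTA_* macros */
            recv(fd, buf, sizeof(buf), 0);
            close(fd);
            return 0;
    }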
2056 static bool link_master_filtered(struct net_device *dev, int master_idx)
2063 master = netdev_master_upper_dev_get(dev);
2077 static bool link_kind_filtered(const struct net_device *dev,
2080 if (kind_ops && dev->rtnl_link_ops != kind_ops)
2086 static bool link_dump_filtered(struct net_device *dev,
2090 if (link_master_filtered(dev, master_idx) ||
2091 link_kind_filtered(dev, kind_ops))
2176 struct net_device *dev;
2235 hlist_for_each_entry(dev, head, index_hlist) {
2236 if (link_dump_filtered(dev, master_idx, kind_ops))
2240 err = rtnl_fill_ifinfo(skb, dev, net,
2383 static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2386 const struct net_device_ops *ops = dev->netdev_ops;
2393 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate);
2396 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
2400 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2404 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2408 nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) {
2415 nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) {
2427 nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) {
2453 err = af_ops->validate_link_af(dev, af, extack);
2463 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2466 const struct net_device_ops *ops = dev->netdev_ops;
2468 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2471 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2473 if (dev->type != ARPHRD_INFINIBAND)
2476 return handle_infiniband_guid(dev, ivt, guid_type);
2479 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
2481 const struct net_device_ops *ops = dev->netdev_ops;
2491 err = ops->ndo_set_vf_mac(dev, ivm->vf,
2504 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
2536 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2550 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2554 err = rtnl_set_vf_rate(dev, ivt->vf,
2566 err = rtnl_set_vf_rate(dev, ivt->vf,
2579 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2592 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2606 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2619 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2631 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2642 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
2648 static int do_set_master(struct net_device *dev, int ifindex,
2651 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
2660 err = ops->ndo_del_slave(upper_dev, dev);
2669 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2674 err = ops->ndo_add_slave(upper_dev, dev, extack);
2689 static int do_set_proto_down(struct net_device *dev,
2700 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) {
2724 dev_change_proto_down_reason(dev, mask, value);
2731 if (!proto_down && dev->proto_down_reason) {
2735 err = dev_change_proto_down(dev,
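do_set_master() at 2648-2674 is the whole enslavement mechanism: look up the new master by ifindex, then delegate to its ndo_del_slave/ndo_add_slave. From userspace that is just RTM_SETLINK with an IFLA_MASTER attribute; a hedged sketch (ifindex 2 as the port and 3 as the bridge are assumptions, and CAP_NET_ADMIN is required):

    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>

    int main(void)
    {
            struct {
                    struct nlmsghdr nh;
                    struct ifinfomsg ifm;
                    struct rtattr rta;
                    int master;
            } req = {
                    .nh.nlmsg_len   = sizeof(req),
                    .nh.nlmsg_type  = RTM_SETLINK,
                    .nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
                    .ifm.ifi_family = AF_UNSPEC,
                    .ifm.ifi_index  = 2,                    /* port */
                    .rta.rta_type   = IFLA_MASTER,
                    .rta.rta_len    = RTA_LENGTH(sizeof(int)),
                    .master         = 3,                    /* bridge */
            };
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

            send(fd, &req, req.nh.nlmsg_len, 0);    /* kernel answers with an ACK */
            close(fd);
            return 0;
    }

Passing IFLA_MASTER of 0 takes the detach branch at 2660 instead.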
2748 struct net_device *dev, struct ifinfomsg *ifm,
2752 const struct net_device_ops *ops = dev->netdev_ops;
2766 net = rtnl_link_get_net_capable(skb, dev_net(dev),
2778 err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
2794 if (!netif_device_present(dev)) {
2807 err = ops->ndo_set_config(dev, &k_map);
2818 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2825 sa->sa_family = dev->type;
2827 dev->addr_len);
2828 err = dev_set_mac_address_user(dev, sa, extack);
2836 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
2843 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2853 err = dev_change_name(dev, ifname);
2860 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2868 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2869 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2873 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2880 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2887 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2896 err = dev_change_tx_queue_len(dev, value);
2905 if (dev->gso_max_size ^ max_size) {
2906 netif_set_gso_max_size(dev, max_size);
2914 if (dev->gso_max_segs ^ max_segs) {
2915 netif_set_gso_max_segs(dev, max_segs);
2923 if (dev->gro_max_size ^ gro_max_size) {
2924 netif_set_gro_max_size(dev, gro_max_size);
2932 if (dev->gso_ipv4_max_size ^ max_size) {
2933 netif_set_gso_ipv4_max_size(dev, max_size);
2941 if (dev->gro_ipv4_max_size ^ gro_max_size) {
2942 netif_set_gro_ipv4_max_size(dev, gro_max_size);
2948 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2954 if (dev->link_mode ^ value)
2956 dev->link_mode = value;
2977 err = do_setvfinfo(dev, vfinfo);
3012 err = ops->ndo_set_vf_port(dev, vf, port);
3031 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
3046 err = af_ops->set_link_af(dev, af, extack);
3056 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
3102 err = dev_change_xdp_fd(dev, extack,
3115 netdev_state_change(dev);
3119 dev->name);
3145 struct net_device *dev;
3161 dev = __dev_get_by_index(net, ifm->ifi_index);
3163 dev = rtnl_dev_get(net, tb);
3167 if (dev == NULL) {
3172 err = validate_linkmsg(dev, tb, extack);
3176 err = do_setlink(skb, dev, ifm, extack, tb, 0);
3183 struct net_device *dev, *aux;
3190 for_each_netdev(net, dev) {
3191 if (dev->group == group) {
3195 ops = dev->rtnl_link_ops;
3204 for_each_netdev_safe(net, dev, aux) {
3205 if (dev->group == group) {
3208 ops = dev->rtnl_link_ops;
3209 ops->dellink(dev, &list_kill);
3217 int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh)
3222 ops = dev->rtnl_link_ops;
3226 ops->dellink(dev, &list_kill);
3239 struct net_device *dev = NULL;
3264 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3266 dev = rtnl_dev_get(net, tb);
3272 if (!dev) {
3279 err = rtnl_delete_link(dev, portid, nlh);
3288 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm,
3294 old_flags = dev->flags;
3296 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3302 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
3303 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh);
3305 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
3306 __dev_notify_flags(dev, old_flags, ~0U, portid, nlh);
3318 struct net_device *dev;
3344 dev = ops->alloc(tb, ifname, name_assign_type,
3346 if (IS_ERR(dev))
3347 return dev;
3349 dev = alloc_netdev_mqs(ops->priv_size, ifname,
3354 if (!dev)
3357 err = validate_linkmsg(dev, tb, extack);
3359 free_netdev(dev);
3363 dev_net_set(dev, net);
3364 dev->rtnl_link_ops = ops;
3365 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
3370 err = dev_validate_mtu(dev, mtu, extack);
3372 free_netdev(dev);
3375 dev->mtu = mtu;
3378 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]),
3380 dev->addr_assign_type = NET_ADDR_SET;
3383 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
3386 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
3388 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3390 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
3392 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3394 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
3396 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
3398 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
3400 netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]));
3402 netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]));
3404 return dev;
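rtnl_create_link() (the lines from 3318 to 3404) allocates the device either through ops->alloc() or the generic alloc_netdev_mqs() fallback at 3349, then primes it from the IFLA_* attributes. The registration side that feeds it is small; extending the earlier hypothetical "foo" driver:

    #include <linux/module.h>
    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <net/rtnetlink.h>

    static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            dev_kfree_skb(skb);             /* sink everything, dummy-style */
            return NETDEV_TX_OK;
    }

    static const struct net_device_ops foo_netdev_ops = {
            .ndo_start_xmit = foo_xmit,
    };

    static void foo_setup(struct net_device *dev)
    {
            ether_setup(dev);               /* type, addr_len, broadcast, ... */
            dev->netdev_ops = &foo_netdev_ops;
            dev->flags |= IFF_NOARP;
    }

    static struct rtnl_link_ops foo_link_ops __read_mostly = {
            .kind  = "foo",
            .setup = foo_setup,     /* no ->alloc, so the 3349 path is taken */
    };

    static int __init foo_init(void)
    {
            return rtnl_link_register(&foo_link_ops);
    }
    module_init(foo_init);
    MODULE_LICENSE("GPL");

With that loaded, "ip link add type foo" lands in rtnl_newlink() and, from there, rtnl_create_link().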
3414 struct net_device *dev, *aux;
3417 for_each_netdev_safe(net, dev, aux) {
3418 if (dev->group == group) {
3419 err = validate_linkmsg(dev, tb, extack);
3422 err = do_setlink(skb, dev, ifm, extack, tb, 0);
3441 struct net_device *dev;
3475 dev = rtnl_create_link(link_net ? : dest_net, ifname,
3477 if (IS_ERR(dev)) {
3478 err = PTR_ERR(dev);
3482 dev->ifindex = ifm->ifi_index;
3485 err = ops->newlink(link_net ? : net, dev, tb, data, extack);
3487 err = register_netdevice(dev);
3489 free_netdev(dev);
3493 err = rtnl_configure_link(dev, ifm, portid, nlh);
3497 err = dev_change_net_namespace(dev, dest_net, ifname);
3502 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3515 ops->dellink(dev, &list_kill);
3518 unregister_netdevice(dev);
3541 struct net_device *dev;
3562 dev = __dev_get_by_index(net, ifm->ifi_index);
3568 dev = rtnl_dev_get(net, tb);
3571 dev = NULL;
3576 if (dev) {
3577 master_dev = netdev_master_upper_dev_get(dev);
3637 if (dev) {
3645 err = validate_linkmsg(dev, tb, extack);
3650 if (!ops || ops != dev->rtnl_link_ops ||
3654 err = ops->changelink(dev, tb, data, extack);
3664 err = m_ops->slave_changelink(master_dev, dev, tb,
3671 return do_setlink(skb, dev, ifm, extack, tb, status);
3675 /* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
3778 struct net_device *dev = NULL;
3805 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3807 dev = rtnl_dev_get(tgt_net, tb);
3812 if (dev == NULL)
3816 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
3820 err = rtnl_fill_ifinfo(nskb, dev, net,
3837 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
3849 size = rtnl_prop_list_size(dev);
3863 err = netdev_name_node_alt_create(dev, alt_ifname);
3867 err = netdev_name_node_alt_destroy(dev, alt_ifname);
3884 struct net_device *dev;
3900 dev = __dev_get_by_index(net, ifm->ifi_index);
3902 dev = rtnl_dev_get(net, tb);
3906 if (!dev)
3915 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
3923 netdev_state_change(dev);
3945 struct net_device *dev;
3964 for_each_netdev_rcu(net, dev) {
3966 if_nlmsg_size(dev, ext_filter_mask));
4020 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
4026 struct net *net = dev_net(dev);
4031 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
4040 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
4056 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags,
4059 struct net *net = dev_net(dev);
4064 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
4071 if (dev->reg_state != NETREG_REGISTERED)
4074 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
4077 rtmsg_ifinfo_send(skb, dev, flags, portid, nlh);
4080 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
4083 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
4087 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
4090 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
4095 struct net_device *dev,
4113 ndm->ndm_ifindex = dev->ifindex;
4116 if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr))
4130 static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev)
4133 nla_total_size(dev->addr_len) + /* NDA_LLADDR */
4138 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
4141 struct net *net = dev_net(dev);
4145 skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC);
4149 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
4167 struct net_device *dev,
4177 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4182 netdev_info(dev, "invalid flags given to default FDB implementation\n");
4187 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
4192 err = dev_uc_add_excl(dev, addr);
4194 err = dev_mc_add_excl(dev, addr);
4232 struct net_device *dev;
4248 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4249 if (dev == NULL) {
4259 if (dev->type != ARPHRD_ETHER) {
4274 netif_is_bridge_port(dev)) {
4275 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4278 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
4288 if (dev->netdev_ops->ndo_fdb_add)
4289 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
4294 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
4298 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
4312 struct net_device *dev,
4321 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4326 err = dev_uc_del(dev, addr);
4328 err = dev_mc_del(dev, addr);
4349 struct net_device *dev;
4373 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4374 if (dev == NULL) {
4387 if (dev->type != ARPHRD_ETHER) {
4400 netif_is_bridge_port(dev)) {
4401 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4406 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4409 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
4421 ops = dev->netdev_ops;
4424 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4426 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4431 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
4437 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4448 struct net_device *dev,
4463 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
4479 * @dev: netdevice
4488 struct net_device *dev,
4494 if (dev->type != ARPHRD_ETHER)
4497 netif_addr_lock_bh(dev);
4498 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
4501 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
4503 netif_addr_unlock_bh(dev);
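ndo_dflt_fdb_dump() (4488-4503) just walks dev->uc and dev->mc under the address lock and emits one RTM_NEWNEIGH per entry. The matching request is an AF_BRIDGE neighbour dump; a minimal sketch (a real reader loops until NLMSG_DONE):

    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>
    #include <linux/neighbour.h>

    int main(void)
    {
            struct {
                    struct nlmsghdr nh;
                    struct ndmsg ndm;
            } req = {
                    .nh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ndmsg)),
                    .nh.nlmsg_type  = RTM_GETNEIGH,
                    .nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
                    .ndm.ndm_family = AF_BRIDGE,    /* selects the FDB dump path */
            };
            char buf[16384];
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

            send(fd, &req, req.nh.nlmsg_len, 0);
            recv(fd, buf, sizeof(buf), 0);  /* RTM_NEWNEIGH messages, NDA_LLADDR inside */
            close(fd);
            return 0;
    }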
4599 struct net_device *dev;
4635 hlist_for_each_entry(dev, head, index_hlist) {
4637 if (brport_idx && (dev->ifindex != brport_idx))
4641 if (netif_is_bridge_port(dev)) {
4642 br_dev = netdev_master_upper_dev_get(dev);
4646 if (dev != br_dev &&
4647 !netif_is_bridge_port(dev))
4650 if (br_dev != netdev_master_upper_dev_get(dev) &&
4651 !netif_is_bridge_master(dev))
4659 if (netif_is_bridge_port(dev)) {
4662 br_dev, dev,
4669 if (dev->netdev_ops->ndo_fdb_dump)
4670 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
4671 dev, NULL,
4674 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
4763 struct net_device *dev = NULL, *br_dev = NULL;
4786 dev = __dev_get_by_index(net, brport_idx);
4787 if (!dev) {
4794 if (dev) {
4807 if (dev) {
4809 if (!netif_is_bridge_port(dev)) {
4813 br_dev = netdev_master_upper_dev_get(dev);
4824 ops = dev->netdev_ops;
4828 if (!br_dev && !dev) {
4843 dev = br_dev;
4844 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
4865 struct net_device *dev, u16 mode,
4869 struct net_device *dev,
4876 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
4877 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4887 ifm->ifi_type = dev->type;
4888 ifm->ifi_index = dev->ifindex;
4889 ifm->ifi_flags = dev_get_flags(dev);
4893 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4894 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4898 (dev->addr_len &&
4899 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
4900 (dev->ifindex != dev_get_iflink(dev) &&
4901 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
4920 err = vlan_fill(skb, dev, filter_mask);
5025 struct net_device *dev;
5038 for_each_netdev_rcu(net, dev) {
5039 const struct net_device_ops *ops = dev->netdev_ops;
5040 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5045 skb, portid, seq, dev,
5060 seq, dev,
5096 static int rtnl_bridge_notify(struct net_device *dev)
5098 struct net *net = dev_net(dev);
5102 if (!dev->netdev_ops->ndo_bridge_getlink)
5111 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
5137 struct net_device *dev;
5149 dev = __dev_get_by_index(net, ifm->ifi_index);
5150 if (!dev) {
5174 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5181 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
5190 if (!dev->netdev_ops->ndo_bridge_setlink)
5193 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
5202 err = rtnl_bridge_notify(dev);
5217 struct net_device *dev;
5230 dev = __dev_get_by_index(net, ifm->ifi_index);
5231 if (!dev) {
5251 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5258 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
5266 if (!dev->netdev_ops->ndo_bridge_dellink)
5269 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
5278 err = rtnl_bridge_notify(dev);
5295 rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id)
5297 return dev->netdev_ops &&
5298 dev->netdev_ops->ndo_has_offload_stats &&
5299 dev->netdev_ops->ndo_get_offload_stats &&
5300 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id);
5304 rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id)
5306 return rtnl_offload_xstats_have_ndo(dev, attr_id) ?
5311 rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id,
5314 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id);
5330 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data);
5338 rtnl_offload_xstats_get_size_stats(const struct net_device *dev,
5341 bool enabled = netdev_offload_xstats_enabled(dev, type);
5352 rtnl_offload_xstats_get_stats(struct net_device *dev,
5362 request = netdev_offload_xstats_enabled(dev, type);
5368 err = netdev_offload_xstats_get(dev, type, stats, &used, extack);
5405 rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev,
5413 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack);
5434 static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
5448 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb);
5461 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack);
5476 size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5484 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL,
5502 rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
5514 rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev)
5520 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) +
5524 static int rtnl_offload_xstats_get_size(const struct net_device *dev,
5534 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit);
5540 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev);
5544 size = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5561 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
5585 ifsm->ifindex = dev->ifindex;
5600 dev_get_stats(dev, sp);
5604 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5615 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5628 master = netdev_master_upper_dev_get(dev);
5640 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5661 err = rtnl_offload_xstats_fill(skb, dev, prividx,
5695 err = af_ops->fill_stats_af(skb, dev);
5728 static size_t if_nlmsg_stats_size(const struct net_device *dev,
5738 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5742 size += nla_total_size(ops->get_linkxstats_size(dev,
5750 struct net_device *_dev = (struct net_device *)dev;
5761 size += nla_total_size(ops->get_linkxstats_size(dev,
5772 size += rtnl_offload_xstats_get_size(dev, off_filter_mask);
5785 af_ops->get_stats_af_size(dev));
5905 struct net_device *dev = NULL;
5918 dev = __dev_get_by_index(net, ifsm->ifindex);
5922 if (!dev)
5934 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL);
5938 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
5961 struct net_device *dev;
5989 hlist_for_each_entry(dev, head, index_hlist) {
5992 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
6021 void rtnl_offload_xstats_notify(struct net_device *dev)
6024 struct net *net = dev_net(dev);
6036 skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
6041 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0,
6063 struct net_device *dev = NULL;
6080 dev = __dev_get_by_index(net, ifsm->ifindex);
6084 if (!dev)
6101 err = netdev_offload_xstats_enable(dev, t_l3, extack);
6103 err = netdev_offload_xstats_disable(dev, t_l3);
6117 rtnl_offload_xstats_notify(dev);
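rtnl_fill_statsinfo() and if_nlmsg_stats_size() (5561-5785) serve RTM_GETSTATS, where the request's filter_mask picks which IFLA_STATS_* nests to include. A hedged request-side sketch asking only for the 64-bit link counters (ifindex 1 assumed to be loopback):

    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>
    #include <linux/if_link.h>

    int main(void)
    {
            struct {
                    struct nlmsghdr nh;
                    struct if_stats_msg ifsm;
            } req = {
                    .nh.nlmsg_len     = NLMSG_LENGTH(sizeof(struct if_stats_msg)),
                    .nh.nlmsg_type    = RTM_GETSTATS,
                    .nh.nlmsg_flags   = NLM_F_REQUEST,
                    .ifsm.family      = AF_UNSPEC,
                    .ifsm.ifindex     = 1,
                    .ifsm.filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64),
            };
            char buf[8192];
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

            send(fd, &req, req.nh.nlmsg_len, 0);
            recv(fd, buf, sizeof(buf), 0);  /* RTM_NEWSTATS reply */
            close(fd);
            return 0;
    }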
6153 struct net_device *dev;
6168 for_each_netdev(net, dev) {
6171 if (!dev->netdev_ops->ndo_mdb_dump)
6174 err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb);
6261 struct net_device *dev;
6275 dev = __dev_get_by_index(net, bpm->ifindex);
6276 if (!dev) {
6286 if (!dev->netdev_ops->ndo_mdb_add) {
6291 return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack);
6300 struct net_device *dev;
6314 dev = __dev_get_by_index(net, bpm->ifindex);
6315 if (!dev) {
6325 if (!dev->netdev_ops->ndo_mdb_del) {
6330 return dev->netdev_ops->ndo_mdb_del(dev, tb, extack);
6476 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6492 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),