Lines Matching defs:dev in net/core/rtnetlink.c (each entry is the source line number followed by the matching fragment)

414 struct net_device *dev;
417 for_each_netdev(net, dev) {
418 if (dev->rtnl_link_ops == ops)
419 ops->dellink(dev, &list_kill);
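
Lines 414-419 are the teardown walk: for one namespace, every device whose dev->rtnl_link_ops matches the type being unregistered is queued through ops->dellink(). The driver side of that contract is small; below is a minimal sketch assuming a hypothetical "mydummy" link type (the name and module are illustrative, not from the source). If .dellink is left NULL, rtnl_link_register() fills in unregister_netdevice_queue(), which is what the walk above ends up calling.

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>

/* Sketch only: a real driver must also provide dev->netdev_ops,
 * a transmit path, and usually .validate/.newlink callbacks.
 */
static void mydummy_setup(struct net_device *dev)
{
	ether_setup(dev);		/* Ethernet-style defaults */
}

static struct rtnl_link_ops mydummy_link_ops __read_mostly = {
	.kind	= "mydummy",
	.setup	= mydummy_setup,
	/* .dellink left NULL: defaults to unregister_netdevice_queue() */
};

static int __init mydummy_init(void)
{
	return rtnl_link_register(&mydummy_link_ops);
}

static void __exit mydummy_exit(void)
{
	/* Unregistering runs the 417-419 walk and deletes every
	 * live device of this kind via ops->dellink().
	 */
	rtnl_link_unregister(&mydummy_link_ops);
}

module_init(mydummy_init);
module_exit(mydummy_exit);
MODULE_LICENSE("GPL");
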
489 static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
497 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
506 ops->get_slave_size(master_dev, dev);
513 static size_t rtnl_link_get_size(const struct net_device *dev)
515 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
527 ops->get_size(dev);
531 size += nla_total_size(ops->get_xstats_size(dev));
533 size += rtnl_link_get_slave_info_data_size(dev);
580 static size_t rtnl_link_get_af_size(const struct net_device *dev,
594 af_ops->get_link_af_size(dev, ext_filter_mask);
602 static bool rtnl_have_link_slave_info(const struct net_device *dev)
609 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
617 const struct net_device *dev)
624 master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
636 err = ops->fill_slave_info(skb, master_dev, dev);
649 const struct net_device *dev)
651 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
660 err = ops->fill_xstats(skb, dev);
668 err = ops->fill_info(skb, dev);
680 static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
689 err = rtnl_link_info_fill(skb, dev);
693 err = rtnl_link_slave_info_fill(skb, dev);
825 static void set_operstate(struct net_device *dev, unsigned char transition)
827 unsigned char operstate = dev->operstate;
834 !netif_dormant(dev) && !netif_testing(dev))
851 if (dev->operstate != operstate) {
853 dev->operstate = operstate;
855 netdev_state_change(dev);
859 static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
861 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
862 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
865 static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
873 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
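
Lines 859-873 are the flag-merge helpers used by RTM_SETLINK: bits named in the request's ifi_change mask are taken from ifi_flags, all other bits keep the device's current value, with IFF_PROMISC and IFF_ALLMULTI read from dev->gflags (the user-visible copies). For backward compatibility the kernel treats ifi_change == 0 as "change everything". A userspace restatement of the same bit arithmetic, with example values:

#include <stdio.h>
#include <linux/if.h>	/* IFF_* flag bits */

int main(void)
{
	unsigned int dev_flags  = IFF_BROADCAST | IFF_MULTICAST; /* current */
	unsigned int ifi_flags  = IFF_UP;	/* requested values */
	unsigned int ifi_change = IFF_UP;	/* only this bit is meaningful */

	/* Same merge as rtnl_dev_combine_flags() at line 865: the request
	 * wins where ifi_change is set, the device keeps the rest.
	 */
	unsigned int combined = (ifi_flags & ifi_change) |
				(dev_flags & ~ifi_change);

	/* prints 0x1003: IFF_UP | IFF_BROADCAST | IFF_MULTICAST */
	printf("combined: %#x\n", combined);
	return 0;
}
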
913 static inline int rtnl_vfinfo_size(const struct net_device *dev,
916 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
917 int num_vfs = dev_num_vf(dev->dev.parent);
958 static size_t rtnl_port_size(const struct net_device *dev,
973 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
976 if (dev_num_vf(dev->dev.parent))
978 vf_port_size * dev_num_vf(dev->dev.parent);
993 static size_t rtnl_prop_list_size(const struct net_device *dev)
998 if (list_empty(&dev->name_node->list))
1001 list_for_each_entry(name_node, &dev->name_node->list, list)
1006 static size_t rtnl_proto_down_size(const struct net_device *dev)
1010 if (dev->proto_down_reason)
1016 static noinline size_t if_nlmsg_size(const struct net_device *dev,
1046 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
1047 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
1048 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
1049 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
1057 + rtnl_proto_down_size(dev) /* proto down */
1063 + rtnl_prop_list_size(dev)
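
if_nlmsg_size() (1016-1063) sizes the reply skb before anything is filled in, by summing nla_total_size() over every attribute rtnl_fill_ifinfo() may emit, plus the variable-size contributions computed by the helpers above. The per-attribute formula is just the aligned header-plus-payload length; a small userspace check of the same arithmetic:

#include <stdio.h>
#include <linux/netlink.h>	/* struct nlattr, NLA_ALIGN, NLA_HDRLEN */

/* Same formula as the kernel's nla_total_size() */
static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	/* a u32 attribute such as IFLA_MTU: 4-byte header + 4-byte payload */
	printf("u32 attr:    %d bytes\n", nla_total_size(4));	/* 8 */
	/* a 6-byte MAC such as IFLA_ADDRESS: padded up to alignment */
	printf("lladdr attr: %d bytes\n", nla_total_size(6));	/* 12 */
	return 0;
}
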
1068 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
1079 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
1085 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
1104 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
1113 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
1124 static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
1129 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1133 err = rtnl_port_self_fill(skb, dev);
1137 if (dev_num_vf(dev->dev.parent)) {
1138 err = rtnl_vf_ports_fill(skb, dev);
1146 static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1151 err = dev_get_phys_port_id(dev, &ppid);
1164 static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1169 err = dev_get_phys_port_name(dev, name, sizeof(name));
1182 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1187 err = dev_get_port_parent_id(dev, &ppid, false);
1201 struct net_device *dev)
1212 dev_get_stats(dev, sp);
1225 struct net_device *dev,
1262 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1282 memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
1316 if (dev->netdev_ops->ndo_get_vf_guid &&
1317 !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
1336 if (dev->netdev_ops->ndo_get_vf_stats)
1337 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1374 struct net_device *dev,
1380 if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
1383 num_vfs = dev_num_vf(dev->dev.parent);
1387 if (!dev->netdev_ops->ndo_get_vf_config)
1395 if (rtnl_fill_vfinfo(skb, dev, i, vfinfo, ext_filter_mask))
1403 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1408 map.mem_start = dev->mem_start;
1409 map.mem_end = dev->mem_end;
1410 map.base_addr = dev->base_addr;
1411 map.irq = dev->irq;
1412 map.dma = dev->dma;
1413 map.port = dev->if_port;
1421 static u32 rtnl_xdp_prog_skb(struct net_device *dev)
1427 generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
1433 static u32 rtnl_xdp_prog_drv(struct net_device *dev)
1435 return dev_xdp_prog_id(dev, XDP_MODE_DRV);
1438 static u32 rtnl_xdp_prog_hw(struct net_device *dev)
1440 return dev_xdp_prog_id(dev, XDP_MODE_HW);
1443 static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
1445 u32 (*get_prog_id)(struct net_device *dev))
1450 curr_id = get_prog_id(dev);
1467 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1480 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
1484 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
1488 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
1541 static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
1548 upper_dev = netdev_master_upper_dev_get_rcu(dev);
1556 static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
1559 int ifindex = dev_get_iflink(dev);
1561 if (force || dev->ifindex != ifindex)
1568 struct net_device *dev)
1573 ret = dev_get_alias(dev, buf, sizeof(buf));
1578 const struct net_device *dev,
1583 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
1584 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1586 if (!net_eq(dev_net(dev), link_net)) {
1596 return nla_put_iflink(skb, dev, put_iflink);
1600 const struct net_device *dev,
1621 err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1641 const struct net_device *dev)
1646 list_for_each_entry(name_node, &dev->name_node->list, list) {
1655 const struct net_device *dev)
1664 ret = rtnl_fill_alt_ifnames(skb, dev);
1677 const struct net_device *dev)
1682 if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
1685 preason = dev->proto_down_reason;
1706 struct net_device *dev, struct net *src_net,
1724 ifm->ifi_type = dev->type;
1725 ifm->ifi_index = dev->ifindex;
1726 ifm->ifi_flags = dev_get_flags(dev);
1732 qdisc = rtnl_dereference(dev->qdisc);
1733 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
1734 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
1736 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
1737 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
1738 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
1739 nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
1740 nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
1741 nla_put_u32(skb, IFLA_GROUP, dev->group) ||
1742 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
1743 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
1744 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
1745 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
1747 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
1749 put_master_ifindex(skb, dev) ||
1750 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
1753 nla_put_ifalias(skb, dev) ||
1755 atomic_read(&dev->carrier_up_count) +
1756 atomic_read(&dev->carrier_down_count)) ||
1758 atomic_read(&dev->carrier_up_count)) ||
1760 atomic_read(&dev->carrier_down_count)))
1763 if (rtnl_fill_proto_down(skb, dev))
1771 if (rtnl_fill_link_ifmap(skb, dev))
1774 if (dev->addr_len) {
1775 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
1776 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
1780 if (rtnl_phys_port_id_fill(skb, dev))
1783 if (rtnl_phys_port_name_fill(skb, dev))
1786 if (rtnl_phys_switch_id_fill(skb, dev))
1789 if (rtnl_fill_stats(skb, dev))
1792 if (rtnl_fill_vf(skb, dev, ext_filter_mask))
1795 if (rtnl_port_fill(skb, dev, ext_filter_mask))
1798 if (rtnl_xdp_fill(skb, dev))
1801 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
1802 if (rtnl_link_fill(skb, dev) < 0)
1806 if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
1816 if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
1817 nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
1821 if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
1825 if (rtnl_fill_prop_list(skb, dev))
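
Lines 1706-1825 (rtnl_fill_ifinfo() and its helpers) build the RTM_NEWLINK payload that answers RTM_GETLINK and carries link notifications. A minimal userspace sketch of the other end, on a raw NETLINK_ROUTE socket; error handling is trimmed and ifindex 1 (typically loopback) is just a placeholder:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct {
		struct nlmsghdr  nlh;
		struct ifinfomsg ifm;
	} req = {
		.nlh = {
			.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
			.nlmsg_type  = RTM_GETLINK,
			.nlmsg_flags = NLM_F_REQUEST,
		},
		.ifm = { .ifi_family = AF_UNSPEC, .ifi_index = 1 },
	};
	char buf[8192];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	send(fd, &req, req.nlh.nlmsg_len, 0);
	int len = recv(fd, buf, sizeof(buf), 0);

	for (struct nlmsghdr *nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
	     nh = NLMSG_NEXT(nh, len)) {
		if (nh->nlmsg_type != RTM_NEWLINK)
			continue;
		struct ifinfomsg *ifm = NLMSG_DATA(nh);
		int alen = nh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifm));

		/* walk the attributes emitted at lines 1733 onward */
		for (struct rtattr *rta = IFLA_RTA(ifm); RTA_OK(rta, alen);
		     rta = RTA_NEXT(rta, alen)) {
			if (rta->rta_type == IFLA_IFNAME)
				printf("ifname: %s\n", (char *)RTA_DATA(rta));
			else if (rta->rta_type == IFLA_MTU)
				printf("mtu: %u\n",
				       *(unsigned int *)RTA_DATA(rta));
		}
	}
	close(fd);
	return 0;
}
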
1957 static bool link_master_filtered(struct net_device *dev, int master_idx)
1964 master = netdev_master_upper_dev_get(dev);
1971 static bool link_kind_filtered(const struct net_device *dev,
1974 if (kind_ops && dev->rtnl_link_ops != kind_ops)
1980 static bool link_dump_filtered(struct net_device *dev,
1984 if (link_master_filtered(dev, master_idx) ||
1985 link_kind_filtered(dev, kind_ops))
2070 struct net_device *dev;
2129 hlist_for_each_entry(dev, head, index_hlist) {
2130 if (link_dump_filtered(dev, master_idx, kind_ops))
2134 err = rtnl_fill_ifinfo(skb, dev, net,
2277 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
2279 if (dev) {
2281 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2285 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2309 err = af_ops->validate_link_af(dev, af);
2323 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2326 const struct net_device_ops *ops = dev->netdev_ops;
2328 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2331 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2333 if (dev->type != ARPHRD_INFINIBAND)
2336 return handle_infiniband_guid(dev, ivt, guid_type);
2339 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
2341 const struct net_device_ops *ops = dev->netdev_ops;
2351 err = ops->ndo_set_vf_mac(dev, ivm->vf,
2364 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
2396 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2410 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2416 err = ops->ndo_set_vf_rate(dev, ivt->vf,
2430 err = ops->ndo_set_vf_rate(dev, ivt->vf,
2444 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2457 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2471 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2484 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2496 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2507 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
2513 static int do_set_master(struct net_device *dev, int ifindex,
2516 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
2525 err = ops->ndo_del_slave(upper_dev, dev);
2534 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2539 err = ops->ndo_add_slave(upper_dev, dev, extack);
2554 static int do_set_proto_down(struct net_device *dev,
2560 const struct net_device_ops *ops = dev->netdev_ops;
2590 dev_change_proto_down_reason(dev, mask, value);
2597 if (!proto_down && dev->proto_down_reason) {
2601 err = dev_change_proto_down(dev,
2614 struct net_device *dev, struct ifinfomsg *ifm,
2618 const struct net_device_ops *ops = dev->netdev_ops;
2621 err = validate_linkmsg(dev, tb);
2627 struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev),
2634 err = dev_change_net_namespace(dev, net, pat);
2650 if (!netif_device_present(dev)) {
2663 err = ops->ndo_set_config(dev, &k_map);
2674 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2681 sa->sa_family = dev->type;
2683 dev->addr_len);
2684 err = dev_set_mac_address_user(dev, sa, extack);
2692 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
2699 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2709 err = dev_change_name(dev, ifname);
2716 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2724 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2725 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2729 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2736 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2743 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2752 err = dev_change_tx_queue_len(dev, value);
2766 if (dev->gso_max_size ^ max_size) {
2767 netif_set_gso_max_size(dev, max_size);
2780 if (dev->gso_max_segs ^ max_segs) {
2781 dev->gso_max_segs = max_segs;
2787 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2793 if (dev->link_mode ^ value)
2795 dev->link_mode = value;
2816 err = do_setvfinfo(dev, vfinfo);
2851 err = ops->ndo_set_vf_port(dev, vf, port);
2870 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
2887 err = af_ops->set_link_af(dev, af);
2900 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
2946 err = dev_change_xdp_fd(dev, extack,
2959 netdev_state_change(dev);
2963 dev->name);
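
do_setlink() (2614-2963) applies each attribute present in the request in turn and, per the warning printed at 2963, commits changes as it goes rather than rolling back on a later failure. The wire format it consumes is an ifinfomsg followed by attributes; a minimal sketch for the IFLA_MTU branch at 2692 (ifindex 1 and MTU 1400 are placeholders, and the call needs CAP_NET_ADMIN to succeed):

#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct {
		struct nlmsghdr  nlh;
		struct ifinfomsg ifm;
		struct rtattr    rta;
		unsigned int     mtu;
	} req = {
		.nlh = {
			.nlmsg_len   = sizeof(req),
			.nlmsg_type  = RTM_SETLINK,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
		},
		.ifm = { .ifi_family = AF_UNSPEC, .ifi_index = 1 },
		.rta = { .rta_len  = RTA_LENGTH(sizeof(unsigned int)),
			 .rta_type = IFLA_MTU },
		.mtu = 1400,
	};
	char ack[256];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	send(fd, &req, sizeof(req), 0);
	recv(fd, ack, sizeof(ack), 0); /* NLMSG_ERROR, error 0 on success */
	close(fd);
	return 0;
}
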
2994 struct net_device *dev;
3016 dev = __dev_get_by_index(net, ifm->ifi_index);
3018 dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname);
3022 if (dev == NULL) {
3027 err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
3034 struct net_device *dev, *aux;
3041 for_each_netdev(net, dev) {
3042 if (dev->group == group) {
3046 ops = dev->rtnl_link_ops;
3055 for_each_netdev_safe(net, dev, aux) {
3056 if (dev->group == group) {
3059 ops = dev->rtnl_link_ops;
3060 ops->dellink(dev, &list_kill);
3068 int rtnl_delete_link(struct net_device *dev)
3073 ops = dev->rtnl_link_ops;
3077 ops->dellink(dev, &list_kill);
3089 struct net_device *dev = NULL;
3114 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3116 dev = rtnl_dev_get(net, tb[IFLA_IFNAME],
3123 if (!dev) {
3130 err = rtnl_delete_link(dev);
3139 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
3144 old_flags = dev->flags;
3146 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3152 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
3153 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
3155 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
3156 __dev_notify_flags(dev, old_flags, ~0U);
3168 struct net_device *dev;
3192 dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
3194 if (!dev)
3197 dev_net_set(dev, net);
3198 dev->rtnl_link_ops = ops;
3199 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
3205 err = dev_validate_mtu(dev, mtu, extack);
3207 free_netdev(dev);
3210 dev->mtu = mtu;
3213 memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
3215 dev->addr_assign_type = NET_ADDR_SET;
3218 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
3221 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
3223 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3225 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
3227 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3229 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
3231 dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
3233 return dev;
3243 struct net_device *dev, *aux;
3246 for_each_netdev_safe(net, dev, aux) {
3247 if (dev->group == group) {
3248 err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0);
3271 struct net_device *dev;
3298 dev = __dev_get_by_index(net, ifm->ifi_index);
3304 dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname);
3307 dev = NULL;
3312 if (dev) {
3313 master_dev = netdev_master_upper_dev_get(dev);
3318 err = validate_linkmsg(dev, tb);
3377 if (dev) {
3386 if (!ops || ops != dev->rtnl_link_ops ||
3390 err = ops->changelink(dev, tb, data, extack);
3400 err = m_ops->slave_changelink(master_dev, dev, tb,
3407 return do_setlink(skb, dev, ifm, extack, tb, ifname, status);
3411 /* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
3469 dev = rtnl_create_link(link_net ? : dest_net, ifname,
3471 if (IS_ERR(dev)) {
3472 err = PTR_ERR(dev);
3476 dev->ifindex = ifm->ifi_index;
3479 err = ops->newlink(link_net ? : net, dev, tb, data, extack);
3481 err = register_netdevice(dev);
3483 free_netdev(dev);
3487 err = rtnl_configure_link(dev, ifm);
3491 err = dev_change_net_namespace(dev, dest_net, ifname);
3496 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3509 ops->dellink(dev, &list_kill);
3512 unregister_netdevice(dev);
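
rtnl_newlink() (3271-3512) is the create/modify entry point: with NLM_F_CREATE set and no existing device, it resolves the rtnl_link_ops from IFLA_LINKINFO/IFLA_INFO_KIND, calls rtnl_create_link() (3168 onward), then ops->newlink() or register_netdevice() (3479-3481). A userspace sketch that creates a "dummy" device this way; the addattr() helper and buffer size are illustrative, and CAP_NET_ADMIN plus the dummy module are required:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>

/* Append one attribute to the message, iproute2-style. */
static struct rtattr *addattr(struct nlmsghdr *nlh, unsigned short type,
			      const void *data, int len)
{
	struct rtattr *rta = (struct rtattr *)((char *)nlh +
					       NLMSG_ALIGN(nlh->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len  = RTA_LENGTH(len);
	if (data)
		memcpy(RTA_DATA(rta), data, len);
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
	return rta;
}

int main(void)
{
	char buf[512] = { 0 };
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct rtattr *linkinfo;
	char ack[256];
	int fd;

	nlh->nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	nlh->nlmsg_type  = RTM_NEWLINK;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL |
			   NLM_F_ACK;

	addattr(nlh, IFLA_IFNAME, "dummy0", sizeof("dummy0"));
	linkinfo = addattr(nlh, IFLA_LINKINFO, NULL, 0);	/* open nest */
	addattr(nlh, IFLA_INFO_KIND, "dummy", sizeof("dummy"));
	/* close the nest: its length runs to the current end of the message */
	linkinfo->rta_len = (char *)nlh + nlh->nlmsg_len - (char *)linkinfo;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	send(fd, nlh, nlh->nlmsg_len, 0);
	recv(fd, ack, sizeof(ack), 0); /* NLMSG_ERROR, error 0 on success */
	close(fd);
	return 0;
}
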
3587 struct net_device *dev = NULL;
3614 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3616 dev = rtnl_dev_get(tgt_net, tb[IFLA_IFNAME],
3622 if (dev == NULL)
3626 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
3630 err = rtnl_fill_ifinfo(nskb, dev, net,
3647 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
3659 size = rtnl_prop_list_size(dev);
3673 err = netdev_name_node_alt_create(dev, alt_ifname);
3677 err = netdev_name_node_alt_destroy(dev, alt_ifname);
3694 struct net_device *dev;
3710 dev = __dev_get_by_index(net, ifm->ifi_index);
3712 dev = rtnl_dev_get(net, tb[IFLA_IFNAME],
3717 if (!dev)
3726 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
3734 netdev_state_change(dev);
3756 struct net_device *dev;
3775 for_each_netdev_rcu(net, dev) {
3777 if_nlmsg_size(dev, ext_filter_mask));
3831 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
3836 struct net *net = dev_net(dev);
3841 skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
3845 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
3861 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
3863 struct net *net = dev_net(dev);
3868 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
3874 if (dev->reg_state != NETREG_REGISTERED)
3877 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
3880 rtmsg_ifinfo_send(skb, dev, flags);
3883 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
3886 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3890 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
3893 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3898 struct net_device *dev,
3916 ndm->ndm_ifindex = dev->ifindex;
3919 if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr))
3933 static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev)
3936 nla_total_size(dev->addr_len) + /* NDA_LLADDR */
3941 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
3944 struct net *net = dev_net(dev);
3948 skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC);
3952 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
3970 struct net_device *dev,
3980 pr_info("%s: FDB only supports static addresses\n", dev->name);
3985 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
3990 err = dev_uc_add_excl(dev, addr);
3992 err = dev_mc_add_excl(dev, addr);
4030 struct net_device *dev;
4046 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4047 if (dev == NULL) {
4057 if (dev->type != ARPHRD_ETHER) {
4072 netif_is_bridge_port(dev)) {
4073 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4076 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
4086 if (dev->netdev_ops->ndo_fdb_add)
4087 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
4092 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
4096 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
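
rtnl_fdb_add() (4030-4096) routes RTM_NEWNEIGH FDB requests: NTF_MASTER-style requests on a bridge port go to the master's ndo_fdb_add (4073-4076), while NTF_SELF requests go to the device itself or, absent an ndo_fdb_add, to ndo_dflt_fdb_add() (3970 onward), which only accepts static (NUD_PERMANENT) entries per the message at 3980. A sketch of a matching request, roughly what `bridge fdb add ... self` sends; the ifindex and MAC are placeholders:

#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct ndmsg    ndm;
		struct rtattr   rta;
		unsigned char   lladdr[6];
		unsigned char   pad[2];		/* RTA_ALIGN() padding */
	} req = {
		.nlh = {
			.nlmsg_len   = sizeof(req),
			.nlmsg_type  = RTM_NEWNEIGH,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE |
				       NLM_F_ACK,
		},
		.ndm = {
			.ndm_family  = PF_BRIDGE,
			.ndm_ifindex = 2,		/* placeholder */
			.ndm_state   = NUD_PERMANENT,	/* static entry */
			.ndm_flags   = NTF_SELF,	/* handle on the dev */
		},
		.rta = { .rta_len  = RTA_LENGTH(6),
			 .rta_type = NDA_LLADDR },
		.lladdr = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};
	char ack[256];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	send(fd, &req, sizeof(req), 0);
	recv(fd, ack, sizeof(ack), 0); /* NLMSG_ERROR, error 0 on success */
	close(fd);
	return 0;
}
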
4110 struct net_device *dev,
4119 pr_info("%s: FDB only supports static addresses\n", dev->name);
4124 err = dev_uc_del(dev, addr);
4126 err = dev_mc_del(dev, addr);
4138 struct net_device *dev;
4157 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4158 if (dev == NULL) {
4168 if (dev->type != ARPHRD_ETHER) {
4183 netif_is_bridge_port(dev)) {
4184 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4188 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
4198 if (dev->netdev_ops->ndo_fdb_del)
4199 err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
4202 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4205 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4216 struct net_device *dev,
4231 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
4247 * @dev: netdevice
4256 struct net_device *dev,
4262 if (dev->type != ARPHRD_ETHER)
4265 netif_addr_lock_bh(dev);
4266 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
4269 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
4271 netif_addr_unlock_bh(dev);
4367 struct net_device *dev;
4403 hlist_for_each_entry(dev, head, index_hlist) {
4405 if (brport_idx && (dev->ifindex != brport_idx))
4409 if (netif_is_bridge_port(dev)) {
4410 br_dev = netdev_master_upper_dev_get(dev);
4414 if (dev != br_dev &&
4415 !netif_is_bridge_port(dev))
4418 if (br_dev != netdev_master_upper_dev_get(dev) &&
4419 !(dev->priv_flags & IFF_EBRIDGE))
4427 if (netif_is_bridge_port(dev)) {
4430 br_dev, dev,
4437 if (dev->netdev_ops->ndo_fdb_dump)
4438 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
4439 dev, NULL,
4442 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
4531 struct net_device *dev = NULL, *br_dev = NULL;
4554 dev = __dev_get_by_index(net, brport_idx);
4555 if (!dev) {
4562 if (dev) {
4575 if (dev) {
4577 if (!netif_is_bridge_port(dev)) {
4581 br_dev = netdev_master_upper_dev_get(dev);
4592 ops = dev->netdev_ops;
4596 if (!br_dev && !dev) {
4611 dev = br_dev;
4612 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
4633 struct net_device *dev, u16 mode,
4637 struct net_device *dev,
4644 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
4645 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4655 ifm->ifi_type = dev->type;
4656 ifm->ifi_index = dev->ifindex;
4657 ifm->ifi_flags = dev_get_flags(dev);
4661 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4662 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4666 (dev->addr_len &&
4667 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
4668 (dev->ifindex != dev_get_iflink(dev) &&
4669 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
4688 err = vlan_fill(skb, dev, filter_mask);
4793 struct net_device *dev;
4806 for_each_netdev_rcu(net, dev) {
4807 const struct net_device_ops *ops = dev->netdev_ops;
4808 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4813 skb, portid, seq, dev,
4828 seq, dev,
4864 static int rtnl_bridge_notify(struct net_device *dev)
4866 struct net *net = dev_net(dev);
4870 if (!dev->netdev_ops->ndo_bridge_getlink)
4879 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
4905 struct net_device *dev;
4917 dev = __dev_get_by_index(net, ifm->ifi_index);
4918 if (!dev) {
4942 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4949 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
4958 if (!dev->netdev_ops->ndo_bridge_setlink)
4961 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
4970 err = rtnl_bridge_notify(dev);
4985 struct net_device *dev;
4998 dev = __dev_get_by_index(net, ifm->ifi_index);
4999 if (!dev) {
5019 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5026 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
5034 if (!dev->netdev_ops->ndo_bridge_dellink)
5037 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
5046 err = rtnl_bridge_notify(dev);
5073 static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
5081 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
5082 dev->netdev_ops->ndo_get_offload_stats))
5094 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
5104 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
5123 static int rtnl_get_offload_stats_size(const struct net_device *dev)
5129 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
5130 dev->netdev_ops->ndo_get_offload_stats))
5135 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
5147 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
5168 ifsm->ifindex = dev->ifindex;
5181 dev_get_stats(dev, sp);
5185 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5194 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5207 master = netdev_master_upper_dev_get(dev);
5217 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5233 err = rtnl_get_offload_stats(skb, dev, prividx);
5264 err = af_ops->fill_stats_af(skb, dev);
5297 static size_t if_nlmsg_stats_size(const struct net_device *dev,
5306 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5310 size += nla_total_size(ops->get_linkxstats_size(dev,
5318 struct net_device *_dev = (struct net_device *)dev;
5329 size += nla_total_size(ops->get_linkxstats_size(dev,
5337 size += rtnl_get_offload_stats_size(dev);
5349 af_ops->get_stats_af_size(dev));
5399 struct net_device *dev = NULL;
5413 dev = __dev_get_by_index(net, ifsm->ifindex);
5417 if (!dev)
5424 nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
5428 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
5450 struct net_device *dev;
5475 hlist_for_each_entry(dev, head, index_hlist) {
5478 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
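
rtnl_stats_get() (5399-5428) answers RTM_GETSTATS for a single device, sizing the reply with if_nlmsg_stats_size() and filling it through rtnl_fill_statsinfo(); rtnl_stats_dump() (5450-5478) does the same per device for dumps. A userspace sketch requesting only the 64-bit link counters (IFLA_STATS_LINK_64); ifindex 1 is a placeholder:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>

int main(void)
{
	struct {
		struct nlmsghdr     nlh;
		struct if_stats_msg ifsm;
	} req = {
		.nlh = {
			.nlmsg_len   = NLMSG_LENGTH(sizeof(struct if_stats_msg)),
			.nlmsg_type  = RTM_GETSTATS,
			.nlmsg_flags = NLM_F_REQUEST,
		},
		.ifsm = {
			.ifindex     = 1,
			.filter_mask = IFLA_STATS_FILTER_MASK(IFLA_STATS_LINK_64),
		},
	};
	char buf[4096];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	send(fd, &req, req.nlh.nlmsg_len, 0);
	int len = recv(fd, buf, sizeof(buf), 0);

	struct nlmsghdr *nh = (struct nlmsghdr *)buf;
	if (NLMSG_OK(nh, len) && nh->nlmsg_type == RTM_NEWSTATS) {
		struct if_stats_msg *ifsm = NLMSG_DATA(nh);
		int alen = nh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifsm));

		/* attributes start right after if_stats_msg */
		for (struct rtattr *rta = (struct rtattr *)(ifsm + 1);
		     RTA_OK(rta, alen); rta = RTA_NEXT(rta, alen)) {
			if (rta->rta_type == IFLA_STATS_LINK_64) {
				struct rtnl_link_stats64 *s = RTA_DATA(rta);

				printf("rx %llu tx %llu\n",
				       (unsigned long long)s->rx_packets,
				       (unsigned long long)s->tx_packets);
			}
		}
	}
	close(fd);
	return 0;
}
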
5642 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5658 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),