xref: /kernel/linux/linux-5.10/net/core/rtnetlink.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
4 *		operating system.  INET is implemented using the  BSD Socket
5 *		interface as the means of communication with the user level.
6 *
7 *		Routing netlink socket interface: protocol independent part.
8 *
9 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 *	Fixes:
12 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
13 */
14
15#include <linux/bitops.h>
16#include <linux/errno.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/socket.h>
20#include <linux/kernel.h>
21#include <linux/timer.h>
22#include <linux/string.h>
23#include <linux/sockios.h>
24#include <linux/net.h>
25#include <linux/fcntl.h>
26#include <linux/mm.h>
27#include <linux/slab.h>
28#include <linux/interrupt.h>
29#include <linux/capability.h>
30#include <linux/skbuff.h>
31#include <linux/init.h>
32#include <linux/security.h>
33#include <linux/mutex.h>
34#include <linux/if_addr.h>
35#include <linux/if_bridge.h>
36#include <linux/if_vlan.h>
37#include <linux/pci.h>
38#include <linux/etherdevice.h>
39#include <linux/bpf.h>
40
41#include <linux/uaccess.h>
42
43#include <linux/inet.h>
44#include <linux/netdevice.h>
45#include <net/ip.h>
46#include <net/protocol.h>
47#include <net/arp.h>
48#include <net/route.h>
49#include <net/udp.h>
50#include <net/tcp.h>
51#include <net/sock.h>
52#include <net/pkt_sched.h>
53#include <net/fib_rules.h>
54#include <net/rtnetlink.h>
55#include <net/net_namespace.h>
56
57#define RTNL_MAX_TYPE		50
58#define RTNL_SLAVE_MAX_TYPE	36
59
60struct rtnl_link {
61	rtnl_doit_func		doit;
62	rtnl_dumpit_func	dumpit;
63	struct module		*owner;
64	unsigned int		flags;
65	struct rcu_head		rcu;
66};
67
68static DEFINE_MUTEX(rtnl_mutex);
69
70void rtnl_lock(void)
71{
72	mutex_lock(&rtnl_mutex);
73}
74EXPORT_SYMBOL(rtnl_lock);
75
76int rtnl_lock_killable(void)
77{
78	return mutex_lock_killable(&rtnl_mutex);
79}
80EXPORT_SYMBOL(rtnl_lock_killable);
81
82static struct sk_buff *defer_kfree_skb_list;
83void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
84{
85	if (head && tail) {
86		tail->next = defer_kfree_skb_list;
87		defer_kfree_skb_list = head;
88	}
89}
90EXPORT_SYMBOL(rtnl_kfree_skbs);
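/* Callers must hold the RTNL mutex: defer_kfree_skb_list is not
 * otherwise protected.  Queued skbs are only freed once __rtnl_unlock()
 * has dropped the mutex and drains the list, which keeps potentially
 * long kfree_skb() chains out of the locked section.
 */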
91
92void __rtnl_unlock(void)
93{
94	struct sk_buff *head = defer_kfree_skb_list;
95
96	defer_kfree_skb_list = NULL;
97
98	mutex_unlock(&rtnl_mutex);
99
100	while (head) {
101		struct sk_buff *next = head->next;
102
103		kfree_skb(head);
104		cond_resched();
105		head = next;
106	}
107}
108
109void rtnl_unlock(void)
110{
111	/* This fellow will unlock it for us. */
112	netdev_run_todo();
113}
114EXPORT_SYMBOL(rtnl_unlock);
115
116int rtnl_trylock(void)
117{
118	return mutex_trylock(&rtnl_mutex);
119}
120EXPORT_SYMBOL(rtnl_trylock);
121
122int rtnl_is_locked(void)
123{
124	return mutex_is_locked(&rtnl_mutex);
125}
126EXPORT_SYMBOL(rtnl_is_locked);
127
128bool refcount_dec_and_rtnl_lock(refcount_t *r)
129{
130	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
131}
132EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);
133
134#ifdef CONFIG_PROVE_LOCKING
135bool lockdep_rtnl_is_held(void)
136{
137	return lockdep_is_held(&rtnl_mutex);
138}
139EXPORT_SYMBOL(lockdep_rtnl_is_held);
140#endif /* #ifdef CONFIG_PROVE_LOCKING */
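/* lockdep_rtnl_is_held() backs the rcu_dereference_rtnl() and
 * rtnl_dereference() helpers in <linux/rtnetlink.h>; when lockdep is
 * not enabled, the header is expected to provide a stub that simply
 * returns true.
 */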
141
142static struct rtnl_link *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
143
144static inline int rtm_msgindex(int msgtype)
145{
146	int msgindex = msgtype - RTM_BASE;
147
148	/*
149	 * msgindex < 0 implies someone tried to register a netlink
150	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
151	 * the message type has not been added to linux/rtnetlink.h
152	 */
153	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
154
155	return msgindex;
156}
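/* For example, rtm_msgindex(RTM_NEWLINK) is RTM_NEWLINK - RTM_BASE == 0,
 * so RTM_NEWLINK handlers occupy slot 0 of a protocol's handler table.
 */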
157
158static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
159{
160	struct rtnl_link **tab;
161
162	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
163		protocol = PF_UNSPEC;
164
165	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
166	if (!tab)
167		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);
168
169	return tab[msgtype];
170}
171
172static int rtnl_register_internal(struct module *owner,
173				  int protocol, int msgtype,
174				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
175				  unsigned int flags)
176{
177	struct rtnl_link *link, *old;
178	struct rtnl_link __rcu **tab;
179	int msgindex;
180	int ret = -ENOBUFS;
181
182	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
183	msgindex = rtm_msgindex(msgtype);
184
185	rtnl_lock();
186	tab = rtnl_msg_handlers[protocol];
187	if (tab == NULL) {
188		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
189		if (!tab)
190			goto unlock;
191
192		/* ensures we see the 0 stores */
193		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
194	}
195
196	old = rtnl_dereference(tab[msgindex]);
197	if (old) {
198		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
199		if (!link)
200			goto unlock;
201	} else {
202		link = kzalloc(sizeof(*link), GFP_KERNEL);
203		if (!link)
204			goto unlock;
205	}
206
207	WARN_ON(link->owner && link->owner != owner);
208	link->owner = owner;
209
210	WARN_ON(doit && link->doit && link->doit != doit);
211	if (doit)
212		link->doit = doit;
213	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
214	if (dumpit)
215		link->dumpit = dumpit;
216
217	link->flags |= flags;
218
219	/* publish protocol:msgtype */
220	rcu_assign_pointer(tab[msgindex], link);
221	ret = 0;
222	if (old)
223		kfree_rcu(old, rcu);
224unlock:
225	rtnl_unlock();
226	return ret;
227}
228
229/**
230 * rtnl_register_module - Register a rtnetlink message type
231 *
232 * @owner: module registering the hook (THIS_MODULE)
233 * @protocol: Protocol family or PF_UNSPEC
234 * @msgtype: rtnetlink message type
235 * @doit: Function pointer called for each request message
236 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
237 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
238 *
239 * Like rtnl_register, but for use by removable modules.
240 */
241int rtnl_register_module(struct module *owner,
242			 int protocol, int msgtype,
243			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
244			 unsigned int flags)
245{
246	return rtnl_register_internal(owner, protocol, msgtype,
247				      doit, dumpit, flags);
248}
249EXPORT_SYMBOL_GPL(rtnl_register_module);
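/* A minimal usage sketch (hypothetical module; my_doit and the
 * PF_BRIDGE/RTM_GETLINK pairing are purely illustrative):
 *
 *	static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 *			   struct netlink_ext_ack *extack)
 *	{
 *		return 0;
 *	}
 *
 *	static int __init my_init(void)
 *	{
 *		return rtnl_register_module(THIS_MODULE, PF_BRIDGE,
 *					    RTM_GETLINK, my_doit, NULL, 0);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		rtnl_unregister(PF_BRIDGE, RTM_GETLINK);
 *	}
 */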
250
251/**
252 * rtnl_register - Register a rtnetlink message type
253 * @protocol: Protocol family or PF_UNSPEC
254 * @msgtype: rtnetlink message type
255 * @doit: Function pointer called for each request message
256 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
257 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
258 *
259 * Registers the specified function pointers (at least one of them has
260 * to be non-NULL) to be called whenever a request message for the
261 * specified protocol family and message type is received.
262 *
263 * The special protocol family PF_UNSPEC may be used to define fallback
264 * function pointers for the case when no entry for the specific protocol
265 * family exists.
266 */
267void rtnl_register(int protocol, int msgtype,
268		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
269		   unsigned int flags)
270{
271	int err;
272
273	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
274				     flags);
275	if (err)
276		pr_err("Unable to register rtnetlink message handler, "
277		       "protocol = %d, message type = %d\n", protocol, msgtype);
278}
279
280/**
281 * rtnl_unregister - Unregister a rtnetlink message type
282 * @protocol: Protocol family or PF_UNSPEC
283 * @msgtype: rtnetlink message type
284 *
285 * Returns 0 on success or a negative error code.
286 */
287int rtnl_unregister(int protocol, int msgtype)
288{
289	struct rtnl_link **tab, *link;
290	int msgindex;
291
292	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
293	msgindex = rtm_msgindex(msgtype);
294
295	rtnl_lock();
296	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
297	if (!tab) {
298		rtnl_unlock();
299		return -ENOENT;
300	}
301
302	link = tab[msgindex];
303	rcu_assign_pointer(tab[msgindex], NULL);
304	rtnl_unlock();
305
306	kfree_rcu(link, rcu);
307
308	return 0;
309}
310EXPORT_SYMBOL_GPL(rtnl_unregister);
311
312/**
313 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
314 * @protocol: Protocol family or PF_UNSPEC
315 *
316 * Identical to calling rtnl_unregister() for all registered message types
317 * of a certain protocol family.
318 */
319void rtnl_unregister_all(int protocol)
320{
321	struct rtnl_link **tab, *link;
322	int msgindex;
323
324	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
325
326	rtnl_lock();
327	tab = rtnl_msg_handlers[protocol];
328	if (!tab) {
329		rtnl_unlock();
330		return;
331	}
332	RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
333	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
334		link = tab[msgindex];
335		if (!link)
336			continue;
337
338		rcu_assign_pointer(tab[msgindex], NULL);
339		kfree_rcu(link, rcu);
340	}
341	rtnl_unlock();
342
343	synchronize_net();
344
345	kfree(tab);
346}
347EXPORT_SYMBOL_GPL(rtnl_unregister_all);
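/* A typical caller is a protocol's module-exit path, along the lines of
 * rtnl_unregister_all(PF_INET6), which tears down every msgtype slot in
 * one pass instead of one rtnl_unregister() call per message type.
 */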
348
349static LIST_HEAD(link_ops);
350
351static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
352{
353	const struct rtnl_link_ops *ops;
354
355	list_for_each_entry(ops, &link_ops, list) {
356		if (!strcmp(ops->kind, kind))
357			return ops;
358	}
359	return NULL;
360}
361
362/**
363 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
364 * @ops: struct rtnl_link_ops * to register
365 *
366 * The caller must hold the rtnl_mutex. This function should be used
367 * by drivers that create devices during module initialization. It
368 * must be called before registering the devices.
369 *
370 * Returns 0 on success or a negative error code.
371 */
372int __rtnl_link_register(struct rtnl_link_ops *ops)
373{
374	if (rtnl_link_ops_get(ops->kind))
375		return -EEXIST;
376
377	/* The check for setup is here because if ops
378	 * does not have it filled in, the ops cannot be
379	 * used to create a device. So do not fill in
380	 * dellink either; that disables rtnl_dellink.
381	 */
382	if (ops->setup && !ops->dellink)
383		ops->dellink = unregister_netdevice_queue;
384
385	list_add_tail(&ops->list, &link_ops);
386	return 0;
387}
388EXPORT_SYMBOL_GPL(__rtnl_link_register);
389
390/**
391 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
392 * @ops: struct rtnl_link_ops * to register
393 *
394 * Returns 0 on success or a negative error code.
395 */
396int rtnl_link_register(struct rtnl_link_ops *ops)
397{
398	int err;
399
400	/* Sanity-check max sizes to avoid stack buffer overflow. */
401	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
402		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
403		return -EINVAL;
404
405	rtnl_lock();
406	err = __rtnl_link_register(ops);
407	rtnl_unlock();
408	return err;
409}
410EXPORT_SYMBOL_GPL(rtnl_link_register);
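/* A minimal registration sketch (names are illustrative; real users such
 * as the dummy or veth drivers fill in many more ops):
 *
 *	static void my_setup(struct net_device *dev)
 *	{
 *		ether_setup(dev);
 *	}
 *
 *	static struct rtnl_link_ops my_link_ops __read_mostly = {
 *		.kind	= "mykind",
 *		.setup	= my_setup,
 *	};
 *
 *	err = rtnl_link_register(&my_link_ops);		(module init)
 *	rtnl_link_unregister(&my_link_ops);		(module exit)
 *
 * With .setup set and .dellink left NULL, __rtnl_link_register() above
 * wires dellink to unregister_netdevice_queue(), so the new kind can be
 * deleted through rtnl_dellink by default.
 */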
411
412static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
413{
414	struct net_device *dev;
415	LIST_HEAD(list_kill);
416
417	for_each_netdev(net, dev) {
418		if (dev->rtnl_link_ops == ops)
419			ops->dellink(dev, &list_kill);
420	}
421	unregister_netdevice_many(&list_kill);
422}
423
424/**
425 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
426 * @ops: struct rtnl_link_ops * to unregister
427 *
428 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
429 * integrity (hold pernet_ops_rwsem for writing to close the race
430 * with setup_net() and cleanup_net()).
431 */
432void __rtnl_link_unregister(struct rtnl_link_ops *ops)
433{
434	struct net *net;
435
436	for_each_net(net) {
437		__rtnl_kill_links(net, ops);
438	}
439	list_del(&ops->list);
440}
441EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
442
443/* Return with the rtnl_lock held when there are no network
444 * devices unregistering in any network namespace.
445 */
446static void rtnl_lock_unregistering_all(void)
447{
448	struct net *net;
449	bool unregistering;
450	DEFINE_WAIT_FUNC(wait, woken_wake_function);
451
452	add_wait_queue(&netdev_unregistering_wq, &wait);
453	for (;;) {
454		unregistering = false;
455		rtnl_lock();
456		/* We hold pernet_ops_rwsem write-locked, so parallel
457		 * setup_net() and cleanup_net() are not possible.
458		 */
459		for_each_net(net) {
460			if (net->dev_unreg_count > 0) {
461				unregistering = true;
462				break;
463			}
464		}
465		if (!unregistering)
466			break;
467		__rtnl_unlock();
468
469		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
470	}
471	remove_wait_queue(&netdev_unregistering_wq, &wait);
472}
473
474/**
475 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
476 * @ops: struct rtnl_link_ops * to unregister
477 */
478void rtnl_link_unregister(struct rtnl_link_ops *ops)
479{
480	/* Close the race with setup_net() and cleanup_net() */
481	down_write(&pernet_ops_rwsem);
482	rtnl_lock_unregistering_all();
483	__rtnl_link_unregister(ops);
484	rtnl_unlock();
485	up_write(&pernet_ops_rwsem);
486}
487EXPORT_SYMBOL_GPL(rtnl_link_unregister);
488
489static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
490{
491	struct net_device *master_dev;
492	const struct rtnl_link_ops *ops;
493	size_t size = 0;
494
495	rcu_read_lock();
496
497	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
498	if (!master_dev)
499		goto out;
500
501	ops = master_dev->rtnl_link_ops;
502	if (!ops || !ops->get_slave_size)
503		goto out;
504	/* IFLA_INFO_SLAVE_DATA + nested data */
505	size = nla_total_size(sizeof(struct nlattr)) +
506	       ops->get_slave_size(master_dev, dev);
507
508out:
509	rcu_read_unlock();
510	return size;
511}
512
513static size_t rtnl_link_get_size(const struct net_device *dev)
514{
515	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
516	size_t size;
517
518	if (!ops)
519		return 0;
520
521	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
522	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */
523
524	if (ops->get_size)
525		/* IFLA_INFO_DATA + nested data */
526		size += nla_total_size(sizeof(struct nlattr)) +
527			ops->get_size(dev);
528
529	if (ops->get_xstats_size)
530		/* IFLA_INFO_XSTATS */
531		size += nla_total_size(ops->get_xstats_size(dev));
532
533	size += rtnl_link_get_slave_info_data_size(dev);
534
535	return size;
536}
537
538static LIST_HEAD(rtnl_af_ops);
539
540static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
541{
542	const struct rtnl_af_ops *ops;
543
544	list_for_each_entry_rcu(ops, &rtnl_af_ops, list) {
545		if (ops->family == family)
546			return ops;
547	}
548
549	return NULL;
550}
551
552/**
553 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
554 * @ops: struct rtnl_af_ops * to register
557 */
558void rtnl_af_register(struct rtnl_af_ops *ops)
559{
560	rtnl_lock();
561	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
562	rtnl_unlock();
563}
564EXPORT_SYMBOL_GPL(rtnl_af_register);
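/* Address families hook in here to extend IFLA_AF_SPEC.  A sketch of the
 * shape (AF_INET's real ops live in net/ipv4/devinet.c):
 *
 *	static struct rtnl_af_ops my_af_ops __read_mostly = {
 *		.family		  = AF_INET,
 *		.fill_link_af	  = my_fill_link_af,
 *		.get_link_af_size = my_get_link_af_size,
 *	};
 *
 *	rtnl_af_register(&my_af_ops);
 *
 * Readers walk rtnl_af_ops under RCU (see rtnl_link_get_af_size() and
 * rtnl_fill_link_af()), which is why rtnl_af_unregister() below ends
 * with synchronize_rcu().
 */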
565
566/**
567 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
568 * @ops: struct rtnl_af_ops * to unregister
569 */
570void rtnl_af_unregister(struct rtnl_af_ops *ops)
571{
572	rtnl_lock();
573	list_del_rcu(&ops->list);
574	rtnl_unlock();
575
576	synchronize_rcu();
577}
578EXPORT_SYMBOL_GPL(rtnl_af_unregister);
579
580static size_t rtnl_link_get_af_size(const struct net_device *dev,
581				    u32 ext_filter_mask)
582{
583	struct rtnl_af_ops *af_ops;
584	size_t size;
585
586	/* IFLA_AF_SPEC */
587	size = nla_total_size(sizeof(struct nlattr));
588
589	rcu_read_lock();
590	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
591		if (af_ops->get_link_af_size) {
592			/* AF_* + nested data */
593			size += nla_total_size(sizeof(struct nlattr)) +
594				af_ops->get_link_af_size(dev, ext_filter_mask);
595		}
596	}
597	rcu_read_unlock();
598
599	return size;
600}
601
602static bool rtnl_have_link_slave_info(const struct net_device *dev)
603{
604	struct net_device *master_dev;
605	bool ret = false;
606
607	rcu_read_lock();
608
609	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
610	if (master_dev && master_dev->rtnl_link_ops)
611		ret = true;
612	rcu_read_unlock();
613	return ret;
614}
615
616static int rtnl_link_slave_info_fill(struct sk_buff *skb,
617				     const struct net_device *dev)
618{
619	struct net_device *master_dev;
620	const struct rtnl_link_ops *ops;
621	struct nlattr *slave_data;
622	int err;
623
624	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
625	if (!master_dev)
626		return 0;
627	ops = master_dev->rtnl_link_ops;
628	if (!ops)
629		return 0;
630	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
631		return -EMSGSIZE;
632	if (ops->fill_slave_info) {
633		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
634		if (!slave_data)
635			return -EMSGSIZE;
636		err = ops->fill_slave_info(skb, master_dev, dev);
637		if (err < 0)
638			goto err_cancel_slave_data;
639		nla_nest_end(skb, slave_data);
640	}
641	return 0;
642
643err_cancel_slave_data:
644	nla_nest_cancel(skb, slave_data);
645	return err;
646}
647
648static int rtnl_link_info_fill(struct sk_buff *skb,
649			       const struct net_device *dev)
650{
651	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
652	struct nlattr *data;
653	int err;
654
655	if (!ops)
656		return 0;
657	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
658		return -EMSGSIZE;
659	if (ops->fill_xstats) {
660		err = ops->fill_xstats(skb, dev);
661		if (err < 0)
662			return err;
663	}
664	if (ops->fill_info) {
665		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
666		if (data == NULL)
667			return -EMSGSIZE;
668		err = ops->fill_info(skb, dev);
669		if (err < 0)
670			goto err_cancel_data;
671		nla_nest_end(skb, data);
672	}
673	return 0;
674
675err_cancel_data:
676	nla_nest_cancel(skb, data);
677	return err;
678}
679
680static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
681{
682	struct nlattr *linkinfo;
683	int err = -EMSGSIZE;
684
685	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
686	if (linkinfo == NULL)
687		goto out;
688
689	err = rtnl_link_info_fill(skb, dev);
690	if (err < 0)
691		goto err_cancel_link;
692
693	err = rtnl_link_slave_info_fill(skb, dev);
694	if (err < 0)
695		goto err_cancel_link;
696
697	nla_nest_end(skb, linkinfo);
698	return 0;
699
700err_cancel_link:
701	nla_nest_cancel(skb, linkinfo);
702out:
703	return err;
704}
705
706int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
707{
708	struct sock *rtnl = net->rtnl;
709	int err = 0;
710
711	NETLINK_CB(skb).dst_group = group;
712	if (echo)
713		refcount_inc(&skb->users);
714	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
715	if (echo)
716		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
717	return err;
718}
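/* With @echo set, the extra reference keeps the skb alive across
 * netlink_broadcast(), which consumes one reference, so the same skb can
 * still be unicast back to the requesting @pid; that unicast result is
 * what gets returned.
 */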
719
720int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
721{
722	struct sock *rtnl = net->rtnl;
723
724	return nlmsg_unicast(rtnl, skb, pid);
725}
726EXPORT_SYMBOL(rtnl_unicast);
727
728void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
729		 struct nlmsghdr *nlh, gfp_t flags)
730{
731	struct sock *rtnl = net->rtnl;
732	int report = 0;
733
734	if (nlh)
735		report = nlmsg_report(nlh);
736
737	nlmsg_notify(rtnl, skb, pid, group, report, flags);
738}
739EXPORT_SYMBOL(rtnl_notify);
740
741void rtnl_set_sk_err(struct net *net, u32 group, int error)
742{
743	struct sock *rtnl = net->rtnl;
744
745	netlink_set_err(rtnl, 0, group, error);
746}
747EXPORT_SYMBOL(rtnl_set_sk_err);
748
749int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
750{
751	struct nlattr *mx;
752	int i, valid = 0;
753
754	/* nothing is dumped for dst_default_metrics, so just skip the loop */
755	if (metrics == dst_default_metrics.metrics)
756		return 0;
757
758	mx = nla_nest_start_noflag(skb, RTA_METRICS);
759	if (mx == NULL)
760		return -ENOBUFS;
761
762	for (i = 0; i < RTAX_MAX; i++) {
763		if (metrics[i]) {
764			if (i == RTAX_CC_ALGO - 1) {
765				char tmp[TCP_CA_NAME_MAX], *name;
766
767				name = tcp_ca_get_name_by_key(metrics[i], tmp);
768				if (!name)
769					continue;
770				if (nla_put_string(skb, i + 1, name))
771					goto nla_put_failure;
772			} else if (i == RTAX_FEATURES - 1) {
773				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
774
775				if (!user_features)
776					continue;
777				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
778				if (nla_put_u32(skb, i + 1, user_features))
779					goto nla_put_failure;
780			} else {
781				if (nla_put_u32(skb, i + 1, metrics[i]))
782					goto nla_put_failure;
783			}
784			valid++;
785		}
786	}
787
788	if (!valid) {
789		nla_nest_cancel(skb, mx);
790		return 0;
791	}
792
793	return nla_nest_end(skb, mx);
794
795nla_put_failure:
796	nla_nest_cancel(skb, mx);
797	return -EMSGSIZE;
798}
799EXPORT_SYMBOL(rtnetlink_put_metrics);
800
801int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
802		       long expires, u32 error)
803{
804	struct rta_cacheinfo ci = {
805		.rta_error = error,
806		.rta_id =  id,
807	};
808
809	if (dst) {
810		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
811		ci.rta_used = dst->__use;
812		ci.rta_clntref = atomic_read(&dst->__refcnt);
813	}
814	if (expires) {
815		unsigned long clock;
816
817		clock = jiffies_to_clock_t(abs(expires));
818		clock = min_t(unsigned long, clock, INT_MAX);
819		ci.rta_expires = (expires > 0) ? clock : -clock;
820	}
821	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
822}
823EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
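/* The sign of @expires survives the conversion: with USER_HZ == 100,
 * expires == -5 * HZ (expired five seconds ago) yields rta_expires == -500
 * and expires == 5 * HZ yields 500, with the magnitude capped at INT_MAX.
 */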
824
825static void set_operstate(struct net_device *dev, unsigned char transition)
826{
827	unsigned char operstate = dev->operstate;
828
829	switch (transition) {
830	case IF_OPER_UP:
831		if ((operstate == IF_OPER_DORMANT ||
832		     operstate == IF_OPER_TESTING ||
833		     operstate == IF_OPER_UNKNOWN) &&
834		    !netif_dormant(dev) && !netif_testing(dev))
835			operstate = IF_OPER_UP;
836		break;
837
838	case IF_OPER_TESTING:
839		if (operstate == IF_OPER_UP ||
840		    operstate == IF_OPER_UNKNOWN)
841			operstate = IF_OPER_TESTING;
842		break;
843
844	case IF_OPER_DORMANT:
845		if (operstate == IF_OPER_UP ||
846		    operstate == IF_OPER_UNKNOWN)
847			operstate = IF_OPER_DORMANT;
848		break;
849	}
850
851	if (dev->operstate != operstate) {
852		write_lock_bh(&dev_base_lock);
853		dev->operstate = operstate;
854		write_unlock_bh(&dev_base_lock);
855		netdev_state_change(dev);
856	}
857}
858
859static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
860{
861	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
862	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
863}
864
865static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
866					   const struct ifinfomsg *ifm)
867{
868	unsigned int flags = ifm->ifi_flags;
869
870	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
871	if (ifm->ifi_change)
872		flags = (flags & ifm->ifi_change) |
873			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
874
875	return flags;
876}
877
878static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
879				 const struct rtnl_link_stats64 *b)
880{
881	a->rx_packets = b->rx_packets;
882	a->tx_packets = b->tx_packets;
883	a->rx_bytes = b->rx_bytes;
884	a->tx_bytes = b->tx_bytes;
885	a->rx_errors = b->rx_errors;
886	a->tx_errors = b->tx_errors;
887	a->rx_dropped = b->rx_dropped;
888	a->tx_dropped = b->tx_dropped;
889
890	a->multicast = b->multicast;
891	a->collisions = b->collisions;
892
893	a->rx_length_errors = b->rx_length_errors;
894	a->rx_over_errors = b->rx_over_errors;
895	a->rx_crc_errors = b->rx_crc_errors;
896	a->rx_frame_errors = b->rx_frame_errors;
897	a->rx_fifo_errors = b->rx_fifo_errors;
898	a->rx_missed_errors = b->rx_missed_errors;
899
900	a->tx_aborted_errors = b->tx_aborted_errors;
901	a->tx_carrier_errors = b->tx_carrier_errors;
902	a->tx_fifo_errors = b->tx_fifo_errors;
903	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
904	a->tx_window_errors = b->tx_window_errors;
905
906	a->rx_compressed = b->rx_compressed;
907	a->tx_compressed = b->tx_compressed;
908
909	a->rx_nohandler = b->rx_nohandler;
910}
911
912/* All VF info */
913static inline int rtnl_vfinfo_size(const struct net_device *dev,
914				   u32 ext_filter_mask)
915{
916	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
917		int num_vfs = dev_num_vf(dev->dev.parent);
918		size_t size = nla_total_size(0);
919		size += num_vfs *
920			(nla_total_size(0) +
921			 nla_total_size(sizeof(struct ifla_vf_mac)) +
922			 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
923			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
924			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
925			 nla_total_size(MAX_VLAN_LIST_LEN *
926					sizeof(struct ifla_vf_vlan_info)) +
927			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
928			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
929			 nla_total_size(sizeof(struct ifla_vf_rate)) +
930			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
931			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
932			 nla_total_size(sizeof(struct ifla_vf_trust)));
933		if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
934			size += num_vfs *
935				(nla_total_size(0) + /* nest IFLA_VF_STATS */
936				 /* IFLA_VF_STATS_RX_PACKETS */
937				 nla_total_size_64bit(sizeof(__u64)) +
938				 /* IFLA_VF_STATS_TX_PACKETS */
939				 nla_total_size_64bit(sizeof(__u64)) +
940				 /* IFLA_VF_STATS_RX_BYTES */
941				 nla_total_size_64bit(sizeof(__u64)) +
942				 /* IFLA_VF_STATS_TX_BYTES */
943				 nla_total_size_64bit(sizeof(__u64)) +
944				 /* IFLA_VF_STATS_BROADCAST */
945				 nla_total_size_64bit(sizeof(__u64)) +
946				 /* IFLA_VF_STATS_MULTICAST */
947				 nla_total_size_64bit(sizeof(__u64)) +
948				 /* IFLA_VF_STATS_RX_DROPPED */
949				 nla_total_size_64bit(sizeof(__u64)) +
950				 /* IFLA_VF_STATS_TX_DROPPED */
951				 nla_total_size_64bit(sizeof(__u64)));
952		}
953		return size;
954	} else
955		return 0;
956}
957
958static size_t rtnl_port_size(const struct net_device *dev,
959			     u32 ext_filter_mask)
960{
961	size_t port_size = nla_total_size(4)		/* PORT_VF */
962		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
963		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
964		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
965		+ nla_total_size(1)			/* PORT_VDP_REQUEST */
966		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
967	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
968	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
969		+ port_size;
970	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
971		+ port_size;
972
973	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
974	    !(ext_filter_mask & RTEXT_FILTER_VF))
975		return 0;
976	if (dev_num_vf(dev->dev.parent))
977		return port_self_size + vf_ports_size +
978			vf_port_size * dev_num_vf(dev->dev.parent);
979	else
980		return port_self_size;
981}
982
983static size_t rtnl_xdp_size(void)
984{
985	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
986			  nla_total_size(1) +	/* XDP_ATTACHED */
987			  nla_total_size(4) +	/* XDP_PROG_ID (or 1st mode) */
988			  nla_total_size(4);	/* XDP_<mode>_PROG_ID */
989
990	return xdp_size;
991}
992
993static size_t rtnl_prop_list_size(const struct net_device *dev)
994{
995	struct netdev_name_node *name_node;
996	size_t size;
997
998	if (list_empty(&dev->name_node->list))
999		return 0;
1000	size = nla_total_size(0);
1001	list_for_each_entry(name_node, &dev->name_node->list, list)
1002		size += nla_total_size(ALTIFNAMSIZ);
1003	return size;
1004}
1005
1006static size_t rtnl_proto_down_size(const struct net_device *dev)
1007{
1008	size_t size = nla_total_size(1);
1009
1010	if (dev->proto_down_reason)
1011		size += nla_total_size(0) + nla_total_size(4);
1012
1013	return size;
1014}
1015
1016static noinline size_t if_nlmsg_size(const struct net_device *dev,
1017				     u32 ext_filter_mask)
1018{
1019	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
1020	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
1021	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
1022	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
1023	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
1024	       + nla_total_size(sizeof(struct rtnl_link_stats))
1025	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
1026	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
1027	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
1028	       + nla_total_size(4) /* IFLA_TXQLEN */
1029	       + nla_total_size(4) /* IFLA_WEIGHT */
1030	       + nla_total_size(4) /* IFLA_MTU */
1031	       + nla_total_size(4) /* IFLA_LINK */
1032	       + nla_total_size(4) /* IFLA_MASTER */
1033	       + nla_total_size(1) /* IFLA_CARRIER */
1034	       + nla_total_size(4) /* IFLA_PROMISCUITY */
1035	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
1036	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
1037	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
1038	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
1039	       + nla_total_size(1) /* IFLA_OPERSTATE */
1040	       + nla_total_size(1) /* IFLA_LINKMODE */
1041	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
1042	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
1043	       + nla_total_size(4) /* IFLA_GROUP */
1044	       + nla_total_size(ext_filter_mask
1045			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
1046	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
1047	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
1048	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
1049	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
1050	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
1051	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
1052	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
1053	       + rtnl_xdp_size() /* IFLA_XDP */
1054	       + nla_total_size(4)  /* IFLA_EVENT */
1055	       + nla_total_size(4)  /* IFLA_NEW_NETNSID */
1056	       + nla_total_size(4)  /* IFLA_NEW_IFINDEX */
1057	       + rtnl_proto_down_size(dev)  /* proto down */
1058	       + nla_total_size(4)  /* IFLA_TARGET_NETNSID */
1059	       + nla_total_size(4)  /* IFLA_CARRIER_UP_COUNT */
1060	       + nla_total_size(4)  /* IFLA_CARRIER_DOWN_COUNT */
1061	       + nla_total_size(4)  /* IFLA_MIN_MTU */
1062	       + nla_total_size(4)  /* IFLA_MAX_MTU */
1063	       + rtnl_prop_list_size(dev)
1064	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
1065	       + 0;
1066}
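/* Each nla_total_size(payload) above is NLA_ALIGN(NLA_HDRLEN + payload),
 * so every u32 attribute accounts for 8 bytes; the sum is a worst-case
 * bound used to size the skb, not an exact message length.
 */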
1067
1068static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
1069{
1070	struct nlattr *vf_ports;
1071	struct nlattr *vf_port;
1072	int vf;
1073	int err;
1074
1075	vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
1076	if (!vf_ports)
1077		return -EMSGSIZE;
1078
1079	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
1080		vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
1081		if (!vf_port)
1082			goto nla_put_failure;
1083		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
1084			goto nla_put_failure;
1085		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
1086		if (err == -EMSGSIZE)
1087			goto nla_put_failure;
1088		if (err) {
1089			nla_nest_cancel(skb, vf_port);
1090			continue;
1091		}
1092		nla_nest_end(skb, vf_port);
1093	}
1094
1095	nla_nest_end(skb, vf_ports);
1096
1097	return 0;
1098
1099nla_put_failure:
1100	nla_nest_cancel(skb, vf_ports);
1101	return -EMSGSIZE;
1102}
1103
1104static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
1105{
1106	struct nlattr *port_self;
1107	int err;
1108
1109	port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
1110	if (!port_self)
1111		return -EMSGSIZE;
1112
1113	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
1114	if (err) {
1115		nla_nest_cancel(skb, port_self);
1116		return (err == -EMSGSIZE) ? err : 0;
1117	}
1118
1119	nla_nest_end(skb, port_self);
1120
1121	return 0;
1122}
1123
1124static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
1125			  u32 ext_filter_mask)
1126{
1127	int err;
1128
1129	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1130	    !(ext_filter_mask & RTEXT_FILTER_VF))
1131		return 0;
1132
1133	err = rtnl_port_self_fill(skb, dev);
1134	if (err)
1135		return err;
1136
1137	if (dev_num_vf(dev->dev.parent)) {
1138		err = rtnl_vf_ports_fill(skb, dev);
1139		if (err)
1140			return err;
1141	}
1142
1143	return 0;
1144}
1145
1146static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1147{
1148	int err;
1149	struct netdev_phys_item_id ppid;
1150
1151	err = dev_get_phys_port_id(dev, &ppid);
1152	if (err) {
1153		if (err == -EOPNOTSUPP)
1154			return 0;
1155		return err;
1156	}
1157
1158	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
1159		return -EMSGSIZE;
1160
1161	return 0;
1162}
1163
1164static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1165{
1166	char name[IFNAMSIZ];
1167	int err;
1168
1169	err = dev_get_phys_port_name(dev, name, sizeof(name));
1170	if (err) {
1171		if (err == -EOPNOTSUPP)
1172			return 0;
1173		return err;
1174	}
1175
1176	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
1177		return -EMSGSIZE;
1178
1179	return 0;
1180}
1181
1182static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1183{
1184	struct netdev_phys_item_id ppid = { };
1185	int err;
1186
1187	err = dev_get_port_parent_id(dev, &ppid, false);
1188	if (err) {
1189		if (err == -EOPNOTSUPP)
1190			return 0;
1191		return err;
1192	}
1193
1194	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
1195		return -EMSGSIZE;
1196
1197	return 0;
1198}
1199
1200static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1201					      struct net_device *dev)
1202{
1203	struct rtnl_link_stats64 *sp;
1204	struct nlattr *attr;
1205
1206	attr = nla_reserve_64bit(skb, IFLA_STATS64,
1207				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
1208	if (!attr)
1209		return -EMSGSIZE;
1210
1211	sp = nla_data(attr);
1212	dev_get_stats(dev, sp);
1213
1214	attr = nla_reserve(skb, IFLA_STATS,
1215			   sizeof(struct rtnl_link_stats));
1216	if (!attr)
1217		return -EMSGSIZE;
1218
1219	copy_rtnl_link_stats(nla_data(attr), sp);
1220
1221	return 0;
1222}
1223
1224static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1225					       struct net_device *dev,
1226					       int vfs_num,
1227					       struct nlattr *vfinfo,
1228					       u32 ext_filter_mask)
1229{
1230	struct ifla_vf_rss_query_en vf_rss_query_en;
1231	struct nlattr *vf, *vfstats, *vfvlanlist;
1232	struct ifla_vf_link_state vf_linkstate;
1233	struct ifla_vf_vlan_info vf_vlan_info;
1234	struct ifla_vf_spoofchk vf_spoofchk;
1235	struct ifla_vf_tx_rate vf_tx_rate;
1236	struct ifla_vf_stats vf_stats;
1237	struct ifla_vf_trust vf_trust;
1238	struct ifla_vf_vlan vf_vlan;
1239	struct ifla_vf_rate vf_rate;
1240	struct ifla_vf_mac vf_mac;
1241	struct ifla_vf_broadcast vf_broadcast;
1242	struct ifla_vf_info ivi;
1243	struct ifla_vf_guid node_guid;
1244	struct ifla_vf_guid port_guid;
1245
1246	memset(&ivi, 0, sizeof(ivi));
1247
1248	/* Not all SR-IOV capable drivers support the
1249	 * spoofcheck and "RSS query enable" query.  Preset to
1250	 * -1 so the user space tool can detect that the driver
1251	 * didn't report anything.
1252	 */
1253	ivi.spoofchk = -1;
1254	ivi.rss_query_en = -1;
1255	ivi.trusted = -1;
1256	/* The default value for VF link state is "auto"
1257	 * IFLA_VF_LINK_STATE_AUTO which equals zero
1258	 */
1259	ivi.linkstate = 0;
1260	/* VLAN Protocol by default is 802.1Q */
1261	ivi.vlan_proto = htons(ETH_P_8021Q);
1262	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1263		return 0;
1264
1265	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
1266	memset(&node_guid, 0, sizeof(node_guid));
1267	memset(&port_guid, 0, sizeof(port_guid));
1268
1269	vf_mac.vf =
1270		vf_vlan.vf =
1271		vf_vlan_info.vf =
1272		vf_rate.vf =
1273		vf_tx_rate.vf =
1274		vf_spoofchk.vf =
1275		vf_linkstate.vf =
1276		vf_rss_query_en.vf =
1277		vf_trust.vf =
1278		node_guid.vf =
1279		port_guid.vf = ivi.vf;
1280
1281	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1282	memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
1283	vf_vlan.vlan = ivi.vlan;
1284	vf_vlan.qos = ivi.qos;
1285	vf_vlan_info.vlan = ivi.vlan;
1286	vf_vlan_info.qos = ivi.qos;
1287	vf_vlan_info.vlan_proto = ivi.vlan_proto;
1288	vf_tx_rate.rate = ivi.max_tx_rate;
1289	vf_rate.min_tx_rate = ivi.min_tx_rate;
1290	vf_rate.max_tx_rate = ivi.max_tx_rate;
1291	vf_spoofchk.setting = ivi.spoofchk;
1292	vf_linkstate.link_state = ivi.linkstate;
1293	vf_rss_query_en.setting = ivi.rss_query_en;
1294	vf_trust.setting = ivi.trusted;
1295	vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
1296	if (!vf)
1297		goto nla_put_vfinfo_failure;
1298	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1299	    nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
1300	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1301	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1302		    &vf_rate) ||
1303	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1304		    &vf_tx_rate) ||
1305	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1306		    &vf_spoofchk) ||
1307	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1308		    &vf_linkstate) ||
1309	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1310		    sizeof(vf_rss_query_en),
1311		    &vf_rss_query_en) ||
1312	    nla_put(skb, IFLA_VF_TRUST,
1313		    sizeof(vf_trust), &vf_trust))
1314		goto nla_put_vf_failure;
1315
1316	if (dev->netdev_ops->ndo_get_vf_guid &&
1317	    !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
1318					      &port_guid)) {
1319		if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
1320			    &node_guid) ||
1321		    nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
1322			    &port_guid))
1323			goto nla_put_vf_failure;
1324	}
1325	vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
1326	if (!vfvlanlist)
1327		goto nla_put_vf_failure;
1328	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
1329		    &vf_vlan_info)) {
1330		nla_nest_cancel(skb, vfvlanlist);
1331		goto nla_put_vf_failure;
1332	}
1333	nla_nest_end(skb, vfvlanlist);
1334	if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
1335		memset(&vf_stats, 0, sizeof(vf_stats));
1336		if (dev->netdev_ops->ndo_get_vf_stats)
1337			dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1338							  &vf_stats);
1339		vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
1340		if (!vfstats)
1341			goto nla_put_vf_failure;
1342		if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
1343				      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
1344		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
1345				      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
1346		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
1347				      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
1348		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
1349				      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
1350		    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
1351				      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
1352		    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
1353				      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
1354		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
1355				      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
1356		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
1357				      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
1358			nla_nest_cancel(skb, vfstats);
1359			goto nla_put_vf_failure;
1360		}
1361		nla_nest_end(skb, vfstats);
1362	}
1363	nla_nest_end(skb, vf);
1364	return 0;
1365
1366nla_put_vf_failure:
1367	nla_nest_cancel(skb, vf);
1368nla_put_vfinfo_failure:
1369	nla_nest_cancel(skb, vfinfo);
1370	return -EMSGSIZE;
1371}
1372
1373static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
1374					   struct net_device *dev,
1375					   u32 ext_filter_mask)
1376{
1377	struct nlattr *vfinfo;
1378	int i, num_vfs;
1379
1380	if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
1381		return 0;
1382
1383	num_vfs = dev_num_vf(dev->dev.parent);
1384	if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
1385		return -EMSGSIZE;
1386
1387	if (!dev->netdev_ops->ndo_get_vf_config)
1388		return 0;
1389
1390	vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
1391	if (!vfinfo)
1392		return -EMSGSIZE;
1393
1394	for (i = 0; i < num_vfs; i++) {
1395		if (rtnl_fill_vfinfo(skb, dev, i, vfinfo, ext_filter_mask))
1396			return -EMSGSIZE;
1397	}
1398
1399	nla_nest_end(skb, vfinfo);
1400	return 0;
1401}
1402
1403static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1404{
1405	struct rtnl_link_ifmap map;
1406
1407	memset(&map, 0, sizeof(map));
1408	map.mem_start   = dev->mem_start;
1409	map.mem_end     = dev->mem_end;
1410	map.base_addr   = dev->base_addr;
1411	map.irq         = dev->irq;
1412	map.dma         = dev->dma;
1413	map.port        = dev->if_port;
1414
1415	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
1416		return -EMSGSIZE;
1417
1418	return 0;
1419}
1420
1421static u32 rtnl_xdp_prog_skb(struct net_device *dev)
1422{
1423	const struct bpf_prog *generic_xdp_prog;
1424
1425	ASSERT_RTNL();
1426
1427	generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
1428	if (!generic_xdp_prog)
1429		return 0;
1430	return generic_xdp_prog->aux->id;
1431}
1432
1433static u32 rtnl_xdp_prog_drv(struct net_device *dev)
1434{
1435	return dev_xdp_prog_id(dev, XDP_MODE_DRV);
1436}
1437
1438static u32 rtnl_xdp_prog_hw(struct net_device *dev)
1439{
1440	return dev_xdp_prog_id(dev, XDP_MODE_HW);
1441}
1442
1443static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
1444			       u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
1445			       u32 (*get_prog_id)(struct net_device *dev))
1446{
1447	u32 curr_id;
1448	int err;
1449
1450	curr_id = get_prog_id(dev);
1451	if (!curr_id)
1452		return 0;
1453
1454	*prog_id = curr_id;
1455	err = nla_put_u32(skb, attr, curr_id);
1456	if (err)
1457		return err;
1458
1459	if (*mode != XDP_ATTACHED_NONE)
1460		*mode = XDP_ATTACHED_MULTI;
1461	else
1462		*mode = tgt_mode;
1463
1464	return 0;
1465}
1466
1467static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1468{
1469	struct nlattr *xdp;
1470	u32 prog_id;
1471	int err;
1472	u8 mode;
1473
1474	xdp = nla_nest_start_noflag(skb, IFLA_XDP);
1475	if (!xdp)
1476		return -EMSGSIZE;
1477
1478	prog_id = 0;
1479	mode = XDP_ATTACHED_NONE;
1480	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
1481				  IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
1482	if (err)
1483		goto err_cancel;
1484	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
1485				  IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
1486	if (err)
1487		goto err_cancel;
1488	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
1489				  IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
1490	if (err)
1491		goto err_cancel;
1492
1493	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
1494	if (err)
1495		goto err_cancel;
1496
1497	if (prog_id && mode != XDP_ATTACHED_MULTI) {
1498		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
1499		if (err)
1500			goto err_cancel;
1501	}
1502
1503	nla_nest_end(skb, xdp);
1504	return 0;
1505
1506err_cancel:
1507	nla_nest_cancel(skb, xdp);
1508	return err;
1509}
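/* Net effect: with a program in a single mode, user space sees
 * IFLA_XDP_ATTACHED (skb/drv/hw), the matching per-mode *_PROG_ID
 * attribute and IFLA_XDP_PROG_ID; with programs in several modes at
 * once, IFLA_XDP_ATTACHED reports XDP_ATTACHED_MULTI and only the
 * per-mode attributes identify the programs.
 */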
1510
1511static u32 rtnl_get_event(unsigned long event)
1512{
1513	u32 rtnl_event_type = IFLA_EVENT_NONE;
1514
1515	switch (event) {
1516	case NETDEV_REBOOT:
1517		rtnl_event_type = IFLA_EVENT_REBOOT;
1518		break;
1519	case NETDEV_FEAT_CHANGE:
1520		rtnl_event_type = IFLA_EVENT_FEATURES;
1521		break;
1522	case NETDEV_BONDING_FAILOVER:
1523		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
1524		break;
1525	case NETDEV_NOTIFY_PEERS:
1526		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
1527		break;
1528	case NETDEV_RESEND_IGMP:
1529		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
1530		break;
1531	case NETDEV_CHANGEINFODATA:
1532		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
1533		break;
1534	default:
1535		break;
1536	}
1537
1538	return rtnl_event_type;
1539}
1540
1541static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
1542{
1543	const struct net_device *upper_dev;
1544	int ret = 0;
1545
1546	rcu_read_lock();
1547
1548	upper_dev = netdev_master_upper_dev_get_rcu(dev);
1549	if (upper_dev)
1550		ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);
1551
1552	rcu_read_unlock();
1553	return ret;
1554}
1555
1556static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
1557			  bool force)
1558{
1559	int ifindex = dev_get_iflink(dev);
1560
1561	if (force || dev->ifindex != ifindex)
1562		return nla_put_u32(skb, IFLA_LINK, ifindex);
1563
1564	return 0;
1565}
1566
1567static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
1568					      struct net_device *dev)
1569{
1570	char buf[IFALIASZ];
1571	int ret;
1572
1573	ret = dev_get_alias(dev, buf, sizeof(buf));
1574	return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
1575}
1576
1577static int rtnl_fill_link_netnsid(struct sk_buff *skb,
1578				  const struct net_device *dev,
1579				  struct net *src_net, gfp_t gfp)
1580{
1581	bool put_iflink = false;
1582
1583	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
1584		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1585
1586		if (!net_eq(dev_net(dev), link_net)) {
1587			int id = peernet2id_alloc(src_net, link_net, gfp);
1588
1589			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
1590				return -EMSGSIZE;
1591
1592			put_iflink = true;
1593		}
1594	}
1595
1596	return nla_put_iflink(skb, dev, put_iflink);
1597}
1598
1599static int rtnl_fill_link_af(struct sk_buff *skb,
1600			     const struct net_device *dev,
1601			     u32 ext_filter_mask)
1602{
1603	const struct rtnl_af_ops *af_ops;
1604	struct nlattr *af_spec;
1605
1606	af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
1607	if (!af_spec)
1608		return -EMSGSIZE;
1609
1610	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
1611		struct nlattr *af;
1612		int err;
1613
1614		if (!af_ops->fill_link_af)
1615			continue;
1616
1617		af = nla_nest_start_noflag(skb, af_ops->family);
1618		if (!af)
1619			return -EMSGSIZE;
1620
1621		err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1622		/*
1623		 * Caller may return ENODATA to indicate that there
1624		 * was no data to be dumped. This is not an error, it
1625		 * means we should trim the attribute header and
1626		 * continue.
1627		 */
1628		if (err == -ENODATA)
1629			nla_nest_cancel(skb, af);
1630		else if (err < 0)
1631			return -EMSGSIZE;
1632
1633		nla_nest_end(skb, af);
1634	}
1635
1636	nla_nest_end(skb, af_spec);
1637	return 0;
1638}
1639
1640static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
1641				 const struct net_device *dev)
1642{
1643	struct netdev_name_node *name_node;
1644	int count = 0;
1645
1646	list_for_each_entry(name_node, &dev->name_node->list, list) {
1647		if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
1648			return -EMSGSIZE;
1649		count++;
1650	}
1651	return count;
1652}
1653
1654static int rtnl_fill_prop_list(struct sk_buff *skb,
1655			       const struct net_device *dev)
1656{
1657	struct nlattr *prop_list;
1658	int ret;
1659
1660	prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
1661	if (!prop_list)
1662		return -EMSGSIZE;
1663
1664	ret = rtnl_fill_alt_ifnames(skb, dev);
1665	if (ret <= 0)
1666		goto nest_cancel;
1667
1668	nla_nest_end(skb, prop_list);
1669	return 0;
1670
1671nest_cancel:
1672	nla_nest_cancel(skb, prop_list);
1673	return ret;
1674}
1675
1676static int rtnl_fill_proto_down(struct sk_buff *skb,
1677				const struct net_device *dev)
1678{
1679	struct nlattr *pr;
1680	u32 preason;
1681
1682	if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
1683		goto nla_put_failure;
1684
1685	preason = dev->proto_down_reason;
1686	if (!preason)
1687		return 0;
1688
1689	pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
1690	if (!pr)
1691		return -EMSGSIZE;
1692
1693	if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
1694		nla_nest_cancel(skb, pr);
1695		goto nla_put_failure;
1696	}
1697
1698	nla_nest_end(skb, pr);
1699	return 0;
1700
1701nla_put_failure:
1702	return -EMSGSIZE;
1703}
1704
1705static int rtnl_fill_ifinfo(struct sk_buff *skb,
1706			    struct net_device *dev, struct net *src_net,
1707			    int type, u32 pid, u32 seq, u32 change,
1708			    unsigned int flags, u32 ext_filter_mask,
1709			    u32 event, int *new_nsid, int new_ifindex,
1710			    int tgt_netnsid, gfp_t gfp)
1711{
1712	struct ifinfomsg *ifm;
1713	struct nlmsghdr *nlh;
1714	struct Qdisc *qdisc;
1715
1716	ASSERT_RTNL();
1717	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
1718	if (nlh == NULL)
1719		return -EMSGSIZE;
1720
1721	ifm = nlmsg_data(nlh);
1722	ifm->ifi_family = AF_UNSPEC;
1723	ifm->__ifi_pad = 0;
1724	ifm->ifi_type = dev->type;
1725	ifm->ifi_index = dev->ifindex;
1726	ifm->ifi_flags = dev_get_flags(dev);
1727	ifm->ifi_change = change;
1728
1729	if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
1730		goto nla_put_failure;
1731
1732	qdisc = rtnl_dereference(dev->qdisc);
1733	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
1734	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
1735	    nla_put_u8(skb, IFLA_OPERSTATE,
1736		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
1737	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
1738	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
1739	    nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
1740	    nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
1741	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
1742	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
1743	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
1744	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
1745	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
1746#ifdef CONFIG_RPS
1747	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
1748#endif
1749	    put_master_ifindex(skb, dev) ||
1750	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
1751	    (qdisc &&
1752	     nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
1753	    nla_put_ifalias(skb, dev) ||
1754	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
1755			atomic_read(&dev->carrier_up_count) +
1756			atomic_read(&dev->carrier_down_count)) ||
1757	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
1758			atomic_read(&dev->carrier_up_count)) ||
1759	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
1760			atomic_read(&dev->carrier_down_count)))
1761		goto nla_put_failure;
1762
1763	if (rtnl_fill_proto_down(skb, dev))
1764		goto nla_put_failure;
1765
1766	if (event != IFLA_EVENT_NONE) {
1767		if (nla_put_u32(skb, IFLA_EVENT, event))
1768			goto nla_put_failure;
1769	}
1770
1771	if (rtnl_fill_link_ifmap(skb, dev))
1772		goto nla_put_failure;
1773
1774	if (dev->addr_len) {
1775		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
1776		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
1777			goto nla_put_failure;
1778	}
1779
1780	if (rtnl_phys_port_id_fill(skb, dev))
1781		goto nla_put_failure;
1782
1783	if (rtnl_phys_port_name_fill(skb, dev))
1784		goto nla_put_failure;
1785
1786	if (rtnl_phys_switch_id_fill(skb, dev))
1787		goto nla_put_failure;
1788
1789	if (rtnl_fill_stats(skb, dev))
1790		goto nla_put_failure;
1791
1792	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
1793		goto nla_put_failure;
1794
1795	if (rtnl_port_fill(skb, dev, ext_filter_mask))
1796		goto nla_put_failure;
1797
1798	if (rtnl_xdp_fill(skb, dev))
1799		goto nla_put_failure;
1800
1801	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
1802		if (rtnl_link_fill(skb, dev) < 0)
1803			goto nla_put_failure;
1804	}
1805
1806	if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
1807		goto nla_put_failure;
1808
1809	if (new_nsid &&
1810	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
1811		goto nla_put_failure;
1812	if (new_ifindex &&
1813	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
1814		goto nla_put_failure;
1815
1816	if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
1817	    nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
1818		goto nla_put_failure;
1819
1820	rcu_read_lock();
1821	if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
1822		goto nla_put_failure_rcu;
1823	rcu_read_unlock();
1824
1825	if (rtnl_fill_prop_list(skb, dev))
1826		goto nla_put_failure;
1827
1828	nlmsg_end(skb, nlh);
1829	return 0;
1830
1831nla_put_failure_rcu:
1832	rcu_read_unlock();
1833nla_put_failure:
1834	nlmsg_cancel(skb, nlh);
1835	return -EMSGSIZE;
1836}
1837
1838static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1839	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
1840	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1841	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1842	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
1843	[IFLA_MTU]		= { .type = NLA_U32 },
1844	[IFLA_LINK]		= { .type = NLA_U32 },
1845	[IFLA_MASTER]		= { .type = NLA_U32 },
1846	[IFLA_CARRIER]		= { .type = NLA_U8 },
1847	[IFLA_TXQLEN]		= { .type = NLA_U32 },
1848	[IFLA_WEIGHT]		= { .type = NLA_U32 },
1849	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
1850	[IFLA_LINKMODE]		= { .type = NLA_U8 },
1851	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
1852	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
1853	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
1854	/* IFLA_IFALIAS is a string, but the policy is set to NLA_BINARY to
1855	 * allow a 0-length string (needed to remove an alias).
1856	 */
1857	[IFLA_IFALIAS]	        = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
1858	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
1859	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
1860	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
1861	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
1862	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
1863	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
1864	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
1865	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
1866	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
1867	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
1868	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1869	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },  /* ignored */
1870	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1871	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
1872	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
1873	[IFLA_XDP]		= { .type = NLA_NESTED },
1874	[IFLA_EVENT]		= { .type = NLA_U32 },
1875	[IFLA_GROUP]		= { .type = NLA_U32 },
1876	[IFLA_TARGET_NETNSID]	= { .type = NLA_S32 },
1877	[IFLA_CARRIER_UP_COUNT]	= { .type = NLA_U32 },
1878	[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
1879	[IFLA_MIN_MTU]		= { .type = NLA_U32 },
1880	[IFLA_MAX_MTU]		= { .type = NLA_U32 },
1881	[IFLA_PROP_LIST]	= { .type = NLA_NESTED },
1882	[IFLA_ALT_IFNAME]	= { .type = NLA_STRING,
1883				    .len = ALTIFNAMSIZ - 1 },
1884	[IFLA_PERM_ADDRESS]	= { .type = NLA_REJECT },
1885	[IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
1886};
1887
1888static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
1889	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
1890	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
1891	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
1892	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
1893};
1894
1895static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1896	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
1897	[IFLA_VF_BROADCAST]	= { .type = NLA_REJECT },
1898	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
1899	[IFLA_VF_VLAN_LIST]     = { .type = NLA_NESTED },
1900	[IFLA_VF_TX_RATE]	= { .len = sizeof(struct ifla_vf_tx_rate) },
1901	[IFLA_VF_SPOOFCHK]	= { .len = sizeof(struct ifla_vf_spoofchk) },
1902	[IFLA_VF_RATE]		= { .len = sizeof(struct ifla_vf_rate) },
1903	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
1904	[IFLA_VF_RSS_QUERY_EN]	= { .len = sizeof(struct ifla_vf_rss_query_en) },
1905	[IFLA_VF_STATS]		= { .type = NLA_NESTED },
1906	[IFLA_VF_TRUST]		= { .len = sizeof(struct ifla_vf_trust) },
1907	[IFLA_VF_IB_NODE_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
1908	[IFLA_VF_IB_PORT_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
1909};
1910
1911static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
1912	[IFLA_PORT_VF]		= { .type = NLA_U32 },
1913	[IFLA_PORT_PROFILE]	= { .type = NLA_STRING,
1914				    .len = PORT_PROFILE_MAX },
1915	[IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
1916				      .len = PORT_UUID_MAX },
1917	[IFLA_PORT_HOST_UUID]	= { .type = NLA_STRING,
1918				    .len = PORT_UUID_MAX },
1919	[IFLA_PORT_REQUEST]	= { .type = NLA_U8, },
1920	[IFLA_PORT_RESPONSE]	= { .type = NLA_U16, },
1921
1922	/* Unused, but we need to keep it here since user space could
1923	 * fill it. It's also broken with regard to NLA_BINARY use in
1924	 * combination with structs.
1925	 */
1926	[IFLA_PORT_VSI_TYPE]	= { .type = NLA_BINARY,
1927				    .len = sizeof(struct ifla_port_vsi) },
1928};
1929
1930static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
1931	[IFLA_XDP_UNSPEC]	= { .strict_start_type = IFLA_XDP_EXPECTED_FD },
1932	[IFLA_XDP_FD]		= { .type = NLA_S32 },
1933	[IFLA_XDP_EXPECTED_FD]	= { .type = NLA_S32 },
1934	[IFLA_XDP_ATTACHED]	= { .type = NLA_U8 },
1935	[IFLA_XDP_FLAGS]	= { .type = NLA_U32 },
1936	[IFLA_XDP_PROG_ID]	= { .type = NLA_U32 },
1937};
1938
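/* Note the .strict_start_type in the IFLA_XDP_UNSPEC slot above: every
 * attribute type >= IFLA_XDP_EXPECTED_FD is validated strictly, even
 * when the message arrives through one of the deprecated (non-strict)
 * parsing entry points.
 */
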
1939static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
1940{
1941	const struct rtnl_link_ops *ops = NULL;
1942	struct nlattr *linfo[IFLA_INFO_MAX + 1];
1943
1944	if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
1945		return NULL;
1946
1947	if (linfo[IFLA_INFO_KIND]) {
1948		char kind[MODULE_NAME_LEN];
1949
1950		nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
1951		ops = rtnl_link_ops_get(kind);
1952	}
1953
1954	return ops;
1955}
1956
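/* The nesting consumed below, roughly as userspace builds it ("bridge"
 * is only an example kind; any registered rtnl_link_ops name works):
 *
 *	IFLA_LINKINFO
 *	    IFLA_INFO_KIND	"bridge"	(selects the rtnl_link_ops)
 *	    IFLA_INFO_DATA	...		(kind-specific attributes)
 */
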
1957static bool link_master_filtered(struct net_device *dev, int master_idx)
1958{
1959	struct net_device *master;
1960
1961	if (!master_idx)
1962		return false;
1963
1964	master = netdev_master_upper_dev_get(dev);
1965	if (!master || master->ifindex != master_idx)
1966		return true;
1967
1968	return false;
1969}
1970
1971static bool link_kind_filtered(const struct net_device *dev,
1972			       const struct rtnl_link_ops *kind_ops)
1973{
1974	if (kind_ops && dev->rtnl_link_ops != kind_ops)
1975		return true;
1976
1977	return false;
1978}
1979
1980static bool link_dump_filtered(struct net_device *dev,
1981			       int master_idx,
1982			       const struct rtnl_link_ops *kind_ops)
1983{
1984	if (link_master_filtered(dev, master_idx) ||
1985	    link_kind_filtered(dev, kind_ops))
1986		return true;
1987
1988	return false;
1989}
1990
1991/**
1992 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
1993 * @sk: netlink socket
1994 * @netnsid: network namespace identifier
1995 *
1996 * Returns the network namespace identified by netnsid on success or an error
1997 * pointer on failure.
1998 */
1999struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
2000{
2001	struct net *net;
2002
2003	net = get_net_ns_by_id(sock_net(sk), netnsid);
2004	if (!net)
2005		return ERR_PTR(-EINVAL);
2006
2007	/* For now, the caller is required to have CAP_NET_ADMIN in
2008	 * the user namespace owning the target net ns.
2009	 */
2010	if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
2011		put_net(net);
2012		return ERR_PTR(-EACCES);
2013	}
2014	return net;
2015}
2016EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
2017
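/* A minimal caller sketch, following the usual ERR_PTR/put_net
 * discipline (other error handling elided):
 *
 *	struct net *net = rtnl_get_net_ns_capable(skb->sk, netnsid);
 *
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	...
 *	put_net(net);
 */
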
2018static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
2019				      bool strict_check, struct nlattr **tb,
2020				      struct netlink_ext_ack *extack)
2021{
2022	int hdrlen;
2023
2024	if (strict_check) {
2025		struct ifinfomsg *ifm;
2026
2027		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
2028			NL_SET_ERR_MSG(extack, "Invalid header for link dump");
2029			return -EINVAL;
2030		}
2031
2032		ifm = nlmsg_data(nlh);
2033		if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
2034		    ifm->ifi_change) {
2035			NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
2036			return -EINVAL;
2037		}
2038		if (ifm->ifi_index) {
2039			NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
2040			return -EINVAL;
2041		}
2042
2043		return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
2044						     IFLA_MAX, ifla_policy,
2045						     extack);
2046	}
2047
2048	/* A hack to preserve the kernel<->userspace interface.
2049	 * The correct header is ifinfomsg, consistent with rtnl_getlink.
2050	 * However, before Linux v3.9 the code here assumed rtgenmsg, and that
2051	 * is what iproute2 < v3.9.0 used.
2052	 * We can detect the old iproute2 because, even with the IFLA_EXT_MASK
2053	 * attribute included, its netlink message is shorter than struct ifinfomsg.
2054	 */
2055	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2056		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2057
2058	return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
2059				      extack);
2060}
2061
2062static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
2063{
2064	struct netlink_ext_ack *extack = cb->extack;
2065	const struct nlmsghdr *nlh = cb->nlh;
2066	struct net *net = sock_net(skb->sk);
2067	struct net *tgt_net = net;
2068	int h, s_h;
2069	int idx = 0, s_idx;
2070	struct net_device *dev;
2071	struct hlist_head *head;
2072	struct nlattr *tb[IFLA_MAX+1];
2073	u32 ext_filter_mask = 0;
2074	const struct rtnl_link_ops *kind_ops = NULL;
2075	unsigned int flags = NLM_F_MULTI;
2076	int master_idx = 0;
2077	int netnsid = -1;
2078	int err, i;
2079
2080	s_h = cb->args[0];
2081	s_idx = cb->args[1];
2082
2083	err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
2084	if (err < 0) {
2085		if (cb->strict_check)
2086			return err;
2087
2088		goto walk_entries;
2089	}
2090
2091	for (i = 0; i <= IFLA_MAX; ++i) {
2092		if (!tb[i])
2093			continue;
2094
2095		/* new attributes should only be added with strict checking */
2096		switch (i) {
2097		case IFLA_TARGET_NETNSID:
2098			netnsid = nla_get_s32(tb[i]);
2099			tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
2100			if (IS_ERR(tgt_net)) {
2101				NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
2102				return PTR_ERR(tgt_net);
2103			}
2104			break;
2105		case IFLA_EXT_MASK:
2106			ext_filter_mask = nla_get_u32(tb[i]);
2107			break;
2108		case IFLA_MASTER:
2109			master_idx = nla_get_u32(tb[i]);
2110			break;
2111		case IFLA_LINKINFO:
2112			kind_ops = linkinfo_to_kind_ops(tb[i]);
2113			break;
2114		default:
2115			if (cb->strict_check) {
2116				NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
2117				return -EINVAL;
2118			}
2119		}
2120	}
2121
2122	if (master_idx || kind_ops)
2123		flags |= NLM_F_DUMP_FILTERED;
2124
2125walk_entries:
2126	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
2127		idx = 0;
2128		head = &tgt_net->dev_index_head[h];
2129		hlist_for_each_entry(dev, head, index_hlist) {
2130			if (link_dump_filtered(dev, master_idx, kind_ops))
2131				goto cont;
2132			if (idx < s_idx)
2133				goto cont;
2134			err = rtnl_fill_ifinfo(skb, dev, net,
2135					       RTM_NEWLINK,
2136					       NETLINK_CB(cb->skb).portid,
2137					       nlh->nlmsg_seq, 0, flags,
2138					       ext_filter_mask, 0, NULL, 0,
2139					       netnsid, GFP_KERNEL);
2140
2141			if (err < 0) {
2142				if (likely(skb->len))
2143					goto out;
2144
2145				goto out_err;
2146			}
2147cont:
2148			idx++;
2149		}
2150	}
2151out:
2152	err = skb->len;
2153out_err:
2154	cb->args[1] = idx;
2155	cb->args[0] = h;
2156	cb->seq = net->dev_base_seq;
2157	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2158	if (netnsid >= 0)
2159		put_net(tgt_net);
2160
2161	return err;
2162}
2163
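/* Dump state is carried between recvmsg() calls in cb->args[]: args[0]
 * holds the hash bucket (h) and args[1] the index within that bucket
 * (idx), so an interrupted dump resumes where the previous skb filled
 * up.
 */
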
2164int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
2165			     struct netlink_ext_ack *exterr)
2166{
2167	const struct ifinfomsg *ifmp;
2168	const struct nlattr *attrs;
2169	size_t len;
2170
2171	ifmp = nla_data(nla_peer);
2172	attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
2173	len = nla_len(nla_peer) - sizeof(struct ifinfomsg);
2174
2175	if (ifmp->ifi_index < 0) {
2176		NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
2177				    "ifindex can't be negative");
2178		return -EINVAL;
2179	}
2180
2181	return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
2182				    exterr);
2183}
2184EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
2185
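/* The nla_peer attribute is expected to carry a struct ifinfomsg
 * followed by IFLA_* attributes, the layout used by paired devices
 * (e.g. the VETH_INFO_PEER attribute of veth) to describe the peer.
 */
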
2186struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
2187{
2188	struct net *net;
2189	/* Examine the link attributes and figure out which
2190	 * network namespace we are talking about.
2191	 */
2192	if (tb[IFLA_NET_NS_PID])
2193		net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
2194	else if (tb[IFLA_NET_NS_FD])
2195		net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
2196	else
2197		net = get_net(src_net);
2198	return net;
2199}
2200EXPORT_SYMBOL(rtnl_link_get_net);
2201
2202/* Figure out which network namespace we are talking about by
2203 * examining the link attributes in the following order:
2204 *
2205 * 1. IFLA_NET_NS_PID
2206 * 2. IFLA_NET_NS_FD
2207 * 3. IFLA_TARGET_NETNSID
2208 */
2209static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
2210					       struct nlattr *tb[])
2211{
2212	struct net *net;
2213
2214	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
2215		return rtnl_link_get_net(src_net, tb);
2216
2217	if (!tb[IFLA_TARGET_NETNSID])
2218		return get_net(src_net);
2219
2220	net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
2221	if (!net)
2222		return ERR_PTR(-EINVAL);
2223
2224	return net;
2225}
2226
2227static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
2228					     struct net *src_net,
2229					     struct nlattr *tb[], int cap)
2230{
2231	struct net *net;
2232
2233	net = rtnl_link_get_net_by_nlattr(src_net, tb);
2234	if (IS_ERR(net))
2235		return net;
2236
2237	if (!netlink_ns_capable(skb, net->user_ns, cap)) {
2238		put_net(net);
2239		return ERR_PTR(-EPERM);
2240	}
2241
2242	return net;
2243}
2244
2245/* Verify that rtnetlink requests do not pass additional properties
2246 * potentially referring to different network namespaces.
2247 */
2248static int rtnl_ensure_unique_netns(struct nlattr *tb[],
2249				    struct netlink_ext_ack *extack,
2250				    bool netns_id_only)
2251{
2252
2253	if (netns_id_only) {
2254		if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
2255			return 0;
2256
2257		NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
2258		return -EOPNOTSUPP;
2259	}
2260
2261	if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
2262		goto invalid_attr;
2263
2264	if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
2265		goto invalid_attr;
2266
2267	if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
2268		goto invalid_attr;
2269
2270	return 0;
2271
2272invalid_attr:
2273	NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
2274	return -EINVAL;
2275}
2276
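/* In other words, at most one of IFLA_NET_NS_PID, IFLA_NET_NS_FD and
 * IFLA_TARGET_NETNSID may be present: {PID} alone passes, {PID, FD}
 * fails with -EINVAL, and with netns_id_only set any PID/FD attribute
 * fails with -EOPNOTSUPP.
 */
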
2277static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
2278{
2279	if (dev) {
2280		if (tb[IFLA_ADDRESS] &&
2281		    nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2282			return -EINVAL;
2283
2284		if (tb[IFLA_BROADCAST] &&
2285		    nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2286			return -EINVAL;
2287	}
2288
2289	if (tb[IFLA_AF_SPEC]) {
2290		struct nlattr *af;
2291		int rem, err;
2292
2293		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2294			const struct rtnl_af_ops *af_ops;
2295
2296			rcu_read_lock();
2297			af_ops = rtnl_af_lookup(nla_type(af));
2298			if (!af_ops) {
2299				rcu_read_unlock();
2300				return -EAFNOSUPPORT;
2301			}
2302
2303			if (!af_ops->set_link_af) {
2304				rcu_read_unlock();
2305				return -EOPNOTSUPP;
2306			}
2307
2308			if (af_ops->validate_link_af) {
2309				err = af_ops->validate_link_af(dev, af);
2310				if (err < 0) {
2311					rcu_read_unlock();
2312					return err;
2313				}
2314			}
2315
2316			rcu_read_unlock();
2317		}
2318	}
2319
2320	return 0;
2321}
2322
2323static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2324				  int guid_type)
2325{
2326	const struct net_device_ops *ops = dev->netdev_ops;
2327
2328	return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2329}
2330
2331static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2332{
2333	if (dev->type != ARPHRD_INFINIBAND)
2334		return -EOPNOTSUPP;
2335
2336	return handle_infiniband_guid(dev, ivt, guid_type);
2337}
2338
2339static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
2340{
2341	const struct net_device_ops *ops = dev->netdev_ops;
2342	int err = -EINVAL;
2343
2344	if (tb[IFLA_VF_MAC]) {
2345		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
2346
2347		if (ivm->vf >= INT_MAX)
2348			return -EINVAL;
2349		err = -EOPNOTSUPP;
2350		if (ops->ndo_set_vf_mac)
2351			err = ops->ndo_set_vf_mac(dev, ivm->vf,
2352						  ivm->mac);
2353		if (err < 0)
2354			return err;
2355	}
2356
2357	if (tb[IFLA_VF_VLAN]) {
2358		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
2359
2360		if (ivv->vf >= INT_MAX)
2361			return -EINVAL;
2362		err = -EOPNOTSUPP;
2363		if (ops->ndo_set_vf_vlan)
2364			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
2365						   ivv->qos,
2366						   htons(ETH_P_8021Q));
2367		if (err < 0)
2368			return err;
2369	}
2370
2371	if (tb[IFLA_VF_VLAN_LIST]) {
2372		struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
2373		struct nlattr *attr;
2374		int rem, len = 0;
2375
2376		err = -EOPNOTSUPP;
2377		if (!ops->ndo_set_vf_vlan)
2378			return err;
2379
2380		nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
2381			if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
2382			    nla_len(attr) < sizeof(struct ifla_vf_vlan_info)) {
2383				return -EINVAL;
2384			}
2385			if (len >= MAX_VLAN_LIST_LEN)
2386				return -EOPNOTSUPP;
2387			ivvl[len] = nla_data(attr);
2388
2389			len++;
2390		}
2391		if (len == 0)
2392			return -EINVAL;
2393
2394		if (ivvl[0]->vf >= INT_MAX)
2395			return -EINVAL;
2396		err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2397					   ivvl[0]->qos, ivvl[0]->vlan_proto);
2398		if (err < 0)
2399			return err;
2400	}
2401
2402	if (tb[IFLA_VF_TX_RATE]) {
2403		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
2404		struct ifla_vf_info ivf;
2405
2406		if (ivt->vf >= INT_MAX)
2407			return -EINVAL;
2408		err = -EOPNOTSUPP;
2409		if (ops->ndo_get_vf_config)
2410			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2411		if (err < 0)
2412			return err;
2413
2414		err = -EOPNOTSUPP;
2415		if (ops->ndo_set_vf_rate)
2416			err = ops->ndo_set_vf_rate(dev, ivt->vf,
2417						   ivf.min_tx_rate,
2418						   ivt->rate);
2419		if (err < 0)
2420			return err;
2421	}
2422
2423	if (tb[IFLA_VF_RATE]) {
2424		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
2425
2426		if (ivt->vf >= INT_MAX)
2427			return -EINVAL;
2428		err = -EOPNOTSUPP;
2429		if (ops->ndo_set_vf_rate)
2430			err = ops->ndo_set_vf_rate(dev, ivt->vf,
2431						   ivt->min_tx_rate,
2432						   ivt->max_tx_rate);
2433		if (err < 0)
2434			return err;
2435	}
2436
2437	if (tb[IFLA_VF_SPOOFCHK]) {
2438		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
2439
2440		if (ivs->vf >= INT_MAX)
2441			return -EINVAL;
2442		err = -EOPNOTSUPP;
2443		if (ops->ndo_set_vf_spoofchk)
2444			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2445						       ivs->setting);
2446		if (err < 0)
2447			return err;
2448	}
2449
2450	if (tb[IFLA_VF_LINK_STATE]) {
2451		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
2452
2453		if (ivl->vf >= INT_MAX)
2454			return -EINVAL;
2455		err = -EOPNOTSUPP;
2456		if (ops->ndo_set_vf_link_state)
2457			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2458							 ivl->link_state);
2459		if (err < 0)
2460			return err;
2461	}
2462
2463	if (tb[IFLA_VF_RSS_QUERY_EN]) {
2464		struct ifla_vf_rss_query_en *ivrssq_en;
2465
2466		err = -EOPNOTSUPP;
2467		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
2468		if (ivrssq_en->vf >= INT_MAX)
2469			return -EINVAL;
2470		if (ops->ndo_set_vf_rss_query_en)
2471			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2472							   ivrssq_en->setting);
2473		if (err < 0)
2474			return err;
2475	}
2476
2477	if (tb[IFLA_VF_TRUST]) {
2478		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
2479
2480		if (ivt->vf >= INT_MAX)
2481			return -EINVAL;
2482		err = -EOPNOTSUPP;
2483		if (ops->ndo_set_vf_trust)
2484			err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2485		if (err < 0)
2486			return err;
2487	}
2488
2489	if (tb[IFLA_VF_IB_NODE_GUID]) {
2490		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
2491
2492		if (ivt->vf >= INT_MAX)
2493			return -EINVAL;
2494		if (!ops->ndo_set_vf_guid)
2495			return -EOPNOTSUPP;
2496		return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2497	}
2498
2499	if (tb[IFLA_VF_IB_PORT_GUID]) {
2500		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
2501
2502		if (ivt->vf >= INT_MAX)
2503			return -EINVAL;
2504		if (!ops->ndo_set_vf_guid)
2505			return -EOPNOTSUPP;
2506
2507		return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
2508	}
2509
2510	return err;
2511}
2512
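/* The attribute layout do_setvfinfo() is fed with, one IFLA_VF_INFO
 * nest per VF (see the IFLA_VFINFO_LIST handling in do_setlink()
 * below):
 *
 *	IFLA_VFINFO_LIST
 *	    IFLA_VF_INFO
 *	        IFLA_VF_MAC	struct ifla_vf_mac
 *	        IFLA_VF_VLAN	struct ifla_vf_vlan
 *	        ...
 */
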
2513static int do_set_master(struct net_device *dev, int ifindex,
2514			 struct netlink_ext_ack *extack)
2515{
2516	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
2517	const struct net_device_ops *ops;
2518	int err;
2519
2520	if (upper_dev) {
2521		if (upper_dev->ifindex == ifindex)
2522			return 0;
2523		ops = upper_dev->netdev_ops;
2524		if (ops->ndo_del_slave) {
2525			err = ops->ndo_del_slave(upper_dev, dev);
2526			if (err)
2527				return err;
2528		} else {
2529			return -EOPNOTSUPP;
2530		}
2531	}
2532
2533	if (ifindex) {
2534		upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2535		if (!upper_dev)
2536			return -EINVAL;
2537		ops = upper_dev->netdev_ops;
2538		if (ops->ndo_add_slave) {
2539			err = ops->ndo_add_slave(upper_dev, dev, extack);
2540			if (err)
2541				return err;
2542		} else {
2543			return -EOPNOTSUPP;
2544		}
2545	}
2546	return 0;
2547}
2548
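/* do_set_master() implements what e.g. "ip link set dev eth0 master br0"
 * (IFLA_MASTER = br0's ifindex) and "... nomaster" (IFLA_MASTER = 0)
 * request: any current upper device is detached via ->ndo_del_slave()
 * before the new one is attached via ->ndo_add_slave(). eth0/br0 are
 * example names only.
 */
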
2549static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
2550	[IFLA_PROTO_DOWN_REASON_MASK]	= { .type = NLA_U32 },
2551	[IFLA_PROTO_DOWN_REASON_VALUE]	= { .type = NLA_U32 },
2552};
2553
2554static int do_set_proto_down(struct net_device *dev,
2555			     struct nlattr *nl_proto_down,
2556			     struct nlattr *nl_proto_down_reason,
2557			     struct netlink_ext_ack *extack)
2558{
2559	struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
2560	const struct net_device_ops *ops = dev->netdev_ops;
2561	unsigned long mask = 0;
2562	u32 value;
2563	bool proto_down;
2564	int err;
2565
2566	if (!ops->ndo_change_proto_down) {
2567		NL_SET_ERR_MSG(extack, "Protodown not supported by device");
2568		return -EOPNOTSUPP;
2569	}
2570
2571	if (nl_proto_down_reason) {
2572		err = nla_parse_nested_deprecated(pdreason,
2573						  IFLA_PROTO_DOWN_REASON_MAX,
2574						  nl_proto_down_reason,
2575						  ifla_proto_down_reason_policy,
2576						  NULL);
2577		if (err < 0)
2578			return err;
2579
2580		if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
2581			NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
2582			return -EINVAL;
2583		}
2584
2585		value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);
2586
2587		if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
2588			mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);
2589
2590		dev_change_proto_down_reason(dev, mask, value);
2591	}
2592
2593	if (nl_proto_down) {
2594		proto_down = nla_get_u8(nl_proto_down);
2595
2596		/* Don't turn off protodown if there are active reasons */
2597		if (!proto_down && dev->proto_down_reason) {
2598			NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
2599			return -EBUSY;
2600		}
2601		err = dev_change_proto_down(dev,
2602					    proto_down);
2603		if (err)
2604			return err;
2605	}
2606
2607	return 0;
2608}
2609
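/* Example of the reason bits: a daemon could claim bit 0 by sending
 * IFLA_PROTO_DOWN_REASON with mask 0x1/value 0x1; clearing
 * IFLA_PROTO_DOWN then fails with -EBUSY until the bit is released
 * again (mask 0x1/value 0x0). Which bit means what is purely a
 * userspace convention.
 */
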
2610#define DO_SETLINK_MODIFIED	0x01
2611/* notify flag means notify + modified. */
2612#define DO_SETLINK_NOTIFY	0x03
2613static int do_setlink(const struct sk_buff *skb,
2614		      struct net_device *dev, struct ifinfomsg *ifm,
2615		      struct netlink_ext_ack *extack,
2616		      struct nlattr **tb, char *ifname, int status)
2617{
2618	const struct net_device_ops *ops = dev->netdev_ops;
2619	int err;
2620
2621	err = validate_linkmsg(dev, tb);
2622	if (err < 0)
2623		return err;
2624
2625	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
2626		const char *pat = ifname && ifname[0] ? ifname : NULL;
2627		struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev),
2628							    tb, CAP_NET_ADMIN);
2629		if (IS_ERR(net)) {
2630			err = PTR_ERR(net);
2631			goto errout;
2632		}
2633
2634		err = dev_change_net_namespace(dev, net, pat);
2635		put_net(net);
2636		if (err)
2637			goto errout;
2638		status |= DO_SETLINK_MODIFIED;
2639	}
2640
2641	if (tb[IFLA_MAP]) {
2642		struct rtnl_link_ifmap *u_map;
2643		struct ifmap k_map;
2644
2645		if (!ops->ndo_set_config) {
2646			err = -EOPNOTSUPP;
2647			goto errout;
2648		}
2649
2650		if (!netif_device_present(dev)) {
2651			err = -ENODEV;
2652			goto errout;
2653		}
2654
2655		u_map = nla_data(tb[IFLA_MAP]);
2656		k_map.mem_start = (unsigned long) u_map->mem_start;
2657		k_map.mem_end = (unsigned long) u_map->mem_end;
2658		k_map.base_addr = (unsigned short) u_map->base_addr;
2659		k_map.irq = (unsigned char) u_map->irq;
2660		k_map.dma = (unsigned char) u_map->dma;
2661		k_map.port = (unsigned char) u_map->port;
2662
2663		err = ops->ndo_set_config(dev, &k_map);
2664		if (err < 0)
2665			goto errout;
2666
2667		status |= DO_SETLINK_NOTIFY;
2668	}
2669
2670	if (tb[IFLA_ADDRESS]) {
2671		struct sockaddr *sa;
2672		int len;
2673
2674		len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2675						  sizeof(*sa));
2676		sa = kmalloc(len, GFP_KERNEL);
2677		if (!sa) {
2678			err = -ENOMEM;
2679			goto errout;
2680		}
2681		sa->sa_family = dev->type;
2682		memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
2683		       dev->addr_len);
2684		err = dev_set_mac_address_user(dev, sa, extack);
2685		kfree(sa);
2686		if (err)
2687			goto errout;
2688		status |= DO_SETLINK_MODIFIED;
2689	}
2690
2691	if (tb[IFLA_MTU]) {
2692		err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
2693		if (err < 0)
2694			goto errout;
2695		status |= DO_SETLINK_MODIFIED;
2696	}
2697
2698	if (tb[IFLA_GROUP]) {
2699		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2700		status |= DO_SETLINK_NOTIFY;
2701	}
2702
2703	/*
2704	 * If the interface was selected by interface index but an
2705	 * interface name was also provided, a name change has been
2706	 * requested.
2707	 */
2708	if (ifm->ifi_index > 0 && ifname[0]) {
2709		err = dev_change_name(dev, ifname);
2710		if (err < 0)
2711			goto errout;
2712		status |= DO_SETLINK_MODIFIED;
2713	}
2714
2715	if (tb[IFLA_IFALIAS]) {
2716		err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2717				    nla_len(tb[IFLA_IFALIAS]));
2718		if (err < 0)
2719			goto errout;
2720		status |= DO_SETLINK_NOTIFY;
2721	}
2722
2723	if (tb[IFLA_BROADCAST]) {
2724		nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2725		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2726	}
2727
2728	if (ifm->ifi_flags || ifm->ifi_change) {
2729		err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2730				       extack);
2731		if (err < 0)
2732			goto errout;
2733	}
2734
2735	if (tb[IFLA_MASTER]) {
2736		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2737		if (err)
2738			goto errout;
2739		status |= DO_SETLINK_MODIFIED;
2740	}
2741
2742	if (tb[IFLA_CARRIER]) {
2743		err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2744		if (err)
2745			goto errout;
2746		status |= DO_SETLINK_MODIFIED;
2747	}
2748
2749	if (tb[IFLA_TXQLEN]) {
2750		unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
2751
2752		err = dev_change_tx_queue_len(dev, value);
2753		if (err)
2754			goto errout;
2755		status |= DO_SETLINK_MODIFIED;
2756	}
2757
2758	if (tb[IFLA_GSO_MAX_SIZE]) {
2759		u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
2760
2761		if (max_size > GSO_MAX_SIZE) {
2762			err = -EINVAL;
2763			goto errout;
2764		}
2765
2766		if (dev->gso_max_size ^ max_size) {
2767			netif_set_gso_max_size(dev, max_size);
2768			status |= DO_SETLINK_MODIFIED;
2769		}
2770	}
2771
2772	if (tb[IFLA_GSO_MAX_SEGS]) {
2773		u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2774
2775		if (max_segs > GSO_MAX_SEGS) {
2776			err = -EINVAL;
2777			goto errout;
2778		}
2779
2780		if (dev->gso_max_segs ^ max_segs) {
2781			dev->gso_max_segs = max_segs;
2782			status |= DO_SETLINK_MODIFIED;
2783		}
2784	}
2785
2786	if (tb[IFLA_OPERSTATE])
2787		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2788
2789	if (tb[IFLA_LINKMODE]) {
2790		unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2791
2792		write_lock_bh(&dev_base_lock);
2793		if (dev->link_mode ^ value)
2794			status |= DO_SETLINK_NOTIFY;
2795		dev->link_mode = value;
2796		write_unlock_bh(&dev_base_lock);
2797	}
2798
2799	if (tb[IFLA_VFINFO_LIST]) {
2800		struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2801		struct nlattr *attr;
2802		int rem;
2803
2804		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2805			if (nla_type(attr) != IFLA_VF_INFO ||
2806			    nla_len(attr) < NLA_HDRLEN) {
2807				err = -EINVAL;
2808				goto errout;
2809			}
2810			err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
2811							  attr,
2812							  ifla_vf_policy,
2813							  NULL);
2814			if (err < 0)
2815				goto errout;
2816			err = do_setvfinfo(dev, vfinfo);
2817			if (err < 0)
2818				goto errout;
2819			status |= DO_SETLINK_NOTIFY;
2820		}
2821	}
2822	err = 0;
2823
2824	if (tb[IFLA_VF_PORTS]) {
2825		struct nlattr *port[IFLA_PORT_MAX+1];
2826		struct nlattr *attr;
2827		int vf;
2828		int rem;
2829
2830		err = -EOPNOTSUPP;
2831		if (!ops->ndo_set_vf_port)
2832			goto errout;
2833
2834		nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
2835			if (nla_type(attr) != IFLA_VF_PORT ||
2836			    nla_len(attr) < NLA_HDRLEN) {
2837				err = -EINVAL;
2838				goto errout;
2839			}
2840			err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2841							  attr,
2842							  ifla_port_policy,
2843							  NULL);
2844			if (err < 0)
2845				goto errout;
2846			if (!port[IFLA_PORT_VF]) {
2847				err = -EOPNOTSUPP;
2848				goto errout;
2849			}
2850			vf = nla_get_u32(port[IFLA_PORT_VF]);
2851			err = ops->ndo_set_vf_port(dev, vf, port);
2852			if (err < 0)
2853				goto errout;
2854			status |= DO_SETLINK_NOTIFY;
2855		}
2856	}
2857	err = 0;
2858
2859	if (tb[IFLA_PORT_SELF]) {
2860		struct nlattr *port[IFLA_PORT_MAX+1];
2861
2862		err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2863						  tb[IFLA_PORT_SELF],
2864						  ifla_port_policy, NULL);
2865		if (err < 0)
2866			goto errout;
2867
2868		err = -EOPNOTSUPP;
2869		if (ops->ndo_set_vf_port)
2870			err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
2871		if (err < 0)
2872			goto errout;
2873		status |= DO_SETLINK_NOTIFY;
2874	}
2875
2876	if (tb[IFLA_AF_SPEC]) {
2877		struct nlattr *af;
2878		int rem;
2879
2880		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2881			const struct rtnl_af_ops *af_ops;
2882
2883			rcu_read_lock();
2884
2885			BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
2886
2887			err = af_ops->set_link_af(dev, af);
2888			if (err < 0) {
2889				rcu_read_unlock();
2890				goto errout;
2891			}
2892
2893			rcu_read_unlock();
2894			status |= DO_SETLINK_NOTIFY;
2895		}
2896	}
2897	err = 0;
2898
2899	if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
2900		err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
2901					tb[IFLA_PROTO_DOWN_REASON], extack);
2902		if (err)
2903			goto errout;
2904		status |= DO_SETLINK_NOTIFY;
2905	}
2906
2907	if (tb[IFLA_XDP]) {
2908		struct nlattr *xdp[IFLA_XDP_MAX + 1];
2909		u32 xdp_flags = 0;
2910
2911		err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
2912						  tb[IFLA_XDP],
2913						  ifla_xdp_policy, NULL);
2914		if (err < 0)
2915			goto errout;
2916
2917		if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
2918			err = -EINVAL;
2919			goto errout;
2920		}
2921
2922		if (xdp[IFLA_XDP_FLAGS]) {
2923			xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
2924			if (xdp_flags & ~XDP_FLAGS_MASK) {
2925				err = -EINVAL;
2926				goto errout;
2927			}
2928			if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
2929				err = -EINVAL;
2930				goto errout;
2931			}
2932		}
2933
2934		if (xdp[IFLA_XDP_FD]) {
2935			int expected_fd = -1;
2936
2937			if (xdp_flags & XDP_FLAGS_REPLACE) {
2938				if (!xdp[IFLA_XDP_EXPECTED_FD]) {
2939					err = -EINVAL;
2940					goto errout;
2941				}
2942				expected_fd =
2943					nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
2944			}
2945
2946			err = dev_change_xdp_fd(dev, extack,
2947						nla_get_s32(xdp[IFLA_XDP_FD]),
2948						expected_fd,
2949						xdp_flags);
2950			if (err)
2951				goto errout;
2952			status |= DO_SETLINK_NOTIFY;
2953		}
2954	}
2955
2956errout:
2957	if (status & DO_SETLINK_MODIFIED) {
2958		if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
2959			netdev_state_change(dev);
2960
2961		if (err < 0)
2962			net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
2963					     dev->name);
2964	}
2965
2966	return err;
2967}
2968
2969static struct net_device *rtnl_dev_get(struct net *net,
2970				       struct nlattr *ifname_attr,
2971				       struct nlattr *altifname_attr,
2972				       char *ifname)
2973{
2974	char buffer[ALTIFNAMSIZ];
2975
2976	if (!ifname) {
2977		ifname = buffer;
2978		if (ifname_attr)
2979			nla_strlcpy(ifname, ifname_attr, IFNAMSIZ);
2980		else if (altifname_attr)
2981			nla_strlcpy(ifname, altifname_attr, ALTIFNAMSIZ);
2982		else
2983			return NULL;
2984	}
2985
2986	return __dev_get_by_name(net, ifname);
2987}
2988
2989static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2990			struct netlink_ext_ack *extack)
2991{
2992	struct net *net = sock_net(skb->sk);
2993	struct ifinfomsg *ifm;
2994	struct net_device *dev;
2995	int err;
2996	struct nlattr *tb[IFLA_MAX+1];
2997	char ifname[IFNAMSIZ];
2998
2999	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3000				     ifla_policy, extack);
3001	if (err < 0)
3002		goto errout;
3003
3004	err = rtnl_ensure_unique_netns(tb, extack, false);
3005	if (err < 0)
3006		goto errout;
3007
3008	if (tb[IFLA_IFNAME])
3009		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3010	else
3011		ifname[0] = '\0';
3012
3013	err = -EINVAL;
3014	ifm = nlmsg_data(nlh);
3015	if (ifm->ifi_index > 0)
3016		dev = __dev_get_by_index(net, ifm->ifi_index);
3017	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3018		dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname);
3019	else
3020		goto errout;
3021
3022	if (dev == NULL) {
3023		err = -ENODEV;
3024		goto errout;
3025	}
3026
3027	err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
3028errout:
3029	return err;
3030}
3031
3032static int rtnl_group_dellink(const struct net *net, int group)
3033{
3034	struct net_device *dev, *aux;
3035	LIST_HEAD(list_kill);
3036	bool found = false;
3037
3038	if (!group)
3039		return -EPERM;
3040
3041	for_each_netdev(net, dev) {
3042		if (dev->group == group) {
3043			const struct rtnl_link_ops *ops;
3044
3045			found = true;
3046			ops = dev->rtnl_link_ops;
3047			if (!ops || !ops->dellink)
3048				return -EOPNOTSUPP;
3049		}
3050	}
3051
3052	if (!found)
3053		return -ENODEV;
3054
3055	for_each_netdev_safe(net, dev, aux) {
3056		if (dev->group == group) {
3057			const struct rtnl_link_ops *ops;
3058
3059			ops = dev->rtnl_link_ops;
3060			ops->dellink(dev, &list_kill);
3061		}
3062	}
3063	unregister_netdevice_many(&list_kill);
3064
3065	return 0;
3066}
3067
3068int rtnl_delete_link(struct net_device *dev)
3069{
3070	const struct rtnl_link_ops *ops;
3071	LIST_HEAD(list_kill);
3072
3073	ops = dev->rtnl_link_ops;
3074	if (!ops || !ops->dellink)
3075		return -EOPNOTSUPP;
3076
3077	ops->dellink(dev, &list_kill);
3078	unregister_netdevice_many(&list_kill);
3079
3080	return 0;
3081}
3082EXPORT_SYMBOL_GPL(rtnl_delete_link);
3083
3084static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3085			struct netlink_ext_ack *extack)
3086{
3087	struct net *net = sock_net(skb->sk);
3088	struct net *tgt_net = net;
3089	struct net_device *dev = NULL;
3090	struct ifinfomsg *ifm;
3091	struct nlattr *tb[IFLA_MAX+1];
3092	int err;
3093	int netnsid = -1;
3094
3095	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3096				     ifla_policy, extack);
3097	if (err < 0)
3098		return err;
3099
3100	err = rtnl_ensure_unique_netns(tb, extack, true);
3101	if (err < 0)
3102		return err;
3103
3104	if (tb[IFLA_TARGET_NETNSID]) {
3105		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3106		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3107		if (IS_ERR(tgt_net))
3108			return PTR_ERR(tgt_net);
3109	}
3110
3111	err = -EINVAL;
3112	ifm = nlmsg_data(nlh);
3113	if (ifm->ifi_index > 0)
3114		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3115	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3116		dev = rtnl_dev_get(net, tb[IFLA_IFNAME],
3117				   tb[IFLA_ALT_IFNAME], NULL);
3118	else if (tb[IFLA_GROUP])
3119		err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
3120	else
3121		goto out;
3122
3123	if (!dev) {
3124		if (tb[IFLA_IFNAME] || ifm->ifi_index > 0)
3125			err = -ENODEV;
3126
3127		goto out;
3128	}
3129
3130	err = rtnl_delete_link(dev);
3131
3132out:
3133	if (netnsid >= 0)
3134		put_net(tgt_net);
3135
3136	return err;
3137}
3138
3139int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
3140{
3141	unsigned int old_flags;
3142	int err;
3143
3144	old_flags = dev->flags;
3145	if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
3146		err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3147					 NULL);
3148		if (err < 0)
3149			return err;
3150	}
3151
3152	if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
3153		__dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
3154	} else {
3155		dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
3156		__dev_notify_flags(dev, old_flags, ~0U);
3157	}
3158	return 0;
3159}
3160EXPORT_SYMBOL(rtnl_configure_link);
3161
3162struct net_device *rtnl_create_link(struct net *net, const char *ifname,
3163				    unsigned char name_assign_type,
3164				    const struct rtnl_link_ops *ops,
3165				    struct nlattr *tb[],
3166				    struct netlink_ext_ack *extack)
3167{
3168	struct net_device *dev;
3169	unsigned int num_tx_queues = 1;
3170	unsigned int num_rx_queues = 1;
3171
3172	if (tb[IFLA_NUM_TX_QUEUES])
3173		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
3174	else if (ops->get_num_tx_queues)
3175		num_tx_queues = ops->get_num_tx_queues();
3176
3177	if (tb[IFLA_NUM_RX_QUEUES])
3178		num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
3179	else if (ops->get_num_rx_queues)
3180		num_rx_queues = ops->get_num_rx_queues();
3181
3182	if (num_tx_queues < 1 || num_tx_queues > 4096) {
3183		NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
3184		return ERR_PTR(-EINVAL);
3185	}
3186
3187	if (num_rx_queues < 1 || num_rx_queues > 4096) {
3188		NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
3189		return ERR_PTR(-EINVAL);
3190	}
3191
3192	dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
3193			       ops->setup, num_tx_queues, num_rx_queues);
3194	if (!dev)
3195		return ERR_PTR(-ENOMEM);
3196
3197	dev_net_set(dev, net);
3198	dev->rtnl_link_ops = ops;
3199	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
3200
3201	if (tb[IFLA_MTU]) {
3202		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
3203		int err;
3204
3205		err = dev_validate_mtu(dev, mtu, extack);
3206		if (err) {
3207			free_netdev(dev);
3208			return ERR_PTR(err);
3209		}
3210		dev->mtu = mtu;
3211	}
3212	if (tb[IFLA_ADDRESS]) {
3213		memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
3214				nla_len(tb[IFLA_ADDRESS]));
3215		dev->addr_assign_type = NET_ADDR_SET;
3216	}
3217	if (tb[IFLA_BROADCAST])
3218		memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
3219				nla_len(tb[IFLA_BROADCAST]));
3220	if (tb[IFLA_TXQLEN])
3221		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
3222	if (tb[IFLA_OPERSTATE])
3223		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3224	if (tb[IFLA_LINKMODE])
3225		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
3226	if (tb[IFLA_GROUP])
3227		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3228	if (tb[IFLA_GSO_MAX_SIZE])
3229		netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
3230	if (tb[IFLA_GSO_MAX_SEGS])
3231		dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
3232
3233	return dev;
3234}
3235EXPORT_SYMBOL(rtnl_create_link);
3236
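/* The typical creation sequence, as driven by __rtnl_newlink() below
 * ("dummy0" is an example name only, error unwinding elided):
 *
 *	dev = rtnl_create_link(net, "dummy0", NET_NAME_USER, ops, tb, extack);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	err = ops->newlink(...);	(or register_netdevice(dev))
 *	err = rtnl_configure_link(dev, ifm);
 */
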
3237static int rtnl_group_changelink(const struct sk_buff *skb,
3238		struct net *net, int group,
3239		struct ifinfomsg *ifm,
3240		struct netlink_ext_ack *extack,
3241		struct nlattr **tb)
3242{
3243	struct net_device *dev, *aux;
3244	int err;
3245
3246	for_each_netdev_safe(net, dev, aux) {
3247		if (dev->group == group) {
3248			err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0);
3249			if (err < 0)
3250				return err;
3251		}
3252	}
3253
3254	return 0;
3255}
3256
3257static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3258			  struct nlattr **attr, struct netlink_ext_ack *extack)
3259{
3260	struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
3261	unsigned char name_assign_type = NET_NAME_USER;
3262	struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
3263	const struct rtnl_link_ops *m_ops;
3264	struct net_device *master_dev;
3265	struct net *net = sock_net(skb->sk);
3266	const struct rtnl_link_ops *ops;
3267	struct nlattr *tb[IFLA_MAX + 1];
3268	struct net *dest_net, *link_net;
3269	struct nlattr **slave_data;
3270	char kind[MODULE_NAME_LEN];
3271	struct net_device *dev;
3272	struct ifinfomsg *ifm;
3273	char ifname[IFNAMSIZ];
3274	struct nlattr **data;
3275	bool link_specified;
3276	int err;
3277
3278#ifdef CONFIG_MODULES
3279replay:
3280#endif
3281	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3282				     ifla_policy, extack);
3283	if (err < 0)
3284		return err;
3285
3286	err = rtnl_ensure_unique_netns(tb, extack, false);
3287	if (err < 0)
3288		return err;
3289
3290	if (tb[IFLA_IFNAME])
3291		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3292	else
3293		ifname[0] = '\0';
3294
3295	ifm = nlmsg_data(nlh);
3296	if (ifm->ifi_index > 0) {
3297		link_specified = true;
3298		dev = __dev_get_by_index(net, ifm->ifi_index);
3299	} else if (ifm->ifi_index < 0) {
3300		NL_SET_ERR_MSG(extack, "ifindex can't be negative");
3301		return -EINVAL;
3302	} else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
3303		link_specified = true;
3304		dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname);
3305	} else {
3306		link_specified = false;
3307		dev = NULL;
3308	}
3309
3310	master_dev = NULL;
3311	m_ops = NULL;
3312	if (dev) {
3313		master_dev = netdev_master_upper_dev_get(dev);
3314		if (master_dev)
3315			m_ops = master_dev->rtnl_link_ops;
3316	}
3317
3318	err = validate_linkmsg(dev, tb);
3319	if (err < 0)
3320		return err;
3321
3322	if (tb[IFLA_LINKINFO]) {
3323		err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
3324						  tb[IFLA_LINKINFO],
3325						  ifla_info_policy, NULL);
3326		if (err < 0)
3327			return err;
3328	} else
3329		memset(linkinfo, 0, sizeof(linkinfo));
3330
3331	if (linkinfo[IFLA_INFO_KIND]) {
3332		nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
3333		ops = rtnl_link_ops_get(kind);
3334	} else {
3335		kind[0] = '\0';
3336		ops = NULL;
3337	}
3338
3339	data = NULL;
3340	if (ops) {
3341		if (ops->maxtype > RTNL_MAX_TYPE)
3342			return -EINVAL;
3343
3344		if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
3345			err = nla_parse_nested_deprecated(attr, ops->maxtype,
3346							  linkinfo[IFLA_INFO_DATA],
3347							  ops->policy, extack);
3348			if (err < 0)
3349				return err;
3350			data = attr;
3351		}
3352		if (ops->validate) {
3353			err = ops->validate(tb, data, extack);
3354			if (err < 0)
3355				return err;
3356		}
3357	}
3358
3359	slave_data = NULL;
3360	if (m_ops) {
3361		if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
3362			return -EINVAL;
3363
3364		if (m_ops->slave_maxtype &&
3365		    linkinfo[IFLA_INFO_SLAVE_DATA]) {
3366			err = nla_parse_nested_deprecated(slave_attr,
3367							  m_ops->slave_maxtype,
3368							  linkinfo[IFLA_INFO_SLAVE_DATA],
3369							  m_ops->slave_policy,
3370							  extack);
3371			if (err < 0)
3372				return err;
3373			slave_data = slave_attr;
3374		}
3375	}
3376
3377	if (dev) {
3378		int status = 0;
3379
3380		if (nlh->nlmsg_flags & NLM_F_EXCL)
3381			return -EEXIST;
3382		if (nlh->nlmsg_flags & NLM_F_REPLACE)
3383			return -EOPNOTSUPP;
3384
3385		if (linkinfo[IFLA_INFO_DATA]) {
3386			if (!ops || ops != dev->rtnl_link_ops ||
3387			    !ops->changelink)
3388				return -EOPNOTSUPP;
3389
3390			err = ops->changelink(dev, tb, data, extack);
3391			if (err < 0)
3392				return err;
3393			status |= DO_SETLINK_NOTIFY;
3394		}
3395
3396		if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
3397			if (!m_ops || !m_ops->slave_changelink)
3398				return -EOPNOTSUPP;
3399
3400			err = m_ops->slave_changelink(master_dev, dev, tb,
3401						      slave_data, extack);
3402			if (err < 0)
3403				return err;
3404			status |= DO_SETLINK_NOTIFY;
3405		}
3406
3407		return do_setlink(skb, dev, ifm, extack, tb, ifname, status);
3408	}
3409
3410	if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
3411		/* No dev found and NLM_F_CREATE not set: the requested dev does
3412		 * not exist, or the request is for a group.
3413		 */
3414		if (link_specified)
3415			return -ENODEV;
3416		if (tb[IFLA_GROUP])
3417			return rtnl_group_changelink(skb, net,
3418						nla_get_u32(tb[IFLA_GROUP]),
3419						ifm, extack, tb);
3420		return -ENODEV;
3421	}
3422
3423	if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
3424		return -EOPNOTSUPP;
3425
3426	if (!ops) {
3427#ifdef CONFIG_MODULES
3428		if (kind[0]) {
3429			__rtnl_unlock();
3430			request_module("rtnl-link-%s", kind);
3431			rtnl_lock();
3432			ops = rtnl_link_ops_get(kind);
3433			if (ops)
3434				goto replay;
3435		}
3436#endif
3437		NL_SET_ERR_MSG(extack, "Unknown device type");
3438		return -EOPNOTSUPP;
3439	}
3440
3441	if (!ops->setup)
3442		return -EOPNOTSUPP;
3443
3444	if (!ifname[0]) {
3445		snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
3446		name_assign_type = NET_NAME_ENUM;
3447	}
3448
3449	dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
3450	if (IS_ERR(dest_net))
3451		return PTR_ERR(dest_net);
3452
3453	if (tb[IFLA_LINK_NETNSID]) {
3454		int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
3455
3456		link_net = get_net_ns_by_id(dest_net, id);
3457		if (!link_net) {
3458			NL_SET_ERR_MSG(extack, "Unknown network namespace id");
3459			err = -EINVAL;
3460			goto out;
3461		}
3462		err = -EPERM;
3463		if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
3464			goto out;
3465	} else {
3466		link_net = NULL;
3467	}
3468
3469	dev = rtnl_create_link(link_net ? : dest_net, ifname,
3470			       name_assign_type, ops, tb, extack);
3471	if (IS_ERR(dev)) {
3472		err = PTR_ERR(dev);
3473		goto out;
3474	}
3475
3476	dev->ifindex = ifm->ifi_index;
3477
3478	if (ops->newlink)
3479		err = ops->newlink(link_net ? : net, dev, tb, data, extack);
3480	else
3481		err = register_netdevice(dev);
3482	if (err < 0) {
3483		free_netdev(dev);
3484		goto out;
3485	}
3486
3487	err = rtnl_configure_link(dev, ifm);
3488	if (err < 0)
3489		goto out_unregister;
3490	if (link_net) {
3491		err = dev_change_net_namespace(dev, dest_net, ifname);
3492		if (err < 0)
3493			goto out_unregister;
3494	}
3495	if (tb[IFLA_MASTER]) {
3496		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3497		if (err)
3498			goto out_unregister;
3499	}
3500out:
3501	if (link_net)
3502		put_net(link_net);
3503	put_net(dest_net);
3504	return err;
3505out_unregister:
3506	if (ops->newlink) {
3507		LIST_HEAD(list_kill);
3508
3509		ops->dellink(dev, &list_kill);
3510		unregister_netdevice_many(&list_kill);
3511	} else {
3512		unregister_netdevice(dev);
3513	}
3514	goto out;
3515}
3516
3517static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3518			struct netlink_ext_ack *extack)
3519{
3520	struct nlattr **attr;
3521	int ret;
3522
3523	attr = kmalloc_array(RTNL_MAX_TYPE + 1, sizeof(*attr), GFP_KERNEL);
3524	if (!attr)
3525		return -ENOMEM;
3526
3527	ret = __rtnl_newlink(skb, nlh, attr, extack);
3528	kfree(attr);
3529	return ret;
3530}
3531
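/* attr[] above holds RTNL_MAX_TYPE + 1 attribute pointers (~400 bytes
 * on 64-bit), which is why it lives on the heap instead of adding to
 * __rtnl_newlink()'s already sizeable stack frame.
 */
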
3532static int rtnl_valid_getlink_req(struct sk_buff *skb,
3533				  const struct nlmsghdr *nlh,
3534				  struct nlattr **tb,
3535				  struct netlink_ext_ack *extack)
3536{
3537	struct ifinfomsg *ifm;
3538	int i, err;
3539
3540	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
3541		NL_SET_ERR_MSG(extack, "Invalid header for get link");
3542		return -EINVAL;
3543	}
3544
3545	if (!netlink_strict_get_check(skb))
3546		return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3547					      ifla_policy, extack);
3548
3549	ifm = nlmsg_data(nlh);
3550	if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
3551	    ifm->ifi_change) {
3552		NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
3553		return -EINVAL;
3554	}
3555
3556	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
3557					    ifla_policy, extack);
3558	if (err)
3559		return err;
3560
3561	for (i = 0; i <= IFLA_MAX; i++) {
3562		if (!tb[i])
3563			continue;
3564
3565		switch (i) {
3566		case IFLA_IFNAME:
3567		case IFLA_ALT_IFNAME:
3568		case IFLA_EXT_MASK:
3569		case IFLA_TARGET_NETNSID:
3570			break;
3571		default:
3572			NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
3573			return -EINVAL;
3574		}
3575	}
3576
3577	return 0;
3578}
3579
3580static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3581			struct netlink_ext_ack *extack)
3582{
3583	struct net *net = sock_net(skb->sk);
3584	struct net *tgt_net = net;
3585	struct ifinfomsg *ifm;
3586	struct nlattr *tb[IFLA_MAX+1];
3587	struct net_device *dev = NULL;
3588	struct sk_buff *nskb;
3589	int netnsid = -1;
3590	int err;
3591	u32 ext_filter_mask = 0;
3592
3593	err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
3594	if (err < 0)
3595		return err;
3596
3597	err = rtnl_ensure_unique_netns(tb, extack, true);
3598	if (err < 0)
3599		return err;
3600
3601	if (tb[IFLA_TARGET_NETNSID]) {
3602		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3603		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3604		if (IS_ERR(tgt_net))
3605			return PTR_ERR(tgt_net);
3606	}
3607
3608	if (tb[IFLA_EXT_MASK])
3609		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3610
3611	err = -EINVAL;
3612	ifm = nlmsg_data(nlh);
3613	if (ifm->ifi_index > 0)
3614		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3615	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3616		dev = rtnl_dev_get(tgt_net, tb[IFLA_IFNAME],
3617				   tb[IFLA_ALT_IFNAME], NULL);
3618	else
3619		goto out;
3620
3621	err = -ENODEV;
3622	if (dev == NULL)
3623		goto out;
3624
3625	err = -ENOBUFS;
3626	nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
3627	if (nskb == NULL)
3628		goto out;
3629
3630	err = rtnl_fill_ifinfo(nskb, dev, net,
3631			       RTM_NEWLINK, NETLINK_CB(skb).portid,
3632			       nlh->nlmsg_seq, 0, 0, ext_filter_mask,
3633			       0, NULL, 0, netnsid, GFP_KERNEL);
3634	if (err < 0) {
3635		/* -EMSGSIZE implies BUG in if_nlmsg_size */
3636		WARN_ON(err == -EMSGSIZE);
3637		kfree_skb(nskb);
3638	} else
3639		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
3640out:
3641	if (netnsid >= 0)
3642		put_net(tgt_net);
3643
3644	return err;
3645}
3646
3647static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
3648			   bool *changed, struct netlink_ext_ack *extack)
3649{
3650	char *alt_ifname;
3651	size_t size;
3652	int err;
3653
3654	err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
3655	if (err)
3656		return err;
3657
3658	if (cmd == RTM_NEWLINKPROP) {
3659		size = rtnl_prop_list_size(dev);
3660		size += nla_total_size(ALTIFNAMSIZ);
3661		if (size >= U16_MAX) {
3662			NL_SET_ERR_MSG(extack,
3663				       "effective property list too long");
3664			return -EINVAL;
3665		}
3666	}
3667
3668	alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
3669	if (!alt_ifname)
3670		return -ENOMEM;
3671
3672	if (cmd == RTM_NEWLINKPROP) {
3673		err = netdev_name_node_alt_create(dev, alt_ifname);
3674		if (!err)
3675			alt_ifname = NULL;
3676	} else if (cmd == RTM_DELLINKPROP) {
3677		err = netdev_name_node_alt_destroy(dev, alt_ifname);
3678	} else {
3679		WARN_ON_ONCE(1);
3680		err = -EINVAL;
3681	}
3682
3683	kfree(alt_ifname);
3684	if (!err)
3685		*changed = true;
3686	return err;
3687}
3688
3689static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
3690			 struct netlink_ext_ack *extack)
3691{
3692	struct net *net = sock_net(skb->sk);
3693	struct nlattr *tb[IFLA_MAX + 1];
3694	struct net_device *dev;
3695	struct ifinfomsg *ifm;
3696	bool changed = false;
3697	struct nlattr *attr;
3698	int err, rem;
3699
3700	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
3701	if (err)
3702		return err;
3703
3704	err = rtnl_ensure_unique_netns(tb, extack, true);
3705	if (err)
3706		return err;
3707
3708	ifm = nlmsg_data(nlh);
3709	if (ifm->ifi_index > 0)
3710		dev = __dev_get_by_index(net, ifm->ifi_index);
3711	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3712		dev = rtnl_dev_get(net, tb[IFLA_IFNAME],
3713				   tb[IFLA_ALT_IFNAME], NULL);
3714	else
3715		return -EINVAL;
3716
3717	if (!dev)
3718		return -ENODEV;
3719
3720	if (!tb[IFLA_PROP_LIST])
3721		return 0;
3722
3723	nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
3724		switch (nla_type(attr)) {
3725		case IFLA_ALT_IFNAME:
3726			err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
3727			if (err)
3728				return err;
3729			break;
3730		}
3731	}
3732
3733	if (changed)
3734		netdev_state_change(dev);
3735	return 0;
3736}
3737
3738static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3739			    struct netlink_ext_ack *extack)
3740{
3741	return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
3742}
3743
3744static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3745			    struct netlink_ext_ack *extack)
3746{
3747	return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
3748}
3749
3750static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
3751{
3752	struct net *net = sock_net(skb->sk);
3753	size_t min_ifinfo_dump_size = 0;
3754	struct nlattr *tb[IFLA_MAX+1];
3755	u32 ext_filter_mask = 0;
3756	struct net_device *dev;
3757	int hdrlen;
3758
3759	/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
3760	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
3761		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
3762
3763	if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
3764		if (tb[IFLA_EXT_MASK])
3765			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3766	}
3767
3768	if (!ext_filter_mask)
3769		return NLMSG_GOODSIZE;
3770	/*
3771	 * Traverse the list of net devices and compute the minimum buffer
3772	 * size, i.e. the largest per-device message size under the filter mask.
3773	 */
3774	rcu_read_lock();
3775	for_each_netdev_rcu(net, dev) {
3776		min_ifinfo_dump_size = max(min_ifinfo_dump_size,
3777					   if_nlmsg_size(dev, ext_filter_mask));
3778	}
3779	rcu_read_unlock();
3780
3781	return nlmsg_total_size(min_ifinfo_dump_size);
3782}
3783
3784static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
3785{
3786	int idx;
3787	int s_idx = cb->family;
3788	int type = cb->nlh->nlmsg_type - RTM_BASE;
3789	int ret = 0;
3790
3791	if (s_idx == 0)
3792		s_idx = 1;
3793
3794	for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
3795		struct rtnl_link **tab;
3796		struct rtnl_link *link;
3797		rtnl_dumpit_func dumpit;
3798
3799		if (idx < s_idx || idx == PF_PACKET)
3800			continue;
3801
3802		if (type < 0 || type >= RTM_NR_MSGTYPES)
3803			continue;
3804
3805		tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
3806		if (!tab)
3807			continue;
3808
3809		link = tab[type];
3810		if (!link)
3811			continue;
3812
3813		dumpit = link->dumpit;
3814		if (!dumpit)
3815			continue;
3816
3817		if (idx > s_idx) {
3818			memset(&cb->args[0], 0, sizeof(cb->args));
3819			cb->prev_seq = 0;
3820			cb->seq = 0;
3821		}
3822		ret = dumpit(skb, cb);
3823		if (ret)
3824			break;
3825	}
3826	cb->family = idx;
3827
3828	return skb->len ? : ret;
3829}
3830
3831struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
3832				       unsigned int change,
3833				       u32 event, gfp_t flags, int *new_nsid,
3834				       int new_ifindex)
3835{
3836	struct net *net = dev_net(dev);
3837	struct sk_buff *skb;
3838	int err = -ENOBUFS;
3839	size_t if_info_size;
3840
3841	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
3842	if (skb == NULL)
3843		goto errout;
3844
3845	err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
3846			       type, 0, 0, change, 0, 0, event,
3847			       new_nsid, new_ifindex, -1, flags);
3848	if (err < 0) {
3849		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
3850		WARN_ON(err == -EMSGSIZE);
3851		kfree_skb(skb);
3852		goto errout;
3853	}
3854	return skb;
3855errout:
3856	if (err < 0)
3857		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
3858	return NULL;
3859}
3860
3861void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
3862{
3863	struct net *net = dev_net(dev);
3864
3865	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
3866}
3867
3868static void rtmsg_ifinfo_event(int type, struct net_device *dev,
3869			       unsigned int change, u32 event,
3870			       gfp_t flags, int *new_nsid, int new_ifindex)
3871{
3872	struct sk_buff *skb;
3873
3874	if (dev->reg_state != NETREG_REGISTERED)
3875		return;
3876
3877	skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
3878				     new_ifindex);
3879	if (skb)
3880		rtmsg_ifinfo_send(skb, dev, flags);
3881}
3882
3883void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
3884		  gfp_t flags)
3885{
3886	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3887			   NULL, 0);
3888}
3889
3890void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
3891			 gfp_t flags, int *new_nsid, int new_ifindex)
3892{
3893	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3894			   new_nsid, new_ifindex);
3895}
3896
3897static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
3898				   struct net_device *dev,
3899				   u8 *addr, u16 vid, u32 pid, u32 seq,
3900				   int type, unsigned int flags,
3901				   int nlflags, u16 ndm_state)
3902{
3903	struct nlmsghdr *nlh;
3904	struct ndmsg *ndm;
3905
3906	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
3907	if (!nlh)
3908		return -EMSGSIZE;
3909
3910	ndm = nlmsg_data(nlh);
3911	ndm->ndm_family  = AF_BRIDGE;
3912	ndm->ndm_pad1	 = 0;
3913	ndm->ndm_pad2    = 0;
3914	ndm->ndm_flags	 = flags;
3915	ndm->ndm_type	 = 0;
3916	ndm->ndm_ifindex = dev->ifindex;
3917	ndm->ndm_state   = ndm_state;
3918
3919	if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr))
3920		goto nla_put_failure;
3921	if (vid)
3922		if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
3923			goto nla_put_failure;
3924
3925	nlmsg_end(skb, nlh);
3926	return 0;
3927
3928nla_put_failure:
3929	nlmsg_cancel(skb, nlh);
3930	return -EMSGSIZE;
3931}
3932
3933static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev)
3934{
3935	return NLMSG_ALIGN(sizeof(struct ndmsg)) +
3936	       nla_total_size(dev->addr_len) +	/* NDA_LLADDR */
3937	       nla_total_size(sizeof(u16)) +	/* NDA_VLAN */
3938	       0;
3939}
3940
3941static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
3942			    u16 ndm_state)
3943{
3944	struct net *net = dev_net(dev);
3945	struct sk_buff *skb;
3946	int err = -ENOBUFS;
3947
3948	skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC);
3949	if (!skb)
3950		goto errout;
3951
3952	err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
3953				      0, 0, type, NTF_SELF, 0, ndm_state);
3954	if (err < 0) {
3955		kfree_skb(skb);
3956		goto errout;
3957	}
3958
3959	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3960	return;
3961errout:
3962	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3963}
3964
3965/**
3966 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
3967 */
3968int ndo_dflt_fdb_add(struct ndmsg *ndm,
3969		     struct nlattr *tb[],
3970		     struct net_device *dev,
3971		     const unsigned char *addr, u16 vid,
3972		     u16 flags)
3973{
3974	int err = -EINVAL;
3975
3976	/* If aging addresses are supported, the device will need to
3977	 * implement its own handler for this.
3978	 */
3979	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
3980		pr_info("%s: FDB only supports static addresses\n", dev->name);
3981		return err;
3982	}
3983
3984	if (vid) {
3985		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
3986		return err;
3987	}
3988
3989	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3990		err = dev_uc_add_excl(dev, addr);
3991	else if (is_multicast_ether_addr(addr))
3992		err = dev_mc_add_excl(dev, addr);
3993
3994	/* Only return duplicate errors if NLM_F_EXCL is set */
3995	if (err == -EEXIST && !(flags & NLM_F_EXCL))
3996		err = 0;
3997
3998	return err;
3999}
4000EXPORT_SYMBOL(ndo_dflt_fdb_add);
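
/* Devices that do not implement ndo_fdb_add get this default behaviour
 * via the fallback in rtnl_fdb_add() below. A driver may also chain to
 * it from its own handler; a hypothetical sketch (foo_* names are
 * illustrative only):
 *
 *	static int foo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 *			       struct net_device *dev,
 *			       const unsigned char *addr, u16 vid,
 *			       u16 flags, struct netlink_ext_ack *extack)
 *	{
 *		// program the hardware first, then mirror into SW lists
 *		return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
 *	}
 */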
4001
4002static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
4003			 struct netlink_ext_ack *extack)
4004{
4005	u16 vid = 0;
4006
4007	if (vlan_attr) {
4008		if (nla_len(vlan_attr) != sizeof(u16)) {
4009			NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
4010			return -EINVAL;
4011		}
4012
4013		vid = nla_get_u16(vlan_attr);
4014
4015		if (!vid || vid >= VLAN_VID_MASK) {
4016			NL_SET_ERR_MSG(extack, "invalid vlan id");
4017			return -EINVAL;
4018		}
4019	}
4020	*p_vid = vid;
4021	return 0;
4022}
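
/* Net effect of the check above: valid VLAN IDs are 1..4094. An absent
 * attribute yields vid == 0, which callers treat as "no VLAN", while
 * 4095 (VLAN_VID_MASK) is reserved and rejected.
 */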
4023
4024static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
4025			struct netlink_ext_ack *extack)
4026{
4027	struct net *net = sock_net(skb->sk);
4028	struct ndmsg *ndm;
4029	struct nlattr *tb[NDA_MAX+1];
4030	struct net_device *dev;
4031	u8 *addr;
4032	u16 vid;
4033	int err;
4034
4035	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4036				     extack);
4037	if (err < 0)
4038		return err;
4039
4040	ndm = nlmsg_data(nlh);
4041	if (ndm->ndm_ifindex == 0) {
4042		NL_SET_ERR_MSG(extack, "invalid ifindex");
4043		return -EINVAL;
4044	}
4045
4046	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4047	if (dev == NULL) {
4048		NL_SET_ERR_MSG(extack, "unknown ifindex");
4049		return -ENODEV;
4050	}
4051
4052	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4053		NL_SET_ERR_MSG(extack, "invalid address");
4054		return -EINVAL;
4055	}
4056
4057	if (dev->type != ARPHRD_ETHER) {
4058		NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
4059		return -EINVAL;
4060	}
4061
4062	addr = nla_data(tb[NDA_LLADDR]);
4063
4064	err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4065	if (err)
4066		return err;
4067
4068	err = -EOPNOTSUPP;
4069
4070	/* Support fdb on the master device, the net/bridge default case */
4071	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4072	    netif_is_bridge_port(dev)) {
4073		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4074		const struct net_device_ops *ops = br_dev->netdev_ops;
4075
4076		err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
4077				       nlh->nlmsg_flags, extack);
4078		if (err)
4079			goto out;
4080		else
4081			ndm->ndm_flags &= ~NTF_MASTER;
4082	}
4083
4084	/* Embedded bridge, macvlan, and any other device support */
4085	if ((ndm->ndm_flags & NTF_SELF)) {
4086		if (dev->netdev_ops->ndo_fdb_add)
4087			err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
4088							   vid,
4089							   nlh->nlmsg_flags,
4090							   extack);
4091		else
4092			err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
4093					       nlh->nlmsg_flags);
4094
4095		if (!err) {
4096			rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
4097					ndm->ndm_state);
4098			ndm->ndm_flags &= ~NTF_SELF;
4099		}
4100	}
4101out:
4102	return err;
4103}
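
/* Typical triggers from iproute2 (sketched; the flag mapping follows
 * the branches above): "bridge fdb add <mac> dev <port> master" sends
 * RTM_NEWNEIGH with NTF_MASTER and is handled by the bridge's
 * ndo_fdb_add, while "bridge fdb add <mac> dev <dev> self" sets
 * NTF_SELF and is handled by the device itself (e.g. a VXLAN netdev).
 * With no flags at all, the master path is tried first.
 */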
4104
4105/*
4106 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
4107 */
4108int ndo_dflt_fdb_del(struct ndmsg *ndm,
4109		     struct nlattr *tb[],
4110		     struct net_device *dev,
4111		     const unsigned char *addr, u16 vid)
4112{
4113	int err = -EINVAL;
4114
4115	/* If the device supports aging addresses, it will need to
4116	 * implement its own handler for this.
4117	 */
4118	if (!(ndm->ndm_state & NUD_PERMANENT)) {
4119		pr_info("%s: FDB only supports static addresses\n", dev->name);
4120		return err;
4121	}
4122
4123	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4124		err = dev_uc_del(dev, addr);
4125	else if (is_multicast_ether_addr(addr))
4126		err = dev_mc_del(dev, addr);
4127
4128	return err;
4129}
4130EXPORT_SYMBOL(ndo_dflt_fdb_del);
4131
4132static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
4133			struct netlink_ext_ack *extack)
4134{
4135	struct net *net = sock_net(skb->sk);
4136	struct ndmsg *ndm;
4137	struct nlattr *tb[NDA_MAX+1];
4138	struct net_device *dev;
4139	__u8 *addr;
4140	int err;
4141	u16 vid;
4142
4143	if (!netlink_capable(skb, CAP_NET_ADMIN))
4144		return -EPERM;
4145
4146	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4147				     extack);
4148	if (err < 0)
4149		return err;
4150
4151	ndm = nlmsg_data(nlh);
4152	if (ndm->ndm_ifindex == 0) {
4153		NL_SET_ERR_MSG(extack, "invalid ifindex");
4154		return -EINVAL;
4155	}
4156
4157	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4158	if (dev == NULL) {
4159		NL_SET_ERR_MSG(extack, "unknown ifindex");
4160		return -ENODEV;
4161	}
4162
4163	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4164		NL_SET_ERR_MSG(extack, "invalid address");
4165		return -EINVAL;
4166	}
4167
4168	if (dev->type != ARPHRD_ETHER) {
4169		NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
4170		return -EINVAL;
4171	}
4172
4173	addr = nla_data(tb[NDA_LLADDR]);
4174
4175	err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4176	if (err)
4177		return err;
4178
4179	err = -EOPNOTSUPP;
4180
4181	/* Support fdb on the master device, the net/bridge default case */
4182	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4183	    netif_is_bridge_port(dev)) {
4184		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4185		const struct net_device_ops *ops = br_dev->netdev_ops;
4186
4187		if (ops->ndo_fdb_del)
4188			err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
4189
4190		if (err)
4191			goto out;
4192		else
4193			ndm->ndm_flags &= ~NTF_MASTER;
4194	}
4195
4196	/* Embedded bridge, macvlan, and any other device support */
4197	if (ndm->ndm_flags & NTF_SELF) {
4198		if (dev->netdev_ops->ndo_fdb_del)
4199			err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
4200							   vid);
4201		else
4202			err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4203
4204		if (!err) {
4205			rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4206					ndm->ndm_state);
4207			ndm->ndm_flags &= ~NTF_SELF;
4208		}
4209	}
4210out:
4211	return err;
4212}
4213
4214static int nlmsg_populate_fdb(struct sk_buff *skb,
4215			      struct netlink_callback *cb,
4216			      struct net_device *dev,
4217			      int *idx,
4218			      struct netdev_hw_addr_list *list)
4219{
4220	struct netdev_hw_addr *ha;
4221	int err;
4222	u32 portid, seq;
4223
4224	portid = NETLINK_CB(cb->skb).portid;
4225	seq = cb->nlh->nlmsg_seq;
4226
4227	list_for_each_entry(ha, &list->list, list) {
4228		if (*idx < cb->args[2])
4229			goto skip;
4230
4231		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
4232					      portid, seq,
4233					      RTM_NEWNEIGH, NTF_SELF,
4234					      NLM_F_MULTI, NUD_PERMANENT);
4235		if (err < 0)
4236			return err;
4237skip:
4238		*idx += 1;
4239	}
4240	return 0;
4241}
4242
4243/**
4244 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
4245 * @skb: socket buffer to store message in
4246 * @cb: netlink callback
4247 * @dev: netdevice
4248 * @filter_dev: ignored
4249 * @idx: the number of FDB table entries dumped is added to *@idx
4250 *
4251 * Default netdevice operation to dump the existing unicast address list.
4252 * Returns the number of addresses from the list that were put in skb.
4253 */
4254int ndo_dflt_fdb_dump(struct sk_buff *skb,
4255		      struct netlink_callback *cb,
4256		      struct net_device *dev,
4257		      struct net_device *filter_dev,
4258		      int *idx)
4259{
4260	int err;
4261
4262	if (dev->type != ARPHRD_ETHER)
4263		return -EINVAL;
4264
4265	netif_addr_lock_bh(dev);
4266	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
4267	if (err)
4268		goto out;
4269	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
4270out:
4271	netif_addr_unlock_bh(dev);
4272	return err;
4273}
4274EXPORT_SYMBOL(ndo_dflt_fdb_dump);
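
/* A driver that dumps hardware-owned entries can still chain to this
 * helper for the software address lists; a hypothetical sketch (foo_*
 * is illustrative only):
 *
 *	static int foo_fdb_dump(struct sk_buff *skb,
 *				struct netlink_callback *cb,
 *				struct net_device *dev,
 *				struct net_device *filter_dev, int *idx)
 *	{
 *		// ... emit hardware entries into skb first ...
 *		return ndo_dflt_fdb_dump(skb, cb, dev, filter_dev, idx);
 *	}
 */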
4275
4276static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
4277				 int *br_idx, int *brport_idx,
4278				 struct netlink_ext_ack *extack)
4279{
4280	struct nlattr *tb[NDA_MAX + 1];
4281	struct ndmsg *ndm;
4282	int err, i;
4283
4284	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4285		NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
4286		return -EINVAL;
4287	}
4288
4289	ndm = nlmsg_data(nlh);
4290	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
4291	    ndm->ndm_flags || ndm->ndm_type) {
4292		NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
4293		return -EINVAL;
4294	}
4295
4296	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4297					    NDA_MAX, NULL, extack);
4298	if (err < 0)
4299		return err;
4300
4301	*brport_idx = ndm->ndm_ifindex;
4302	for (i = 0; i <= NDA_MAX; ++i) {
4303		if (!tb[i])
4304			continue;
4305
4306		switch (i) {
4307		case NDA_IFINDEX:
4308			if (nla_len(tb[i]) != sizeof(u32)) {
4309				NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
4310				return -EINVAL;
4311			}
4312			*brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
4313			break;
4314		case NDA_MASTER:
4315			if (nla_len(tb[i]) != sizeof(u32)) {
4316				NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
4317				return -EINVAL;
4318			}
4319			*br_idx = nla_get_u32(tb[NDA_MASTER]);
4320			break;
4321		default:
4322			NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
4323			return -EINVAL;
4324		}
4325	}
4326
4327	return 0;
4328}
4329
4330static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
4331				 int *br_idx, int *brport_idx,
4332				 struct netlink_ext_ack *extack)
4333{
4334	struct nlattr *tb[IFLA_MAX+1];
4335	int err;
4336
4337	/* A hack to preserve kernel<->userspace interface.
4338	 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
4339	 * However, ndmsg is shorter than ifinfomsg, so nlmsg_parse() bails.
4340	 * So, check for ndmsg with an optional u32 attribute (not used here).
4341	 * Fortunately these sizes don't conflict with the size of ifinfomsg
4342	 * with an optional attribute.
4343	 */
4344	if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
4345	    (nlmsg_len(nlh) != sizeof(struct ndmsg) +
4346	     nla_attr_size(sizeof(u32)))) {
4347		struct ifinfomsg *ifm;
4348
4349		err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4350					     tb, IFLA_MAX, ifla_policy,
4351					     extack);
4352		if (err < 0) {
4353			return -EINVAL;
4354		} else if (err == 0) {
4355			if (tb[IFLA_MASTER])
4356				*br_idx = nla_get_u32(tb[IFLA_MASTER]);
4357		}
4358
4359		ifm = nlmsg_data(nlh);
4360		*brport_idx = ifm->ifi_index;
4361	}
4362	return 0;
4363}
4364
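/* Dump-resume state for rtnl_fdb_dump() below: cb->args[0] holds the
 * device hash bucket, cb->args[1] the device position within that
 * bucket, and cb->args[2] the FDB entry offset within the current
 * device (consumed by nlmsg_populate_fdb() above). Each dump
 * continuation restarts from these saved positions.
 */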
4365static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
4366{
4367	struct net_device *dev;
4368	struct net_device *br_dev = NULL;
4369	const struct net_device_ops *ops = NULL;
4370	const struct net_device_ops *cops = NULL;
4371	struct net *net = sock_net(skb->sk);
4372	struct hlist_head *head;
4373	int brport_idx = 0;
4374	int br_idx = 0;
4375	int h, s_h;
4376	int idx = 0, s_idx;
4377	int err = 0;
4378	int fidx = 0;
4379
4380	if (cb->strict_check)
4381		err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
4382					    cb->extack);
4383	else
4384		err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
4385					    cb->extack);
4386	if (err < 0)
4387		return err;
4388
4389	if (br_idx) {
4390		br_dev = __dev_get_by_index(net, br_idx);
4391		if (!br_dev)
4392			return -ENODEV;
4393
4394		ops = br_dev->netdev_ops;
4395	}
4396
4397	s_h = cb->args[0];
4398	s_idx = cb->args[1];
4399
4400	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4401		idx = 0;
4402		head = &net->dev_index_head[h];
4403		hlist_for_each_entry(dev, head, index_hlist) {
4404
4405			if (brport_idx && (dev->ifindex != brport_idx))
4406				continue;
4407
4408			if (!br_idx) { /* user did not specify a particular bridge */
4409				if (netif_is_bridge_port(dev)) {
4410					br_dev = netdev_master_upper_dev_get(dev);
4411					cops = br_dev->netdev_ops;
4412				}
4413			} else {
4414				if (dev != br_dev &&
4415				    !netif_is_bridge_port(dev))
4416					continue;
4417
4418				if (br_dev != netdev_master_upper_dev_get(dev) &&
4419				    !(dev->priv_flags & IFF_EBRIDGE))
4420					continue;
4421				cops = ops;
4422			}
4423
4424			if (idx < s_idx)
4425				goto cont;
4426
4427			if (netif_is_bridge_port(dev)) {
4428				if (cops && cops->ndo_fdb_dump) {
4429					err = cops->ndo_fdb_dump(skb, cb,
4430								br_dev, dev,
4431								&fidx);
4432					if (err == -EMSGSIZE)
4433						goto out;
4434				}
4435			}
4436
4437			if (dev->netdev_ops->ndo_fdb_dump)
4438				err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
4439								    dev, NULL,
4440								    &fidx);
4441			else
4442				err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
4443							&fidx);
4444			if (err == -EMSGSIZE)
4445				goto out;
4446
4447			cops = NULL;
4448
4449			/* reset fdb offset to 0 for the rest of the interfaces */
4450			cb->args[2] = 0;
4451			fidx = 0;
4452cont:
4453			idx++;
4454		}
4455	}
4456
4457out:
4458	cb->args[0] = h;
4459	cb->args[1] = idx;
4460	cb->args[2] = fidx;
4461
4462	return skb->len;
4463}
4464
4465static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
4466				struct nlattr **tb, u8 *ndm_flags,
4467				int *br_idx, int *brport_idx, u8 **addr,
4468				u16 *vid, struct netlink_ext_ack *extack)
4469{
4470	struct ndmsg *ndm;
4471	int err, i;
4472
4473	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4474		NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
4475		return -EINVAL;
4476	}
4477
4478	ndm = nlmsg_data(nlh);
4479	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
4480	    ndm->ndm_type) {
4481		NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
4482		return -EINVAL;
4483	}
4484
4485	if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
4486		NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
4487		return -EINVAL;
4488	}
4489
4490	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4491					    NDA_MAX, nda_policy, extack);
4492	if (err < 0)
4493		return err;
4494
4495	*ndm_flags = ndm->ndm_flags;
4496	*brport_idx = ndm->ndm_ifindex;
4497	for (i = 0; i <= NDA_MAX; ++i) {
4498		if (!tb[i])
4499			continue;
4500
4501		switch (i) {
4502		case NDA_MASTER:
4503			*br_idx = nla_get_u32(tb[i]);
4504			break;
4505		case NDA_LLADDR:
4506			if (nla_len(tb[i]) != ETH_ALEN) {
4507				NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
4508				return -EINVAL;
4509			}
4510			*addr = nla_data(tb[i]);
4511			break;
4512		case NDA_VLAN:
4513			err = fdb_vid_parse(tb[i], vid, extack);
4514			if (err)
4515				return err;
4516			break;
4517		case NDA_VNI:
4518			break;
4519		default:
4520			NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
4521			return -EINVAL;
4522		}
4523	}
4524
4525	return 0;
4526}
4527
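/* Doit handler for RTM_GETNEIGH lookups in AF_BRIDGE, e.g. (assuming a
 * reasonably recent iproute2) "bridge fdb get <mac> dev <port>";
 * requests are validated by valid_fdb_get_strict() above.
 */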
4528static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4529			struct netlink_ext_ack *extack)
4530{
4531	struct net_device *dev = NULL, *br_dev = NULL;
4532	const struct net_device_ops *ops = NULL;
4533	struct net *net = sock_net(in_skb->sk);
4534	struct nlattr *tb[NDA_MAX + 1];
4535	struct sk_buff *skb;
4536	int brport_idx = 0;
4537	u8 ndm_flags = 0;
4538	int br_idx = 0;
4539	u8 *addr = NULL;
4540	u16 vid = 0;
4541	int err;
4542
4543	err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
4544				   &brport_idx, &addr, &vid, extack);
4545	if (err < 0)
4546		return err;
4547
4548	if (!addr) {
4549		NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
4550		return -EINVAL;
4551	}
4552
4553	if (brport_idx) {
4554		dev = __dev_get_by_index(net, brport_idx);
4555		if (!dev) {
4556			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
4557			return -ENODEV;
4558		}
4559	}
4560
4561	if (br_idx) {
4562		if (dev) {
4563			NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
4564			return -EINVAL;
4565		}
4566
4567		br_dev = __dev_get_by_index(net, br_idx);
4568		if (!br_dev) {
4569			NL_SET_ERR_MSG(extack, "Invalid master ifindex");
4570			return -EINVAL;
4571		}
4572		ops = br_dev->netdev_ops;
4573	}
4574
4575	if (dev) {
4576		if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
4577			if (!netif_is_bridge_port(dev)) {
4578				NL_SET_ERR_MSG(extack, "Device is not a bridge port");
4579				return -EINVAL;
4580			}
4581			br_dev = netdev_master_upper_dev_get(dev);
4582			if (!br_dev) {
4583				NL_SET_ERR_MSG(extack, "Master of device not found");
4584				return -EINVAL;
4585			}
4586			ops = br_dev->netdev_ops;
4587		} else {
4588			if (!(ndm_flags & NTF_SELF)) {
4589				NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
4590				return -EINVAL;
4591			}
4592			ops = dev->netdev_ops;
4593		}
4594	}
4595
4596	if (!br_dev && !dev) {
4597		NL_SET_ERR_MSG(extack, "No device specified");
4598		return -ENODEV;
4599	}
4600
4601	if (!ops || !ops->ndo_fdb_get) {
4602		NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
4603		return -EOPNOTSUPP;
4604	}
4605
4606	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4607	if (!skb)
4608		return -ENOBUFS;
4609
4610	if (br_dev)
4611		dev = br_dev;
4612	err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
4613			       NETLINK_CB(in_skb).portid,
4614			       nlh->nlmsg_seq, extack);
4615	if (err)
4616		goto out;
4617
4618	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4619out:
4620	kfree_skb(skb);
4621	return err;
4622}
4623
4624static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
4625			       unsigned int attrnum, unsigned int flag)
4626{
4627	if (mask & flag)
4628		return nla_put_u8(skb, attrnum, !!(flags & flag));
4629	return 0;
4630}
4631
4632int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4633			    struct net_device *dev, u16 mode,
4634			    u32 flags, u32 mask, int nlflags,
4635			    u32 filter_mask,
4636			    int (*vlan_fill)(struct sk_buff *skb,
4637					     struct net_device *dev,
4638					     u32 filter_mask))
4639{
4640	struct nlmsghdr *nlh;
4641	struct ifinfomsg *ifm;
4642	struct nlattr *br_afspec;
4643	struct nlattr *protinfo;
4644	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
4645	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4646	int err = 0;
4647
4648	nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
4649	if (nlh == NULL)
4650		return -EMSGSIZE;
4651
4652	ifm = nlmsg_data(nlh);
4653	ifm->ifi_family = AF_BRIDGE;
4654	ifm->__ifi_pad = 0;
4655	ifm->ifi_type = dev->type;
4656	ifm->ifi_index = dev->ifindex;
4657	ifm->ifi_flags = dev_get_flags(dev);
4658	ifm->ifi_change = 0;
4659
4660
4661	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4662	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4663	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
4664	    (br_dev &&
4665	     nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
4666	    (dev->addr_len &&
4667	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
4668	    (dev->ifindex != dev_get_iflink(dev) &&
4669	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
4670		goto nla_put_failure;
4671
4672	br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
4673	if (!br_afspec)
4674		goto nla_put_failure;
4675
4676	if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
4677		nla_nest_cancel(skb, br_afspec);
4678		goto nla_put_failure;
4679	}
4680
4681	if (mode != BRIDGE_MODE_UNDEF) {
4682		if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
4683			nla_nest_cancel(skb, br_afspec);
4684			goto nla_put_failure;
4685		}
4686	}
4687	if (vlan_fill) {
4688		err = vlan_fill(skb, dev, filter_mask);
4689		if (err) {
4690			nla_nest_cancel(skb, br_afspec);
4691			goto nla_put_failure;
4692		}
4693	}
4694	nla_nest_end(skb, br_afspec);
4695
4696	protinfo = nla_nest_start(skb, IFLA_PROTINFO);
4697	if (!protinfo)
4698		goto nla_put_failure;
4699
4700	if (brport_nla_put_flag(skb, flags, mask,
4701				IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
4702	    brport_nla_put_flag(skb, flags, mask,
4703				IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
4704	    brport_nla_put_flag(skb, flags, mask,
4705				IFLA_BRPORT_FAST_LEAVE,
4706				BR_MULTICAST_FAST_LEAVE) ||
4707	    brport_nla_put_flag(skb, flags, mask,
4708				IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
4709	    brport_nla_put_flag(skb, flags, mask,
4710				IFLA_BRPORT_LEARNING, BR_LEARNING) ||
4711	    brport_nla_put_flag(skb, flags, mask,
4712				IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
4713	    brport_nla_put_flag(skb, flags, mask,
4714				IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
4715	    brport_nla_put_flag(skb, flags, mask,
4716				IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
4717	    brport_nla_put_flag(skb, flags, mask,
4718				IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
4719	    brport_nla_put_flag(skb, flags, mask,
4720				IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
4721		nla_nest_cancel(skb, protinfo);
4722		goto nla_put_failure;
4723	}
4724
4725	nla_nest_end(skb, protinfo);
4726
4727	nlmsg_end(skb, nlh);
4728	return 0;
4729nla_put_failure:
4730	nlmsg_cancel(skb, nlh);
4731	return err ? err : -EMSGSIZE;
4732}
4733EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
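
/* Hardware-bridging drivers typically wrap this helper from their own
 * ndo_bridge_getlink; a hypothetical sketch (foo_* is illustrative
 * only):
 *
 *	static int foo_bridge_getlink(struct sk_buff *skb, u32 pid,
 *				      u32 seq, struct net_device *dev,
 *				      u32 filter_mask, int nlflags)
 *	{
 *		u16 mode = BRIDGE_MODE_VEB;	// or VEPA, per device state
 *
 *		return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
 *					       0, 0, nlflags, filter_mask,
 *					       NULL);
 *	}
 */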
4734
4735static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
4736				    bool strict_check, u32 *filter_mask,
4737				    struct netlink_ext_ack *extack)
4738{
4739	struct nlattr *tb[IFLA_MAX+1];
4740	int err, i;
4741
4742	if (strict_check) {
4743		struct ifinfomsg *ifm;
4744
4745		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
4746			NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
4747			return -EINVAL;
4748		}
4749
4750		ifm = nlmsg_data(nlh);
4751		if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
4752		    ifm->ifi_change || ifm->ifi_index) {
4753			NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
4754			return -EINVAL;
4755		}
4756
4757		err = nlmsg_parse_deprecated_strict(nlh,
4758						    sizeof(struct ifinfomsg),
4759						    tb, IFLA_MAX, ifla_policy,
4760						    extack);
4761	} else {
4762		err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4763					     tb, IFLA_MAX, ifla_policy,
4764					     extack);
4765	}
4766	if (err < 0)
4767		return err;
4768
4769	/* new attributes should only be added with strict checking */
4770	for (i = 0; i <= IFLA_MAX; ++i) {
4771		if (!tb[i])
4772			continue;
4773
4774		switch (i) {
4775		case IFLA_EXT_MASK:
4776			*filter_mask = nla_get_u32(tb[i]);
4777			break;
4778		default:
4779			if (strict_check) {
4780				NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
4781				return -EINVAL;
4782			}
4783		}
4784	}
4785
4786	return 0;
4787}
4788
4789static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
4790{
4791	const struct nlmsghdr *nlh = cb->nlh;
4792	struct net *net = sock_net(skb->sk);
4793	struct net_device *dev;
4794	int idx = 0;
4795	u32 portid = NETLINK_CB(cb->skb).portid;
4796	u32 seq = nlh->nlmsg_seq;
4797	u32 filter_mask = 0;
4798	int err;
4799
4800	err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
4801				       cb->extack);
4802	if (err < 0 && cb->strict_check)
4803		return err;
4804
4805	rcu_read_lock();
4806	for_each_netdev_rcu(net, dev) {
4807		const struct net_device_ops *ops = dev->netdev_ops;
4808		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4809
4810		if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
4811			if (idx >= cb->args[0]) {
4812				err = br_dev->netdev_ops->ndo_bridge_getlink(
4813						skb, portid, seq, dev,
4814						filter_mask, NLM_F_MULTI);
4815				if (err < 0 && err != -EOPNOTSUPP) {
4816					if (likely(skb->len))
4817						break;
4818
4819					goto out_err;
4820				}
4821			}
4822			idx++;
4823		}
4824
4825		if (ops->ndo_bridge_getlink) {
4826			if (idx >= cb->args[0]) {
4827				err = ops->ndo_bridge_getlink(skb, portid,
4828							      seq, dev,
4829							      filter_mask,
4830							      NLM_F_MULTI);
4831				if (err < 0 && err != -EOPNOTSUPP) {
4832					if (likely(skb->len))
4833						break;
4834
4835					goto out_err;
4836				}
4837			}
4838			idx++;
4839		}
4840	}
4841	err = skb->len;
4842out_err:
4843	rcu_read_unlock();
4844	cb->args[0] = idx;
4845
4846	return err;
4847}
4848
4849static inline size_t bridge_nlmsg_size(void)
4850{
4851	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
4852		+ nla_total_size(IFNAMSIZ)	/* IFLA_IFNAME */
4853		+ nla_total_size(MAX_ADDR_LEN)	/* IFLA_ADDRESS */
4854		+ nla_total_size(sizeof(u32))	/* IFLA_MASTER */
4855		+ nla_total_size(sizeof(u32))	/* IFLA_MTU */
4856		+ nla_total_size(sizeof(u32))	/* IFLA_LINK */
4857		+ nla_total_size(sizeof(u32))	/* IFLA_OPERSTATE */
4858		+ nla_total_size(sizeof(u8))	/* IFLA_PROTINFO */
4859		+ nla_total_size(sizeof(struct nlattr))	/* IFLA_AF_SPEC */
4860		+ nla_total_size(sizeof(u16))	/* IFLA_BRIDGE_FLAGS */
4861		+ nla_total_size(sizeof(u16));	/* IFLA_BRIDGE_MODE */
4862}
4863
4864static int rtnl_bridge_notify(struct net_device *dev)
4865{
4866	struct net *net = dev_net(dev);
4867	struct sk_buff *skb;
4868	int err = -EOPNOTSUPP;
4869
4870	if (!dev->netdev_ops->ndo_bridge_getlink)
4871		return 0;
4872
4873	skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
4874	if (!skb) {
4875		err = -ENOMEM;
4876		goto errout;
4877	}
4878
4879	err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
4880	if (err < 0)
4881		goto errout;
4882
4883	/* Notification info is only filled for bridge ports, not the bridge
4884	 * device itself. Therefore, a zero notification length is valid and
4885	 * should not result in an error.
4886	 */
4887	if (!skb->len)
4888		goto errout;
4889
4890	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
4891	return 0;
4892errout:
4893	WARN_ON(err == -EMSGSIZE);
4894	kfree_skb(skb);
4895	if (err)
4896		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
4897	return err;
4898}
4899
4900static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
4901			       struct netlink_ext_ack *extack)
4902{
4903	struct net *net = sock_net(skb->sk);
4904	struct ifinfomsg *ifm;
4905	struct net_device *dev;
4906	struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
4907	int rem, err = -EOPNOTSUPP;
4908	u16 flags = 0;
4909
4910	if (nlmsg_len(nlh) < sizeof(*ifm))
4911		return -EINVAL;
4912
4913	ifm = nlmsg_data(nlh);
4914	if (ifm->ifi_family != AF_BRIDGE)
4915		return -EPFNOSUPPORT;
4916
4917	dev = __dev_get_by_index(net, ifm->ifi_index);
4918	if (!dev) {
4919		NL_SET_ERR_MSG(extack, "unknown ifindex");
4920		return -ENODEV;
4921	}
4922
4923	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4924	if (br_spec) {
4925		nla_for_each_nested(attr, br_spec, rem) {
4926			if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) {
4927				if (nla_len(attr) < sizeof(flags))
4928					return -EINVAL;
4929
4930				br_flags_attr = attr;
4931				flags = nla_get_u16(attr);
4932			}
4933
4934			if (nla_type(attr) == IFLA_BRIDGE_MODE) {
4935				if (nla_len(attr) < sizeof(u16))
4936					return -EINVAL;
4937			}
4938		}
4939	}
4940
4941	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
4942		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4943
4944		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
4945			err = -EOPNOTSUPP;
4946			goto out;
4947		}
4948
4949		err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
4950							     extack);
4951		if (err)
4952			goto out;
4953
4954		flags &= ~BRIDGE_FLAGS_MASTER;
4955	}
4956
4957	if ((flags & BRIDGE_FLAGS_SELF)) {
4958		if (!dev->netdev_ops->ndo_bridge_setlink)
4959			err = -EOPNOTSUPP;
4960		else
4961			err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
4962								  flags,
4963								  extack);
4964		if (!err) {
4965			flags &= ~BRIDGE_FLAGS_SELF;
4966
4967			/* Generate an event to notify the upper layer of the
4968			 * bridge change
4969			 */
4970			err = rtnl_bridge_notify(dev);
4971		}
4972	}
4973
4974	if (br_flags_attr)
4975		memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
4976out:
4977	return err;
4978}
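
/* A typical trigger is iproute2's "bridge link set dev <port> ...",
 * which sends RTM_SETLINK with ifi_family == AF_BRIDGE; its "master"
 * and "self" keywords map to BRIDGE_FLAGS_MASTER/BRIDGE_FLAGS_SELF in
 * IFLA_BRIDGE_FLAGS and select which ndo_bridge_setlink runs above.
 * rtnl_bridge_dellink() below follows the same pattern for RTM_DELLINK.
 */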
4979
4980static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
4981			       struct netlink_ext_ack *extack)
4982{
4983	struct net *net = sock_net(skb->sk);
4984	struct ifinfomsg *ifm;
4985	struct net_device *dev;
4986	struct nlattr *br_spec, *attr = NULL;
4987	int rem, err = -EOPNOTSUPP;
4988	u16 flags = 0;
4989	bool have_flags = false;
4990
4991	if (nlmsg_len(nlh) < sizeof(*ifm))
4992		return -EINVAL;
4993
4994	ifm = nlmsg_data(nlh);
4995	if (ifm->ifi_family != AF_BRIDGE)
4996		return -EPFNOSUPPORT;
4997
4998	dev = __dev_get_by_index(net, ifm->ifi_index);
4999	if (!dev) {
5000		NL_SET_ERR_MSG(extack, "unknown ifindex");
5001		return -ENODEV;
5002	}
5003
5004	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5005	if (br_spec) {
5006		nla_for_each_nested(attr, br_spec, rem) {
5007			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
5008				if (nla_len(attr) < sizeof(flags))
5009					return -EINVAL;
5010
5011				have_flags = true;
5012				flags = nla_get_u16(attr);
5013				break;
5014			}
5015		}
5016	}
5017
5018	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5019		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5020
5021		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
5022			err = -EOPNOTSUPP;
5023			goto out;
5024		}
5025
5026		err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
5027		if (err)
5028			goto out;
5029
5030		flags &= ~BRIDGE_FLAGS_MASTER;
5031	}
5032
5033	if ((flags & BRIDGE_FLAGS_SELF)) {
5034		if (!dev->netdev_ops->ndo_bridge_dellink)
5035			err = -EOPNOTSUPP;
5036		else
5037			err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
5038								  flags);
5039
5040		if (!err) {
5041			flags &= ~BRIDGE_FLAGS_SELF;
5042
5043			/* Generate an event to notify the upper layer of the
5044			 * bridge change
5045			 */
5046			err = rtnl_bridge_notify(dev);
5047		}
5048	}
5049
5050	if (have_flags)
5051		memcpy(nla_data(attr), &flags, sizeof(flags));
5052out:
5053	return err;
5054}
5055
5056static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
5057{
5058	return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
5059	       (!idxattr || idxattr == attrid);
5060}
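
/* filter_mask is a bitmask of requested top-level stats attributes,
 * bit (attr - 1) standing for attribute attr (IFLA_STATS_FILTER_BIT()
 * in uapi/linux/if_link.h). A non-zero idxattr means a dump is being
 * resumed and only that attribute may continue to be filled.
 */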
5061
5062#define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1)
5063static int rtnl_get_offload_stats_attr_size(int attr_id)
5064{
5065	switch (attr_id) {
5066	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
5067		return sizeof(struct rtnl_link_stats64);
5068	}
5069
5070	return 0;
5071}
5072
5073static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
5074				  int *prividx)
5075{
5076	struct nlattr *attr = NULL;
5077	int attr_id, size;
5078	void *attr_data;
5079	int err;
5080
5081	if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
5082	      dev->netdev_ops->ndo_get_offload_stats))
5083		return -ENODATA;
5084
5085	for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
5086	     attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
5087		if (attr_id < *prividx)
5088			continue;
5089
5090		size = rtnl_get_offload_stats_attr_size(attr_id);
5091		if (!size)
5092			continue;
5093
5094		if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
5095			continue;
5096
5097		attr = nla_reserve_64bit(skb, attr_id, size,
5098					 IFLA_OFFLOAD_XSTATS_UNSPEC);
5099		if (!attr)
5100			goto nla_put_failure;
5101
5102		attr_data = nla_data(attr);
5103		memset(attr_data, 0, size);
5104		err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
5105							     attr_data);
5106		if (err)
5107			goto get_offload_stats_failure;
5108	}
5109
5110	if (!attr)
5111		return -ENODATA;
5112
5113	*prividx = 0;
5114	return 0;
5115
5116nla_put_failure:
5117	err = -EMSGSIZE;
5118get_offload_stats_failure:
5119	*prividx = attr_id;
5120	return err;
5121}
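
/* On failure the offending attr_id is kept in *prividx so that the
 * next pass over this device (e.g. once the dump continues in a fresh
 * skb) resumes at the attribute that did not fit rather than
 * re-emitting those already sent.
 */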
5122
5123static int rtnl_get_offload_stats_size(const struct net_device *dev)
5124{
5125	int nla_size = 0;
5126	int attr_id;
5127	int size;
5128
5129	if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
5130	      dev->netdev_ops->ndo_get_offload_stats))
5131		return 0;
5132
5133	for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
5134	     attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
5135		if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
5136			continue;
5137		size = rtnl_get_offload_stats_attr_size(attr_id);
5138		nla_size += nla_total_size_64bit(size);
5139	}
5140
5141	if (nla_size != 0)
5142		nla_size += nla_total_size(0);
5143
5144	return nla_size;
5145}
5146
5147static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
5148			       int type, u32 pid, u32 seq, u32 change,
5149			       unsigned int flags, unsigned int filter_mask,
5150			       int *idxattr, int *prividx)
5151{
5152	struct if_stats_msg *ifsm;
5153	struct nlmsghdr *nlh;
5154	struct nlattr *attr;
5155	int s_prividx = *prividx;
5156	int err;
5157
5158	ASSERT_RTNL();
5159
5160	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
5161	if (!nlh)
5162		return -EMSGSIZE;
5163
5164	ifsm = nlmsg_data(nlh);
5165	ifsm->family = PF_UNSPEC;
5166	ifsm->pad1 = 0;
5167	ifsm->pad2 = 0;
5168	ifsm->ifindex = dev->ifindex;
5169	ifsm->filter_mask = filter_mask;
5170
5171	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
5172		struct rtnl_link_stats64 *sp;
5173
5174		attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
5175					 sizeof(struct rtnl_link_stats64),
5176					 IFLA_STATS_UNSPEC);
5177		if (!attr)
5178			goto nla_put_failure;
5179
5180		sp = nla_data(attr);
5181		dev_get_stats(dev, sp);
5182	}
5183
5184	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
5185		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5186
5187		if (ops && ops->fill_linkxstats) {
5188			*idxattr = IFLA_STATS_LINK_XSTATS;
5189			attr = nla_nest_start_noflag(skb,
5190						     IFLA_STATS_LINK_XSTATS);
5191			if (!attr)
5192				goto nla_put_failure;
5193
5194			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5195			nla_nest_end(skb, attr);
5196			if (err)
5197				goto nla_put_failure;
5198			*idxattr = 0;
5199		}
5200	}
5201
5202	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
5203			     *idxattr)) {
5204		const struct rtnl_link_ops *ops = NULL;
5205		const struct net_device *master;
5206
5207		master = netdev_master_upper_dev_get(dev);
5208		if (master)
5209			ops = master->rtnl_link_ops;
5210		if (ops && ops->fill_linkxstats) {
5211			*idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
5212			attr = nla_nest_start_noflag(skb,
5213						     IFLA_STATS_LINK_XSTATS_SLAVE);
5214			if (!attr)
5215				goto nla_put_failure;
5216
5217			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5218			nla_nest_end(skb, attr);
5219			if (err)
5220				goto nla_put_failure;
5221			*idxattr = 0;
5222		}
5223	}
5224
5225	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
5226			     *idxattr)) {
5227		*idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
5228		attr = nla_nest_start_noflag(skb,
5229					     IFLA_STATS_LINK_OFFLOAD_XSTATS);
5230		if (!attr)
5231			goto nla_put_failure;
5232
5233		err = rtnl_get_offload_stats(skb, dev, prividx);
5234		if (err == -ENODATA)
5235			nla_nest_cancel(skb, attr);
5236		else
5237			nla_nest_end(skb, attr);
5238
5239		if (err && err != -ENODATA)
5240			goto nla_put_failure;
5241		*idxattr = 0;
5242	}
5243
5244	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
5245		struct rtnl_af_ops *af_ops;
5246
5247		*idxattr = IFLA_STATS_AF_SPEC;
5248		attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
5249		if (!attr)
5250			goto nla_put_failure;
5251
5252		rcu_read_lock();
5253		list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5254			if (af_ops->fill_stats_af) {
5255				struct nlattr *af;
5256				int err;
5257
5258				af = nla_nest_start_noflag(skb,
5259							   af_ops->family);
5260				if (!af) {
5261					rcu_read_unlock();
5262					goto nla_put_failure;
5263				}
5264				err = af_ops->fill_stats_af(skb, dev);
5265
5266				if (err == -ENODATA) {
5267					nla_nest_cancel(skb, af);
5268				} else if (err < 0) {
5269					rcu_read_unlock();
5270					goto nla_put_failure;
5271				}
5272
5273				nla_nest_end(skb, af);
5274			}
5275		}
5276		rcu_read_unlock();
5277
5278		nla_nest_end(skb, attr);
5279
5280		*idxattr = 0;
5281	}
5282
5283	nlmsg_end(skb, nlh);
5284
5285	return 0;
5286
5287nla_put_failure:
5288	/* not a multi message, or no progress made, means a real error */
5289	if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
5290		nlmsg_cancel(skb, nlh);
5291	else
5292		nlmsg_end(skb, nlh);
5293
5294	return -EMSGSIZE;
5295}
5296
5297static size_t if_nlmsg_stats_size(const struct net_device *dev,
5298				  u32 filter_mask)
5299{
5300	size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
5301
5302	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
5303		size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
5304
5305	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
5306		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5307		int attr = IFLA_STATS_LINK_XSTATS;
5308
5309		if (ops && ops->get_linkxstats_size) {
5310			size += nla_total_size(ops->get_linkxstats_size(dev,
5311									attr));
5312			/* for IFLA_STATS_LINK_XSTATS */
5313			size += nla_total_size(0);
5314		}
5315	}
5316
5317	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
5318		struct net_device *_dev = (struct net_device *)dev;
5319		const struct rtnl_link_ops *ops = NULL;
5320		const struct net_device *master;
5321
5322		/* netdev_master_upper_dev_get can't take const */
5323		master = netdev_master_upper_dev_get(_dev);
5324		if (master)
5325			ops = master->rtnl_link_ops;
5326		if (ops && ops->get_linkxstats_size) {
5327			int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
5328
5329			size += nla_total_size(ops->get_linkxstats_size(dev,
5330									attr));
5331			/* for IFLA_STATS_LINK_XSTATS_SLAVE */
5332			size += nla_total_size(0);
5333		}
5334	}
5335
5336	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
5337		size += rtnl_get_offload_stats_size(dev);
5338
5339	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
5340		struct rtnl_af_ops *af_ops;
5341
5342		/* for IFLA_STATS_AF_SPEC */
5343		size += nla_total_size(0);
5344
5345		rcu_read_lock();
5346		list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5347			if (af_ops->get_stats_af_size) {
5348				size += nla_total_size(
5349					af_ops->get_stats_af_size(dev));
5350
5351				/* for AF_* */
5352				size += nla_total_size(0);
5353			}
5354		}
5355		rcu_read_unlock();
5356	}
5357
5358	return size;
5359}
5360
5361static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
5362				bool is_dump, struct netlink_ext_ack *extack)
5363{
5364	struct if_stats_msg *ifsm;
5365
5366	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
5367		NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
5368		return -EINVAL;
5369	}
5370
5371	if (!strict_check)
5372		return 0;
5373
5374	ifsm = nlmsg_data(nlh);
5375
5376	/* only requests using strict checks can pass data to influence
5377	 * the dump. The legacy exception is filter_mask.
5378	 */
5379	if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
5380		NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
5381		return -EINVAL;
5382	}
5383	if (nlmsg_attrlen(nlh, sizeof(*ifsm))) {
5384		NL_SET_ERR_MSG(extack, "Invalid attributes after stats header");
5385		return -EINVAL;
5386	}
5387	if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
5388		NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
5389		return -EINVAL;
5390	}
5391
5392	return 0;
5393}
5394
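/* A minimal RTM_GETSTATS request as userspace might build it (a sketch
 * only; "ifindex" is the caller's target device, error handling and
 * the sendmsg() plumbing omitted):
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct if_stats_msg ifsm;
 *	} req = {
 *		.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct if_stats_msg)),
 *		.nlh.nlmsg_type = RTM_GETSTATS,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST,
 *		.ifsm.ifindex = ifindex,
 *		.ifsm.filter_mask =
 *			IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64),
 *	};
 */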
5395static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
5396			  struct netlink_ext_ack *extack)
5397{
5398	struct net *net = sock_net(skb->sk);
5399	struct net_device *dev = NULL;
5400	int idxattr = 0, prividx = 0;
5401	struct if_stats_msg *ifsm;
5402	struct sk_buff *nskb;
5403	u32 filter_mask;
5404	int err;
5405
5406	err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
5407				   false, extack);
5408	if (err)
5409		return err;
5410
5411	ifsm = nlmsg_data(nlh);
5412	if (ifsm->ifindex > 0)
5413		dev = __dev_get_by_index(net, ifsm->ifindex);
5414	else
5415		return -EINVAL;
5416
5417	if (!dev)
5418		return -ENODEV;
5419
5420	filter_mask = ifsm->filter_mask;
5421	if (!filter_mask)
5422		return -EINVAL;
5423
5424	nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
5425	if (!nskb)
5426		return -ENOBUFS;
5427
5428	err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
5429				  NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
5430				  0, filter_mask, &idxattr, &prividx);
5431	if (err < 0) {
5432		/* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
5433		WARN_ON(err == -EMSGSIZE);
5434		kfree_skb(nskb);
5435	} else {
5436		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
5437	}
5438
5439	return err;
5440}
5441
5442static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
5443{
5444	struct netlink_ext_ack *extack = cb->extack;
5445	int h, s_h, err, s_idx, s_idxattr, s_prividx;
5446	struct net *net = sock_net(skb->sk);
5447	unsigned int flags = NLM_F_MULTI;
5448	struct if_stats_msg *ifsm;
5449	struct hlist_head *head;
5450	struct net_device *dev;
5451	u32 filter_mask = 0;
5452	int idx = 0;
5453
5454	s_h = cb->args[0];
5455	s_idx = cb->args[1];
5456	s_idxattr = cb->args[2];
5457	s_prividx = cb->args[3];
5458
5459	cb->seq = net->dev_base_seq;
5460
5461	err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
5462	if (err)
5463		return err;
5464
5465	ifsm = nlmsg_data(cb->nlh);
5466	filter_mask = ifsm->filter_mask;
5467	if (!filter_mask) {
5468		NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
5469		return -EINVAL;
5470	}
5471
5472	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5473		idx = 0;
5474		head = &net->dev_index_head[h];
5475		hlist_for_each_entry(dev, head, index_hlist) {
5476			if (idx < s_idx)
5477				goto cont;
5478			err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
5479						  NETLINK_CB(cb->skb).portid,
5480						  cb->nlh->nlmsg_seq, 0,
5481						  flags, filter_mask,
5482						  &s_idxattr, &s_prividx);
5483			/* If we ran out of room on the first message,
5484			 * we're in trouble
5485			 */
5486			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
5487
5488			if (err < 0)
5489				goto out;
5490			s_prividx = 0;
5491			s_idxattr = 0;
5492			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
5493cont:
5494			idx++;
5495		}
5496	}
5497out:
5498	cb->args[3] = s_prividx;
5499	cb->args[2] = s_idxattr;
5500	cb->args[1] = idx;
5501	cb->args[0] = h;
5502
5503	return skb->len;
5504}
5505
5506/* Process one rtnetlink message. */
5507
5508static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
5509			     struct netlink_ext_ack *extack)
5510{
5511	struct net *net = sock_net(skb->sk);
5512	struct rtnl_link *link;
5513	struct module *owner;
5514	int err = -EOPNOTSUPP;
5515	rtnl_doit_func doit;
5516	unsigned int flags;
5517	int kind;
5518	int family;
5519	int type;
5520
5521	type = nlh->nlmsg_type;
5522	if (type > RTM_MAX)
5523		return -EOPNOTSUPP;
5524
5525	type -= RTM_BASE;
5526
5527	/* All messages must be at least 1 byte long */
5528	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
5529		return 0;
5530
5531	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
5532	kind = type&3;
5533
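	/* rtnetlink message types come in blocks of four (NEW, DEL, GET,
	 * SET), so the low two bits of the offset from RTM_BASE give the
	 * kind; kind == 2 is a GET, the only kind unprivileged callers
	 * may issue.
	 */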
5534	if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
5535		return -EPERM;
5536
5537	rcu_read_lock();
5538	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
5539		struct sock *rtnl;
5540		rtnl_dumpit_func dumpit;
5541		u32 min_dump_alloc = 0;
5542
5543		link = rtnl_get_link(family, type);
5544		if (!link || !link->dumpit) {
5545			family = PF_UNSPEC;
5546			link = rtnl_get_link(family, type);
5547			if (!link || !link->dumpit)
5548				goto err_unlock;
5549		}
5550		owner = link->owner;
5551		dumpit = link->dumpit;
5552
5553		if (type == RTM_GETLINK - RTM_BASE)
5554			min_dump_alloc = rtnl_calcit(skb, nlh);
5555
5556		err = 0;
5557		/* need to do this before rcu_read_unlock() */
5558		if (!try_module_get(owner))
5559			err = -EPROTONOSUPPORT;
5560
5561		rcu_read_unlock();
5562
5563		rtnl = net->rtnl;
5564		if (err == 0) {
5565			struct netlink_dump_control c = {
5566				.dump		= dumpit,
5567				.min_dump_alloc	= min_dump_alloc,
5568				.module		= owner,
5569			};
5570			err = netlink_dump_start(rtnl, skb, nlh, &c);
5571			/* netlink_dump_start() will keep a reference on
5572			 * the module if the dump is still in progress.
5573			 */
5574			module_put(owner);
5575		}
5576		return err;
5577	}
5578
5579	link = rtnl_get_link(family, type);
5580	if (!link || !link->doit) {
5581		family = PF_UNSPEC;
5582		link = rtnl_get_link(PF_UNSPEC, type);
5583		if (!link || !link->doit)
5584			goto out_unlock;
5585	}
5586
5587	owner = link->owner;
5588	if (!try_module_get(owner)) {
5589		err = -EPROTONOSUPPORT;
5590		goto out_unlock;
5591	}
5592
5593	flags = link->flags;
5594	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
5595		doit = link->doit;
5596		rcu_read_unlock();
5597		if (doit)
5598			err = doit(skb, nlh, extack);
5599		module_put(owner);
5600		return err;
5601	}
5602	rcu_read_unlock();
5603
5604	rtnl_lock();
5605	link = rtnl_get_link(family, type);
5606	if (link && link->doit)
5607		err = link->doit(skb, nlh, extack);
5608	rtnl_unlock();
5609
5610	module_put(owner);
5611
5612	return err;
5613
5614out_unlock:
5615	rcu_read_unlock();
5616	return err;
5617
5618err_unlock:
5619	rcu_read_unlock();
5620	return -EOPNOTSUPP;
5621}
5622
5623static void rtnetlink_rcv(struct sk_buff *skb)
5624{
5625	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
5626}
5627
5628static int rtnetlink_bind(struct net *net, int group)
5629{
5630	switch (group) {
5631	case RTNLGRP_IPV4_MROUTE_R:
5632	case RTNLGRP_IPV6_MROUTE_R:
5633		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
5634			return -EPERM;
5635		break;
5636	}
5637	return 0;
5638}
5639
5640static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
5641{
5642	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5643
5644	switch (event) {
5645	case NETDEV_REBOOT:
5646	case NETDEV_CHANGEMTU:
5647	case NETDEV_CHANGEADDR:
5648	case NETDEV_CHANGENAME:
5649	case NETDEV_FEAT_CHANGE:
5650	case NETDEV_BONDING_FAILOVER:
5651	case NETDEV_POST_TYPE_CHANGE:
5652	case NETDEV_NOTIFY_PEERS:
5653	case NETDEV_CHANGEUPPER:
5654	case NETDEV_RESEND_IGMP:
5655	case NETDEV_CHANGEINFODATA:
5656	case NETDEV_CHANGELOWERSTATE:
5657	case NETDEV_CHANGE_TX_QUEUE_LEN:
5658		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
5659				   GFP_KERNEL, NULL, 0);
5660		break;
5661	default:
5662		break;
5663	}
5664	return NOTIFY_DONE;
5665}
5666
5667static struct notifier_block rtnetlink_dev_notifier = {
5668	.notifier_call	= rtnetlink_event,
5669};
5670
5671
5672static int __net_init rtnetlink_net_init(struct net *net)
5673{
5674	struct sock *sk;
5675	struct netlink_kernel_cfg cfg = {
5676		.groups		= RTNLGRP_MAX,
5677		.input		= rtnetlink_rcv,
5678		.cb_mutex	= &rtnl_mutex,
5679		.flags		= NL_CFG_F_NONROOT_RECV,
5680		.bind		= rtnetlink_bind,
5681	};
5682
5683	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
5684	if (!sk)
5685		return -ENOMEM;
5686	net->rtnl = sk;
5687	return 0;
5688}
5689
5690static void __net_exit rtnetlink_net_exit(struct net *net)
5691{
5692	netlink_kernel_release(net->rtnl);
5693	net->rtnl = NULL;
5694}
5695
5696static struct pernet_operations rtnetlink_net_ops = {
5697	.init = rtnetlink_net_init,
5698	.exit = rtnetlink_net_exit,
5699};
5700
5701void __init rtnetlink_init(void)
5702{
5703	if (register_pernet_subsys(&rtnetlink_net_ops))
5704		panic("rtnetlink_init: cannot initialize rtnetlink\n");
5705
5706	register_netdevice_notifier(&rtnetlink_dev_notifier);
5707
5708	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
5709		      rtnl_dump_ifinfo, 0);
5710	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
5711	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
5712	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
5713
5714	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
5715	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
5716	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);
5717
5718	rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
5719	rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);
5720
5721	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
5722	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0);
5723	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);
5724
5725	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
5726	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
5727	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);
5728
5729	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
5730		      0);
5731}
5732