// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Forwarding database
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/times.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/unaligned.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include <trace/events/bridge.h>
#include "br_private.h"

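/* FDB entries are keyed by {MAC address, VLAN id} (struct
 * net_bridge_fdb_key) and live both in the per-bridge rhashtable used
 * for lookups and on br->fdb_list, which is used for walking all
 * entries.
 */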
27static const struct rhashtable_params br_fdb_rht_params = {
28	.head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
29	.key_offset = offsetof(struct net_bridge_fdb_entry, key),
30	.key_len = sizeof(struct net_bridge_fdb_key),
31	.automatic_shrinking = true,
32};
33
34static struct kmem_cache *br_fdb_cache __read_mostly;
35
36int __init br_fdb_init(void)
37{
38	br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
39					 sizeof(struct net_bridge_fdb_entry),
40					 0,
41					 SLAB_HWCACHE_ALIGN, NULL);
42	if (!br_fdb_cache)
43		return -ENOMEM;
44
45	return 0;
46}
47
48void br_fdb_fini(void)
49{
50	kmem_cache_destroy(br_fdb_cache);
51}
52
53int br_fdb_hash_init(struct net_bridge *br)
54{
55	return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
56}
57
58void br_fdb_hash_fini(struct net_bridge *br)
59{
60	rhashtable_destroy(&br->fdb_hash_tbl);
61}
62
/* If a topology change is in progress, age entries out after
 * forward_delay (default 15 sec); otherwise keep them for the full
 * ageing_time (default 5 minutes).
 */
66static inline unsigned long hold_time(const struct net_bridge *br)
67{
68	return br->topology_change ? br->forward_delay : br->ageing_time;
69}
70
71static inline int has_expired(const struct net_bridge *br,
72				  const struct net_bridge_fdb_entry *fdb)
73{
74	return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
75	       !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
76	       time_before_eq(fdb->updated + hold_time(br), jiffies);
77}
78
79static void fdb_rcu_free(struct rcu_head *head)
80{
81	struct net_bridge_fdb_entry *ent
82		= container_of(head, struct net_bridge_fdb_entry, rcu);
83	kmem_cache_free(br_fdb_cache, ent);
84}
85
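/* Map internal FDB flags to the neighbour (NUD_*) state reported via
 * netlink: local entries are NUD_PERMANENT, other static entries are
 * NUD_NOARP, expired dynamic entries are NUD_STALE, everything else is
 * NUD_REACHABLE.
 */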
86static int fdb_to_nud(const struct net_bridge *br,
87		      const struct net_bridge_fdb_entry *fdb)
88{
89	if (test_bit(BR_FDB_LOCAL, &fdb->flags))
90		return NUD_PERMANENT;
91	else if (test_bit(BR_FDB_STATIC, &fdb->flags))
92		return NUD_NOARP;
93	else if (has_expired(br, fdb))
94		return NUD_STALE;
95	else
96		return NUD_REACHABLE;
97}
98
99static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
100			 const struct net_bridge_fdb_entry *fdb,
101			 u32 portid, u32 seq, int type, unsigned int flags)
102{
103	const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
104	unsigned long now = jiffies;
105	struct nda_cacheinfo ci;
106	struct nlmsghdr *nlh;
107	struct ndmsg *ndm;
108	u32 ext_flags = 0;
109
110	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
111	if (nlh == NULL)
112		return -EMSGSIZE;
113
114	ndm = nlmsg_data(nlh);
115	ndm->ndm_family	 = AF_BRIDGE;
116	ndm->ndm_pad1    = 0;
117	ndm->ndm_pad2    = 0;
118	ndm->ndm_flags	 = 0;
119	ndm->ndm_type	 = 0;
120	ndm->ndm_ifindex = dst ? dst->dev->ifindex : br->dev->ifindex;
121	ndm->ndm_state   = fdb_to_nud(br, fdb);
122
123	if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
124		ndm->ndm_flags |= NTF_OFFLOADED;
125	if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
126		ndm->ndm_flags |= NTF_EXT_LEARNED;
127	if (test_bit(BR_FDB_STICKY, &fdb->flags))
128		ndm->ndm_flags |= NTF_STICKY;
129	if (test_bit(BR_FDB_LOCKED, &fdb->flags))
130		ext_flags |= NTF_EXT_LOCKED;
131
132	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
133		goto nla_put_failure;
134	if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
135		goto nla_put_failure;
136	if (nla_put_u32(skb, NDA_FLAGS_EXT, ext_flags))
137		goto nla_put_failure;
138
139	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
140	ci.ndm_confirmed = 0;
141	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
142	ci.ndm_refcnt	 = 0;
143	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
144		goto nla_put_failure;
145
146	if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
147					&fdb->key.vlan_id))
148		goto nla_put_failure;
149
150	if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) {
151		struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS);
152		u8 notify_bits = FDB_NOTIFY_BIT;
153
154		if (!nest)
155			goto nla_put_failure;
156		if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
157			notify_bits |= FDB_NOTIFY_INACTIVE_BIT;
158
159		if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) {
160			nla_nest_cancel(skb, nest);
161			goto nla_put_failure;
162		}
163
164		nla_nest_end(skb, nest);
165	}
166
167	nlmsg_end(skb, nlh);
168	return 0;
169
170nla_put_failure:
171	nlmsg_cancel(skb, nlh);
172	return -EMSGSIZE;
173}
174
175static inline size_t fdb_nlmsg_size(void)
176{
177	return NLMSG_ALIGN(sizeof(struct ndmsg))
178		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
179		+ nla_total_size(sizeof(u32)) /* NDA_MASTER */
180		+ nla_total_size(sizeof(u32)) /* NDA_FLAGS_EXT */
181		+ nla_total_size(sizeof(u16)) /* NDA_VLAN */
182		+ nla_total_size(sizeof(struct nda_cacheinfo))
183		+ nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
184		+ nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */
185}
186
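/* Send an RTM_NEWNEIGH/RTM_DELNEIGH notification to the RTNLGRP_NEIGH
 * netlink group and, when swdev_notify is set, also notify switchdev
 * of the change.
 */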
187static void fdb_notify(struct net_bridge *br,
188		       const struct net_bridge_fdb_entry *fdb, int type,
189		       bool swdev_notify)
190{
191	struct net *net = dev_net(br->dev);
192	struct sk_buff *skb;
193	int err = -ENOBUFS;
194
195	if (swdev_notify)
196		br_switchdev_fdb_notify(br, fdb, type);
197
198	skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
199	if (skb == NULL)
200		goto errout;
201
202	err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
203	if (err < 0) {
204		/* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
205		WARN_ON(err == -EMSGSIZE);
206		kfree_skb(skb);
207		goto errout;
208	}
209	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
210	return;
211errout:
212	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
213}
214
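/* Hash table lookup keyed on {addr, vid}; must be called with the RCU
 * read lock held (see the WARN_ON_ONCE below).
 */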
215static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
216						 const unsigned char *addr,
217						 __u16 vid)
218{
219	struct net_bridge_fdb_key key;
220
221	WARN_ON_ONCE(!rcu_read_lock_held());
222
223	key.vlan_id = vid;
224	memcpy(key.addr.addr, addr, sizeof(key.addr.addr));
225
226	return rhashtable_lookup(tbl, &key, br_fdb_rht_params);
227}
228
229/* requires bridge hash_lock */
230static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
231						const unsigned char *addr,
232						__u16 vid)
233{
234	struct net_bridge_fdb_entry *fdb;
235
236	lockdep_assert_held_once(&br->hash_lock);
237
238	rcu_read_lock();
239	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
240	rcu_read_unlock();
241
242	return fdb;
243}
244
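/* Return the bridge port netdevice that currently owns @addr in @vid on
 * bridge @br_dev, or NULL if the address is unknown or belongs to the
 * bridge itself.  Must be called with RTNL held.
 *
 * Usage sketch (illustrative only, the variable names are made up):
 *
 *	rtnl_lock();
 *	lower_dev = br_fdb_find_port(br_dev, mac, vid);
 *	if (lower_dev)
 *		... the address was learned on lower_dev ...
 *	rtnl_unlock();
 */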
245struct net_device *br_fdb_find_port(const struct net_device *br_dev,
246				    const unsigned char *addr,
247				    __u16 vid)
248{
249	struct net_bridge_fdb_entry *f;
250	struct net_device *dev = NULL;
251	struct net_bridge *br;
252
253	ASSERT_RTNL();
254
255	if (!netif_is_bridge_master(br_dev))
256		return NULL;
257
258	br = netdev_priv(br_dev);
259	rcu_read_lock();
260	f = br_fdb_find_rcu(br, addr, vid);
261	if (f && f->dst)
262		dev = f->dst->dev;
263	rcu_read_unlock();
264
265	return dev;
266}
267EXPORT_SYMBOL_GPL(br_fdb_find_port);
268
269struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
270					     const unsigned char *addr,
271					     __u16 vid)
272{
273	return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
274}
275
276/* When a static FDB entry is added, the mac address from the entry is
277 * added to the bridge private HW address list and all required ports
278 * are then updated with the new information.
279 * Called under RTNL.
280 */
281static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
282{
283	int err;
284	struct net_bridge_port *p;
285
286	ASSERT_RTNL();
287
288	list_for_each_entry(p, &br->port_list, list) {
289		if (!br_promisc_port(p)) {
290			err = dev_uc_add(p->dev, addr);
291			if (err)
292				goto undo;
293		}
294	}
295
296	return;
297undo:
298	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
299		if (!br_promisc_port(p))
300			dev_uc_del(p->dev, addr);
301	}
302}
303
/* When a static FDB entry is deleted, the HW address from that entry is
 * also removed from the bridge private HW address list, and all
 * non-promiscuous ports are updated accordingly.
 * Called under RTNL.
 */
309static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
310{
311	struct net_bridge_port *p;
312
313	ASSERT_RTNL();
314
315	list_for_each_entry(p, &br->port_list, list) {
316		if (!br_promisc_port(p))
317			dev_uc_del(p->dev, addr);
318	}
319}
320
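/* Unlink an entry from the rhashtable and fdb_list, send a delete
 * notification and free it after an RCU grace period.  Static entries
 * also have their address removed from the ports' unicast filters.
 * Caller must hold br->hash_lock.
 */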
321static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
322		       bool swdev_notify)
323{
324	trace_fdb_delete(br, f);
325
326	if (test_bit(BR_FDB_STATIC, &f->flags))
327		fdb_del_hw_addr(br, f->key.addr.addr);
328
329	hlist_del_init_rcu(&f->fdb_node);
330	rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
331			       br_fdb_rht_params);
332	fdb_notify(br, f, RTM_DELNEIGH, swdev_notify);
333	call_rcu(&f->rcu, fdb_rcu_free);
334}
335
336/* Delete a local entry if no other port had the same address. */
337static void fdb_delete_local(struct net_bridge *br,
338			     const struct net_bridge_port *p,
339			     struct net_bridge_fdb_entry *f)
340{
341	const unsigned char *addr = f->key.addr.addr;
342	struct net_bridge_vlan_group *vg;
343	const struct net_bridge_vlan *v;
344	struct net_bridge_port *op;
345	u16 vid = f->key.vlan_id;
346
347	/* Maybe another port has same hw addr? */
348	list_for_each_entry(op, &br->port_list, list) {
349		vg = nbp_vlan_group(op);
350		if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
351		    (!vid || br_vlan_find(vg, vid))) {
352			f->dst = op;
353			clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
354			return;
355		}
356	}
357
358	vg = br_vlan_group(br);
359	v = br_vlan_find(vg, vid);
360	/* Maybe bridge device has same hw addr? */
361	if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
362	    (!vid || (v && br_vlan_should_use(v)))) {
363		f->dst = NULL;
364		clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
365		return;
366	}
367
368	fdb_delete(br, f, true);
369}
370
371void br_fdb_find_delete_local(struct net_bridge *br,
372			      const struct net_bridge_port *p,
373			      const unsigned char *addr, u16 vid)
374{
375	struct net_bridge_fdb_entry *f;
376
377	spin_lock_bh(&br->hash_lock);
378	f = br_fdb_find(br, addr, vid);
379	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
380	    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p)
381		fdb_delete_local(br, p, f);
382	spin_unlock_bh(&br->hash_lock);
383}
384
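/* Allocate a new FDB entry and insert it into the hash table and
 * fdb_list.  Returns NULL on allocation failure or when an entry with
 * the same {addr, vid} key already exists.  Caller must hold
 * br->hash_lock.
 */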
385static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
386					       struct net_bridge_port *source,
387					       const unsigned char *addr,
388					       __u16 vid,
389					       unsigned long flags)
390{
391	struct net_bridge_fdb_entry *fdb;
392	int err;
393
394	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
395	if (!fdb)
396		return NULL;
397
398	memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
399	WRITE_ONCE(fdb->dst, source);
400	fdb->key.vlan_id = vid;
401	fdb->flags = flags;
402	fdb->updated = fdb->used = jiffies;
403	err = rhashtable_lookup_insert_fast(&br->fdb_hash_tbl, &fdb->rhnode,
404					    br_fdb_rht_params);
405	if (err) {
406		kmem_cache_free(br_fdb_cache, fdb);
407		return NULL;
408	}
409
410	hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);
411
412	return fdb;
413}
414
415static int fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
416			 const unsigned char *addr, u16 vid)
417{
418	struct net_bridge_fdb_entry *fdb;
419
420	if (!is_valid_ether_addr(addr))
421		return -EINVAL;
422
423	fdb = br_fdb_find(br, addr, vid);
424	if (fdb) {
425		/* it is okay to have multiple ports with same
426		 * address, just use the first one.
427		 */
428		if (test_bit(BR_FDB_LOCAL, &fdb->flags))
429			return 0;
430		br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
431			source ? source->dev->name : br->dev->name, addr, vid);
432		fdb_delete(br, fdb, true);
433	}
434
435	fdb = fdb_create(br, source, addr, vid,
436			 BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
437	if (!fdb)
438		return -ENOMEM;
439
440	fdb_add_hw_addr(br, addr);
441	fdb_notify(br, fdb, RTM_NEWNEIGH, true);
442	return 0;
443}
444
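/* Called when a port's MAC address changes: drop the old local entries
 * for the port and insert fresh ones for the new address, both for the
 * default VLAN and for every VLAN configured on the port.
 */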
445void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
446{
447	struct net_bridge_vlan_group *vg;
448	struct net_bridge_fdb_entry *f;
449	struct net_bridge *br = p->br;
450	struct net_bridge_vlan *v;
451
452	spin_lock_bh(&br->hash_lock);
453	vg = nbp_vlan_group(p);
454	hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
455		if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) &&
456		    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) {
457			/* delete old one */
458			fdb_delete_local(br, p, f);
459
460			/* if this port has no vlan information
461			 * configured, we can safely be done at
462			 * this point.
463			 */
464			if (!vg || !vg->num_vlans)
465				goto insert;
466		}
467	}
468
469insert:
	/* insert new address; may fail if the address is invalid or a duplicate */
471	fdb_add_local(br, p, newaddr, 0);
472
473	if (!vg || !vg->num_vlans)
474		goto done;
475
476	/* Now add entries for every VLAN configured on the port.
477	 * This function runs under RTNL so the bitmap will not change
478	 * from under us.
479	 */
480	list_for_each_entry(v, &vg->vlan_list, vlist)
481		fdb_add_local(br, p, newaddr, v->vid);
482
483done:
484	spin_unlock_bh(&br->hash_lock);
485}
486
487void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
488{
489	struct net_bridge_vlan_group *vg;
490	struct net_bridge_fdb_entry *f;
491	struct net_bridge_vlan *v;
492
493	spin_lock_bh(&br->hash_lock);
494
	/* If the old entry was not associated with any port, delete it. */
496	f = br_fdb_find(br, br->dev->dev_addr, 0);
497	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
498	    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
499		fdb_delete_local(br, NULL, f);
500
501	fdb_add_local(br, NULL, newaddr, 0);
502	vg = br_vlan_group(br);
503	if (!vg || !vg->num_vlans)
504		goto out;
505	/* Now remove and add entries for every VLAN configured on the
506	 * bridge.  This function runs under RTNL so the bitmap will not
507	 * change from under us.
508	 */
509	list_for_each_entry(v, &vg->vlan_list, vlist) {
510		if (!br_vlan_should_use(v))
511			continue;
512		f = br_fdb_find(br, br->dev->dev_addr, v->vid);
513		if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
514		    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
515			fdb_delete_local(br, NULL, f);
516		fdb_add_local(br, NULL, newaddr, v->vid);
517	}
518out:
519	spin_unlock_bh(&br->hash_lock);
520}
521
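/* Garbage collection work item: walk the FDB under RCU, expire dynamic
 * entries whose hold time has passed, emit inactivity notifications for
 * entries with activity tracking enabled, and reschedule itself based
 * on the nearest pending timeout (at least 10ms away).
 */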
522void br_fdb_cleanup(struct work_struct *work)
523{
524	struct net_bridge *br = container_of(work, struct net_bridge,
525					     gc_work.work);
526	struct net_bridge_fdb_entry *f = NULL;
527	unsigned long delay = hold_time(br);
528	unsigned long work_delay = delay;
529	unsigned long now = jiffies;
530
	/* This part is tricky: to avoid blocking learning (and consequently
	 * forwarding), we rely on RCU with delayed freeing of deleted
	 * entries, which lets us keep traversing the list.
	 */
535	rcu_read_lock();
536	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
537		unsigned long this_timer = f->updated + delay;
538
539		if (test_bit(BR_FDB_STATIC, &f->flags) ||
540		    test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) {
541			if (test_bit(BR_FDB_NOTIFY, &f->flags)) {
542				if (time_after(this_timer, now))
543					work_delay = min(work_delay,
544							 this_timer - now);
545				else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE,
546							   &f->flags))
547					fdb_notify(br, f, RTM_NEWNEIGH, false);
548			}
549			continue;
550		}
551
552		if (time_after(this_timer, now)) {
553			work_delay = min(work_delay, this_timer - now);
554		} else {
555			spin_lock_bh(&br->hash_lock);
556			if (!hlist_unhashed(&f->fdb_node))
557				fdb_delete(br, f, true);
558			spin_unlock_bh(&br->hash_lock);
559		}
560	}
561	rcu_read_unlock();
562
563	/* Cleanup minimum 10 milliseconds apart */
564	work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
565	mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
566}
567
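/* Return true if @f matches the flush descriptor; the VLAN, port
 * ifindex and flags/flags_mask constraints are each optional and only
 * checked when set in @desc.
 */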
568static bool __fdb_flush_matches(const struct net_bridge *br,
569				const struct net_bridge_fdb_entry *f,
570				const struct net_bridge_fdb_flush_desc *desc)
571{
572	const struct net_bridge_port *dst = READ_ONCE(f->dst);
573	int port_ifidx = dst ? dst->dev->ifindex : br->dev->ifindex;
574
575	if (desc->vlan_id && desc->vlan_id != f->key.vlan_id)
576		return false;
577	if (desc->port_ifindex && desc->port_ifindex != port_ifidx)
578		return false;
579	if (desc->flags_mask && (f->flags & desc->flags_mask) != desc->flags)
580		return false;
581
582	return true;
583}
584
585/* Flush forwarding database entries matching the description */
586void br_fdb_flush(struct net_bridge *br,
587		  const struct net_bridge_fdb_flush_desc *desc)
588{
589	struct net_bridge_fdb_entry *f;
590
591	rcu_read_lock();
592	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
593		if (!__fdb_flush_matches(br, f, desc))
594			continue;
595
596		spin_lock_bh(&br->hash_lock);
597		if (!hlist_unhashed(&f->fdb_node))
598			fdb_delete(br, f, true);
599		spin_unlock_bh(&br->hash_lock);
600	}
601	rcu_read_unlock();
602}
603
604static unsigned long __ndm_state_to_fdb_flags(u16 ndm_state)
605{
606	unsigned long flags = 0;
607
608	if (ndm_state & NUD_PERMANENT)
609		__set_bit(BR_FDB_LOCAL, &flags);
610	if (ndm_state & NUD_NOARP)
611		__set_bit(BR_FDB_STATIC, &flags);
612
613	return flags;
614}
615
616static unsigned long __ndm_flags_to_fdb_flags(u8 ndm_flags)
617{
618	unsigned long flags = 0;
619
620	if (ndm_flags & NTF_USE)
621		__set_bit(BR_FDB_ADDED_BY_USER, &flags);
622	if (ndm_flags & NTF_EXT_LEARNED)
623		__set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &flags);
624	if (ndm_flags & NTF_OFFLOADED)
625		__set_bit(BR_FDB_OFFLOADED, &flags);
626	if (ndm_flags & NTF_STICKY)
627		__set_bit(BR_FDB_STICKY, &flags);
628
629	return flags;
630}
631
632static int __fdb_flush_validate_ifindex(const struct net_bridge *br,
633					int ifindex,
634					struct netlink_ext_ack *extack)
635{
636	const struct net_device *dev;
637
638	dev = __dev_get_by_index(dev_net(br->dev), ifindex);
639	if (!dev) {
640		NL_SET_ERR_MSG_MOD(extack, "Unknown flush device ifindex");
641		return -ENODEV;
642	}
643	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
644		NL_SET_ERR_MSG_MOD(extack, "Flush device is not a bridge or bridge port");
645		return -EINVAL;
646	}
647	if (netif_is_bridge_master(dev) && dev != br->dev) {
648		NL_SET_ERR_MSG_MOD(extack,
649				   "Flush bridge device does not match target bridge device");
650		return -EINVAL;
651	}
652	if (netif_is_bridge_port(dev)) {
653		struct net_bridge_port *p = br_port_get_rtnl(dev);
654
655		if (p->br != br) {
656			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
657			return -EINVAL;
658		}
659	}
660
661	return 0;
662}
663
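/* Bulk delete (flush) of FDB entries, the bridge's ndo_fdb_del_bulk
 * handler (typically reached via RTM_DELNEIGH with NLM_F_BULK): build a
 * flush descriptor from the ndmsg state/flags and the optional NDA_*
 * mask attributes, then flush all matching entries.
 */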
664int br_fdb_delete_bulk(struct ndmsg *ndm, struct nlattr *tb[],
665		       struct net_device *dev, u16 vid,
666		       struct netlink_ext_ack *extack)
667{
668	u8 ndm_flags = ndm->ndm_flags & ~FDB_FLUSH_IGNORED_NDM_FLAGS;
669	struct net_bridge_fdb_flush_desc desc = { .vlan_id = vid };
670	struct net_bridge_port *p = NULL;
671	struct net_bridge *br;
672
673	if (netif_is_bridge_master(dev)) {
674		br = netdev_priv(dev);
675	} else {
676		p = br_port_get_rtnl(dev);
677		if (!p) {
678			NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge port");
679			return -EINVAL;
680		}
681		br = p->br;
682	}
683
684	if (ndm_flags & ~FDB_FLUSH_ALLOWED_NDM_FLAGS) {
685		NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm flag bits set");
686		return -EINVAL;
687	}
688	if (ndm->ndm_state & ~FDB_FLUSH_ALLOWED_NDM_STATES) {
689		NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm state bits set");
690		return -EINVAL;
691	}
692
693	desc.flags |= __ndm_state_to_fdb_flags(ndm->ndm_state);
694	desc.flags |= __ndm_flags_to_fdb_flags(ndm_flags);
695	if (tb[NDA_NDM_STATE_MASK]) {
696		u16 ndm_state_mask = nla_get_u16(tb[NDA_NDM_STATE_MASK]);
697
698		desc.flags_mask |= __ndm_state_to_fdb_flags(ndm_state_mask);
699	}
700	if (tb[NDA_NDM_FLAGS_MASK]) {
701		u8 ndm_flags_mask = nla_get_u8(tb[NDA_NDM_FLAGS_MASK]);
702
703		desc.flags_mask |= __ndm_flags_to_fdb_flags(ndm_flags_mask);
704	}
705	if (tb[NDA_IFINDEX]) {
706		int err, ifidx = nla_get_s32(tb[NDA_IFINDEX]);
707
708		err = __fdb_flush_validate_ifindex(br, ifidx, extack);
709		if (err)
710			return err;
711		desc.port_ifindex = ifidx;
712	} else if (p) {
713		/* flush was invoked with port device and NTF_MASTER */
714		desc.port_ifindex = p->dev->ifindex;
715	}
716
717	br_debug(br, "flushing port ifindex: %d vlan id: %u flags: 0x%lx flags mask: 0x%lx\n",
718		 desc.port_ifindex, desc.vlan_id, desc.flags, desc.flags_mask);
719
720	br_fdb_flush(br, &desc);
721
722	return 0;
723}
724
/* Flush all entries referring to a specific port.
 * If do_all is set, also flush static entries.
 * If vid is set, only delete entries matching that vlan_id.
 */
729void br_fdb_delete_by_port(struct net_bridge *br,
730			   const struct net_bridge_port *p,
731			   u16 vid,
732			   int do_all)
733{
734	struct net_bridge_fdb_entry *f;
735	struct hlist_node *tmp;
736
737	spin_lock_bh(&br->hash_lock);
738	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
739		if (f->dst != p)
740			continue;
741
742		if (!do_all)
743			if (test_bit(BR_FDB_STATIC, &f->flags) ||
744			    (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags) &&
745			     !test_bit(BR_FDB_OFFLOADED, &f->flags)) ||
746			    (vid && f->key.vlan_id != vid))
747				continue;
748
749		if (test_bit(BR_FDB_LOCAL, &f->flags))
750			fdb_delete_local(br, p, f);
751		else
752			fdb_delete(br, f, true);
753	}
754	spin_unlock_bh(&br->hash_lock);
755}
756
757#if IS_ENABLED(CONFIG_ATM_LANE)
758/* Interface used by ATM LANE hook to test
759 * if an addr is on some other bridge port */
760int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
761{
762	struct net_bridge_fdb_entry *fdb;
763	struct net_bridge_port *port;
764	int ret;
765
766	rcu_read_lock();
767	port = br_port_get_rcu(dev);
768	if (!port)
769		ret = 0;
770	else {
771		const struct net_bridge_port *dst = NULL;
772
773		fdb = br_fdb_find_rcu(port->br, addr, 0);
774		if (fdb)
775			dst = READ_ONCE(fdb->dst);
776
777		ret = dst && dst->dev != dev &&
778		      dst->state == BR_STATE_FORWARDING;
779	}
780	rcu_read_unlock();
781
782	return ret;
783}
784#endif /* CONFIG_ATM_LANE */
785
786/*
787 * Fill buffer with forwarding table records in
788 * the API format.
789 */
790int br_fdb_fillbuf(struct net_bridge *br, void *buf,
791		   unsigned long maxnum, unsigned long skip)
792{
793	struct net_bridge_fdb_entry *f;
794	struct __fdb_entry *fe = buf;
795	int num = 0;
796
797	memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
798
799	rcu_read_lock();
800	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
801		if (num >= maxnum)
802			break;
803
804		if (has_expired(br, f))
805			continue;
806
807		/* ignore pseudo entry for local MAC address */
808		if (!f->dst)
809			continue;
810
811		if (skip) {
812			--skip;
813			continue;
814		}
815
816		/* convert from internal format to API */
817		memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);
818
819		/* due to ABI compat need to split into hi/lo */
820		fe->port_no = f->dst->port_no;
821		fe->port_hi = f->dst->port_no >> 8;
822
823		fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
824		if (!test_bit(BR_FDB_STATIC, &f->flags))
825			fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
826		++fe;
827		++num;
828	}
829	rcu_read_unlock();
830
831	return num;
832}
833
834/* Add entry for local address of interface */
835int br_fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
836		     const unsigned char *addr, u16 vid)
837{
838	int ret;
839
840	spin_lock_bh(&br->hash_lock);
841	ret = fdb_add_local(br, source, addr, vid);
842	spin_unlock_bh(&br->hash_lock);
843	return ret;
844}
845
846/* returns true if the fdb was modified */
847static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb)
848{
849	return !!(test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags) &&
850		  test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags));
851}
852
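/* Learning path for frames received on @source (RCU read side): refresh
 * an existing entry, possibly migrating it to a new port, or create a
 * new one under br->hash_lock.
 */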
853void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
854		   const unsigned char *addr, u16 vid, unsigned long flags)
855{
856	struct net_bridge_fdb_entry *fdb;
857
	/* some users want to always flood; a hold time of 0 disables learning. */
859	if (hold_time(br) == 0)
860		return;
861
862	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
863	if (likely(fdb)) {
864		/* attempt to update an entry for a local interface */
865		if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) {
866			if (net_ratelimit())
867				br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
868					source->dev->name, addr, vid);
869		} else {
870			unsigned long now = jiffies;
871			bool fdb_modified = false;
872
873			if (now != fdb->updated) {
874				fdb->updated = now;
875				fdb_modified = __fdb_mark_active(fdb);
876			}
877
878			/* fastpath: update of existing entry */
879			if (unlikely(source != READ_ONCE(fdb->dst) &&
880				     !test_bit(BR_FDB_STICKY, &fdb->flags))) {
881				br_switchdev_fdb_notify(br, fdb, RTM_DELNEIGH);
882				WRITE_ONCE(fdb->dst, source);
883				fdb_modified = true;
884				/* Take over HW learned entry */
885				if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
886						      &fdb->flags)))
887					clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
888						  &fdb->flags);
889				/* Clear locked flag when roaming to an
890				 * unlocked port.
891				 */
892				if (unlikely(test_bit(BR_FDB_LOCKED, &fdb->flags)))
893					clear_bit(BR_FDB_LOCKED, &fdb->flags);
894			}
895
896			if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
897				set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
898			if (unlikely(fdb_modified)) {
899				trace_br_fdb_update(br, source, addr, vid, flags);
900				fdb_notify(br, fdb, RTM_NEWNEIGH, true);
901			}
902		}
903	} else {
904		spin_lock(&br->hash_lock);
905		fdb = fdb_create(br, source, addr, vid, flags);
906		if (fdb) {
907			trace_br_fdb_update(br, source, addr, vid, flags);
908			fdb_notify(br, fdb, RTM_NEWNEIGH, true);
909		}
		/* else we lost the race and someone else inserted it first;
		 * don't bother updating
		 */
913		spin_unlock(&br->hash_lock);
914	}
915}
916
917/* Dump information about entries, in response to GETNEIGH */
918int br_fdb_dump(struct sk_buff *skb,
919		struct netlink_callback *cb,
920		struct net_device *dev,
921		struct net_device *filter_dev,
922		int *idx)
923{
924	struct net_bridge *br = netdev_priv(dev);
925	struct net_bridge_fdb_entry *f;
926	int err = 0;
927
928	if (!netif_is_bridge_master(dev))
929		return err;
930
931	if (!filter_dev) {
932		err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
933		if (err < 0)
934			return err;
935	}
936
937	rcu_read_lock();
938	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
939		if (*idx < cb->args[2])
940			goto skip;
941		if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
942			if (filter_dev != dev)
943				goto skip;
			/* !f->dst is a special case for the bridge itself:
			 * it means the MAC belongs to the bridge device.
			 * Therefore we need a little more filtering and
			 * only want to dump the !f->dst case.
			 */
949			if (f->dst)
950				goto skip;
951		}
952		if (!filter_dev && f->dst)
953			goto skip;
954
955		err = fdb_fill_info(skb, br, f,
956				    NETLINK_CB(cb->skb).portid,
957				    cb->nlh->nlmsg_seq,
958				    RTM_NEWNEIGH,
959				    NLM_F_MULTI);
960		if (err < 0)
961			break;
962skip:
963		*idx += 1;
964	}
965	rcu_read_unlock();
966
967	return err;
968}
969
970int br_fdb_get(struct sk_buff *skb,
971	       struct nlattr *tb[],
972	       struct net_device *dev,
973	       const unsigned char *addr,
974	       u16 vid, u32 portid, u32 seq,
975	       struct netlink_ext_ack *extack)
976{
977	struct net_bridge *br = netdev_priv(dev);
978	struct net_bridge_fdb_entry *f;
979	int err = 0;
980
981	rcu_read_lock();
982	f = br_fdb_find_rcu(br, addr, vid);
983	if (!f) {
984		NL_SET_ERR_MSG(extack, "Fdb entry not found");
985		err = -ENOENT;
986		goto errout;
987	}
988
989	err = fdb_fill_info(skb, br, f, portid, seq,
990			    RTM_NEWNEIGH, 0);
991errout:
992	rcu_read_unlock();
993	return err;
994}
995
996/* returns true if the fdb is modified */
997static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify)
998{
999	bool modified = false;
1000
	/* allow marking an entry as inactive; usually done on creation */
1002	if ((notify & FDB_NOTIFY_INACTIVE_BIT) &&
1003	    !test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
1004		modified = true;
1005
1006	if ((notify & FDB_NOTIFY_BIT) &&
1007	    !test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) {
1008		/* enabled activity tracking */
1009		modified = true;
1010	} else if (!(notify & FDB_NOTIFY_BIT) &&
1011		   test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) {
1012		/* disabled activity tracking, clear notify state */
1013		clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags);
1014		modified = true;
1015	}
1016
1017	return modified;
1018}
1019
1020/* Update (create or replace) forwarding database entry */
1021static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
1022			 const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid,
1023			 struct nlattr *nfea_tb[])
1024{
1025	bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY);
1026	bool refresh = !nfea_tb[NFEA_DONT_REFRESH];
1027	struct net_bridge_fdb_entry *fdb;
1028	u16 state = ndm->ndm_state;
1029	bool modified = false;
1030	u8 notify = 0;
1031
	/* If the port cannot learn, allow only local and static entries */
1033	if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
1034	    !(source->state == BR_STATE_LEARNING ||
1035	      source->state == BR_STATE_FORWARDING))
1036		return -EPERM;
1037
1038	if (!source && !(state & NUD_PERMANENT)) {
1039		pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
1040			br->dev->name);
1041		return -EINVAL;
1042	}
1043
1044	if (is_sticky && (state & NUD_PERMANENT))
1045		return -EINVAL;
1046
1047	if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) {
1048		notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]);
1049		if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) ||
1050		    (notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT)
1051			return -EINVAL;
1052	}
1053
1054	fdb = br_fdb_find(br, addr, vid);
1055	if (fdb == NULL) {
1056		if (!(flags & NLM_F_CREATE))
1057			return -ENOENT;
1058
1059		fdb = fdb_create(br, source, addr, vid, 0);
1060		if (!fdb)
1061			return -ENOMEM;
1062
1063		modified = true;
1064	} else {
1065		if (flags & NLM_F_EXCL)
1066			return -EEXIST;
1067
1068		if (READ_ONCE(fdb->dst) != source) {
1069			WRITE_ONCE(fdb->dst, source);
1070			modified = true;
1071		}
1072	}
1073
1074	if (fdb_to_nud(br, fdb) != state) {
1075		if (state & NUD_PERMANENT) {
1076			set_bit(BR_FDB_LOCAL, &fdb->flags);
1077			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
1078				fdb_add_hw_addr(br, addr);
1079		} else if (state & NUD_NOARP) {
1080			clear_bit(BR_FDB_LOCAL, &fdb->flags);
1081			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
1082				fdb_add_hw_addr(br, addr);
1083		} else {
1084			clear_bit(BR_FDB_LOCAL, &fdb->flags);
1085			if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags))
1086				fdb_del_hw_addr(br, addr);
1087		}
1088
1089		modified = true;
1090	}
1091
1092	if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) {
1093		change_bit(BR_FDB_STICKY, &fdb->flags);
1094		modified = true;
1095	}
1096
1097	if (test_and_clear_bit(BR_FDB_LOCKED, &fdb->flags))
1098		modified = true;
1099
1100	if (fdb_handle_notify(fdb, notify))
1101		modified = true;
1102
1103	set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
1104
1105	fdb->used = jiffies;
1106	if (modified) {
1107		if (refresh)
1108			fdb->updated = jiffies;
1109		fdb_notify(br, fdb, RTM_NEWNEIGH, true);
1110	}
1111
1112	return 0;
1113}
1114
1115static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
1116			struct net_bridge_port *p, const unsigned char *addr,
1117			u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
1118			struct netlink_ext_ack *extack)
1119{
1120	int err = 0;
1121
1122	if (ndm->ndm_flags & NTF_USE) {
1123		if (!p) {
1124			pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
1125				br->dev->name);
1126			return -EINVAL;
1127		}
1128		if (!nbp_state_should_learn(p))
1129			return 0;
1130
1131		local_bh_disable();
1132		rcu_read_lock();
1133		br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
1134		rcu_read_unlock();
1135		local_bh_enable();
1136	} else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
1137		if (!p && !(ndm->ndm_state & NUD_PERMANENT)) {
1138			NL_SET_ERR_MSG_MOD(extack,
1139					   "FDB entry towards bridge must be permanent");
1140			return -EINVAL;
1141		}
1142		err = br_fdb_external_learn_add(br, p, addr, vid, false, true);
1143	} else {
1144		spin_lock_bh(&br->hash_lock);
1145		err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
1146		spin_unlock_bh(&br->hash_lock);
1147	}
1148
1149	return err;
1150}
1151
1152static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = {
1153	[NFEA_ACTIVITY_NOTIFY]	= { .type = NLA_U8 },
1154	[NFEA_DONT_REFRESH]	= { .type = NLA_FLAG },
1155};
1156
1157/* Add new permanent fdb entry with RTM_NEWNEIGH */
1158int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
1159	       struct net_device *dev,
1160	       const unsigned char *addr, u16 vid, u16 nlh_flags,
1161	       struct netlink_ext_ack *extack)
1162{
1163	struct nlattr *nfea_tb[NFEA_MAX + 1], *attr;
1164	struct net_bridge_vlan_group *vg;
1165	struct net_bridge_port *p = NULL;
1166	struct net_bridge_vlan *v;
1167	struct net_bridge *br = NULL;
1168	u32 ext_flags = 0;
1169	int err = 0;
1170
1171	trace_br_fdb_add(ndm, dev, addr, vid, nlh_flags);
1172
1173	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
1174		pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
1175		return -EINVAL;
1176	}
1177
1178	if (is_zero_ether_addr(addr)) {
1179		pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
1180		return -EINVAL;
1181	}
1182
1183	if (netif_is_bridge_master(dev)) {
1184		br = netdev_priv(dev);
1185		vg = br_vlan_group(br);
1186	} else {
1187		p = br_port_get_rtnl(dev);
1188		if (!p) {
1189			pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
1190				dev->name);
1191			return -EINVAL;
1192		}
1193		br = p->br;
1194		vg = nbp_vlan_group(p);
1195	}
1196
1197	if (tb[NDA_FLAGS_EXT])
1198		ext_flags = nla_get_u32(tb[NDA_FLAGS_EXT]);
1199
1200	if (ext_flags & NTF_EXT_LOCKED) {
1201		NL_SET_ERR_MSG_MOD(extack, "Cannot add FDB entry with \"locked\" flag set");
1202		return -EINVAL;
1203	}
1204
1205	if (tb[NDA_FDB_EXT_ATTRS]) {
1206		attr = tb[NDA_FDB_EXT_ATTRS];
1207		err = nla_parse_nested(nfea_tb, NFEA_MAX, attr,
1208				       br_nda_fdb_pol, extack);
1209		if (err)
1210			return err;
1211	} else {
1212		memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1));
1213	}
1214
1215	if (vid) {
1216		v = br_vlan_find(vg, vid);
1217		if (!v || !br_vlan_should_use(v)) {
1218			pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
1219			return -EINVAL;
1220		}
1221
1222		/* VID was specified, so use it. */
1223		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
1224				   extack);
1225	} else {
1226		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
1227				   extack);
1228		if (err || !vg || !vg->num_vlans)
1229			goto out;
1230
		/* We have vlans configured on this port and the user didn't
		 * specify a VLAN.  To be nice, add/update an entry for every
		 * vlan on this port.
		 */
1235		list_for_each_entry(v, &vg->vlan_list, vlist) {
1236			if (!br_vlan_should_use(v))
1237				continue;
1238			err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
1239					   nfea_tb, extack);
1240			if (err)
1241				goto out;
1242		}
1243	}
1244
1245out:
1246	return err;
1247}
1248
1249static int fdb_delete_by_addr_and_port(struct net_bridge *br,
1250				       const struct net_bridge_port *p,
1251				       const u8 *addr, u16 vlan)
1252{
1253	struct net_bridge_fdb_entry *fdb;
1254
1255	fdb = br_fdb_find(br, addr, vlan);
1256	if (!fdb || READ_ONCE(fdb->dst) != p)
1257		return -ENOENT;
1258
1259	fdb_delete(br, fdb, true);
1260
1261	return 0;
1262}
1263
1264static int __br_fdb_delete(struct net_bridge *br,
1265			   const struct net_bridge_port *p,
1266			   const unsigned char *addr, u16 vid)
1267{
1268	int err;
1269
1270	spin_lock_bh(&br->hash_lock);
1271	err = fdb_delete_by_addr_and_port(br, p, addr, vid);
1272	spin_unlock_bh(&br->hash_lock);
1273
1274	return err;
1275}
1276
1277/* Remove neighbor entry with RTM_DELNEIGH */
1278int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
1279		  struct net_device *dev,
1280		  const unsigned char *addr, u16 vid,
1281		  struct netlink_ext_ack *extack)
1282{
1283	struct net_bridge_vlan_group *vg;
1284	struct net_bridge_port *p = NULL;
1285	struct net_bridge_vlan *v;
1286	struct net_bridge *br;
1287	int err;
1288
1289	if (netif_is_bridge_master(dev)) {
1290		br = netdev_priv(dev);
1291		vg = br_vlan_group(br);
1292	} else {
1293		p = br_port_get_rtnl(dev);
1294		if (!p) {
1295			pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
1296				dev->name);
1297			return -EINVAL;
1298		}
1299		vg = nbp_vlan_group(p);
1300		br = p->br;
1301	}
1302
1303	if (vid) {
1304		v = br_vlan_find(vg, vid);
1305		if (!v) {
1306			pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
1307			return -EINVAL;
1308		}
1309
1310		err = __br_fdb_delete(br, p, addr, vid);
1311	} else {
1312		err = -ENOENT;
1313		err &= __br_fdb_delete(br, p, addr, 0);
1314		if (!vg || !vg->num_vlans)
1315			return err;
1316
1317		list_for_each_entry(v, &vg->vlan_list, vlist) {
1318			if (!br_vlan_should_use(v))
1319				continue;
1320			err &= __br_fdb_delete(br, p, addr, v->vid);
1321		}
1322	}
1323
1324	return err;
1325}
1326
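/* Push every static FDB entry into @p's unicast address filter, rolling
 * back already-added entries on failure.  This mirrors what
 * fdb_add_hw_addr() does for non-promiscuous ports when individual
 * static entries are added.
 */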
1327int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
1328{
1329	struct net_bridge_fdb_entry *f, *tmp;
1330	int err = 0;
1331
1332	ASSERT_RTNL();
1333
1334	/* the key here is that static entries change only under rtnl */
1335	rcu_read_lock();
1336	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
1337		/* We only care for static entries */
1338		if (!test_bit(BR_FDB_STATIC, &f->flags))
1339			continue;
1340		err = dev_uc_add(p->dev, f->key.addr.addr);
1341		if (err)
1342			goto rollback;
1343	}
1344done:
1345	rcu_read_unlock();
1346
1347	return err;
1348
1349rollback:
1350	hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
1351		/* We only care for static entries */
1352		if (!test_bit(BR_FDB_STATIC, &tmp->flags))
1353			continue;
1354		if (tmp == f)
1355			break;
1356		dev_uc_del(p->dev, tmp->key.addr.addr);
1357	}
1358
1359	goto done;
1360}
1361
1362void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
1363{
1364	struct net_bridge_fdb_entry *f;
1365
1366	ASSERT_RTNL();
1367
1368	rcu_read_lock();
1369	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
1370		/* We only care for static entries */
1371		if (!test_bit(BR_FDB_STATIC, &f->flags))
1372			continue;
1373
1374		dev_uc_del(p->dev, f->key.addr.addr);
1375	}
1376	rcu_read_unlock();
1377}
1378
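/* Add or refresh an entry learned outside the bridge's own software
 * learning, e.g. an NTF_EXT_LEARNED request from user space (see
 * __br_fdb_add()) or an address reported by a switchdev driver.
 * Locked entries are only accepted on ports with BR_PORT_MAB enabled.
 */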
1379int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
1380			      const unsigned char *addr, u16 vid, bool locked,
1381			      bool swdev_notify)
1382{
1383	struct net_bridge_fdb_entry *fdb;
1384	bool modified = false;
1385	int err = 0;
1386
1387	trace_br_fdb_external_learn_add(br, p, addr, vid);
1388
1389	if (locked && (!p || !(p->flags & BR_PORT_MAB)))
1390		return -EINVAL;
1391
1392	spin_lock_bh(&br->hash_lock);
1393
1394	fdb = br_fdb_find(br, addr, vid);
1395	if (!fdb) {
1396		unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN);
1397
1398		if (swdev_notify)
1399			flags |= BIT(BR_FDB_ADDED_BY_USER);
1400
1401		if (!p)
1402			flags |= BIT(BR_FDB_LOCAL);
1403
1404		if (locked)
1405			flags |= BIT(BR_FDB_LOCKED);
1406
1407		fdb = fdb_create(br, p, addr, vid, flags);
1408		if (!fdb) {
1409			err = -ENOMEM;
1410			goto err_unlock;
1411		}
1412		fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
1413	} else {
1414		if (locked &&
1415		    (!test_bit(BR_FDB_LOCKED, &fdb->flags) ||
1416		     READ_ONCE(fdb->dst) != p)) {
1417			err = -EINVAL;
1418			goto err_unlock;
1419		}
1420
1421		fdb->updated = jiffies;
1422
1423		if (READ_ONCE(fdb->dst) != p) {
1424			WRITE_ONCE(fdb->dst, p);
1425			modified = true;
1426		}
1427
1428		if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
1429			/* Refresh entry */
1430			fdb->used = jiffies;
1431		} else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
1432			/* Take over SW learned entry */
1433			set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
1434			modified = true;
1435		}
1436
1437		if (locked != test_bit(BR_FDB_LOCKED, &fdb->flags)) {
1438			change_bit(BR_FDB_LOCKED, &fdb->flags);
1439			modified = true;
1440		}
1441
1442		if (swdev_notify)
1443			set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
1444
1445		if (!p)
1446			set_bit(BR_FDB_LOCAL, &fdb->flags);
1447
1448		if (modified)
1449			fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
1450	}
1451
1452err_unlock:
1453	spin_unlock_bh(&br->hash_lock);
1454
1455	return err;
1456}
1457
1458int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
1459			      const unsigned char *addr, u16 vid,
1460			      bool swdev_notify)
1461{
1462	struct net_bridge_fdb_entry *fdb;
1463	int err = 0;
1464
1465	spin_lock_bh(&br->hash_lock);
1466
1467	fdb = br_fdb_find(br, addr, vid);
1468	if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
1469		fdb_delete(br, fdb, swdev_notify);
1470	else
1471		err = -ENOENT;
1472
1473	spin_unlock_bh(&br->hash_lock);
1474
1475	return err;
1476}
1477
1478void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
1479			  const unsigned char *addr, u16 vid, bool offloaded)
1480{
1481	struct net_bridge_fdb_entry *fdb;
1482
1483	spin_lock_bh(&br->hash_lock);
1484
1485	fdb = br_fdb_find(br, addr, vid);
1486	if (fdb && offloaded != test_bit(BR_FDB_OFFLOADED, &fdb->flags))
1487		change_bit(BR_FDB_OFFLOADED, &fdb->flags);
1488
1489	spin_unlock_bh(&br->hash_lock);
1490}
1491
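/* Clear the offloaded flag on every entry pointing at @dev in VLAN
 * @vid; intended for switchdev drivers that stop offloading a given
 * port/VLAN.  Runs under RTNL.
 */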
1492void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
1493{
1494	struct net_bridge_fdb_entry *f;
1495	struct net_bridge_port *p;
1496
1497	ASSERT_RTNL();
1498
1499	p = br_port_get_rtnl(dev);
1500	if (!p)
1501		return;
1502
1503	spin_lock_bh(&p->br->hash_lock);
1504	hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
1505		if (f->dst == p && f->key.vlan_id == vid)
1506			clear_bit(BR_FDB_OFFLOADED, &f->flags);
1507	}
1508	spin_unlock_bh(&p->br->hash_lock);
1509}
1510EXPORT_SYMBOL_GPL(br_fdb_clear_offload);
1511