1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4#include <linux/kernel.h>
5#include <linux/types.h>
6#include <linux/netdevice.h>
7#include <linux/etherdevice.h>
8#include <linux/slab.h>
9#include <linux/device.h>
10#include <linux/skbuff.h>
11#include <linux/if_vlan.h>
12#include <linux/if_bridge.h>
13#include <linux/workqueue.h>
14#include <linux/jiffies.h>
15#include <linux/rtnetlink.h>
16#include <linux/netlink.h>
17#include <net/switchdev.h>
18#include <net/vxlan.h>
19
20#include "spectrum_span.h"
21#include "spectrum_switchdev.h"
22#include "spectrum.h"
23#include "core.h"
24#include "reg.h"
25
26struct mlxsw_sp_bridge_ops;
27
/* Per-ASIC bridge offload state: the list of offloaded bridge devices,
 * FDB notification polling work and global FDB ageing configuration.
 */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		/* Delayed work used for FDB notification processing;
		 * scheduled elsewhere in this file.
		 */
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
/* FDB ageing time bounds and default, in seconds (see
 * mlxsw_sp_port_attr_br_ageing_set() which converts to seconds before
 * range checking).
 */
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	/* Only a single VLAN-aware bridge may be offloaded at a time */
	bool vlan_enabled_exists;
	struct list_head bridges_list;
	/* Allocation bitmap of multicast group (MID) indexes */
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
};
45
/* An offloaded Linux bridge device together with its enslaved ports and
 * MDB entries.
 */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;		/* the Linux bridge netdev */
	struct list_head list;		/* node in bridge->bridges_list */
	struct list_head ports_list;	/* struct mlxsw_sp_bridge_port */
	struct list_head mids_list;	/* struct mlxsw_sp_mid entries */
	u8 vlan_enabled:1,		/* mirrors br_vlan_enabled() */
	   multicast_enabled:1,		/* mirrors br_multicast_enabled() */
	   mrouter:1;			/* mirrors br_multicast_router() */
	const struct mlxsw_sp_bridge_ops *ops;	/* 802.1Q or 802.1D ops */
};
56
/* A port (or LAG) enslaved to an offloaded bridge. Reference-counted,
 * since multiple port VLANs may be attached to the same bridge port.
 */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;		/* the enslaved netdev */
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list;		/* node in bridge_device->ports_list */
	struct list_head vlans_list;	/* struct mlxsw_sp_bridge_vlan */
	unsigned int ref_count;
	u8 stp_state;			/* BR_STATE_* */
	unsigned long flags;		/* BR_LEARNING, BR_FLOOD, ... */
	bool mrouter;			/* port is a multicast router port */
	bool lagged;			/* selects which union member is valid */
	union {
		u16 lag_id;
		u16 system_port;
	};
};
72
/* A VLAN configured on a bridge port; collects the port VLANs that are
 * members of this {bridge port, VID} pair.
 */
struct mlxsw_sp_bridge_vlan {
	struct list_head list;		/* node in bridge_port->vlans_list */
	struct list_head port_vlan_list; /* struct mlxsw_sp_port_vlan */
	u16 vid;
};
78
/* Operations that differ between bridge flavors; separate
 * implementations are installed for VLAN-aware (802.1Q) and
 * VLAN-unaware (802.1D) bridges.
 */
struct mlxsw_sp_bridge_ops {
	/* Enslave a switch port to the bridge */
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	/* Undo port_join() */
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	/* Offload a VXLAN device enslaved to the bridge */
	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
			  const struct net_device *vxlan_dev, u16 vid,
			  struct netlink_ext_ack *extack);
	/* Get a referenced FID for @vid (dropped via mlxsw_sp_fid_put()) */
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid, struct netlink_ext_ack *extack);
	/* Look up the FID serving @vid; presumably does not create one —
	 * see the per-flavor implementations.
	 */
	struct mlxsw_sp_fid *
		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid);
	/* Map a FID back to the VID it serves on this bridge */
	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
		       const struct mlxsw_sp_fid *fid);
};
99
/* Forward declarations for helpers defined later in this file */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);

static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port);

static void
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct mlxsw_sp_bridge_device
				   *bridge_device);

static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add);
118
119static struct mlxsw_sp_bridge_device *
120mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
121			    const struct net_device *br_dev)
122{
123	struct mlxsw_sp_bridge_device *bridge_device;
124
125	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
126		if (bridge_device->dev == br_dev)
127			return bridge_device;
128
129	return NULL;
130}
131
132bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
133					 const struct net_device *br_dev)
134{
135	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
136}
137
138static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
139						    struct netdev_nested_priv *priv)
140{
141	struct mlxsw_sp *mlxsw_sp = priv->data;
142
143	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
144	return 0;
145}
146
/* Destroy the router interface of @dev itself and of every device
 * stacked on top of it (e.g. VLAN uppers of the bridge).
 */
static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
						struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = (void *)mlxsw_sp,
	};

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	netdev_walk_all_upper_dev_rcu(dev,
				      mlxsw_sp_bridge_device_upper_rif_destroy,
				      &priv);
}
159
/* Offload every running VXLAN device already enslaved to @br_dev, as if
 * it were just now enslaved. On failure, undo the join for the devices
 * processed before the one that failed. Returns 0 or a negative errno.
 */
static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge,
					     struct net_device *br_dev,
					     struct netlink_ext_ack *extack)
{
	struct net_device *dev, *stop_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev) && netif_running(dev)) {
			err = mlxsw_sp_bridge_vxlan_join(bridge->mlxsw_sp,
							 br_dev, dev, 0,
							 extack);
			if (err) {
				/* Remember where the rollback must stop */
				stop_dev = dev;
				goto err_vxlan_join;
			}
		}
	}

	return 0;

err_vxlan_join:
	/* Walk the lowers again in the same order and leave every device
	 * joined before the one that failed.
	 */
	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev) && netif_running(dev)) {
			if (stop_dev == dev)
				break;
			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
		}
	}
	return err;
}
192
193static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
194					      struct net_device *br_dev)
195{
196	struct net_device *dev;
197	struct list_head *iter;
198
199	netdev_for_each_lower_dev(br_dev, dev, iter) {
200		if (netif_is_vxlan(dev) && netif_running(dev))
201			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
202	}
203}
204
/* Allocate and register an offload entry for @br_dev. At most one
 * VLAN-aware bridge is supported per ASIC. Also replays the
 * configuration of VXLAN devices already enslaved to the bridge.
 * Returns the new entry or an ERR_PTR().
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	bool vlan_enabled = br_vlan_enabled(br_dev);
	int err;

	if (vlan_enabled && bridge->vlan_enabled_exists) {
		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
		NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported");
		return ERR_PTR(-EINVAL);
	}

	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
	if (!bridge_device)
		return ERR_PTR(-ENOMEM);

	/* Snapshot the bridge's current configuration */
	bridge_device->dev = br_dev;
	bridge_device->vlan_enabled = vlan_enabled;
	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
	bridge_device->mrouter = br_multicast_router(br_dev);
	INIT_LIST_HEAD(&bridge_device->ports_list);
	/* VLAN-aware bridges use the 802.1Q ops, others the 802.1D ops */
	if (vlan_enabled) {
		bridge->vlan_enabled_exists = true;
		bridge_device->ops = bridge->bridge_8021q_ops;
	} else {
		bridge_device->ops = bridge->bridge_8021d_ops;
	}
	INIT_LIST_HEAD(&bridge_device->mids_list);
	list_add(&bridge_device->list, &bridge->bridges_list);

	/* It is possible we already have VXLAN devices enslaved to the bridge.
	 * In which case, we need to replay their configuration as if they were
	 * just now enslaved to the bridge.
	 */
	err = mlxsw_sp_bridge_device_vxlan_init(bridge, br_dev, extack);
	if (err)
		goto err_vxlan_init;

	return bridge_device;

err_vxlan_init:
	/* Undo the registration performed above */
	list_del(&bridge_device->list);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	kfree(bridge_device);
	return ERR_PTR(err);
}
256
/* Unregister and free a bridge device entry. All ports and MDB entries
 * are expected to be gone by now (WARNed on below).
 */
static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
			       struct mlxsw_sp_bridge_device *bridge_device)
{
	/* Undo VXLAN offload and drop router interfaces still attached
	 * to the bridge or to its upper devices.
	 */
	mlxsw_sp_bridge_device_vxlan_fini(bridge, bridge_device->dev);
	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
					    bridge_device->dev);
	list_del(&bridge_device->list);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	WARN_ON(!list_empty(&bridge_device->ports_list));
	WARN_ON(!list_empty(&bridge_device->mids_list));
	kfree(bridge_device);
}
271
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
			   struct net_device *br_dev,
			   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	/* Reuse the existing entry when the bridge is already offloaded;
	 * otherwise allocate and register a new one.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (!bridge_device)
		bridge_device = mlxsw_sp_bridge_device_create(bridge, br_dev,
							      extack);
	return bridge_device;
}
285
286static void
287mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
288			   struct mlxsw_sp_bridge_device *bridge_device)
289{
290	if (list_empty(&bridge_device->ports_list))
291		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
292}
293
294static struct mlxsw_sp_bridge_port *
295__mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
296			    const struct net_device *brport_dev)
297{
298	struct mlxsw_sp_bridge_port *bridge_port;
299
300	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
301		if (bridge_port->dev == brport_dev)
302			return bridge_port;
303	}
304
305	return NULL;
306}
307
308struct mlxsw_sp_bridge_port *
309mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
310			  struct net_device *brport_dev)
311{
312	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
313	struct mlxsw_sp_bridge_device *bridge_device;
314
315	if (!br_dev)
316		return NULL;
317
318	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
319	if (!bridge_device)
320		return NULL;
321
322	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
323}
324
325static struct mlxsw_sp_bridge_port *
326mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
327			    struct net_device *brport_dev)
328{
329	struct mlxsw_sp_bridge_port *bridge_port;
330	struct mlxsw_sp_port *mlxsw_sp_port;
331
332	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
333	if (!bridge_port)
334		return NULL;
335
336	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
337	bridge_port->lagged = mlxsw_sp_port->lagged;
338	if (bridge_port->lagged)
339		bridge_port->lag_id = mlxsw_sp_port->lag_id;
340	else
341		bridge_port->system_port = mlxsw_sp_port->local_port;
342	bridge_port->dev = brport_dev;
343	bridge_port->bridge_device = bridge_device;
344	bridge_port->stp_state = BR_STATE_DISABLED;
345	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
346			     BR_MCAST_FLOOD;
347	INIT_LIST_HEAD(&bridge_port->vlans_list);
348	list_add(&bridge_port->list, &bridge_device->ports_list);
349	bridge_port->ref_count = 1;
350
351	return bridge_port;
352}
353
354static void
355mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
356{
357	list_del(&bridge_port->list);
358	WARN_ON(!list_empty(&bridge_port->vlans_list));
359	kfree(bridge_port);
360}
361
362static struct mlxsw_sp_bridge_port *
363mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
364			 struct net_device *brport_dev,
365			 struct netlink_ext_ack *extack)
366{
367	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
368	struct mlxsw_sp_bridge_device *bridge_device;
369	struct mlxsw_sp_bridge_port *bridge_port;
370	int err;
371
372	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
373	if (bridge_port) {
374		bridge_port->ref_count++;
375		return bridge_port;
376	}
377
378	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack);
379	if (IS_ERR(bridge_device))
380		return ERR_CAST(bridge_device);
381
382	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
383	if (!bridge_port) {
384		err = -ENOMEM;
385		goto err_bridge_port_create;
386	}
387
388	return bridge_port;
389
390err_bridge_port_create:
391	mlxsw_sp_bridge_device_put(bridge, bridge_device);
392	return ERR_PTR(err);
393}
394
395static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
396				     struct mlxsw_sp_bridge_port *bridge_port)
397{
398	struct mlxsw_sp_bridge_device *bridge_device;
399
400	if (--bridge_port->ref_count != 0)
401		return;
402	bridge_device = bridge_port->bridge_device;
403	mlxsw_sp_bridge_port_destroy(bridge_port);
404	mlxsw_sp_bridge_device_put(bridge, bridge_device);
405}
406
407static struct mlxsw_sp_port_vlan *
408mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
409				  const struct mlxsw_sp_bridge_device *
410				  bridge_device,
411				  u16 vid)
412{
413	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
414
415	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
416			    list) {
417		if (!mlxsw_sp_port_vlan->bridge_port)
418			continue;
419		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
420		    bridge_device)
421			continue;
422		if (bridge_device->vlan_enabled &&
423		    mlxsw_sp_port_vlan->vid != vid)
424			continue;
425		return mlxsw_sp_port_vlan;
426	}
427
428	return NULL;
429}
430
431static struct mlxsw_sp_port_vlan*
432mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
433			       u16 fid_index)
434{
435	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
436
437	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
438			    list) {
439		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
440
441		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
442			return mlxsw_sp_port_vlan;
443	}
444
445	return NULL;
446}
447
448static struct mlxsw_sp_bridge_vlan *
449mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
450			  u16 vid)
451{
452	struct mlxsw_sp_bridge_vlan *bridge_vlan;
453
454	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
455		if (bridge_vlan->vid == vid)
456			return bridge_vlan;
457	}
458
459	return NULL;
460}
461
462static struct mlxsw_sp_bridge_vlan *
463mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
464{
465	struct mlxsw_sp_bridge_vlan *bridge_vlan;
466
467	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
468	if (!bridge_vlan)
469		return NULL;
470
471	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
472	bridge_vlan->vid = vid;
473	list_add(&bridge_vlan->list, &bridge_port->vlans_list);
474
475	return bridge_vlan;
476}
477
478static void
479mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
480{
481	list_del(&bridge_vlan->list);
482	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
483	kfree(bridge_vlan);
484}
485
486static struct mlxsw_sp_bridge_vlan *
487mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
488{
489	struct mlxsw_sp_bridge_vlan *bridge_vlan;
490
491	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
492	if (bridge_vlan)
493		return bridge_vlan;
494
495	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
496}
497
498static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
499{
500	if (list_empty(&bridge_vlan->port_vlan_list))
501		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
502}
503
504static int
505mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
506				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
507				  u8 state)
508{
509	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
510
511	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
512			    bridge_vlan_node) {
513		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
514			continue;
515		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
516						 bridge_vlan->vid, state);
517	}
518
519	return 0;
520}
521
/* Apply an STP state change to all VLANs of a bridge port. On failure,
 * roll the already-updated VLANs back to the previous state.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	/* Nothing to validate in the prepare phase */
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	/* Restore the previous state on the VLANs updated so far */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}
560
561static int
562mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
563				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
564				    enum mlxsw_sp_flood_type packet_type,
565				    bool member)
566{
567	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
568
569	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
570			    bridge_vlan_node) {
571		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
572			continue;
573		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
574					      packet_type,
575					      mlxsw_sp_port->local_port,
576					      member);
577	}
578
579	return 0;
580}
581
/* Set flood membership of @packet_type for the port in every VLAN of
 * @bridge_port. On failure, revert the VLANs updated so far.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	/* Undo by applying the opposite membership in reverse order */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}
609
610static int
611mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
612				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
613				       bool set)
614{
615	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
616	u16 vid = bridge_vlan->vid;
617
618	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
619			    bridge_vlan_node) {
620		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
621			continue;
622		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
623	}
624
625	return 0;
626}
627
/* Toggle FDB learning for the port in every VLAN of @bridge_port. On
 * failure, revert the VLANs updated so far to the opposite setting.
 */
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	/* Undo in reverse order, starting from the failed entry */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}
652
653static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
654					       *mlxsw_sp_port,
655					       struct switchdev_trans *trans,
656					       unsigned long brport_flags)
657{
658	if (brport_flags & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
659		return -EINVAL;
660
661	return 0;
662}
663
664static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
665					   struct switchdev_trans *trans,
666					   struct net_device *orig_dev,
667					   unsigned long brport_flags)
668{
669	struct mlxsw_sp_bridge_port *bridge_port;
670	int err;
671
672	if (switchdev_trans_ph_prepare(trans))
673		return 0;
674
675	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
676						orig_dev);
677	if (!bridge_port)
678		return 0;
679
680	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
681						   MLXSW_SP_FLOOD_TYPE_UC,
682						   brport_flags & BR_FLOOD);
683	if (err)
684		return err;
685
686	err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
687						brport_flags & BR_LEARNING);
688	if (err)
689		return err;
690
691	if (bridge_port->bridge_device->multicast_enabled)
692		goto out;
693
694	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
695						   MLXSW_SP_FLOOD_TYPE_MC,
696						   brport_flags &
697						   BR_MCAST_FLOOD);
698	if (err)
699		return err;
700
701out:
702	memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
703	return 0;
704}
705
706static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
707{
708	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
709	int err;
710
711	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
712	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
713	if (err)
714		return err;
715	mlxsw_sp->bridge->ageing_time = ageing_time;
716	return 0;
717}
718
719static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
720					    struct switchdev_trans *trans,
721					    unsigned long ageing_clock_t)
722{
723	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
724	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
725	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
726
727	if (switchdev_trans_ph_prepare(trans)) {
728		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
729		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
730			return -ERANGE;
731		else
732			return 0;
733	}
734
735	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
736}
737
738static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
739					  struct switchdev_trans *trans,
740					  struct net_device *orig_dev,
741					  bool vlan_enabled)
742{
743	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
744	struct mlxsw_sp_bridge_device *bridge_device;
745
746	if (!switchdev_trans_ph_prepare(trans))
747		return 0;
748
749	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
750	if (WARN_ON(!bridge_device))
751		return -EINVAL;
752
753	if (bridge_device->vlan_enabled == vlan_enabled)
754		return 0;
755
756	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
757	return -EINVAL;
758}
759
/* Update the multicast router state of a bridge port. With multicast
 * snooping enabled this also updates MC flooding to the port and its
 * membership in the bridge's MDB entries; otherwise only the flag is
 * recorded for when snooping is enabled later.
 */
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool is_port_mrouter)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	if (!bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   is_port_mrouter);
	if (err)
		return err;

	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
					 is_port_mrouter);
out:
	bridge_port->mrouter = is_port_mrouter;
	return 0;
}
791
792static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
793{
794	const struct mlxsw_sp_bridge_device *bridge_device;
795
796	bridge_device = bridge_port->bridge_device;
797	return bridge_device->multicast_enabled ? bridge_port->mrouter :
798					bridge_port->flags & BR_MCAST_FLOOD;
799}
800
/* Handle a change of the bridge's multicast snooping state (the raw
 * attribute is "mc_disabled", i.e. the inverse of enabled). On an
 * actual change the MDB is synced, and MC flood membership is then
 * refreshed for every port of the bridge.
 */
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct switchdev_trans *trans,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	if (bridge_device->multicast_enabled != !mc_disabled) {
		bridge_device->multicast_enabled = !mc_disabled;
		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
						   bridge_device);
	}

	/* Re-evaluate MC flooding for every port: with snooping enabled
	 * it follows the mrouter state, otherwise BR_MCAST_FLOOD.
	 */
	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
		bool member = mlxsw_sp_mc_flood(bridge_port);

		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   packet_type, member);
		if (err)
			return err;
	}

	/* Already set above when the state actually changed; this final
	 * assignment is a no-op in that case.
	 */
	bridge_device->multicast_enabled = !mc_disabled;

	return 0;
}
842
843static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
844					 u16 mid_idx, bool add)
845{
846	char *smid_pl;
847	int err;
848
849	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
850	if (!smid_pl)
851		return -ENOMEM;
852
853	mlxsw_reg_smid_pack(smid_pl, mid_idx,
854			    mlxsw_sp_router_port(mlxsw_sp), add);
855	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
856	kfree(smid_pl);
857	return err;
858}
859
860static void
861mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
862				   struct mlxsw_sp_bridge_device *bridge_device,
863				   bool add)
864{
865	struct mlxsw_sp_mid *mid;
866
867	list_for_each_entry(mid, &bridge_device->mids_list, list)
868		mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
869}
870
871static int
872mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
873				  struct switchdev_trans *trans,
874				  struct net_device *orig_dev,
875				  bool is_mrouter)
876{
877	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
878	struct mlxsw_sp_bridge_device *bridge_device;
879
880	if (switchdev_trans_ph_prepare(trans))
881		return 0;
882
883	/* It's possible we failed to enslave the port, yet this
884	 * operation is executed due to it being deferred.
885	 */
886	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
887	if (!bridge_device)
888		return 0;
889
890	if (bridge_device->mrouter != is_mrouter)
891		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
892						   is_mrouter);
893	bridge_device->mrouter = is_mrouter;
894	return 0;
895}
896
/* switchdev port attribute handler: dispatch each supported attribute
 * to its dedicated helper; anything else yields -EOPNOTSUPP. SPAN
 * (mirroring) state is re-resolved after the commit phase.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
							  trans,
							  attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (switchdev_trans_ph_commit(trans))
		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
954
/* Join the port VLAN to the FID backing @bridge_port's bridge: resolve
 * the FID via the bridge ops, set up UC/MC/BC flood membership for the
 * port and map the {port, VID} pair to the FID. Unwinds in reverse
 * order on failure. Returns 0 or a negative errno.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	/* Takes a reference on the FID; dropped in the error path below
	 * and in mlxsw_sp_port_vlan_fid_leave().
	 */
	fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	/* Unknown unicast flooding follows the BR_FLOOD port flag */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	/* Multicast flooding depends on snooping / mrouter state */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	/* Broadcast is always flooded to member ports */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}
1005
/* Reverse of mlxsw_sp_port_vlan_fid_join(): unmap the {port, VID} pair
 * from the FID, remove the port from all its flood tables and drop the
 * FID reference.
 */
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	/* Clear the pointer first so the port VLAN no longer appears to
	 * be associated with the FID.
	 */
	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}
1021
1022static u16
1023mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
1024			     u16 vid, bool is_pvid)
1025{
1026	if (is_pvid)
1027		return vid;
1028	else if (mlxsw_sp_port->pvid == vid)
1029		return 0;	/* Dis-allow untagged packets */
1030	else
1031		return mlxsw_sp_port->pvid;
1032}
1033
/* Attach a port VLAN to a bridge port: join the FID, program learning
 * and STP state for the VID, link the port VLAN into the bridge VLAN
 * and take a reference on the bridge port. A no-op when the port VLAN
 * is already attached (i.e. only VLAN flags changed).
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port)
		return 0;

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
					  extack);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* The bridge port already exists here, so this is expected to
	 * only increment its reference count; the return value is not
	 * checked for that reason.
	 */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev, extack);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}
1086
/* Detach a {port, VID} from its bridge port. FDB entries for the FID are
 * flushed when this was the last port in the bridge VLAN, and the port's MDB
 * membership is flushed when this was its last VLAN on the bridge port.
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port, last_vlan;

	/* Only bridged (802.1Q / 802.1D) FIDs are expected here */
	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	/* Sample "last" state before unlinking the port-vlan below */
	last_vlan = list_is_singular(&bridge_port->vlans_list);
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));
	if (last_vlan)
		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	/* Drop the reference taken in mlxsw_sp_port_vlan_bridge_join() */
	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}
1122
/* Add a single VLAN to a bridge port: create the port-vlan if needed,
 * program VLAN membership and PVID in hardware and join the VLAN to the
 * bridge.
 *
 * Returns 0 on success or a negative errno; intermediate steps are unwound
 * on failure.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid,
			      struct netlink_ext_ack *extack)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;
	int err;

	/* The only valid scenario in which a port-vlan already exists, is if
	 * the VLAN flags were changed and the port-vlan is associated with the
	 * correct bridge port
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan &&
	    mlxsw_sp_port_vlan->bridge_port != bridge_port)
		return -EEXIST;

	if (!mlxsw_sp_port_vlan) {
		mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
							       vid);
		if (IS_ERR(mlxsw_sp_port_vlan))
			return PTR_ERR(mlxsw_sp_port_vlan);
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	if (err)
		goto err_port_pvid_set;

	/* A no-op when the port-vlan is already associated with this bridge
	 * port (VLAN flags change)
	 */
	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					     extack);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	/* NOTE(review): this also destroys a port-vlan that pre-existed this
	 * call (flags-change path) — confirm that is intended on error.
	 */
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	return err;
}
1174
1175static int
1176mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1177				const struct net_device *br_dev,
1178				const struct switchdev_obj_port_vlan *vlan)
1179{
1180	u16 pvid;
1181	u16 vid;
1182
1183	pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
1184	if (!pvid)
1185		return 0;
1186
1187	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1188		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1189			if (vid != pvid) {
1190				netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1191				return -EBUSY;
1192			}
1193		} else {
1194			if (vid == pvid) {
1195				netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
1196				return -EBUSY;
1197			}
1198		}
1199	}
1200
1201	return 0;
1202}
1203
/* switchdev handler for adding VLANs to a bridged port.
 *
 * For the bridge device itself only a PVID-change veto is applied (when a
 * router interface uses the PVID); otherwise -EOPNOTSUPP is returned so the
 * entry is not marked as offloaded. For bridge ports the VLANs are
 * programmed in the prepare phase; the commit phase is a no-op.
 */
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans,
				   struct netlink_ext_ack *extack)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	if (netif_is_bridge_master(orig_dev)) {
		int err = 0;

		/* Only check PVID sanity for bridge-level (BRENTRY) VLANs on
		 * VLAN-aware bridges, and only once, during prepare
		 */
		if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
		    br_vlan_enabled(orig_dev) &&
		    switchdev_trans_ph_prepare(trans))
			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
							      orig_dev, vlan);
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	if (switchdev_trans_ph_commit(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	/* Nothing to program for VLAN-unaware bridges */
	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		int err;

		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
						    vid, flag_untagged,
						    flag_pvid, extack);
		if (err)
			return err;
	}

	return 0;
}
1251
1252static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1253{
1254	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1255			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1256}
1257
/* Flush all FDB records learned for the given FID behind this bridge port,
 * using the SFDF register. The flush scope is per-LAG for LAG ports,
 * per-port otherwise.
 */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index)
{
	bool lagged = bridge_port->lagged;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];
	u16 system_port;

	/* For LAG ports the LAG ID is used in place of the system port */
	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
1274
1275static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1276{
1277	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1278			 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
1279}
1280
1281static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1282{
1283	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1284			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1285}
1286
/* Add or remove a unicast FDB record that forwards to an NVE tunnel with the
 * given underlay address. Only IPv4 underlays are supported; IPv6 fails with
 * -EOPNOTSUPP.
 */
static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
					  const char *mac, u16 fid,
					  enum mlxsw_sp_l3proto proto,
					  const union mlxsw_sp_l3addr *addr,
					  bool adding, bool dynamic)
{
	enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
	char *sfd_pl;
	u8 num_rec;
	u32 uip;
	int err;

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		uip = be32_to_cpu(addr->addr4);
		sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
				     mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
				     MLXSW_REG_SFD_REC_ACTION_NOP, uip,
				     sfd_proto);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* A different record count after the write indicates the device did
	 * not process the record
	 */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1331
/* Write (adding) or remove a single unicast FDB record for a physical port
 * via the SFD register, with the given forwarding action and record policy.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EBUSY when the
 * device did not consume the record, or the register-write error.
 */
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     enum mlxsw_reg_sfd_rec_policy policy)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* A different record count after the write indicates the device did
	 * not process the record
	 */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1359
1360static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1361				   const char *mac, u16 fid, bool adding,
1362				   bool dynamic)
1363{
1364	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1365					 MLXSW_REG_SFD_REC_ACTION_NOP,
1366					 mlxsw_sp_sfd_rec_policy(dynamic));
1367}
1368
1369int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1370			bool adding)
1371{
1372	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1373					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1374					 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
1375}
1376
/* Write or remove a single unicast FDB record pointing at a LAG, carrying
 * the VID to tag egress traffic with (lag_vid).
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EBUSY when the
 * device did not consume the record, or the register-write error.
 */
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* A different record count after the write indicates the device did
	 * not process the record
	 */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1405
1406static int
1407mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1408		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1409{
1410	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1411	struct net_device *orig_dev = fdb_info->info.dev;
1412	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1413	struct mlxsw_sp_bridge_device *bridge_device;
1414	struct mlxsw_sp_bridge_port *bridge_port;
1415	u16 fid_index, vid;
1416
1417	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1418	if (!bridge_port)
1419		return -EINVAL;
1420
1421	bridge_device = bridge_port->bridge_device;
1422	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1423							       bridge_device,
1424							       fdb_info->vid);
1425	if (!mlxsw_sp_port_vlan)
1426		return 0;
1427
1428	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1429	vid = mlxsw_sp_port_vlan->vid;
1430
1431	if (!bridge_port->lagged)
1432		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1433					       bridge_port->system_port,
1434					       fdb_info->addr, fid_index,
1435					       adding, false);
1436	else
1437		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1438						   bridge_port->lag_id,
1439						   fdb_info->addr, fid_index,
1440						   vid, adding, false);
1441}
1442
/* Write or remove a multicast FDB record binding {addr, fid} to the
 * multicast group index mid_idx.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EBUSY when the
 * device did not consume the record, or the register-write error.
 */
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid_idx, bool adding)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* A different record count after the write indicates the device did
	 * not process the record
	 */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1469
/* Program a complete SMID (multicast group) entry for mid_idx.
 *
 * The update mask is set for every existing port and for the router port, so
 * the whole membership is rewritten: ports in ports_bitmap become members,
 * the router port according to set_router_port, and all other ports are
 * cleared.
 */
static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
					 long *ports_bitmap,
					 bool set_router_port)
{
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	/* Mark all existing ports as updated by this write */
	mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
		if (mlxsw_sp->ports[i])
			mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}

	mlxsw_reg_smid_port_mask_set(smid_pl,
				     mlxsw_sp_router_port(mlxsw_sp), 1);

	/* Set actual membership for the requested ports */
	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
		mlxsw_reg_smid_port_set(smid_pl, i, 1);

	mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
				set_router_port);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
1500
1501static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
1502				  u16 mid_idx, bool add)
1503{
1504	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1505	char *smid_pl;
1506	int err;
1507
1508	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1509	if (!smid_pl)
1510		return -ENOMEM;
1511
1512	mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
1513	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1514	kfree(smid_pl);
1515	return err;
1516}
1517
1518static struct
1519mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1520				const unsigned char *addr,
1521				u16 fid)
1522{
1523	struct mlxsw_sp_mid *mid;
1524
1525	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1526		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
1527			return mid;
1528	}
1529	return NULL;
1530}
1531
1532static void
1533mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1534				      struct mlxsw_sp_bridge_port *bridge_port,
1535				      unsigned long *ports_bitmap)
1536{
1537	struct mlxsw_sp_port *mlxsw_sp_port;
1538	u64 max_lag_members, i;
1539	int lag_id;
1540
1541	if (!bridge_port->lagged) {
1542		set_bit(bridge_port->system_port, ports_bitmap);
1543	} else {
1544		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1545						     MAX_LAG_MEMBERS);
1546		lag_id = bridge_port->lag_id;
1547		for (i = 0; i < max_lag_members; i++) {
1548			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1549								 lag_id, i);
1550			if (mlxsw_sp_port)
1551				set_bit(mlxsw_sp_port->local_port,
1552					ports_bitmap);
1553		}
1554	}
1555}
1556
1557static void
1558mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
1559				struct mlxsw_sp_bridge_device *bridge_device,
1560				struct mlxsw_sp *mlxsw_sp)
1561{
1562	struct mlxsw_sp_bridge_port *bridge_port;
1563
1564	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1565		if (bridge_port->mrouter) {
1566			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1567							      bridge_port,
1568							      flood_bitmap);
1569		}
1570	}
1571}
1572
1573static bool
1574mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1575			    struct mlxsw_sp_mid *mid,
1576			    struct mlxsw_sp_bridge_device *bridge_device)
1577{
1578	long *flood_bitmap;
1579	int num_of_ports;
1580	int alloc_size;
1581	u16 mid_idx;
1582	int err;
1583
1584	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
1585				      MLXSW_SP_MID_MAX);
1586	if (mid_idx == MLXSW_SP_MID_MAX)
1587		return false;
1588
1589	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1590	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
1591	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
1592	if (!flood_bitmap)
1593		return false;
1594
1595	bitmap_copy(flood_bitmap,  mid->ports_in_mid, num_of_ports);
1596	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
1597
1598	mid->mid = mid_idx;
1599	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
1600					    bridge_device->mrouter);
1601	kfree(flood_bitmap);
1602	if (err)
1603		return false;
1604
1605	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
1606				   true);
1607	if (err)
1608		return false;
1609
1610	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
1611	mid->in_hw = true;
1612	return true;
1613}
1614
1615static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1616					struct mlxsw_sp_mid *mid)
1617{
1618	if (!mid->in_hw)
1619		return 0;
1620
1621	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1622	mid->in_hw = false;
1623	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
1624				    false);
1625}
1626
/* Allocate an MDB entry for {addr, fid} and link it into the bridge device's
 * list. The entry is written to hardware only when multicast is enabled on
 * the bridge; otherwise programming is deferred to
 * mlxsw_sp_bridge_mdb_mc_enable_sync(). Returns NULL on failure.
 */
static struct
mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const unsigned char *addr,
				  u16 fid)
{
	struct mlxsw_sp_mid *mid;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	mid->ports_in_mid = bitmap_zalloc(mlxsw_core_max_ports(mlxsw_sp->core),
					  GFP_KERNEL);
	if (!mid->ports_in_mid)
		goto err_ports_in_mid_alloc;

	ether_addr_copy(mid->addr, addr);
	mid->fid = fid;
	mid->in_hw = false;

	/* Hardware programming is deferred until multicast is enabled */
	if (!bridge_device->multicast_enabled)
		goto out;

	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
		goto err_write_mdb_entry;

out:
	list_add_tail(&mid->list, &bridge_device->mids_list);
	return mid;

err_write_mdb_entry:
	bitmap_free(mid->ports_in_mid);
err_ports_in_mid_alloc:
	kfree(mid);
	return NULL;
}
1664
1665static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
1666					 struct mlxsw_sp_mid *mid)
1667{
1668	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1669	int err = 0;
1670
1671	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1672	if (bitmap_empty(mid->ports_in_mid,
1673			 mlxsw_core_max_ports(mlxsw_sp->core))) {
1674		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1675		list_del(&mid->list);
1676		bitmap_free(mid->ports_in_mid);
1677		kfree(mid);
1678	}
1679	return err;
1680}
1681
/* switchdev handler for MDB entry addition on a bridge port. Work is done in
 * the prepare phase; the commit phase is a no-op. Notifications for ports or
 * VLANs not offloaded by this driver are silently ignored.
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;
	int err = 0;

	if (switchdev_trans_ph_commit(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	/* Find the MDB entry for this group+FID, creating it on first use */
	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
					  fid_index);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);

	/* Entry is written to hardware later, when multicast is enabled */
	if (!bridge_device->multicast_enabled)
		return 0;

	/* mrouter ports are already part of the group's flood bitmap (see
	 * mlxsw_sp_mc_get_mrouters_bitmap()), so no SMID update is needed
	 */
	if (bridge_port->mrouter)
		return 0;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	return 0;

err_out:
	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
	return err;
}
1741
1742static void
1743mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
1744				   struct mlxsw_sp_bridge_device
1745				   *bridge_device)
1746{
1747	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1748	struct mlxsw_sp_mid *mid;
1749	bool mc_enabled;
1750
1751	mc_enabled = bridge_device->multicast_enabled;
1752
1753	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1754		if (mc_enabled)
1755			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
1756						    bridge_device);
1757		else
1758			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1759	}
1760}
1761
1762static void
1763mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1764				 struct mlxsw_sp_bridge_port *bridge_port,
1765				 bool add)
1766{
1767	struct mlxsw_sp_bridge_device *bridge_device;
1768	struct mlxsw_sp_mid *mid;
1769
1770	bridge_device = bridge_port->bridge_device;
1771
1772	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1773		if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
1774			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
1775	}
1776}
1777
/* switchdev object-add entry point: dispatch VLAN and MDB additions to the
 * respective handlers. Unsupported object types fail with -EOPNOTSUPP.
 */
static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	const struct switchdev_obj_port_vlan *vlan;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans,
					      extack);

		if (switchdev_trans_ph_prepare(trans)) {
			/* The event is emitted before the changes are actually
			 * applied to the bridge. Therefore schedule the respin
			 * call for later, so that the respin logic sees the
			 * updated bridge state.
			 */
			mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
		}
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
1814
/* Reverse of mlxsw_sp_bridge_port_vlan_add(): leave the bridge, restore the
 * PVID (cleared when the deleted VLAN is the current PVID), remove the VLAN
 * membership and destroy the port-vlan.
 */
static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
1831
/* switchdev handler for deleting VLANs from a bridged port. Deletion on the
 * bridge device itself is not offloaded, and nothing is done for
 * VLAN-unaware bridges.
 */
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	if (netif_is_bridge_master(orig_dev))
		return -EOPNOTSUPP;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
		mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);

	return 0;
}
1855
1856static int
1857__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1858			struct mlxsw_sp_bridge_port *bridge_port,
1859			struct mlxsw_sp_mid *mid)
1860{
1861	struct net_device *dev = mlxsw_sp_port->dev;
1862	int err;
1863
1864	if (bridge_port->bridge_device->multicast_enabled &&
1865	    !bridge_port->mrouter) {
1866		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1867		if (err)
1868			netdev_err(dev, "Unable to remove port from SMID\n");
1869	}
1870
1871	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1872	if (err)
1873		netdev_err(dev, "Unable to remove MC SFD\n");
1874
1875	return err;
1876}
1877
/* switchdev handler for MDB entry deletion on a bridge port. Notifications
 * for ports or VLANs not offloaded by this driver are silently ignored; an
 * unknown group entry fails with -EINVAL.
 */
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
}
1911
/* Remove the port from every MDB entry of the bridge device it is leaving.
 * For entries it is not a member of, the port is still removed from the SMID
 * when it was added there as a multicast-router port.
 */
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_mid *mid, *tmp;

	bridge_device = bridge_port->bridge_device;

	/* _safe iteration: __mlxsw_sp_port_mdb_del() may free the entry */
	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
						mid);
		} else if (bridge_device->multicast_enabled &&
			   bridge_port->mrouter) {
			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
		}
	}
}
1931
/* switchdev object-delete entry point: dispatch VLAN and MDB deletions and
 * re-evaluate SPAN mirroring sessions afterwards. Unsupported object types
 * fail with -EOPNOTSUPP.
 */
static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
1956
1957static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1958						   u16 lag_id)
1959{
1960	struct mlxsw_sp_port *mlxsw_sp_port;
1961	u64 max_lag_members;
1962	int i;
1963
1964	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1965					     MAX_LAG_MEMBERS);
1966	for (i = 0; i < max_lag_members; i++) {
1967		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1968		if (mlxsw_sp_port)
1969			return mlxsw_sp_port;
1970	}
1971	return NULL;
1972}
1973
/* port_join bridge op for VLAN-aware (802.1Q) bridges. VLAN uppers cannot be
 * enslaved to such a bridge, and the port's default VLAN stops acting as a
 * router interface.
 */
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	if (is_vlan_dev(bridge_port->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return 0;
}
1991
/* port_leave bridge op for VLAN-aware (802.1Q) bridges: restore the default
 * PVID on the port.
 */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}
2000
/* vxlan_join bridge op for VLAN-aware bridges: resolve the VLAN mapped to
 * the VxLAN device, take a reference on the corresponding 802.1Q FID and
 * enable NVE on it with the device's VNI.
 *
 * Returns 0 on success (including the benign no-mapped-VLAN case) or a
 * negative errno.
 */
static int
mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				 const struct net_device *vxlan_dev, u16 vid,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_nve_params params = {
		.type = MLXSW_SP_NVE_TYPE_VXLAN,
		.vni = vxlan->cfg.vni,
		.dev = vxlan_dev,
	};
	struct mlxsw_sp_fid *fid;
	int err;

	/* If the VLAN is 0, we need to find the VLAN that is configured as
	 * PVID and egress untagged on the bridge port of the VxLAN device.
	 * It is possible no such VLAN exists
	 */
	if (!vid) {
		err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
		if (err || !vid)
			return err;
	}

	fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
	if (IS_ERR(fid)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1Q FID");
		return PTR_ERR(fid);
	}

	/* A FID can only be bound to a single VNI at a time */
	if (mlxsw_sp_fid_vni_is_set(fid)) {
		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
		err = -EINVAL;
		goto err_vni_exists;
	}

	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
	if (err)
		goto err_nve_fid_enable;

	return 0;

err_nve_fid_enable:
err_vni_exists:
	mlxsw_sp_fid_put(fid);
	return err;
}
2049
2050static struct net_device *
2051mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
2052{
2053	struct net_device *dev;
2054	struct list_head *iter;
2055
2056	netdev_for_each_lower_dev(br_dev, dev, iter) {
2057		u16 pvid;
2058		int err;
2059
2060		if (!netif_is_vxlan(dev))
2061			continue;
2062
2063		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
2064		if (err || pvid != vid)
2065			continue;
2066
2067		return dev;
2068	}
2069
2070	return NULL;
2071}
2072
/* fid_get bridge op for 802.1Q bridges: resolve the (reference-counted) FID
 * corresponding to a VID. extack is accepted for interface symmetry but is
 * unused here.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid, struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
}
2081
/* fid_lookup bridge op for 802.1Q bridges: find an existing FID by VID
 * without creating one.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
}
2090
/* For a VLAN-aware bridge, the VID reported to the bridge layer is the
 * VLAN the 802.1Q FID is mapped to.
 */
static u16
mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return mlxsw_sp_fid_8021q_vid(fid);
}
2097
/* Bridge operations for VLAN-aware (802.1Q) bridges. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2106
2107static bool
2108mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2109			   const struct net_device *br_dev)
2110{
2111	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2112
2113	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2114			    list) {
2115		if (mlxsw_sp_port_vlan->bridge_port &&
2116		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
2117		    br_dev)
2118			return true;
2119	}
2120
2121	return false;
2122}
2123
/* Enslave a {Port, VID} to a VLAN-unaware (802.1D) bridge port. */
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;

	/* The relevant VID is the VLAN upper's VID, or the default VID when
	 * the port itself (and not a VLAN upper) is the bridge port.
	 */
	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	/* Reject enslaving a second VLAN upper of a port that is already a
	 * member of this bridge.
	 */
	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					      extack);
}
2151
/* Mirror of mlxsw_sp_bridge_8021d_port_join(): release the {Port, VID}
 * that was enslaved to the VLAN-unaware bridge.
 */
static void
mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;

	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	/* Nothing to do if the {Port, VID} was never bridged. */
	if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
}
2168
2169static int
2170mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2171				 const struct net_device *vxlan_dev, u16 vid,
2172				 struct netlink_ext_ack *extack)
2173{
2174	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2175	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2176	struct mlxsw_sp_nve_params params = {
2177		.type = MLXSW_SP_NVE_TYPE_VXLAN,
2178		.vni = vxlan->cfg.vni,
2179		.dev = vxlan_dev,
2180	};
2181	struct mlxsw_sp_fid *fid;
2182	int err;
2183
2184	fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2185	if (IS_ERR(fid)) {
2186		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1D FID");
2187		return -EINVAL;
2188	}
2189
2190	if (mlxsw_sp_fid_vni_is_set(fid)) {
2191		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2192		err = -EINVAL;
2193		goto err_vni_exists;
2194	}
2195
2196	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2197	if (err)
2198		goto err_nve_fid_enable;
2199
2200	return 0;
2201
2202err_nve_fid_enable:
2203err_vni_exists:
2204	mlxsw_sp_fid_put(fid);
2205	return err;
2206}
2207
2208static struct mlxsw_sp_fid *
2209mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2210			      u16 vid, struct netlink_ext_ack *extack)
2211{
2212	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2213
2214	return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2215}
2216
2217static struct mlxsw_sp_fid *
2218mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2219				 u16 vid)
2220{
2221	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2222
2223	/* The only valid VLAN for a VLAN-unaware bridge is 0 */
2224	if (vid)
2225		return NULL;
2226
2227	return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2228}
2229
/* A VLAN-unaware bridge always reports VID 0 to the bridge layer. */
static u16
mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return 0;
}
2236
/* Bridge operations for VLAN-unaware (802.1D) bridges. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021d_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021d_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021d_fid_vid,
};
2245
2246int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2247			      struct net_device *brport_dev,
2248			      struct net_device *br_dev,
2249			      struct netlink_ext_ack *extack)
2250{
2251	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2252	struct mlxsw_sp_bridge_device *bridge_device;
2253	struct mlxsw_sp_bridge_port *bridge_port;
2254	int err;
2255
2256	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev,
2257					       extack);
2258	if (IS_ERR(bridge_port))
2259		return PTR_ERR(bridge_port);
2260	bridge_device = bridge_port->bridge_device;
2261
2262	err = bridge_device->ops->port_join(bridge_device, bridge_port,
2263					    mlxsw_sp_port, extack);
2264	if (err)
2265		goto err_port_join;
2266
2267	return 0;
2268
2269err_port_join:
2270	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2271	return err;
2272}
2273
/* Release a port's membership in a bridge and drop the bridge port
 * reference taken when it joined.
 */
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				struct net_device *brport_dev,
				struct net_device *br_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;

	/* Nothing to do when the bridge or the bridge port were never
	 * offloaded to begin with.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;
	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
	if (!bridge_port)
		return;

	bridge_device->ops->port_leave(bridge_device, bridge_port,
				       mlxsw_sp_port);
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
2293
/* Offload a VxLAN device joining an already-offloaded bridge by
 * dispatching to the bridge-type (802.1Q / 802.1D) specific operation.
 */
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *br_dev,
			       const struct net_device *vxlan_dev, u16 vid,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	/* The bridge device is expected to already be known to the driver. */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
					      extack);
}
2308
/* Stop offloading a VxLAN device: disable NVE on the FID mapped to its
 * VNI, if any.
 */
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *vxlan_dev)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_fid *fid;

	/* If the VxLAN device is down, then the FID does not have a VNI */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
	if (!fid)
		return;

	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
	/* Drop both the reference we just took during lookup and the reference
	 * the VXLAN device took.
	 */
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_fid_put(fid);
}
2327
2328static void
2329mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2330				      enum mlxsw_sp_l3proto *proto,
2331				      union mlxsw_sp_l3addr *addr)
2332{
2333	if (vxlan_addr->sa.sa_family == AF_INET) {
2334		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2335		*proto = MLXSW_SP_L3_PROTO_IPV4;
2336	} else {
2337		addr->addr6 = vxlan_addr->sin6.sin6_addr;
2338		*proto = MLXSW_SP_L3_PROTO_IPV6;
2339	}
2340}
2341
2342static void
2343mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
2344				      const union mlxsw_sp_l3addr *addr,
2345				      union vxlan_addr *vxlan_addr)
2346{
2347	switch (proto) {
2348	case MLXSW_SP_L3_PROTO_IPV4:
2349		vxlan_addr->sa.sa_family = AF_INET;
2350		vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
2351		break;
2352	case MLXSW_SP_L3_PROTO_IPV6:
2353		vxlan_addr->sa.sa_family = AF_INET6;
2354		vxlan_addr->sin6.sin6_addr = addr->addr6;
2355		break;
2356	}
2357}
2358
/* Notify the VxLAN driver about an FDB entry learned or aged-out by the
 * device on a tunnel port.
 */
static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
					      const char *mac,
					      enum mlxsw_sp_l3proto proto,
					      union mlxsw_sp_l3addr *addr,
					      __be32 vni, bool adding)
{
	struct switchdev_notifier_vxlan_fdb_info info;
	struct vxlan_dev *vxlan = netdev_priv(dev);
	enum switchdev_notifier_type type;

	type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
			SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_switchdev_vxlan_addr_convert(proto, addr, &info.remote_ip);
	/* The remote port is taken from the VxLAN device's configuration;
	 * entries with a local interface are not offloaded, hence 0.
	 */
	info.remote_port = vxlan->cfg.dst_port;
	info.remote_vni = vni;
	info.remote_ifindex = 0;
	ether_addr_copy(info.eth_addr, mac);
	info.vni = vni;
	info.offloaded = adding;
	call_switchdev_notifiers(type, dev, &info.info, NULL);
}
2380
2381static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
2382					    const char *mac,
2383					    enum mlxsw_sp_l3proto proto,
2384					    union mlxsw_sp_l3addr *addr,
2385					    __be32 vni,
2386					    bool adding)
2387{
2388	if (netif_is_vxlan(dev))
2389		mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
2390						  adding);
2391}
2392
2393static void
2394mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2395			    const char *mac, u16 vid,
2396			    struct net_device *dev, bool offloaded)
2397{
2398	struct switchdev_notifier_fdb_info info;
2399
2400	info.addr = mac;
2401	info.vid = vid;
2402	info.offloaded = offloaded;
2403	call_switchdev_notifiers(type, dev, &info.info, NULL);
2404}
2405
/* Process a single learned / aged-out MAC record from the SFN register:
 * write the entry back to the device and notify the bridge. Records that
 * cannot be processed are removed from the device instead, without
 * notification.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);

	/* Guard against a malformed record indexing outside ports[]. */
	if (WARN_ON_ONCE(local_port >= max_ports))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
		goto just_remove;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* A VLAN-unaware bridge reports VID 0 to the bridge layer. */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	/* Remove the entry from the device and skip the notification, so
	 * the device does not keep re-sending a record we cannot handle.
	 */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
2470
/* LAG variant of mlxsw_sp_fdb_notify_mac_process(): the SFN record
 * carries a LAG ID instead of a local port.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
		goto just_remove;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* A VLAN-unaware bridge reports VID 0 to the bridge layer. */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
		  mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	/* Remove the entry from the device and skip the notification, so
	 * the device does not keep re-sending a record we cannot handle.
	 */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
2534
/* Resolve the NVE device, bridge VID and VNI associated with an FDB
 * entry learned / aged-out on a tunnel port, validating along the way
 * that the entry should be processed at all.
 */
static int
__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
					    const struct mlxsw_sp_fid *fid,
					    bool adding,
					    struct net_device **nve_dev,
					    u16 *p_vid, __be32 *p_vni)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *br_dev, *dev;
	int nve_ifindex;
	int err;

	err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
	if (err)
		return err;

	err = mlxsw_sp_fid_vni(fid, p_vni);
	if (err)
		return err;

	dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
	if (!dev)
		return -EINVAL;
	*nve_dev = dev;

	if (!netif_running(dev))
		return -EINVAL;

	/* Only process learned entries when learning is enabled on both the
	 * bridge port and the VxLAN device.
	 */
	if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
		return -EINVAL;

	if (adding && netif_is_vxlan(dev)) {
		struct vxlan_dev *vxlan = netdev_priv(dev);

		if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
			return -EINVAL;
	}

	/* The NVE device must be enslaved to an offloaded bridge. */
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return -EINVAL;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return -EINVAL;

	*p_vid = bridge_device->ops->fid_vid(bridge_device, fid);

	return 0;
}
2585
2586static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
2587						      char *sfn_pl,
2588						      int rec_index,
2589						      bool adding)
2590{
2591	enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
2592	enum switchdev_notifier_type type;
2593	struct net_device *nve_dev;
2594	union mlxsw_sp_l3addr addr;
2595	struct mlxsw_sp_fid *fid;
2596	char mac[ETH_ALEN];
2597	u16 fid_index, vid;
2598	__be32 vni;
2599	u32 uip;
2600	int err;
2601
2602	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
2603				       &uip, &sfn_proto);
2604
2605	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
2606	if (!fid)
2607		goto err_fid_lookup;
2608
2609	err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
2610					      (enum mlxsw_sp_l3proto) sfn_proto,
2611					      &addr);
2612	if (err)
2613		goto err_ip_resolve;
2614
2615	err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
2616							  &nve_dev, &vid, &vni);
2617	if (err)
2618		goto err_fdb_process;
2619
2620	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2621					     (enum mlxsw_sp_l3proto) sfn_proto,
2622					     &addr, adding, true);
2623	if (err)
2624		goto err_fdb_op;
2625
2626	mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
2627					(enum mlxsw_sp_l3proto) sfn_proto,
2628					&addr, vni, adding);
2629
2630	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
2631			SWITCHDEV_FDB_DEL_TO_BRIDGE;
2632	mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
2633
2634	mlxsw_sp_fid_put(fid);
2635
2636	return;
2637
2638err_fdb_op:
2639err_fdb_process:
2640err_ip_resolve:
2641	mlxsw_sp_fid_put(fid);
2642err_fid_lookup:
2643	/* Remove an FDB entry in case we cannot process it. Otherwise the
2644	 * device will keep sending the same notification over and over again.
2645	 */
2646	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2647				       (enum mlxsw_sp_l3proto) sfn_proto, &addr,
2648				       false, true);
2649}
2650
/* Dispatch one SFN record to its type-specific handler; unknown record
 * types are silently ignored.
 */
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
							  rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
							  rec_index, false);
		break;
	}
}
2681
2682static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
2683					      bool no_delay)
2684{
2685	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2686	unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;
2687
2688	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
2689			       msecs_to_jiffies(interval));
2690}
2691
/* Maximum number of SFN queries issued in one work invocation before the
 * worker yields and re-schedules itself.
 */
#define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10

/* Delayed work that polls the device for FDB (learning / ageing)
 * notifications and processes them under RTNL.
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	int queries;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
	mlxsw_sp = bridge->mlxsw_sp;

	rtnl_lock();
	queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
	while (queries > 0) {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			goto out;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
		/* A partially-filled page means the device has no more
		 * pending notifications; otherwise keep querying until the
		 * budget runs out.
		 */
		if (num_rec != MLXSW_REG_SFN_REC_MAX_COUNT)
			goto out;
		queries--;
	}

out:
	rtnl_unlock();
	kfree(sfn_pl);
	/* Re-schedule immediately if the query budget was exhausted. */
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);
}
2733
/* Deferred work item for a switchdev FDB event. The event-specific info
 * is copied out of the atomic notifier context so it can be processed in
 * process context under RTNL.
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	union {
		struct switchdev_notifier_fdb_info fdb_info;
		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	};
	struct net_device *dev;	/* reference held; put by the work handler */
	unsigned long event;
};
2743
/* Handle a bridge FDB add/del event targeting a VxLAN device mapped to
 * the given FID/VNI: mirror the entry to the device's tunnel FDB and
 * report the offload state back to both the VxLAN driver and the bridge.
 */
static void
mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_switchdev_event_work *
					  switchdev_work,
					  struct mlxsw_sp_fid *fid, __be32 vni)
{
	struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct net_device *dev = switchdev_work->dev;
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	int err;

	fdb_info = &switchdev_work->fdb_info;
	/* Only entries that also exist in the VxLAN driver's own FDB are
	 * relevant.
	 */
	err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
	if (err)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
					      &proto, &addr);

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, true, false);
		if (err)
			return;
		vxlan_fdb_info.offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info, NULL);
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    vxlan_fdb_info.eth_addr,
					    fdb_info->vid, dev, true);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		/* Removal is best-effort; the offload state is cleared
		 * regardless of the result.
		 */
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, false,
						     false);
		vxlan_fdb_info.offloaded = false;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info, NULL);
		break;
	}
}
2792
/* Handle a bridge FDB add/del event whose target device is an NVE
 * (VxLAN) device: validate the topology and hand over to the
 * VxLAN-specific handler when the bridge's FID has a VNI.
 */
static void
mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
					switchdev_work)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_fid *fid;
	__be32 vni;
	int err;

	if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
		return;

	/* Only user-added entries are offloaded on addition. */
	if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    !switchdev_work->fdb_info.added_by_user)
		return;

	if (!netif_running(dev))
		return;
	/* The VxLAN device must be enslaved to an offloaded bridge. */
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return;
	if (!netif_is_bridge_master(br_dev))
		return;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return;
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = bridge_device->ops->fid_lookup(bridge_device,
					     switchdev_work->fdb_info.vid);
	if (!fid)
		return;

	err = mlxsw_sp_fid_vni(fid, &vni);
	if (err)
		goto out;

	mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
						  vni);

out:
	/* Drop the reference taken by the FID lookup. */
	mlxsw_sp_fid_put(fid);
}
2842
/* Process-context handler for bridge FDB events queued by the switchdev
 * notifier. Runs under RTNL and releases the resources (MAC copy, device
 * reference) taken when the work was queued.
 */
static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	/* FDB events on VxLAN devices take a separate path. */
	if (netif_is_vxlan(dev)) {
		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
		goto out;
	}

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		/* Only user-added entries are programmed to the device. */
		if (!fdb_info->added_by_user)
			break;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev, true);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
		break;
	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* These events are only used to potentially update an existing
		 * SPAN mirror.
		 */
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
2894
/* Offload an FDB entry added to a VxLAN device. An all-zeros MAC denotes
 * a flood entry and is programmed as a flood underlay IP; any other MAC
 * is programmed as a unicast tunnel entry, provided the bridge FDB also
 * points that MAC at the VxLAN device.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	u8 all_zeros_mac[ETH_ALEN] = { 0 };
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct net_device *br_dev;
	struct mlxsw_sp_fid *fid;
	u16 vid;
	int err;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
	br_dev = netdev_master_upper_dev_get(dev);

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
		if (err) {
			mlxsw_sp_fid_put(fid);
			return;
		}
		vxlan_fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info->info, NULL);
		mlxsw_sp_fid_put(fid);
		return;
	}

	/* The device has a single FDB table, whereas Linux has two - one
	 * in the bridge driver and another in the VxLAN driver. We only
	 * program an entry to the device if the MAC points to the VxLAN
	 * device in the bridge's FDB table
	 */
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
		goto err_br_fdb_find;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
					     mlxsw_sp_fid_index(fid), proto,
					     &addr, true, false);
	if (err)
		goto err_fdb_tunnel_uc_op;
	vxlan_fdb_info->offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
				 &vxlan_fdb_info->info, NULL);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, true);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_tunnel_uc_op:
err_br_fdb_find:
	/* Drop the reference taken by the FID lookup. */
	mlxsw_sp_fid_put(fid);
}
2966
/* Un-offload an FDB entry deleted from a VxLAN device; an all-zeros MAC
 * denotes a flood entry (underlay flood IP).
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
	u8 all_zeros_mac[ETH_ALEN] = { 0 };
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct mlxsw_sp_fid *fid;
	u16 vid;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
		mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
		mlxsw_sp_fid_put(fid);
		return;
	}

	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
				       mlxsw_sp_fid_index(fid), proto, &addr,
				       false, false);
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, false);

	/* Drop the reference taken by the FID lookup. */
	mlxsw_sp_fid_put(fid);
}
3010
/* Process-context handler for VxLAN FDB events queued by the switchdev
 * notifier. Re-validates the topology under RTNL (it may have changed
 * since the event was queued) before dispatching, then releases the work
 * item and the device reference.
 */
static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *br_dev;

	rtnl_lock();

	if (!netif_running(dev))
		goto out;
	/* The VxLAN device must be enslaved to an offloaded bridge. */
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		goto out;
	if (!netif_is_bridge_master(br_dev))
		goto out;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
		break;
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
		break;
	}

out:
	rtnl_unlock();
	kfree(switchdev_work);
	dev_put(dev);
}
3046
/* Validate that a VxLAN FDB entry only uses configurations the device
 * can offload (default remote port and VNI, no local interface, unicast
 * MAC and destination IP) and copy it into the work item.
 *
 * Returns 0 on success or -EOPNOTSUPP (with extack set) otherwise.
 */
static int
mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
				      switchdev_work,
				      struct switchdev_notifier_info *info)
{
	struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct vxlan_config *cfg = &vxlan->cfg;
	struct netlink_ext_ack *extack;

	extack = switchdev_notifier_info_to_extack(info);
	vxlan_fdb_info = container_of(info,
				      struct switchdev_notifier_vxlan_fdb_info,
				      info);

	if (vxlan_fdb_info->remote_port != cfg->dst_port) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_fdb_info->remote_vni != cfg->vni ||
	    vxlan_fdb_info->vni != cfg->vni) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_fdb_info->remote_ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
		return -EOPNOTSUPP;
	}
	if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
		return -EOPNOTSUPP;
	}

	/* Copy by value: the notifier's info is only valid in atomic
	 * context.
	 */
	switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;

	return 0;
}
3088
/* Called under rcu_read_lock() */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct net_device *br_dev;
	int err;

	/* Attribute sets are handled synchronously, no deferral needed */
	if (event == SWITCHDEV_PORT_ATTR_SET) {
		err = switchdev_handle_port_attr_set(dev, ptr,
						     mlxsw_sp_port_dev_check,
						     mlxsw_sp_port_attr_set);
		return notifier_from_errno(err);
	}

	/* Tunnel devices are not our uppers, so check their master instead */
	br_dev = netdev_master_upper_dev_get_rcu(dev);
	if (!br_dev)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(br_dev))
		return NOTIFY_DONE;
	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
		return NOTIFY_DONE;

	/* Atomic (RCU) context - defer the actual FDB work to a work item */
	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* fdb_info->addr points at notifier-owned memory that is
		 * only valid during this call - deep-copy the MAC address.
		 */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the device. This can be either
		 * upper device containing mlxsw_sp_port or just a
		 * mlxsw_sp_port. The work item releases it with dev_put().
		 */
		dev_hold(dev);
		break;
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
							    info);
		if (err)
			goto err_vxlan_work_prepare;
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	mlxsw_core_schedule_work(&switchdev_work->work);

	return NOTIFY_DONE;

err_vxlan_work_prepare:
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
3170
/* Atomic switchdev notifier; FDB events are deferred to process context
 * by mlxsw_sp_switchdev_event().
 */
struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};
3174
/* Handle the addition of VLAN @vid on 'vxlan_dev', which is enslaved to
 * a VLAN-aware bridge. The VLAN that is both PVID and egress untagged on
 * the VxLAN device determines which FID the device's VNI is mapped to,
 * so the five flag/FID transitions below must be told apart. Returns 0
 * on success or a negative errno (reported via @extack).
 */
static int
mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev, u16 vid,
				  bool flag_untagged, bool flag_pvid,
				  struct netlink_ext_ack *extack)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	__be32 vni = vxlan->cfg.vni;
	struct mlxsw_sp_fid *fid;
	u16 old_vid;
	int err;

	/* We cannot have the same VLAN as PVID and egress untagged on multiple
	 * VxLAN devices. Note that we get this notification before the VLAN is
	 * actually added to the bridge's database, so it is not possible for
	 * the lookup function to return 'vxlan_dev'
	 */
	if (flag_untagged && flag_pvid &&
	    mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
		NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
		return -EINVAL;
	}

	/* A stopped device has no NVE configuration to update */
	if (!netif_running(vxlan_dev))
		return 0;

	/* First case: FID is not associated with this VNI, but the new VLAN
	 * is both PVID and egress untagged. Need to enable NVE on the FID, if
	 * it exists
	 */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
	if (!fid) {
		if (!flag_untagged || !flag_pvid)
			return 0;
		return mlxsw_sp_bridge_8021q_vxlan_join(bridge_device,
							vxlan_dev, vid, extack);
	}

	/* Second case: FID is associated with the VNI and the VLAN associated
	 * with the FID is the same as the notified VLAN. This means the flags
	 * (PVID / egress untagged) were toggled and that NVE should be
	 * disabled on the FID
	 */
	old_vid = mlxsw_sp_fid_8021q_vid(fid);
	if (vid == old_vid) {
		if (WARN_ON(flag_untagged && flag_pvid)) {
			mlxsw_sp_fid_put(fid);
			return -EINVAL;
		}
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Third case: A new VLAN was configured on the VxLAN device, but this
	 * VLAN is not PVID, so there is nothing to do.
	 */
	if (!flag_pvid) {
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Fourth case: The new VLAN is PVID, which means the VLAN currently
	 * mapped to the VNI should be unmapped
	 */
	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
	mlxsw_sp_fid_put(fid);

	/* Fifth case: The new VLAN is also egress untagged, which means the
	 * VLAN needs to be mapped to the VNI
	 */
	if (!flag_untagged)
		return 0;

	err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
					       extack);
	if (err)
		goto err_vxlan_join;

	return 0;

err_vxlan_join:
	/* Roll back to the previous VLAN-to-VNI mapping on failure */
	mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, old_vid,
					 NULL);
	return err;
}
3262
3263static void
3264mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
3265				  struct mlxsw_sp_bridge_device *bridge_device,
3266				  const struct net_device *vxlan_dev, u16 vid)
3267{
3268	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3269	__be32 vni = vxlan->cfg.vni;
3270	struct mlxsw_sp_fid *fid;
3271
3272	if (!netif_running(vxlan_dev))
3273		return;
3274
3275	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3276	if (!fid)
3277		return;
3278
3279	/* A different VLAN than the one mapped to the VNI is deleted */
3280	if (mlxsw_sp_fid_8021q_vid(fid) != vid)
3281		goto out;
3282
3283	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3284
3285out:
3286	mlxsw_sp_fid_put(fid);
3287}
3288
/* Offload a switchdev VLAN object addition on a VxLAN device. Returns 0
 * without marking the event as handled when the bridge is not backed by
 * mlxsw, 0 when there is nothing to offload, or a negative errno that is
 * reported back through the notification's extack.
 */
static int
mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
				   struct switchdev_notifier_port_obj_info *
				   port_obj_info)
{
	struct switchdev_obj_port_vlan *vlan =
		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct switchdev_trans *trans = port_obj_info->trans;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct netlink_ext_ack *extack;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *br_dev;
	u16 vid;

	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
	/* VxLAN devices are not uppers of mlxsw ports - reach the mlxsw
	 * instance through the bridge master.
	 */
	br_dev = netdev_master_upper_dev_get(vxlan_dev);
	if (!br_dev)
		return 0;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;

	/* The event concerns one of our bridges - claim it */
	port_obj_info->handled = true;

	/* The VLANs are offloaded during the prepare phase; nothing to do
	 * on commit.
	 */
	if (switchdev_trans_ph_commit(trans))
		return 0;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return -EINVAL;

	/* VLANs on a VxLAN device only matter for VLAN-aware bridges */
	if (!bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		int err;

		err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
							vxlan_dev, vid,
							flag_untagged,
							flag_pvid, extack);
		if (err)
			return err;
	}

	return 0;
}
3339
3340static void
3341mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
3342				   struct switchdev_notifier_port_obj_info *
3343				   port_obj_info)
3344{
3345	struct switchdev_obj_port_vlan *vlan =
3346		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3347	struct mlxsw_sp_bridge_device *bridge_device;
3348	struct mlxsw_sp *mlxsw_sp;
3349	struct net_device *br_dev;
3350	u16 vid;
3351
3352	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3353	if (!br_dev)
3354		return;
3355
3356	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3357	if (!mlxsw_sp)
3358		return;
3359
3360	port_obj_info->handled = true;
3361
3362	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3363	if (!bridge_device)
3364		return;
3365
3366	if (!bridge_device->vlan_enabled)
3367		return;
3368
3369	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
3370		mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device,
3371						  vxlan_dev, vid);
3372}
3373
3374static int
3375mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
3376					struct switchdev_notifier_port_obj_info *
3377					port_obj_info)
3378{
3379	int err = 0;
3380
3381	switch (port_obj_info->obj->id) {
3382	case SWITCHDEV_OBJ_ID_PORT_VLAN:
3383		err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
3384							 port_obj_info);
3385		break;
3386	default:
3387		break;
3388	}
3389
3390	return err;
3391}
3392
3393static void
3394mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
3395					struct switchdev_notifier_port_obj_info *
3396					port_obj_info)
3397{
3398	switch (port_obj_info->obj->id) {
3399	case SWITCHDEV_OBJ_ID_PORT_VLAN:
3400		mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
3401		break;
3402	default:
3403		break;
3404	}
3405}
3406
/* Process-context switchdev notifier. Dispatches object add/del events
 * to the VxLAN-specific handlers or the generic per-port handlers, and
 * attribute-set events to the per-port handler.
 */
static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err = 0;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		if (netif_is_vxlan(dev))
			err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
		else
			err = switchdev_handle_port_obj_add(dev, ptr,
							mlxsw_sp_port_dev_check,
							mlxsw_sp_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		if (netif_is_vxlan(dev))
			mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
		else
			err = switchdev_handle_port_obj_del(dev, ptr,
							mlxsw_sp_port_dev_check,
							mlxsw_sp_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     mlxsw_sp_port_dev_check,
						     mlxsw_sp_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}
3439
/* Blocking (process context) switchdev notifier for object and
 * attribute events.
 */
static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
	.notifier_call = mlxsw_sp_switchdev_blocking_event,
};
3443
/* Return the STP state currently recorded for @bridge_port */
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
	return bridge_port->stp_state;
}
3449
/* Set the default ageing time, register both switchdev notifiers and
 * start the periodic FDB notification work. Returns 0 on success or a
 * negative errno; on failure everything registered so far is
 * unregistered again.
 */
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
	struct notifier_block *nb;
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}

	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
		return err;
	}

	nb = &mlxsw_sp_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
		goto err_register_switchdev_blocking_notifier;
	}

	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, false);
	return 0;

err_register_switchdev_blocking_notifier:
	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
	return err;
}
3484
3485static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
3486{
3487	struct notifier_block *nb;
3488
3489	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
3490
3491	nb = &mlxsw_sp_switchdev_blocking_notifier;
3492	unregister_switchdev_blocking_notifier(nb);
3493
3494	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3495}
3496
3497int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3498{
3499	struct mlxsw_sp_bridge *bridge;
3500
3501	bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
3502	if (!bridge)
3503		return -ENOMEM;
3504	mlxsw_sp->bridge = bridge;
3505	bridge->mlxsw_sp = mlxsw_sp;
3506
3507	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
3508
3509	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
3510	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
3511
3512	return mlxsw_sp_fdb_init(mlxsw_sp);
3513}
3514
/* Tear down FDB processing and free the bridge state allocated by
 * mlxsw_sp_switchdev_init(). Warns if any bridge device is still being
 * tracked at this point.
 */
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
	kfree(mlxsw_sp->bridge);
}
3521
3522