// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/netpoll.h>
#include <linux/ptp_classify.h>

#include "dsa_priv.h"

/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}

void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}
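/* A read of an address outside ds->phys_mii_mask returns 0xffff, which is
 * what an empty MDIO address looks like, and a write to it is silently
 * accepted; setting phy_mask to the complement of phys_mii_mask also keeps
 * the MDIO core from probing those addresses in the first place.
 */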


/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	return dsa_slave_to_master(dev)->ifindex;
}

static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (!(master->flags & IFF_UP))
		return -ENETDOWN;

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto out;
	}

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(master, 1);
		if (err < 0)
			goto del_unicast;
	}
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(master, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	err = dsa_port_enable_rt(dp, dev->phydev);
	if (err)
		goto clear_promisc;

	return 0;

clear_promisc:
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
out:
	return err;
}
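/* The open path above unwinds its failures in reverse order: each resource
 * acquired on the master (secondary unicast address, allmulti and
 * promiscuity refcounts) is released by the corresponding label. The
 * dev_uc_add() is skipped when the slave shares the master's MAC address,
 * since the master already receives frames for its own address.
 */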

static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dsa_port_disable_rt(dp);

	dev_mc_unsync(master, dev);
	dev_uc_unsync(master, dev);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	return 0;
}

static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);

	if (dev->flags & IFF_UP) {
		if (change & IFF_ALLMULTI)
			dev_set_allmulti(master,
					 dev->flags & IFF_ALLMULTI ? 1 : -1);
		if (change & IFF_PROMISC)
			dev_set_promiscuity(master,
					    dev->flags & IFF_PROMISC ? 1 : -1);
	}
}

static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);

	dev_mc_sync(master, dev);
	dev_uc_sync(master, dev);
}

static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			return err;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}
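/* When the interface is up, the new address is added to the master's
 * unicast filter before the old one is deleted, so there is no window in
 * which frames for either address are dropped. When the interface is down,
 * no filter is programmed yet and the address is simply copied; the open
 * path will install it.
 */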

struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}

static int dsa_slave_port_attr_set(struct net_device *dev,
				   const struct switchdev_attr *attr,
				   struct switchdev_trans *trans)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						trans);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, trans);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/* Must be called under rcu_read_lock() */
static int
dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
		u16 vid;

		if (!is_vlan_dev(upper_dev))
			continue;

		vid = vlan_dev_vlan_id(upper_dev);
		if (vid >= vlan->vid_begin && vid <= vlan->vid_end)
			return -EBUSY;
	}

	return 0;
}
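/* For example (hypothetical setup): with an 802.1Q upper such as swp0.100
 * on top of the slave, an attempt to add bridge VLAN 100 to the same port
 * finds VID 100 within [vid_begin, vid_end] and is rejected with -EBUSY,
 * because the VID cannot be both bridged and terminated by a VLAN device.
 */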

static int dsa_slave_vlan_add(struct net_device *dev,
			      const struct switchdev_obj *obj,
			      struct switchdev_trans *trans)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan;
	int vid, err;

	if (obj->orig_dev != dev)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
	 * the same VID.
	 */
	if (trans->ph_prepare && br_vlan_enabled(dp->bridge_dev)) {
		rcu_read_lock();
		err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan);
		rcu_read_unlock();
		if (err)
			return err;
	}

	err = dsa_port_vlan_add(dp, &vlan, trans);
	if (err)
		return err;

	/* We need the dedicated CPU port to be a member of the VLAN as well.
	 * Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	err = dsa_port_vlan_add(dp->cpu_dp, &vlan, trans);
	if (err)
		return err;

	for (vid = vlan.vid_begin; vid <= vlan.vid_end; vid++) {
		err = vlan_vid_add(master, htons(ETH_P_8021Q), vid);
		if (err)
			return err;
	}

	return 0;
}
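/* Note that a bridge VLAN is programmed in three places: on the user port
 * itself, on the dedicated CPU port (minus the PVID flag), and in the
 * master interface's VLAN filter via vlan_vid_add(), so that tagged frames
 * flowing to and from the CPU are not dropped by any hop in the datapath.
 */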

static int dsa_slave_port_obj_add(struct net_device *dev,
				  const struct switchdev_obj *obj,
				  struct switchdev_trans *trans,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	/* For the prepare phase, ensure the full set of changes is feasible in
	 * one go in order to signal a failure properly. If an operation is not
	 * supported, return -EOPNOTSUPP.
	 */

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (obj->orig_dev != dev)
			return -EOPNOTSUPP;
		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB add,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj),
				       trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_slave_vlan_add(dev, obj, trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_vlan_del(struct net_device *dev,
			      const struct switchdev_obj *obj)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;
	int vid, err;

	if (obj->orig_dev != dev)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Do not deprogram the CPU port as it may be shared with other user
	 * ports which can be members of this VLAN as well.
	 */
	err = dsa_port_vlan_del(dp, vlan);
	if (err)
		return err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
		vlan_vid_del(master, htons(ETH_P_8021Q), vid);

	return 0;
}

static int dsa_slave_port_obj_del(struct net_device *dev,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (obj->orig_dev != dev)
			return -EOPNOTSUPP;
		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB del,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_slave_vlan_del(dev, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_get_port_parent_id(struct net_device *dev,
					struct netdev_phys_item_id *ppid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;

	/* For non-legacy ports, devlink is used and it takes
	 * care of the name generation. This ndo implementation
	 * should be removed with legacy support.
	 */
	if (dp->ds->devlink)
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(dst->index);
	memcpy(&ppid->id, &dst->index, ppid->id_len);

	return 0;
}

static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	return netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}

static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	struct sk_buff *clone;
	unsigned int type;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return;

	if (!ds->ops->port_txtstamp)
		return;

	clone = skb_clone_sk(skb);
	if (!clone)
		return;

	DSA_SKB_CB(skb)->clone = clone;

	if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type))
		return;

	kfree_skb(clone);
}

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKBs for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted.
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);

static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
{
	int needed_headroom = dev->needed_headroom;
	int needed_tailroom = dev->needed_tailroom;

	/* For tail taggers, we need to pad short frames ourselves, to ensure
	 * that the tail tag does not fail at its role of being at the end of
	 * the packet, once the master interface pads the frame. Account for
	 * that pad length here, and pad later.
	 */
	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
		needed_tailroom += ETH_ZLEN - skb->len;
	/* skb_headroom() returns unsigned int... */
	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);

	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
		/* No reallocation needed, yay! */
		return 0;

	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
				GFP_ATOMIC);
}
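/* As a worked example with hypothetical numbers: for a tail tagger with
 * dev->needed_tailroom = 1 and a 42-byte frame, the frame must first be
 * padded to ETH_ZLEN (60 bytes), so 60 - 42 = 18 bytes of padding plus the
 * 1-byte tag give a needed_tailroom of 19; whatever tailroom the skb
 * already has is then subtracted before deciding whether to reallocate.
 */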

static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct pcpu_sw_netstats *s;
	struct sk_buff *nskb;

	s = this_cpu_ptr(p->stats64);
	u64_stats_update_begin(&s->syncp);
	s->tx_packets++;
	s->tx_bytes += skb->len;
	u64_stats_update_end(&s->syncp);

	DSA_SKB_CB(skb)->clone = NULL;

	/* Identify PTP protocol packets, clone them, and pass them to the
	 * switch driver
	 */
	dsa_skb_tx_timestamp(p, skb);

	if (dsa_realloc_skb(skb, dev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* needed_tailroom should still be 'warm' in the cache line from
	 * dsa_realloc_skb(), which has also ensured that padding is safe.
	 */
	if (dev->needed_tailroom)
		eth_skb_pad(skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}

/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static int dsa_slave_nway_reset(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_nway_reset(dp->pl);
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	}
}

static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(p->stats64, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count = 0;

		if (ds->ops->get_sset_count) {
			count = ds->ops->get_sset_count(ds, dp->index, sset);
			if (count < 0)
				return count;
		}

		return count + 4;
	}

	return -EOPNOTSUPP;
}
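/* The "+ 4" accounts for the four software-maintained counters (tx_packets,
 * tx_bytes, rx_packets, rx_bytes) that dsa_slave_get_strings() and
 * dsa_slave_get_ethtool_stats() place in front of the hardware statistics
 * reported by the switch driver; the three callbacks must stay in sync.
 */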

static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}

static int dsa_slave_get_link_ksettings(struct net_device *dev,
					struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_slave_set_link_ksettings(struct net_device *dev,
					const struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

static void dsa_slave_get_pauseparam(struct net_device *dev,
				     struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	phylink_ethtool_get_pauseparam(dp->pl, pause);
}

static int dsa_slave_set_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_set_pauseparam(dp->pl, pause);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif

static int dsa_slave_get_phys_port_name(struct net_device *dev,
					char *name, size_t len)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	/* For non-legacy ports, devlink is used and it takes
	 * care of the name generation. This ndo implementation
	 * should be removed with legacy support.
	 */
	if (dp->ds->devlink)
		return -EOPNOTSUPP;

	if (snprintf(name, len, "p%d", dp->index) >= len)
		return -EINVAL;

	return 0;
}

static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_mirror_tc_entry *mirror;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err;

	if (!ds->ops->port_mirror_add)
		return -EOPNOTSUPP;

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	act = &cls->rule->action.entries[0];

	if (!act->dev)
		return -EINVAL;

	if (!dsa_slave_dev_check(act->dev))
		return -EOPNOTSUPP;

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
	mirror = &mall_tc_entry->mirror;

	to_dp = dsa_slave_to_port(act->dev);

	mirror->to_local_port = to_dp->index;
	mirror->ingress = ingress;

	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int
dsa_slave_add_cls_matchall_police(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_policer_tc_entry *policer;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	int err;

	if (!ds->ops->port_policer_add) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Policing offload not implemented");
		return -EOPNOTSUPP;
	}

	if (!ingress) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only supported on ingress qdisc");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one port policer allowed");
			return -EEXIST;
		}
	}

	act = &cls->rule->action.entries[0];

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
	policer = &mall_tc_entry->policer;
	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
	policer->burst = act->police.burst;

	err = ds->ops->port_policer_add(ds, dp->index, policer);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	int err = -EOPNOTSUPP;

	if (cls->common.protocol == htons(ETH_P_ALL) &&
	    flow_offload_has_one_action(&cls->rule->action) &&
	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
	else if (flow_offload_has_one_action(&cls->rule->action) &&
		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);

	return err;
}

static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		if (ds->ops->port_mirror_del)
			ds->ops->port_mirror_del(ds, dp->index,
						 &mall_tc_entry->mirror);
		break;
	case DSA_PORT_MALL_POLICER:
		if (ds->ops->port_policer_del)
			ds->ops->port_policer_del(ds, dp->index);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_add_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_add)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_add(ds, port, cls, ingress);
}

static int dsa_slave_del_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_del)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_del(ds, port, cls, ingress);
}

static int dsa_slave_stats_cls_flower(struct net_device *dev,
				      struct flow_cls_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_stats)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
}

static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
					 struct flow_cls_offload *cls,
					 bool ingress)
{
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		return dsa_slave_add_cls_flower(dev, cls, ingress);
	case FLOW_CLS_DESTROY:
		return dsa_slave_del_cls_flower(dev, cls, ingress);
	case FLOW_CLS_STATS:
		return dsa_slave_stats_cls_flower(dev, cls, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	case TC_SETUP_CLSFLOWER:
		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static LIST_HEAD(dsa_slave_block_cb_list);

static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	f->driver_block_list = &dsa_slave_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (type == TC_SETUP_BLOCK)
		return dsa_slave_setup_tc_block(dev, type_data);

	if (!ds->ops->port_setup_tc)
		return -EOPNOTSUPP;

	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}

static void dsa_slave_get_stats64(struct net_device *dev,
				  struct rtnl_link_stats64 *stats)
{
	struct dsa_slave_priv *p = netdev_priv(dev);

	netdev_stats_to_stats64(stats, &dev->stats);
	dev_fetch_sw_netstats(stats, p->stats64);
}

static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static int dsa_slave_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *ts)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}

static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				     u16 vid)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid_begin = vid,
		.vid_end = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct switchdev_trans trans;
	int ret;

	/* User port... */
	trans.ph_prepare = true;
	ret = dsa_port_vlan_add(dp, &vlan, &trans);
	if (ret)
		return ret;

	trans.ph_prepare = false;
	ret = dsa_port_vlan_add(dp, &vlan, &trans);
	if (ret)
		return ret;

	/* And CPU port... */
	trans.ph_prepare = true;
	ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &trans);
	if (ret)
		return ret;

	trans.ph_prepare = false;
	ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &trans);
	if (ret)
		return ret;

	return vlan_vid_add(master, proto, vid);
}
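/* Since this path is entered from the 8021q core rather than from
 * switchdev, the prepare/commit transaction that a bridge would normally
 * drive is emulated open-coded here: each dsa_port_vlan_add() is issued
 * once with trans.ph_prepare = true so the driver can veto the change, and
 * then again with ph_prepare = false to actually apply it.
 */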

static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				      u16 vid)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.vid_begin = vid,
		.vid_end = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	int err;

	/* Do not deprogram the CPU port as it may be shared with other user
	 * ports which can be members of this VLAN as well.
	 */
	err = dsa_port_vlan_del(dp, &vlan);
	if (err)
		return err;

	vlan_vid_del(master, proto, vid);

	return 0;
}

struct dsa_hw_port {
	struct list_head list;
	struct net_device *dev;
	int old_mtu;
};

static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
{
	const struct dsa_hw_port *p;
	int err;

	list_for_each_entry(p, hw_port_list, list) {
		if (p->dev->mtu == mtu)
			continue;

		err = dev_set_mtu(p->dev, mtu);
		if (err)
			goto rollback;
	}

	return 0;

rollback:
	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
		if (p->dev->mtu == p->old_mtu)
			continue;

		if (dev_set_mtu(p->dev, p->old_mtu))
			netdev_err(p->dev, "Failed to restore MTU\n");
	}

	return err;
}
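/* The rollback walks the list backwards from the port that failed, using
 * list_for_each_entry_continue_reverse(), and restores each port's old_mtu
 * so that a partial MTU change never leaves the bridge in a mixed state.
 */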

static void dsa_hw_port_list_free(struct list_head *hw_port_list)
{
	struct dsa_hw_port *p, *n;

	list_for_each_entry_safe(p, n, hw_port_list, list)
		kfree(p);
}

/* Make the hardware datapath to/from @dev limited to a common MTU */
static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
{
	struct list_head hw_port_list;
	struct dsa_switch_tree *dst;
	int min_mtu = ETH_MAX_MTU;
	struct dsa_port *other_dp;
	int err;

	if (!dp->ds->mtu_enforcement_ingress)
		return;

	if (!dp->bridge_dev)
		return;

	INIT_LIST_HEAD(&hw_port_list);

	/* Populate the list of ports that are part of the same bridge
	 * as the newly added/modified port
	 */
	list_for_each_entry(dst, &dsa_tree_list, list) {
		list_for_each_entry(other_dp, &dst->ports, list) {
			struct dsa_hw_port *hw_port;
			struct net_device *slave;

			if (other_dp->type != DSA_PORT_TYPE_USER)
				continue;

			if (other_dp->bridge_dev != dp->bridge_dev)
				continue;

			if (!other_dp->ds->mtu_enforcement_ingress)
				continue;

			slave = other_dp->slave;

			if (min_mtu > slave->mtu)
				min_mtu = slave->mtu;

			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
			if (!hw_port)
				goto out;

			hw_port->dev = slave;
			hw_port->old_mtu = slave->mtu;

			list_add(&hw_port->list, &hw_port_list);
		}
	}

	/* Attempt to configure the entire hardware bridge to the newly added
	 * interface's MTU first, regardless of whether the intention of the
	 * user was to raise or lower it.
	 */
	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
	if (!err)
		goto out;

	/* Clearly that didn't work out so well, so just set the minimum MTU on
	 * all hardware bridge ports now. If this fails too, then all ports will
	 * still have their old MTU rolled back anyway.
	 */
	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);

out:
	dsa_hw_port_list_free(&hw_port_list);
}

static int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	struct dsa_port *cpu_dp;
	int port = p->dp->index;
	int largest_mtu = 0;
	int new_master_mtu;
	int old_master_mtu;
	int mtu_limit;
	int cpu_mtu;
	int err, i;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	for (i = 0; i < ds->num_ports; i++) {
		int slave_mtu;

		if (!dsa_is_user_port(ds, i))
			continue;

		/* During probe, this function will be called for each slave
		 * device, while not all of them have been allocated. That's
		 * ok, it doesn't change what the maximum is, so ignore it.
		 */
		if (!dsa_to_port(ds, i)->slave)
			continue;

		/* Pretend that we already applied the setting, which we
		 * actually haven't (still haven't done all integrity checks)
		 */
		if (i == port)
			slave_mtu = new_mtu;
		else
			slave_mtu = dsa_to_port(ds, i)->slave->mtu;

		if (largest_mtu < slave_mtu)
			largest_mtu = slave_mtu;
	}

	cpu_dp = dsa_to_port(ds, port)->cpu_dp;

	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
	old_master_mtu = master->mtu;
	new_master_mtu = largest_mtu + cpu_dp->tag_ops->overhead;
	if (new_master_mtu > mtu_limit)
		return -ERANGE;

	/* If the master MTU isn't over limit, there's no need to check the CPU
	 * MTU, since that surely isn't either.
	 */
	cpu_mtu = largest_mtu;

	/* Start applying stuff */
	if (new_master_mtu != old_master_mtu) {
		err = dev_set_mtu(master, new_master_mtu);
		if (err < 0)
			goto out_master_failed;

		/* We only need to propagate the MTU of the CPU port to
		 * upstream switches.
		 */
		err = dsa_port_mtu_change(cpu_dp, cpu_mtu, true);
		if (err)
			goto out_cpu_failed;
	}

	err = dsa_port_mtu_change(dp, new_mtu, false);
	if (err)
		goto out_port_failed;

	dev->mtu = new_mtu;

	dsa_bridge_mtu_normalization(dp);

	return 0;

out_port_failed:
	if (new_master_mtu != old_master_mtu)
		dsa_port_mtu_change(cpu_dp, old_master_mtu -
				    cpu_dp->tag_ops->overhead,
				    true);
out_cpu_failed:
	if (new_master_mtu != old_master_mtu)
		dev_set_mtu(master, old_master_mtu);
out_master_failed:
	return err;
}
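/* As a worked example with hypothetical numbers: if the largest user port
 * MTU is 1500 and the tagging protocol adds 8 bytes of overhead (e.g. an
 * EDSA-style header), the master must carry 1500 + 8 = 1508-byte frames,
 * so new_master_mtu becomes 1508 and is checked against the smaller of the
 * master's and the slave's max_mtu before anything is applied.
 */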

static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo		= dsa_slave_get_drvinfo,
	.get_regs_len		= dsa_slave_get_regs_len,
	.get_regs		= dsa_slave_get_regs,
	.nway_reset		= dsa_slave_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= dsa_slave_get_eeprom_len,
	.get_eeprom		= dsa_slave_get_eeprom,
	.set_eeprom		= dsa_slave_set_eeprom,
	.get_strings		= dsa_slave_get_strings,
	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
	.get_sset_count		= dsa_slave_get_sset_count,
	.set_wol		= dsa_slave_set_wol,
	.get_wol		= dsa_slave_get_wol,
	.set_eee		= dsa_slave_set_eee,
	.get_eee		= dsa_slave_get_eee,
	.get_link_ksettings	= dsa_slave_get_link_ksettings,
	.set_link_ksettings	= dsa_slave_set_link_ksettings,
	.get_pauseparam		= dsa_slave_get_pauseparam,
	.set_pauseparam		= dsa_slave_set_pauseparam,
	.get_rxnfc		= dsa_slave_get_rxnfc,
	.set_rxnfc		= dsa_slave_set_rxnfc,
	.get_ts_info		= dsa_slave_get_ts_info,
};

/* legacy way, bypassing the bridge *****************************************/
int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
		       struct net_device *dev,
		       const unsigned char *addr, u16 vid,
		       u16 flags,
		       struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_add(dp, addr, vid);
}

int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
		       struct net_device *dev,
		       const unsigned char *addr, u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_del(dp, addr, vid);
}

static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dp->ds->devlink ? &dp->devlink_port : NULL;
}

static const struct net_device_ops dsa_slave_netdev_ops = {
	.ndo_open		= dsa_slave_open,
	.ndo_stop		= dsa_slave_close,
	.ndo_start_xmit		= dsa_slave_xmit,
	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
	.ndo_set_mac_address	= dsa_slave_set_mac_address,
	.ndo_fdb_add		= dsa_legacy_fdb_add,
	.ndo_fdb_del		= dsa_legacy_fdb_del,
	.ndo_fdb_dump		= dsa_slave_fdb_dump,
	.ndo_do_ioctl		= dsa_slave_ioctl,
	.ndo_get_iflink		= dsa_slave_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
	.ndo_poll_controller	= dsa_slave_poll_controller,
#endif
	.ndo_get_phys_port_name	= dsa_slave_get_phys_port_name,
	.ndo_setup_tc		= dsa_slave_setup_tc,
	.ndo_get_stats64	= dsa_slave_get_stats64,
	.ndo_get_port_parent_id	= dsa_slave_get_port_parent_id,
	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
	.ndo_get_devlink_port	= dsa_slave_get_devlink_port,
	.ndo_change_mtu		= dsa_slave_change_mtu,
};

static struct device_type dsa_type = {
	.name	= "dsa",
};

void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);

	if (dp->pl)
		phylink_mac_change(dp->pl, up);
}
EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);

static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
					  struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	/* No need to check that this operation is valid, the callback would
	 * not be called if it was not.
	 */
	ds->ops->phylink_fixed_state(ds, dp->index, state);
}

/* slave device setup *******************************************************/
static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_switch *ds = dp->ds;

	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
	if (!slave_dev->phydev) {
		netdev_err(slave_dev, "no phy at %d\n", addr);
		return -ENODEV;
	}

	return phylink_connect_phy(dp->pl, slave_dev->phydev);
}

static int dsa_slave_phy_setup(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct device_node *port_dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;
	u32 phy_flags = 0;
	int ret;

	ret = of_get_phy_mode(port_dn, &mode);
	if (ret)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = &slave_dev->dev;
	dp->pl_config.type = PHYLINK_NETDEV;

	/* The get_fixed_state callback takes precedence over polling the
	 * link GPIO in PHYLINK (see phylink_get_fixed_state).  Only set
	 * this if the switch provides such a callback.
	 */
	if (ds->ops->phylink_fixed_state) {
		dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
		dp->pl_config.poll_fixed_state = true;
	}

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode,
				&dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		netdev_err(slave_dev,
			   "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	if (ds->ops->get_phy_flags)
		phy_flags = ds->ops->get_phy_flags(ds, dp->index);

	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
	if (ret == -ENODEV && ds->slave_mii_bus) {
		/* We could not connect to a designated PHY or SFP, so try to
		 * use the switch internal MDIO bus instead
		 */
		ret = dsa_slave_phy_connect(slave_dev, dp->index);
	}
	if (ret) {
		netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
			   ERR_PTR(ret));
		phylink_destroy(dp->pl);
	}

	return ret;
}

static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
					    struct netdev_queue *txq,
					    void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &dsa_slave_netdev_xmit_lock_key);
}
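/* Setting a dedicated lockdep class on every slave TX queue is the usual
 * pattern for stacked devices: the slave transmits by re-injecting skbs
 * into the master from within its own xmit path, and a shared lock class
 * would presumably make lockdep report false-positive recursion between
 * the two layers.
 */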
1749
1750int dsa_slave_suspend(struct net_device *slave_dev)
1751{
1752	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1753
1754	if (!netif_running(slave_dev))
1755		return 0;
1756
1757	netif_device_detach(slave_dev);
1758
1759	rtnl_lock();
1760	phylink_stop(dp->pl);
1761	rtnl_unlock();
1762
1763	return 0;
1764}
1765
1766int dsa_slave_resume(struct net_device *slave_dev)
1767{
1768	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1769
1770	if (!netif_running(slave_dev))
1771		return 0;
1772
1773	netif_device_attach(slave_dev);
1774
1775	rtnl_lock();
1776	phylink_start(dp->pl);
1777	rtnl_unlock();
1778
1779	return 0;
1780}
1781
1782static void dsa_slave_notify(struct net_device *dev, unsigned long val)
1783{
1784	struct net_device *master = dsa_slave_to_master(dev);
1785	struct dsa_port *dp = dsa_slave_to_port(dev);
1786	struct dsa_notifier_register_info rinfo = {
1787		.switch_number = dp->ds->index,
1788		.port_number = dp->index,
1789		.master = master,
1790		.info.dev = dev,
1791	};
1792
1793	call_dsa_notifiers(val, dev, &rinfo.info);
1794}
1795
1796int dsa_slave_create(struct dsa_port *port)
1797{
1798	const struct dsa_port *cpu_dp = port->cpu_dp;
1799	struct net_device *master = cpu_dp->master;
1800	struct dsa_switch *ds = port->ds;
1801	const char *name = port->name;
1802	struct net_device *slave_dev;
1803	struct dsa_slave_priv *p;
1804	int ret;
1805
1806	if (!ds->num_tx_queues)
1807		ds->num_tx_queues = 1;
1808
1809	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
1810				     NET_NAME_UNKNOWN, ether_setup,
1811				     ds->num_tx_queues, 1);
1812	if (slave_dev == NULL)
1813		return -ENOMEM;
1814
1815	slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
1816	if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
1817		slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1818	slave_dev->hw_features |= NETIF_F_HW_TC;
1819	slave_dev->features |= NETIF_F_LLTX;
1820	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
1821	if (!IS_ERR_OR_NULL(port->mac))
1822		ether_addr_copy(slave_dev->dev_addr, port->mac);
1823	else
1824		eth_hw_addr_inherit(slave_dev, master);
1825	slave_dev->priv_flags |= IFF_NO_QUEUE;
1826	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
1827	if (ds->ops->port_max_mtu)
1828		slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
1829	if (cpu_dp->tag_ops->tail_tag)
1830		slave_dev->needed_tailroom = cpu_dp->tag_ops->overhead;
1831	else
1832		slave_dev->needed_headroom = cpu_dp->tag_ops->overhead;
1833	/* Try to save one extra realloc later in the TX path (in the master)
1834	 * by also inheriting the master's needed headroom and tailroom.
1835	 * The 8021q driver also does this.
1836	 */
1837	slave_dev->needed_headroom += master->needed_headroom;
1838	slave_dev->needed_tailroom += master->needed_tailroom;
1839	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
1840
1841	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
1842				 NULL);
1843
1844	SET_NETDEV_DEV(slave_dev, port->ds->dev);
1845	slave_dev->dev.of_node = port->dn;
1846	slave_dev->vlan_features = master->vlan_features;
1847
1848	p = netdev_priv(slave_dev);
1849	p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1850	if (!p->stats64) {
1851		free_netdev(slave_dev);
1852		return -ENOMEM;
1853	}
1854
1855	ret = gro_cells_init(&p->gcells, slave_dev);
1856	if (ret)
1857		goto out_free;
1858
1859	p->dp = port;
1860	INIT_LIST_HEAD(&p->mall_tc_list);
1861	p->xmit = cpu_dp->tag_ops->xmit;
1862	port->slave = slave_dev;
1863
1864	rtnl_lock();
1865	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
1866	rtnl_unlock();
1867	if (ret && ret != -EOPNOTSUPP)
1868		dev_warn(ds->dev, "nonfatal error %d setting MTU on port %d\n",
1869			 ret, port->index);
1870
1871	netif_carrier_off(slave_dev);
1872
1873	ret = dsa_slave_phy_setup(slave_dev);
1874	if (ret) {
1875		netdev_err(slave_dev,
1876			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
1877			   ret, ds->dst->index, ds->index, port->index);
1878		goto out_gcells;
1879	}
1880
1881	dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);
1882
1883	rtnl_lock();
1884
1885	ret = register_netdevice(slave_dev);
1886	if (ret) {
1887		netdev_err(master, "error %d registering interface %s\n",
1888			   ret, slave_dev->name);
1889		rtnl_unlock();
1890		goto out_phy;
1891	}
1892
1893	ret = netdev_upper_dev_link(master, slave_dev, NULL);
1894
1895	rtnl_unlock();
1896
1897	if (ret)
1898		goto out_unregister;
1899
1900	return 0;
1901
1902out_unregister:
1903	unregister_netdev(slave_dev);
1904out_phy:
1905	rtnl_lock();
1906	phylink_disconnect_phy(p->dp->pl);
1907	rtnl_unlock();
1908	phylink_destroy(p->dp->pl);
1909out_gcells:
1910	gro_cells_destroy(&p->gcells);
1911out_free:
1912	free_percpu(p->stats64);
1913	free_netdev(slave_dev);
1914	port->slave = NULL;
1915	return ret;
1916}
1917
1918void dsa_slave_destroy(struct net_device *slave_dev)
1919{
1920	struct net_device *master = dsa_slave_to_master(slave_dev);
1921	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1922	struct dsa_slave_priv *p = netdev_priv(slave_dev);
1923
1924	netif_carrier_off(slave_dev);
1925	rtnl_lock();
1926	netdev_upper_dev_unlink(master, slave_dev);
1927	unregister_netdevice(slave_dev);
1928	phylink_disconnect_phy(dp->pl);
1929	rtnl_unlock();
1930
1931	dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
1932	phylink_destroy(dp->pl);
1933	gro_cells_destroy(&p->gcells);
1934	free_percpu(p->stats64);
1935	free_netdev(slave_dev);
1936}
1937
1938bool dsa_slave_dev_check(const struct net_device *dev)
1939{
1940	return dev->netdev_ops == &dsa_slave_netdev_ops;
1941}
1942
1943static int dsa_slave_changeupper(struct net_device *dev,
1944				 struct netdev_notifier_changeupper_info *info)
1945{
1946	struct dsa_port *dp = dsa_slave_to_port(dev);
1947	int err = NOTIFY_DONE;
1948
1949	if (netif_is_bridge_master(info->upper_dev)) {
1950		if (info->linking) {
1951			err = dsa_port_bridge_join(dp, info->upper_dev);
1952			if (!err)
1953				dsa_bridge_mtu_normalization(dp);
1954			err = notifier_from_errno(err);
1955		} else {
1956			dsa_port_bridge_leave(dp, info->upper_dev);
1957			err = NOTIFY_OK;
1958		}
1959	}
1960
1961	return err;
1962}
1963
1964static int
1965dsa_prevent_bridging_8021q_upper(struct net_device *dev,
1966				 struct netdev_notifier_changeupper_info *info)
1967{
1968	struct netlink_ext_ack *ext_ack;
1969	struct net_device *slave;
1970	struct dsa_port *dp;
1971
1972	ext_ack = netdev_notifier_info_to_extack(&info->info);
1973
1974	if (!is_vlan_dev(dev))
1975		return NOTIFY_DONE;
1976
1977	slave = vlan_dev_real_dev(dev);
1978	if (!dsa_slave_dev_check(slave))
1979		return NOTIFY_DONE;
1980
1981	dp = dsa_slave_to_port(slave);
1982	if (!dp->bridge_dev)
1983		return NOTIFY_DONE;
1984
1985	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
1986	if (br_vlan_enabled(dp->bridge_dev) &&
1987	    netif_is_bridge_master(info->upper_dev) && info->linking) {
1988		NL_SET_ERR_MSG_MOD(ext_ack,
1989				   "Cannot enslave VLAN device into VLAN aware bridge");
1990		return notifier_from_errno(-EINVAL);
1991	}
1992
1993	return NOTIFY_DONE;
1994}
1995
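/* When a VLAN upper is added on top of a slave port that is member of
 * a VLAN-aware bridge, make sure its VID does not collide with a VLAN
 * already configured on the bridge.
 */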
1996static int
1997dsa_slave_check_8021q_upper(struct net_device *dev,
1998			    struct netdev_notifier_changeupper_info *info)
1999{
2000	struct dsa_port *dp = dsa_slave_to_port(dev);
2001	struct net_device *br = dp->bridge_dev;
2002	struct bridge_vlan_info br_info;
2003	struct netlink_ext_ack *extack;
2004	int err;
2005	u16 vid;
2006
2007	if (!br || !br_vlan_enabled(br))
2008		return NOTIFY_DONE;
2009
2010	extack = netdev_notifier_info_to_extack(&info->info);
2011	vid = vlan_dev_vlan_id(info->upper_dev);
2012
2013	/* br_vlan_get_info() returns -EINVAL or -ENOENT if the device,
2014	 * respectively the VID, is not found. A return of 0 means the
2015	 * VLAN already exists on the bridge, which is a failure for us here.
2016	 */
2017	err = br_vlan_get_info(br, vid, &br_info);
2018	if (err == 0) {
2019		NL_SET_ERR_MSG_MOD(extack,
2020				   "This VLAN is already configured by the bridge");
2021		return notifier_from_errno(-EBUSY);
2022	}
2023
2024	return NOTIFY_DONE;
2025}
2026
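/* Netdevice notifier: validate topology changes before they take
 * effect (NETDEV_PRECHANGEUPPER) and program the switch once they
 * have (NETDEV_CHANGEUPPER).
 */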
2027static int dsa_slave_netdevice_event(struct notifier_block *nb,
2028				     unsigned long event, void *ptr)
2029{
2030	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2031
2032	switch (event) {
2033	case NETDEV_PRECHANGEUPPER: {
2034		struct netdev_notifier_changeupper_info *info = ptr;
2035
2036		if (!dsa_slave_dev_check(dev))
2037			return dsa_prevent_bridging_8021q_upper(dev, ptr);
2038
2039		if (is_vlan_dev(info->upper_dev))
2040			return dsa_slave_check_8021q_upper(dev, ptr);
2041		break;
2042	}
2043	case NETDEV_CHANGEUPPER:
2044		if (!dsa_slave_dev_check(dev))
2045			return NOTIFY_DONE;
2046
2047		return dsa_slave_changeupper(dev, ptr);
2048	}
2049
2050	return NOTIFY_DONE;
2051}
2052
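/* SWITCHDEV_FDB_{ADD,DEL}_TO_DEVICE notifications arrive in atomic
 * context, while programming the FDB may sleep in the driver. The
 * notifier data is therefore copied into a work item and handled
 * under rtnl in dsa_slave_switchdev_event_work().
 */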
2053struct dsa_switchdev_event_work {
2054	struct work_struct work;
2055	struct switchdev_notifier_fdb_info fdb_info;
2056	struct net_device *dev;
2057	unsigned long event;
2058};
2059
2060static void dsa_slave_switchdev_event_work(struct work_struct *work)
2061{
2062	struct dsa_switchdev_event_work *switchdev_work =
2063		container_of(work, struct dsa_switchdev_event_work, work);
2064	struct net_device *dev = switchdev_work->dev;
2065	struct switchdev_notifier_fdb_info *fdb_info;
2066	struct dsa_port *dp = dsa_slave_to_port(dev);
2067	int err;
2068
2069	rtnl_lock();
2070	switch (switchdev_work->event) {
2071	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2072		fdb_info = &switchdev_work->fdb_info;
2073		if (!fdb_info->added_by_user)
2074			break;
2075
2076		err = dsa_port_fdb_add(dp, fdb_info->addr, fdb_info->vid);
2077		if (err) {
2078			netdev_dbg(dev, "fdb add failed err=%d\n", err);
2079			break;
2080		}
2081		fdb_info->offloaded = true;
2082		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
2083					 &fdb_info->info, NULL);
2084		break;
2085
2086	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2087		fdb_info = &switchdev_work->fdb_info;
2088		if (!fdb_info->added_by_user)
2089			break;
2090
2091		err = dsa_port_fdb_del(dp, fdb_info->addr, fdb_info->vid);
2092		if (err) {
2093			netdev_dbg(dev, "fdb del failed err=%d\n", err);
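			/* The hardware FDB now likely disagrees with the
			 * bridge's view of this entry; take the port down
			 * as the conservative response.
			 */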
2094			dev_close(dev);
2095		}
2096		break;
2097	}
2098	rtnl_unlock();
2099
2100	kfree(switchdev_work->fdb_info.addr);
2101	kfree(switchdev_work);
2102	dev_put(dev);
2103}
2104
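/* Deep-copy the FDB info into the work item: the notifier payload,
 * including the MAC address it points to, is only valid for the
 * duration of the atomic notifier call.
 */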
2105static int
2106dsa_slave_switchdev_fdb_work_init(struct dsa_switchdev_event_work *
2107				  switchdev_work,
2108				  const struct switchdev_notifier_fdb_info *
2109				  fdb_info)
2110{
2111	memcpy(&switchdev_work->fdb_info, fdb_info,
2112	       sizeof(switchdev_work->fdb_info));
2113	switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
2114	if (!switchdev_work->fdb_info.addr)
2115		return -ENOMEM;
2116	ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
2117			fdb_info->addr);
2118	return 0;
2119}
2120
2121/* Called under rcu_read_lock() */
2122static int dsa_slave_switchdev_event(struct notifier_block *unused,
2123				     unsigned long event, void *ptr)
2124{
2125	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2126	struct dsa_switchdev_event_work *switchdev_work;
2127	int err;
2128
2129	if (event == SWITCHDEV_PORT_ATTR_SET) {
2130		err = switchdev_handle_port_attr_set(dev, ptr,
2131						     dsa_slave_dev_check,
2132						     dsa_slave_port_attr_set);
2133		return notifier_from_errno(err);
2134	}
2135
2136	if (!dsa_slave_dev_check(dev))
2137		return NOTIFY_DONE;
2138
2139	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
2140	if (!switchdev_work)
2141		return NOTIFY_BAD;
2142
2143	INIT_WORK(&switchdev_work->work,
2144		  dsa_slave_switchdev_event_work);
2145	switchdev_work->dev = dev;
2146	switchdev_work->event = event;
2147
2148	switch (event) {
2149	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2150	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2151		if (dsa_slave_switchdev_fdb_work_init(switchdev_work, ptr))
2152			goto err_fdb_work_init;
2153		dev_hold(dev);
2154		break;
2155	default:
2156		kfree(switchdev_work);
2157		return NOTIFY_DONE;
2158	}
2159
2160	dsa_schedule_work(&switchdev_work->work);
2161	return NOTIFY_OK;
2162
2163err_fdb_work_init:
2164	kfree(switchdev_work);
2165	return NOTIFY_BAD;
2166}
2167
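/* Blocking switchdev notifier: port object add/del and attribute set
 * requests are dispatched through the switchdev helpers, which also
 * walk lower devices so that requests on an upper (e.g. a bridge)
 * reach the slave ports beneath it.
 */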
2168static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
2169					      unsigned long event, void *ptr)
2170{
2171	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2172	int err;
2173
2174	switch (event) {
2175	case SWITCHDEV_PORT_OBJ_ADD:
2176		err = switchdev_handle_port_obj_add(dev, ptr,
2177						    dsa_slave_dev_check,
2178						    dsa_slave_port_obj_add);
2179		return notifier_from_errno(err);
2180	case SWITCHDEV_PORT_OBJ_DEL:
2181		err = switchdev_handle_port_obj_del(dev, ptr,
2182						    dsa_slave_dev_check,
2183						    dsa_slave_port_obj_del);
2184		return notifier_from_errno(err);
2185	case SWITCHDEV_PORT_ATTR_SET:
2186		err = switchdev_handle_port_attr_set(dev, ptr,
2187						     dsa_slave_dev_check,
2188						     dsa_slave_port_attr_set);
2189		return notifier_from_errno(err);
2190	}
2191
2192	return NOTIFY_DONE;
2193}
2194
2195static struct notifier_block dsa_slave_nb __read_mostly = {
2196	.notifier_call  = dsa_slave_netdevice_event,
2197};
2198
2199static struct notifier_block dsa_slave_switchdev_notifier = {
2200	.notifier_call = dsa_slave_switchdev_event,
2201};
2202
2203static struct notifier_block dsa_slave_switchdev_blocking_notifier = {
2204	.notifier_call = dsa_slave_switchdev_blocking_event,
2205};
2206
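/* Register the three notifier chains the slave ports rely on, and
 * unwind in reverse order if any registration fails.
 */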
2207int dsa_slave_register_notifier(void)
2208{
2209	struct notifier_block *nb;
2210	int err;
2211
2212	err = register_netdevice_notifier(&dsa_slave_nb);
2213	if (err)
2214		return err;
2215
2216	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
2217	if (err)
2218		goto err_switchdev_nb;
2219
2220	nb = &dsa_slave_switchdev_blocking_notifier;
2221	err = register_switchdev_blocking_notifier(nb);
2222	if (err)
2223		goto err_switchdev_blocking_nb;
2224
2225	return 0;
2226
2227err_switchdev_blocking_nb:
2228	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
2229err_switchdev_nb:
2230	unregister_netdevice_notifier(&dsa_slave_nb);
2231	return err;
2232}
2233
2234void dsa_slave_unregister_notifier(void)
2235{
2236	struct notifier_block *nb;
2237	int err;
2238
2239	nb = &dsa_slave_switchdev_blocking_notifier;
2240	err = unregister_switchdev_blocking_notifier(nb);
2241	if (err)
2242		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
2243
2244	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
2245	if (err)
2246		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
2247
2248	err = unregister_netdevice_notifier(&dsa_slave_nb);
2249	if (err)
2250		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
2251}
2252