1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/delay.h>
9#include <linux/module.h>
10#include <linux/printk.h>
11#include <linux/spi/spi.h>
12#include <linux/errno.h>
13#include <linux/gpio/consumer.h>
14#include <linux/phylink.h>
15#include <linux/of.h>
16#include <linux/of_net.h>
17#include <linux/of_mdio.h>
18#include <linux/of_device.h>
19#include <linux/netdev_features.h>
20#include <linux/netdevice.h>
21#include <linux/if_bridge.h>
22#include <linux/if_ether.h>
23#include <linux/dsa/8021q.h>
24#include "sja1105.h"
25#include "sja1105_sgmii.h"
26#include "sja1105_tas.h"
27
28#define SJA1105_DEFAULT_VLAN		(VLAN_N_VID - 1)
29
30static const struct dsa_switch_ops sja1105_switch_ops;
31
/* Drive the switch's external reset line: assert RST for @pulse_len ms,
 * deassert it, then wait @startup_delay ms for the chip to come up.
 * May sleep (uses the cansleep GPIO accessors and msleep).
 */
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
			     unsigned int startup_delay)
{
	gpiod_set_value_cansleep(gpio, 1);
	/* Wait for minimum reset pulse length */
	msleep(pulse_len);
	gpiod_set_value_cansleep(gpio, 0);
	/* Wait until chip is ready after reset */
	msleep(startup_delay);
}
42
43static void
44sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
45			   int from, int to, bool allow)
46{
47	if (allow) {
48		l2_fwd[from].bc_domain  |= BIT(to);
49		l2_fwd[from].reach_port |= BIT(to);
50		l2_fwd[from].fl_domain  |= BIT(to);
51	} else {
52		l2_fwd[from].bc_domain  &= ~BIT(to);
53		l2_fwd[from].reach_port &= ~BIT(to);
54		l2_fwd[from].fl_domain  &= ~BIT(to);
55	}
56}
57
/* Structure used to temporarily transport device tree
 * settings into sja1105_setup
 */
struct sja1105_dt_port {
	phy_interface_t phy_mode;	/* MII protocol from the "phy-mode" DT property */
	sja1105_mii_role_t role;	/* XMII_MAC or XMII_PHY, derived from phy-handle/
					 * fixed-link or the sja1105,role-* overrides
					 */
};
65
/* Populate the static MAC Configuration Table with one identical default
 * entry per port: all 8 egress queues enabled, speed left in automatic
 * (PHYLINK-managed) mode, and learning/I-O disabled on user ports (STP
 * enables them at runtime). The upstream (CPU) port gets learning and I/O
 * enabled statically, since STP never runs on it.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int sja1105_init_mac_settings(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry default_mac = {
		/* Enable all 8 priority queues on egress.
		 * Every queue i holds top[i] - base[i] frames.
		 * Sum of top[i] - base[i] is 511 (max hardware limit).
		 */
		.top  = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
		.base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
		.enabled = {true, true, true, true, true, true, true, true},
		/* Keep standard IFG of 12 bytes on egress. */
		.ifg = 0,
		/* Always put the MAC speed in automatic mode, where it can be
		 * adjusted at runtime by PHYLINK.
		 */
		.speed = SJA1105_SPEED_AUTO,
		/* No static correction for 1-step 1588 events */
		.tp_delin = 0,
		.tp_delout = 0,
		/* Disable aging for critical TTEthernet traffic */
		.maxage = 0xFF,
		/* Internal VLAN (pvid) to apply to untagged ingress */
		.vlanprio = 0,
		.vlanid = 1,
		.ing_mirr = false,
		.egr_mirr = false,
		/* Don't drop traffic with other EtherType than ETH_P_IP */
		.drpnona664 = false,
		/* Don't drop double-tagged traffic */
		.drpdtag = false,
		/* Don't drop untagged traffic */
		.drpuntag = false,
		/* Don't retag 802.1p (VID 0) traffic with the pvid */
		.retag = false,
		/* Disable learning and I/O on user ports by default -
		 * STP will enable it.
		 */
		.dyn_learn = false,
		.egress = false,
		.ingress = false,
	};
	struct sja1105_mac_config_entry *mac;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];

	/* Discard previous MAC Configuration Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_NUM_PORTS,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_NUM_PORTS;

	mac = table->entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		mac[i] = default_mac;
		if (i == dsa_upstream_port(priv->ds, i)) {
			/* STP doesn't get called for CPU port, so we need to
			 * set the I/O parameters statically.
			 */
			mac[i].dyn_learn = true;
			mac[i].ingress = true;
			mac[i].egress = true;
		}
	}

	return 0;
}
142
143static bool sja1105_supports_sgmii(struct sja1105_private *priv, int port)
144{
145	if (priv->info->part_no != SJA1105R_PART_NO &&
146	    priv->info->part_no != SJA1105S_PART_NO)
147		return false;
148
149	if (port != SJA1105_SGMII_PORT)
150		return false;
151
152	if (dsa_is_unused_port(priv->ds, port))
153		return false;
154
155	return true;
156}
157
/* Populate the xMII Mode Parameters Table from the phy-mode and MAC/PHY
 * role settings parsed out of the device tree for each used port.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EINVAL for an
 * unsupported phy-mode (or SGMII requested on a port/part without a PCS).
 */
static int sja1105_init_mii_settings(struct sja1105_private *priv,
				     struct sja1105_dt_port *ports)
{
	struct device *dev = &priv->spidev->dev;
	struct sja1105_xmii_params_entry *mii;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];

	/* Discard previous xMII Mode Parameters Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	/* Override table based on PHYLINK DT bindings */
	table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;

	mii = table->entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		if (dsa_is_unused_port(priv->ds, i))
			continue;

		switch (ports[i].phy_mode) {
		case PHY_INTERFACE_MODE_MII:
			mii->xmii_mode[i] = XMII_MODE_MII;
			break;
		case PHY_INTERFACE_MODE_RMII:
			mii->xmii_mode[i] = XMII_MODE_RMII;
			break;
		/* All RGMII delay variants map to the same MAC-side mode;
		 * delays themselves are handled by the clocking setup.
		 */
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
			mii->xmii_mode[i] = XMII_MODE_RGMII;
			break;
		case PHY_INTERFACE_MODE_SGMII:
			if (!sja1105_supports_sgmii(priv, i))
				return -EINVAL;
			mii->xmii_mode[i] = XMII_MODE_SGMII;
			break;
		default:
			dev_err(dev, "Unsupported PHY mode %s!\n",
				phy_modes(ports[i].phy_mode));
			return -EINVAL;
		}

		/* Even though the SerDes port is able to drive SGMII autoneg
		 * like a PHY would, from the perspective of the XMII tables,
		 * the SGMII port should always be put in MAC mode.
		 */
		if (ports[i].phy_mode == PHY_INTERFACE_MODE_SGMII)
			mii->phy_mac[i] = XMII_MAC;
		else
			mii->phy_mac[i] = ports[i].role;
	}
	return 0;
}
223
224static int sja1105_init_static_fdb(struct sja1105_private *priv)
225{
226	struct sja1105_table *table;
227
228	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
229
230	/* We only populate the FDB table through dynamic
231	 * L2 Address Lookup entries
232	 */
233	if (table->entry_count) {
234		kfree(table->entries);
235		table->entry_count = 0;
236	}
237	return 0;
238}
239
/* Populate the single-entry L2 Lookup Parameters Table, which governs the
 * FDB's learning behavior: ageing time, hash polynomial, per-port entry
 * budget, shared VLAN learning, and overwrite policy.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	/* Split the FDB capacity evenly among the ports */
	u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS;
	struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
		/* Learned FDB entries are forgotten after 300 seconds */
		.maxage = SJA1105_AGEING_TIME_MS(300000),
		/* All entries within a FDB bin are available for learning */
		.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
		/* And the P/Q/R/S equivalent setting: */
		.start_dynspc = 0,
		.maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries,
			     max_fdb_entries, max_fdb_entries, },
		/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
		.poly = 0x97,
		/* This selects between Independent VLAN Learning (IVL) and
		 * Shared VLAN Learning (SVL)
		 */
		.shared_learn = true,
		/* Don't discard management traffic based on ENFPORT -
		 * we don't perform SMAC port enforcement anyway, so
		 * what we are setting here doesn't matter.
		 */
		.no_enf_hostprt = false,
		/* Don't learn SMAC for mac_fltres1 and mac_fltres0.
		 * Maybe correlate with no_linklocal_learn from bridge driver?
		 */
		.no_mgmt_learn = true,
		/* P/Q/R/S only */
		.use_static = true,
		/* Dynamically learned FDB entries can overwrite other (older)
		 * dynamic FDB entries
		 */
		.owr_dyn = true,
		.drpnolearn = true,
	};

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];

	/* Discard previous L2 Lookup Parameters Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
				default_l2_lookup_params;

	return 0;
}
297
298/* Set up a default VLAN for untagged traffic injected from the CPU
299 * using management routes (e.g. STP, PTP) as opposed to tag_8021q.
300 * All DT-defined ports are members of this VLAN, and there are no
301 * restrictions on forwarding (since the CPU selects the destination).
302 * Frames from this VLAN will always be transmitted as untagged, and
 * neither the bridge nor the 8021q module can create this VLAN ID.
304 */
static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	/* Hardware entry for the default VLAN; membership/broadcast/tagging
	 * bitmasks are filled in per port below.
	 */
	struct sja1105_vlan_lookup_entry pvid = {
		.ving_mirr = 0,
		.vegr_mirr = 0,
		.vmemb_port = 0,
		.vlan_bc = 0,
		.tag_port = 0,
		.vlanid = SJA1105_DEFAULT_VLAN,
	};
	struct dsa_switch *ds = priv->ds;
	int port;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	/* Discard previous VLAN Lookup Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(1, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = 1;

	for (port = 0; port < ds->num_ports; port++) {
		struct sja1105_bridge_vlan *v;

		if (dsa_is_unused_port(ds, port))
			continue;

		/* Make the port a member, allow it to receive broadcasts in
		 * this VLAN, and always transmit it untagged.
		 */
		pvid.vmemb_port |= BIT(port);
		pvid.vlan_bc |= BIT(port);
		pvid.tag_port &= ~BIT(port);

		/* Bookkeeping entry on the tag_8021q VLAN list.
		 * NOTE(review): on a later -ENOMEM in this loop, entries
		 * already list_add'ed here are presumably released by the
		 * driver teardown path that frees these lists - confirm.
		 */
		v = kzalloc(sizeof(*v), GFP_KERNEL);
		if (!v)
			return -ENOMEM;

		v->port = port;
		v->vid = SJA1105_DEFAULT_VLAN;
		v->untagged = true;
		if (dsa_is_cpu_port(ds, port))
			v->pvid = true;
		list_add(&v->list, &priv->dsa_8021q_vlans);

		/* Mirror the same entry on the bridge VLAN list */
		v = kmemdup(v, sizeof(*v), GFP_KERNEL);
		if (!v)
			return -ENOMEM;

		list_add(&v->list, &priv->bridge_vlans);
	}

	((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
	return 0;
}
364
365static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
366{
367	struct sja1105_l2_forwarding_entry *l2fwd;
368	struct sja1105_table *table;
369	int i, j;
370
371	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];
372
373	if (table->entry_count) {
374		kfree(table->entries);
375		table->entry_count = 0;
376	}
377
378	table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
379				 table->ops->unpacked_entry_size, GFP_KERNEL);
380	if (!table->entries)
381		return -ENOMEM;
382
383	table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;
384
385	l2fwd = table->entries;
386
387	/* First 5 entries define the forwarding rules */
388	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
389		unsigned int upstream = dsa_upstream_port(priv->ds, i);
390
391		for (j = 0; j < SJA1105_NUM_TC; j++)
392			l2fwd[i].vlan_pmap[j] = j;
393
394		if (i == upstream)
395			continue;
396
397		sja1105_port_allow_traffic(l2fwd, i, upstream, true);
398		sja1105_port_allow_traffic(l2fwd, upstream, i, true);
399	}
400	/* Next 8 entries define VLAN PCP mapping from ingress to egress.
401	 * Create a one-to-one mapping.
402	 */
403	for (i = 0; i < SJA1105_NUM_TC; i++)
404		for (j = 0; j < SJA1105_NUM_PORTS; j++)
405			l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;
406
407	return 0;
408}
409
410static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
411{
412	struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
413		/* Disallow dynamic reconfiguration of vlan_pmap */
414		.max_dynp = 0,
415		/* Use a single memory partition for all ingress queues */
416		.part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
417	};
418	struct sja1105_table *table;
419
420	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
421
422	if (table->entry_count) {
423		kfree(table->entries);
424		table->entry_count = 0;
425	}
426
427	table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
428				 table->ops->unpacked_entry_size, GFP_KERNEL);
429	if (!table->entries)
430		return -ENOMEM;
431
432	table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;
433
434	/* This table only has a single entry */
435	((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
436				default_l2fwd_params;
437
438	return 0;
439}
440
/* Repartition the switch's frame buffer memory between best-effort (L2)
 * traffic and, when virtual links exist, time-triggered (VL) traffic.
 * Also accounts for the loopback port that VLAN retagging consumes.
 * Called when the VLAN state or the set of virtual links changes; updates
 * the static config shadow tables only (upload happens elsewhere).
 */
void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
	struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
	struct sja1105_table *table;
	int max_mem;

	/* VLAN retagging is implemented using a loopback port that consumes
	 * frame buffers. That leaves less for us.
	 */
	if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT)
		max_mem = SJA1105_MAX_FRAME_MEMORY_RETAGGING;
	else
		max_mem = SJA1105_MAX_FRAME_MEMORY;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
	l2_fwd_params = table->entries;
	l2_fwd_params->part_spc[0] = max_mem;

	/* If we have any critical-traffic virtual links, we need to reserve
	 * some frame buffer memory for them. At the moment, hardcode the value
	 * at 100 blocks of 128 bytes of memory each. This leaves 829 blocks
	 * remaining for best-effort traffic. TODO: figure out a more flexible
	 * way to perform the frame buffer partitioning.
	 */
	if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count)
		return;

	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
	vl_fwd_params = table->entries;

	/* Carve the VL reservation out of the best-effort partition */
	l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY;
	vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
}
475
/* Populate the single-entry General Parameters Table: link-local frame
 * trapping filters, host/cascade port selection, and TPIDs set to a
 * private EtherType so VLAN filtering starts out disabled.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int sja1105_init_general_params(struct sja1105_private *priv)
{
	struct sja1105_general_params_entry default_general_params = {
		/* Allow dynamic changing of the mirror port */
		.mirr_ptacu = true,
		.switchid = priv->ds->index,
		/* Priority queue for link-local management frames
		 * (both ingress to and egress from CPU - PTP, STP etc)
		 */
		.hostprio = 7,
		.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
		.mac_flt1    = SJA1105_LINKLOCAL_FILTER_A_MASK,
		.incl_srcpt1 = false,
		.send_meta1  = false,
		.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
		.mac_flt0    = SJA1105_LINKLOCAL_FILTER_B_MASK,
		.incl_srcpt0 = false,
		.send_meta0  = false,
		/* The destination for traffic matching mac_fltres1 and
		 * mac_fltres0 on all ports except host_port. Such traffic
		 * received on host_port itself would be dropped, except
		 * by installing a temporary 'management route'
		 */
		.host_port = dsa_upstream_port(priv->ds, 0),
		/* Default to an invalid value */
		.mirr_port = SJA1105_NUM_PORTS,
		/* Link-local traffic received on casc_port will be forwarded
		 * to host_port without embedding the source port and device ID
		 * info in the destination MAC address (presumably because it
		 * is a cascaded port and a downstream SJA switch already did
		 * that). Default to an invalid port (to disable the feature)
		 * and overwrite this if we find any DSA (cascaded) ports.
		 */
		.casc_port = SJA1105_NUM_PORTS,
		/* No TTEthernet */
		.vllupformat = SJA1105_VL_FORMAT_PSFP,
		.vlmarker = 0,
		.vlmask = 0,
		/* Only update correctionField for 1-step PTP (L2 transport) */
		.ignore2stf = 0,
		/* Forcefully disable VLAN filtering by telling
		 * the switch that VLAN has a different EtherType.
		 */
		.tpid = ETH_P_SJA1105,
		.tpid2 = ETH_P_SJA1105,
	};
	struct sja1105_table *table;

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];

	/* Discard previous General Parameters Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_general_params_entry *)table->entries)[0] =
				default_general_params;

	return 0;
}
544
/* Populate the single-entry AVB Parameters Table: the MAC addresses used
 * for PTP meta frames, and the direction of the PTP_CLK pin.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int sja1105_init_avb_params(struct sja1105_private *priv)
{
	struct sja1105_avb_params_entry *avb;
	struct sja1105_table *table;

	table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];

	/* Discard previous AVB Parameters Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;

	avb = table->entries;

	/* Configure the MAC addresses for meta frames */
	avb->destmeta = SJA1105_META_DMAC;
	avb->srcmeta  = SJA1105_META_SMAC;
	/* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by
	 * default. This is because there might be boards with a hardware
	 * layout where enabling the pin as output might cause an electrical
	 * clash. On E/T the pin is always an output, which the board designers
	 * probably already knew, so even if there are going to be electrical
	 * issues, there's nothing we can do.
	 */
	avb->cas_master = false;

	return 0;
}
581
582/* The L2 policing table is 2-stage. The table is looked up for each frame
583 * according to the ingress port, whether it was broadcast or not, and the
584 * classified traffic class (given by VLAN PCP). This portion of the lookup is
585 * fixed, and gives access to the SHARINDX, an indirection register pointing
586 * within the policing table itself, which is used to resolve the policer that
587 * will be used for this frame.
588 *
589 *  Stage 1                              Stage 2
590 * +------------+--------+              +---------------------------------+
591 * |Port 0 TC 0 |SHARINDX|              | Policer 0: Rate, Burst, MTU     |
592 * +------------+--------+              +---------------------------------+
593 * |Port 0 TC 1 |SHARINDX|              | Policer 1: Rate, Burst, MTU     |
594 * +------------+--------+              +---------------------------------+
595 *    ...                               | Policer 2: Rate, Burst, MTU     |
596 * +------------+--------+              +---------------------------------+
597 * |Port 0 TC 7 |SHARINDX|              | Policer 3: Rate, Burst, MTU     |
598 * +------------+--------+              +---------------------------------+
599 * |Port 1 TC 0 |SHARINDX|              | Policer 4: Rate, Burst, MTU     |
600 * +------------+--------+              +---------------------------------+
601 *    ...                               | Policer 5: Rate, Burst, MTU     |
602 * +------------+--------+              +---------------------------------+
603 * |Port 1 TC 7 |SHARINDX|              | Policer 6: Rate, Burst, MTU     |
604 * +------------+--------+              +---------------------------------+
605 *    ...                               | Policer 7: Rate, Burst, MTU     |
606 * +------------+--------+              +---------------------------------+
607 * |Port 4 TC 7 |SHARINDX|                 ...
608 * +------------+--------+
609 * |Port 0 BCAST|SHARINDX|                 ...
610 * +------------+--------+
611 * |Port 1 BCAST|SHARINDX|                 ...
612 * +------------+--------+
613 *    ...                                  ...
614 * +------------+--------+              +---------------------------------+
615 * |Port 4 BCAST|SHARINDX|              | Policer 44: Rate, Burst, MTU    |
616 * +------------+--------+              +---------------------------------+
617 *
 * In this driver, we shall use policers 0-4 as statically allocated port
619 * (matchall) policers. So we need to make the SHARINDX for all lookups
620 * corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast
621 * lookup) equal.
622 * The remaining policers (40) shall be dynamically allocated for flower
623 * policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff.
624 */
625#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
626
/* Populate the L2 Policing Table with one matchall (port-wide) policer per
 * ingress port, made non-restrictive: line rate, maximum burst, and an MTU
 * that fits one VLAN tag (two on the CPU port, for the tag_8021q header).
 * All per-TC and broadcast lookups of a port share that port's policer via
 * SHARINDX - see the diagram above.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int sja1105_init_l2_policing(struct sja1105_private *priv)
{
	struct sja1105_l2_policing_entry *policing;
	struct sja1105_table *table;
	int port, tc;

	table = &priv->static_config.tables[BLK_IDX_L2_POLICING];

	/* Discard previous L2 Policing Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;

	policing = table->entries;

	/* Setup shared indices for the matchall policers */
	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		/* Broadcast lookups start after the per-port/per-TC block */
		int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;

		for (tc = 0; tc < SJA1105_NUM_TC; tc++)
			policing[port * SJA1105_NUM_TC + tc].sharindx = port;

		policing[bcast].sharindx = port;
	}

	/* Setup the matchall policer parameters */
	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;

		/* The CPU port carries an extra tag_8021q VLAN header */
		if (dsa_is_cpu_port(priv->ds, port))
			mtu += VLAN_HLEN;

		policing[port].smax = 65535; /* Burst size in bytes */
		policing[port].rate = SJA1105_RATE_MBPS(1000);
		policing[port].maxlen = mtu;
		policing[port].partition = 0;
	}

	return 0;
}
675
/* Build the complete static configuration from scratch (discarding any
 * previous one) and upload it to the hardware over SPI.
 *
 * @ports: per-port settings parsed from the device tree, consumed by the
 *         xMII table initializer.
 *
 * Returns 0 on success, or the first negative error code reported by a
 * per-table initializer or by the SPI upload.
 */
static int sja1105_static_config_load(struct sja1105_private *priv,
				      struct sja1105_dt_port *ports)
{
	int rc;

	sja1105_static_config_free(&priv->static_config);
	rc = sja1105_static_config_init(&priv->static_config,
					priv->info->static_ops,
					priv->info->device_id);
	if (rc)
		return rc;

	/* Build static configuration */
	rc = sja1105_init_mac_settings(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_mii_settings(priv, ports);
	if (rc < 0)
		return rc;
	rc = sja1105_init_static_fdb(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_static_vlan(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_lookup_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_forwarding(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_forwarding_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_policing(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_general_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_avb_params(priv);
	if (rc < 0)
		return rc;

	/* Send initial configuration to hardware via SPI */
	return sja1105_static_config_upload(priv);
}
723
724static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
725				      const struct sja1105_dt_port *ports)
726{
727	int i;
728
729	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
730		if (ports[i].role == XMII_MAC)
731			continue;
732
733		if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
734		    ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
735			priv->rgmii_rx_delay[i] = true;
736
737		if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
738		    ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
739			priv->rgmii_tx_delay[i] = true;
740
741		if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
742		     !priv->info->setup_rgmii_delay)
743			return -EINVAL;
744	}
745	return 0;
746}
747
748static int sja1105_parse_ports_node(struct sja1105_private *priv,
749				    struct sja1105_dt_port *ports,
750				    struct device_node *ports_node)
751{
752	struct device *dev = &priv->spidev->dev;
753	struct device_node *child;
754
755	for_each_available_child_of_node(ports_node, child) {
756		struct device_node *phy_node;
757		phy_interface_t phy_mode;
758		u32 index;
759		int err;
760
761		/* Get switch port number from DT */
762		if (of_property_read_u32(child, "reg", &index) < 0) {
763			dev_err(dev, "Port number not defined in device tree "
764				"(property \"reg\")\n");
765			of_node_put(child);
766			return -ENODEV;
767		}
768
769		/* Get PHY mode from DT */
770		err = of_get_phy_mode(child, &phy_mode);
771		if (err) {
772			dev_err(dev, "Failed to read phy-mode or "
773				"phy-interface-type property for port %d\n",
774				index);
775			of_node_put(child);
776			return -ENODEV;
777		}
778		ports[index].phy_mode = phy_mode;
779
780		phy_node = of_parse_phandle(child, "phy-handle", 0);
781		if (!phy_node) {
782			if (!of_phy_is_fixed_link(child)) {
783				dev_err(dev, "phy-handle or fixed-link "
784					"properties missing!\n");
785				of_node_put(child);
786				return -ENODEV;
787			}
788			/* phy-handle is missing, but fixed-link isn't.
789			 * So it's a fixed link. Default to PHY role.
790			 */
791			ports[index].role = XMII_PHY;
792		} else {
793			/* phy-handle present => put port in MAC role */
794			ports[index].role = XMII_MAC;
795			of_node_put(phy_node);
796		}
797
798		/* The MAC/PHY role can be overridden with explicit bindings */
799		if (of_property_read_bool(child, "sja1105,role-mac"))
800			ports[index].role = XMII_MAC;
801		else if (of_property_read_bool(child, "sja1105,role-phy"))
802			ports[index].role = XMII_PHY;
803	}
804
805	return 0;
806}
807
808static int sja1105_parse_dt(struct sja1105_private *priv,
809			    struct sja1105_dt_port *ports)
810{
811	struct device *dev = &priv->spidev->dev;
812	struct device_node *switch_node = dev->of_node;
813	struct device_node *ports_node;
814	int rc;
815
816	ports_node = of_get_child_by_name(switch_node, "ports");
817	if (!ports_node) {
818		dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
819		return -ENODEV;
820	}
821
822	rc = sja1105_parse_ports_node(priv, ports, ports_node);
823	of_node_put(ports_node);
824
825	return rc;
826}
827
828static int sja1105_sgmii_read(struct sja1105_private *priv, int pcs_reg)
829{
830	const struct sja1105_regs *regs = priv->info->regs;
831	u32 val;
832	int rc;
833
834	rc = sja1105_xfer_u32(priv, SPI_READ, regs->sgmii + pcs_reg, &val,
835			      NULL);
836	if (rc < 0)
837		return rc;
838
839	return val;
840}
841
842static int sja1105_sgmii_write(struct sja1105_private *priv, int pcs_reg,
843			       u16 pcs_val)
844{
845	const struct sja1105_regs *regs = priv->info->regs;
846	u32 val = pcs_val;
847	int rc;
848
849	rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->sgmii + pcs_reg, &val,
850			      NULL);
851	if (rc < 0)
852		return rc;
853
854	return val;
855}
856
/* Program the SGMII PCS: vendor MMD setup, lane polarity, autoneg mode
 * (optionally as SGMII AN master when @an_master), and - if @an_enabled -
 * kick off in-band autoneg. When autoneg is not enabled here,
 * sja1105_sgmii_pcs_force_speed() must be called later for the link to
 * become operational.
 */
static void sja1105_sgmii_pcs_config(struct sja1105_private *priv,
				     bool an_enabled, bool an_master)
{
	u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII;

	/* DIGITAL_CONTROL_1: Enable vendor-specific MMD1, allow the PHY to
	 * stop the clock during LPI mode, make the MAC reconfigure
	 * autonomously after PCS autoneg is done, flush the internal FIFOs.
	 */
	sja1105_sgmii_write(priv, SJA1105_DC1, SJA1105_DC1_EN_VSMMD1 |
					       SJA1105_DC1_CLOCK_STOP_EN |
					       SJA1105_DC1_MAC_AUTO_SW |
					       SJA1105_DC1_INIT);
	/* DIGITAL_CONTROL_2: No polarity inversion for TX and RX lanes */
	sja1105_sgmii_write(priv, SJA1105_DC2, SJA1105_DC2_TX_POL_INV_DISABLE);
	/* AUTONEG_CONTROL: Use SGMII autoneg */
	if (an_master)
		ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK;
	sja1105_sgmii_write(priv, SJA1105_AC, ac);
	/* BASIC_CONTROL: enable in-band AN now, if requested. Otherwise,
	 * sja1105_sgmii_pcs_force_speed must be called later for the link
	 * to become operational.
	 */
	if (an_enabled)
		sja1105_sgmii_write(priv, MII_BMCR,
				    BMCR_ANENABLE | BMCR_ANRESTART);
}
884
885static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv,
886					  int speed)
887{
888	int pcs_speed;
889
890	switch (speed) {
891	case SPEED_1000:
892		pcs_speed = BMCR_SPEED1000;
893		break;
894	case SPEED_100:
895		pcs_speed = BMCR_SPEED100;
896		break;
897	case SPEED_10:
898		pcs_speed = BMCR_SPEED10;
899		break;
900	default:
901		dev_err(priv->ds->dev, "Invalid speed %d\n", speed);
902		return;
903	}
904	sja1105_sgmii_write(priv, MII_BMCR, pcs_speed | BMCR_FULLDPLX);
905}
906
/* Convert link speed from SJA1105 to ethtool encoding.
 * Indexed by the sja1105_speed_t values used in the static config.
 */
static int sja1105_speed[] = {
	[SJA1105_SPEED_AUTO]		= SPEED_UNKNOWN,
	[SJA1105_SPEED_10MBPS]		= SPEED_10,
	[SJA1105_SPEED_100MBPS]		= SPEED_100,
	[SJA1105_SPEED_1000MBPS]	= SPEED_1000,
};
914
/* Set link speed in the MAC configuration for a specific port.
 *
 * Writes the new speed through the dynamic reconfiguration interface and,
 * for RGMII ports, reprograms the port clocking to match, since the RGMII
 * reference clock depends on the link speed.
 *
 * Returns 0 on success or a negative error code.
 */
static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
				      int speed_mbps)
{
	struct sja1105_xmii_params_entry *mii;
	struct sja1105_mac_config_entry *mac;
	struct device *dev = priv->ds->dev;
	sja1105_phy_interface_t phy_mode;
	sja1105_speed_t speed;
	int rc;

	/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
	 * tables. On E/T, MAC reconfig tables are not readable, only writable.
	 * We have to *know* what the MAC looks like.  For the sake of keeping
	 * the code common, we'll use the static configuration tables as a
	 * reasonable approximation for both E/T and P/Q/R/S.
	 */
	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	switch (speed_mbps) {
	case SPEED_UNKNOWN:
		/* PHYLINK called sja1105_mac_config() to inform us about
		 * the state->interface, but AN has not completed and the
		 * speed is not yet valid. UM10944.pdf says that setting
		 * SJA1105_SPEED_AUTO at runtime disables the port, so that is
		 * ok for power consumption in case AN will never complete -
		 * otherwise PHYLINK should come back with a new update.
		 */
		speed = SJA1105_SPEED_AUTO;
		break;
	case SPEED_10:
		speed = SJA1105_SPEED_10MBPS;
		break;
	case SPEED_100:
		speed = SJA1105_SPEED_100MBPS;
		break;
	case SPEED_1000:
		speed = SJA1105_SPEED_1000MBPS;
		break;
	default:
		dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
		return -EINVAL;
	}

	/* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
	 * table, since this will be used for the clocking setup, and we no
	 * longer need to store it in the static config (already told hardware
	 * we want auto during upload phase).
	 * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
	 * we need to configure the PCS only (if even that).
	 */
	if (sja1105_supports_sgmii(priv, port))
		mac[port].speed = SJA1105_SPEED_1000MBPS;
	else
		mac[port].speed = speed;

	/* Write to the dynamic reconfiguration tables */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					  &mac[port], true);
	if (rc < 0) {
		dev_err(dev, "Failed to write MAC config: %d\n", rc);
		return rc;
	}

	/* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
	 * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
	 * RMII no change of the clock setup is required. Actually, changing
	 * the clock setup does interrupt the clock signal for a certain time
	 * which causes trouble for all PHYs relying on this signal.
	 */
	phy_mode = mii->xmii_mode[port];
	if (phy_mode != XMII_MODE_RGMII)
		return 0;

	return sja1105_clocking_setup_port(priv, port);
}
992
993/* The SJA1105 MAC programming model is through the static config (the xMII
994 * Mode table cannot be dynamically reconfigured), and we have to program
995 * that early (earlier than PHYLINK calls us, anyway).
996 * So just error out in case the connected PHY attempts to change the initial
997 * system interface MII protocol from what is defined in the DT, at least for
998 * now.
999 */
1000static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
1001				      phy_interface_t interface)
1002{
1003	struct sja1105_xmii_params_entry *mii;
1004	sja1105_phy_interface_t phy_mode;
1005
1006	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
1007	phy_mode = mii->xmii_mode[port];
1008
1009	switch (interface) {
1010	case PHY_INTERFACE_MODE_MII:
1011		return (phy_mode != XMII_MODE_MII);
1012	case PHY_INTERFACE_MODE_RMII:
1013		return (phy_mode != XMII_MODE_RMII);
1014	case PHY_INTERFACE_MODE_RGMII:
1015	case PHY_INTERFACE_MODE_RGMII_ID:
1016	case PHY_INTERFACE_MODE_RGMII_RXID:
1017	case PHY_INTERFACE_MODE_RGMII_TXID:
1018		return (phy_mode != XMII_MODE_RGMII);
1019	case PHY_INTERFACE_MODE_SGMII:
1020		return (phy_mode != XMII_MODE_SGMII);
1021	default:
1022		return true;
1023	}
1024}
1025
1026static void sja1105_mac_config(struct dsa_switch *ds, int port,
1027			       unsigned int mode,
1028			       const struct phylink_link_state *state)
1029{
1030	struct sja1105_private *priv = ds->priv;
1031	bool is_sgmii = sja1105_supports_sgmii(priv, port);
1032
1033	if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
1034		dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
1035			phy_modes(state->interface));
1036		return;
1037	}
1038
1039	if (phylink_autoneg_inband(mode) && !is_sgmii) {
1040		dev_err(ds->dev, "In-band AN not supported!\n");
1041		return;
1042	}
1043
1044	if (is_sgmii)
1045		sja1105_sgmii_pcs_config(priv, phylink_autoneg_inband(mode),
1046					 false);
1047}
1048
/* phylink mac_link_down operation: inhibit transmission on this port
 * until the link comes back up.
 */
static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
				  unsigned int mode,
				  phy_interface_t interface)
{
	sja1105_inhibit_tx(ds->priv, BIT(port), true);
}
1055
/* phylink mac_link_up operation: program the MAC for the resolved link
 * parameters and re-enable transmission on the port.
 */
static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
				unsigned int mode,
				phy_interface_t interface,
				struct phy_device *phydev,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct sja1105_private *priv = ds->priv;

	/* Commit the negotiated/forced speed to the MAC config */
	sja1105_adjust_port_config(priv, port, speed);

	/* Without in-band AN, the SGMII PCS must be told the speed too */
	if (sja1105_supports_sgmii(priv, port) && !phylink_autoneg_inband(mode))
		sja1105_sgmii_pcs_force_speed(priv, speed);

	/* Link is up: stop inhibiting TX (set in sja1105_mac_link_down) */
	sja1105_inhibit_tx(priv, BIT(port), false);
}
1072
/* phylink validate operation: restrict the supported/advertised link modes
 * to what the port's committed xMII protocol can do (full duplex only, no
 * pause, gigabit only on RGMII/SGMII ports).
 */
static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
				     unsigned long *supported,
				     struct phylink_link_state *state)
{
	/* Construct a new mask which exhaustively contains all link features
	 * supported by the MAC, and then apply that (logical AND) to what will
	 * be sent to the PHY for "marketing".
	 */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct sja1105_private *priv = ds->priv;
	struct sja1105_xmii_params_entry *mii;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	/* include/linux/phylink.h says:
	 *     When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
	 *     expects the MAC driver to return all supported link modes.
	 */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    sja1105_phy_mode_mismatch(priv, port, state->interface)) {
		/* An incompatible interface supports nothing at all */
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	/* The MAC does not support pause frames, and also doesn't
	 * support half-duplex traffic modes.
	 */
	phylink_set(mask, Autoneg);
	phylink_set(mask, MII);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Full);
	phylink_set(mask, 100baseT1_Full);
	/* Only RGMII and SGMII ports can run at gigabit speed */
	if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
	    mii->xmii_mode[port] == XMII_MODE_SGMII)
		phylink_set(mask, 1000baseT_Full);

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
1113
1114static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port,
1115				     struct phylink_link_state *state)
1116{
1117	struct sja1105_private *priv = ds->priv;
1118	int ais;
1119
1120	/* Read the vendor-specific AUTONEG_INTR_STATUS register */
1121	ais = sja1105_sgmii_read(priv, SJA1105_AIS);
1122	if (ais < 0)
1123		return ais;
1124
1125	switch (SJA1105_AIS_SPEED(ais)) {
1126	case 0:
1127		state->speed = SPEED_10;
1128		break;
1129	case 1:
1130		state->speed = SPEED_100;
1131		break;
1132	case 2:
1133		state->speed = SPEED_1000;
1134		break;
1135	default:
1136		dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n",
1137			SJA1105_AIS_SPEED(ais));
1138	}
1139	state->duplex = SJA1105_AIS_DUPLEX_MODE(ais);
1140	state->an_complete = SJA1105_AIS_COMPLETE(ais);
1141	state->link = SJA1105_AIS_LINK_STATUS(ais);
1142
1143	return 0;
1144}
1145
1146static int
1147sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
1148			      const struct sja1105_l2_lookup_entry *requested)
1149{
1150	struct sja1105_l2_lookup_entry *l2_lookup;
1151	struct sja1105_table *table;
1152	int i;
1153
1154	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
1155	l2_lookup = table->entries;
1156
1157	for (i = 0; i < table->entry_count; i++)
1158		if (l2_lookup[i].macaddr == requested->macaddr &&
1159		    l2_lookup[i].vlanid == requested->vlanid &&
1160		    l2_lookup[i].destports & BIT(port))
1161			return i;
1162
1163	return -1;
1164}
1165
1166/* We want FDB entries added statically through the bridge command to persist
1167 * across switch resets, which are a common thing during normal SJA1105
1168 * operation. So we have to back them up in the static configuration tables
1169 * and hence apply them on next static config upload... yay!
1170 */
static int
sja1105_static_fdb_change(struct sja1105_private *priv, int port,
			  const struct sja1105_l2_lookup_entry *requested,
			  bool keep)
{
	struct sja1105_l2_lookup_entry *l2_lookup;
	struct sja1105_table *table;
	int rc, match;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];

	match = sja1105_find_static_fdb_entry(priv, port, requested);
	if (match < 0) {
		/* Can't delete a missing entry. */
		if (!keep)
			return 0;

		/* No match => new entry */
		rc = sja1105_table_resize(table, table->entry_count + 1);
		if (rc)
			return rc;

		/* The new entry goes into the slot created by the resize */
		match = table->entry_count - 1;
	}

	/* Assign pointer after the resize (it may be new memory) */
	l2_lookup = table->entries;

	/* We have a match.
	 * If the job was to add this FDB entry, it's already done (mostly
	 * anyway, since the port forwarding mask may have changed, case in
	 * which we update it).
	 * Otherwise we have to delete it.
	 */
	if (keep) {
		l2_lookup[match] = *requested;
		return 0;
	}

	/* To remove, the strategy is to overwrite the element with
	 * the last one, and then reduce the array size by 1
	 */
	l2_lookup[match] = l2_lookup[table->entry_count - 1];
	return sja1105_table_resize(table, table->entry_count - 1);
}
1216
1217/* First-generation switches have a 4-way set associative TCAM that
1218 * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
1219 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
1220 * For the placement of a newly learnt FDB entry, the switch selects the bin
1221 * based on a hash function, and the way within that bin incrementally.
1222 */
static int sja1105et_fdb_index(int bin, int way)
{
	/* Flatten the (bin, way) pair into the linear 0..1023 FDB index */
	return bin * SJA1105ET_FDB_BIN_SIZE + way;
}
1227
/* Search the ways of TCAM bin @bin for an FDB entry matching @addr/@vid.
 * Returns the way on success (copying the entry into @match when non-NULL)
 * or -1 when not found. If @last_unused is non-NULL, it receives the last
 * way in the bin that held no valid entry, usable as a free slot.
 */
static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
					 const u8 *addr, u16 vid,
					 struct sja1105_l2_lookup_entry *match,
					 int *last_unused)
{
	int way;

	for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
		struct sja1105_l2_lookup_entry l2_lookup = {0};
		int index = sja1105et_fdb_index(bin, way);

		/* Skip unused entries, optionally marking them
		 * into the return value
		 */
		if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						index, &l2_lookup)) {
			if (last_unused)
				*last_unused = way;
			continue;
		}

		if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
		    l2_lookup.vlanid == vid) {
			if (match)
				*match = l2_lookup;
			return way;
		}
	}
	/* Return an invalid entry index if not found */
	return -1;
}
1259
1260int sja1105et_fdb_add(struct dsa_switch *ds, int port,
1261		      const unsigned char *addr, u16 vid)
1262{
1263	struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
1264	struct sja1105_private *priv = ds->priv;
1265	struct device *dev = ds->dev;
1266	int last_unused = -1;
1267	int start, end, i;
1268	int bin, way, rc;
1269
1270	bin = sja1105et_fdb_hash(priv, addr, vid);
1271
1272	way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
1273					    &l2_lookup, &last_unused);
1274	if (way >= 0) {
1275		/* We have an FDB entry. Is our port in the destination
1276		 * mask? If yes, we need to do nothing. If not, we need
1277		 * to rewrite the entry by adding this port to it.
1278		 */
1279		if ((l2_lookup.destports & BIT(port)) && l2_lookup.lockeds)
1280			return 0;
1281		l2_lookup.destports |= BIT(port);
1282	} else {
1283		int index = sja1105et_fdb_index(bin, way);
1284
1285		/* We don't have an FDB entry. We construct a new one and
1286		 * try to find a place for it within the FDB table.
1287		 */
1288		l2_lookup.macaddr = ether_addr_to_u64(addr);
1289		l2_lookup.destports = BIT(port);
1290		l2_lookup.vlanid = vid;
1291
1292		if (last_unused >= 0) {
1293			way = last_unused;
1294		} else {
1295			/* Bin is full, need to evict somebody.
1296			 * Choose victim at random. If you get these messages
1297			 * often, you may need to consider changing the
1298			 * distribution function:
1299			 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
1300			 */
1301			get_random_bytes(&way, sizeof(u8));
1302			way %= SJA1105ET_FDB_BIN_SIZE;
1303			dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
1304				 bin, addr, way);
1305			/* Evict entry */
1306			sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1307						     index, NULL, false);
1308		}
1309	}
1310	l2_lookup.lockeds = true;
1311	l2_lookup.index = sja1105et_fdb_index(bin, way);
1312
1313	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1314					  l2_lookup.index, &l2_lookup,
1315					  true);
1316	if (rc < 0)
1317		return rc;
1318
1319	/* Invalidate a dynamically learned entry if that exists */
1320	start = sja1105et_fdb_index(bin, 0);
1321	end = sja1105et_fdb_index(bin, way);
1322
1323	for (i = start; i < end; i++) {
1324		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1325						 i, &tmp);
1326		if (rc == -ENOENT)
1327			continue;
1328		if (rc)
1329			return rc;
1330
1331		if (tmp.macaddr != ether_addr_to_u64(addr) || tmp.vlanid != vid)
1332			continue;
1333
1334		rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1335						  i, NULL, false);
1336		if (rc)
1337			return rc;
1338
1339		break;
1340	}
1341
1342	return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
1343}
1344
1345int sja1105et_fdb_del(struct dsa_switch *ds, int port,
1346		      const unsigned char *addr, u16 vid)
1347{
1348	struct sja1105_l2_lookup_entry l2_lookup = {0};
1349	struct sja1105_private *priv = ds->priv;
1350	int index, bin, way, rc;
1351	bool keep;
1352
1353	bin = sja1105et_fdb_hash(priv, addr, vid);
1354	way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
1355					    &l2_lookup, NULL);
1356	if (way < 0)
1357		return 0;
1358	index = sja1105et_fdb_index(bin, way);
1359
1360	/* We have an FDB entry. Is our port in the destination mask? If yes,
1361	 * we need to remove it. If the resulting port mask becomes empty, we
1362	 * need to completely evict the FDB entry.
1363	 * Otherwise we just write it back.
1364	 */
1365	l2_lookup.destports &= ~BIT(port);
1366
1367	if (l2_lookup.destports)
1368		keep = true;
1369	else
1370		keep = false;
1371
1372	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1373					  index, &l2_lookup, keep);
1374	if (rc < 0)
1375		return rc;
1376
1377	return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
1378}
1379
/* Add a static FDB entry for @addr/@vid on @port, for second-generation
 * (P/Q/R/S) switches, which can search the FDB by MAC/VID masks in
 * hardware (SJA1105_SEARCH) instead of software hashing.
 */
int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
			const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
	struct sja1105_private *priv = ds->priv;
	int rc, i;

	/* Search for an existing entry in the FDB table */
	l2_lookup.macaddr = ether_addr_to_u64(addr);
	l2_lookup.vlanid = vid;
	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
	l2_lookup.mask_vlanid = VLAN_VID_MASK;
	l2_lookup.destports = BIT(port);

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
					 SJA1105_SEARCH, &l2_lookup);
	if (rc == 0) {
		/* Found a static entry and this port is already in the entry's
		 * port mask => job done
		 */
		if ((l2_lookup.destports & BIT(port)) && l2_lookup.lockeds)
			return 0;
		/* l2_lookup.index is populated by the switch in case it
		 * found something.
		 */
		l2_lookup.destports |= BIT(port);
		goto skip_finding_an_index;
	}

	/* Not found, so try to find an unused spot in the FDB.
	 * This is slightly inefficient because the strategy is knock-knock at
	 * every possible position from 0 to 1023.
	 */
	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
		/* A failed read means position i holds no valid entry */
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						 i, NULL);
		if (rc < 0)
			break;
	}
	if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
		dev_err(ds->dev, "FDB is full, cannot add entry.\n");
		return -EINVAL;
	}
	l2_lookup.index = i;

skip_finding_an_index:
	l2_lookup.lockeds = true;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					  l2_lookup.index, &l2_lookup,
					  true);
	if (rc < 0)
		return rc;

	/* The switch learns dynamic entries and looks up the FDB left to
	 * right. It is possible that our addition was concurrent with the
	 * dynamic learning of the same address, so now that the static entry
	 * has been installed, we are certain that address learning for this
	 * particular address has been turned off, so the dynamic entry either
	 * is in the FDB at an index smaller than the static one, or isn't (it
	 * can also be at a larger index, but in that case it is inactive
	 * because the static FDB entry will match first, and the dynamic one
	 * will eventually age out). Search for a dynamically learned address
	 * prior to our static one and invalidate it.
	 */
	tmp = l2_lookup;

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
					 SJA1105_SEARCH, &tmp);
	if (rc < 0) {
		dev_err(ds->dev,
			"port %d failed to read back entry for %pM vid %d: %pe\n",
			port, addr, vid, ERR_PTR(rc));
		return rc;
	}

	if (tmp.index < l2_lookup.index) {
		/* A dynamically learned duplicate shadows ours; remove it */
		rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
						  tmp.index, NULL, false);
		if (rc < 0)
			return rc;
	}

	/* Mirror the change into the static config so it survives resets */
	return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
}
1465
1466int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
1467			const unsigned char *addr, u16 vid)
1468{
1469	struct sja1105_l2_lookup_entry l2_lookup = {0};
1470	struct sja1105_private *priv = ds->priv;
1471	bool keep;
1472	int rc;
1473
1474	l2_lookup.macaddr = ether_addr_to_u64(addr);
1475	l2_lookup.vlanid = vid;
1476	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
1477	l2_lookup.mask_vlanid = VLAN_VID_MASK;
1478	l2_lookup.destports = BIT(port);
1479
1480	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1481					 SJA1105_SEARCH, &l2_lookup);
1482	if (rc < 0)
1483		return 0;
1484
1485	l2_lookup.destports &= ~BIT(port);
1486
1487	/* Decide whether we remove just this port from the FDB entry,
1488	 * or if we remove it completely.
1489	 */
1490	if (l2_lookup.destports)
1491		keep = true;
1492	else
1493		keep = false;
1494
1495	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1496					  l2_lookup.index, &l2_lookup, keep);
1497	if (rc < 0)
1498		return rc;
1499
1500	return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
1501}
1502
1503static int sja1105_fdb_add(struct dsa_switch *ds, int port,
1504			   const unsigned char *addr, u16 vid)
1505{
1506	struct sja1105_private *priv = ds->priv;
1507
1508	/* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
1509	 * so the switch still does some VLAN processing internally.
1510	 * But Shared VLAN Learning (SVL) is also active, and it will take
1511	 * care of autonomous forwarding between the unique pvid's of each
1512	 * port.  Here we just make sure that users can't add duplicate FDB
1513	 * entries when in this mode - the actual VID doesn't matter except
1514	 * for what gets printed in 'bridge fdb show'.  In the case of zero,
1515	 * no VID gets printed at all.
1516	 */
1517	if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
1518		vid = 0;
1519
1520	return priv->info->fdb_add_cmd(ds, port, addr, vid);
1521}
1522
1523static int sja1105_fdb_del(struct dsa_switch *ds, int port,
1524			   const unsigned char *addr, u16 vid)
1525{
1526	struct sja1105_private *priv = ds->priv;
1527
1528	if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
1529		vid = 0;
1530
1531	return priv->info->fdb_del_cmd(ds, port, addr, vid);
1532}
1533
/* DSA .port_fdb_dump operation: walk the entire hardware FDB over SPI and
 * report, via @cb, the entries whose destination mask includes @port.
 */
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
			    dsa_fdb_dump_cb_t *cb, void *data)
{
	struct sja1105_private *priv = ds->priv;
	struct device *dev = ds->dev;
	int i;

	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
		struct sja1105_l2_lookup_entry l2_lookup = {0};
		u8 macaddr[ETH_ALEN];
		int rc;

		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						 i, &l2_lookup);
		/* No fdb entry at i, not an issue */
		if (rc == -ENOENT)
			continue;
		if (rc) {
			dev_err(dev, "Failed to dump FDB: %d\n", rc);
			return rc;
		}

		/* FDB dump callback is per port. This means we have to
		 * disregard a valid entry if it's not for this port, even if
		 * only to revisit it later. This is inefficient because the
		 * 1024-sized FDB table needs to be traversed 4 times through
		 * SPI during a 'bridge fdb show' command.
		 */
		if (!(l2_lookup.destports & BIT(port)))
			continue;
		u64_to_ether_addr(l2_lookup.macaddr, macaddr);

		/* We need to hide the dsa_8021q VLANs from the user. */
		if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
			l2_lookup.vlanid = 0;
		rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
		if (rc)
			return rc;
	}
	return 0;
}
1575
1576/* This callback needs to be present */
static int sja1105_mdb_prepare(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_mdb *mdb)
{
	/* Nothing to preallocate or validate; always succeed */
	return 0;
}
1582
/* Multicast database entries are installed through the same FDB add path
 * as unicast entries.
 */
static void sja1105_mdb_add(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_mdb *mdb)
{
	sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
}
1588
/* Multicast database entries are removed through the same FDB delete path
 * as unicast entries.
 */
static int sja1105_mdb_del(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_mdb *mdb)
{
	return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
}
1594
/* Add @port to (when @member) or remove it from the L2 forwarding domain
 * of every other user port under the same bridge @br, committing the
 * updated L2 Forwarding table rows through dynamic reconfiguration.
 */
static int sja1105_bridge_member(struct dsa_switch *ds, int port,
				 struct net_device *br, bool member)
{
	struct sja1105_l2_forwarding_entry *l2_fwd;
	struct sja1105_private *priv = ds->priv;
	int i, rc;

	l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		/* Add this port to the forwarding matrix of the
		 * other ports in the same bridge, and viceversa.
		 */
		if (!dsa_is_user_port(ds, i))
			continue;
		/* For the ports already under the bridge, only one thing needs
		 * to be done, and that is to add this port to their
		 * reachability domain. So we can perform the SPI write for
		 * them immediately. However, for this port itself (the one
		 * that is new to the bridge), we need to add all other ports
		 * to its reachability domain. So we do that incrementally in
		 * this loop, and perform the SPI write only at the end, once
		 * the domain contains all other bridge ports.
		 */
		if (i == port)
			continue;
		if (dsa_to_port(ds, i)->bridge_dev != br)
			continue;
		sja1105_port_allow_traffic(l2_fwd, i, port, member);
		sja1105_port_allow_traffic(l2_fwd, port, i, member);

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
						  i, &l2_fwd[i], true);
		if (rc < 0)
			return rc;
	}

	/* Finally commit this port's own (fully accumulated) row */
	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
					    port, &l2_fwd[port], true);
}
1635
/* Map the bridge STP state of @port onto the MAC's ingress, egress and
 * address-learning enable bits, and commit them through dynamic
 * reconfiguration of the MAC Configuration table.
 */
static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
					 u8 state)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_mac_config_entry *mac;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	switch (state) {
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		/* From UM10944 description of DRPDTAG (why put this there?):
		 * "Management traffic flows to the port regardless of the state
		 * of the INGRESS flag". So BPDUs are still allowed to pass.
		 * At the moment no difference between DISABLED and BLOCKING.
		 */
		mac[port].ingress   = false;
		mac[port].egress    = false;
		mac[port].dyn_learn = false;
		break;
	case BR_STATE_LISTENING:
		mac[port].ingress   = true;
		mac[port].egress    = false;
		mac[port].dyn_learn = false;
		break;
	case BR_STATE_LEARNING:
		mac[port].ingress   = true;
		mac[port].egress    = false;
		mac[port].dyn_learn = true;
		break;
	case BR_STATE_FORWARDING:
		mac[port].ingress   = true;
		mac[port].egress    = true;
		mac[port].dyn_learn = true;
		break;
	default:
		dev_err(ds->dev, "invalid STP state: %d\n", state);
		return;
	}

	sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
				     &mac[port], true);
}
1679
/* DSA .port_bridge_join operation: open up forwarding between this port
 * and the other ports under the same bridge.
 */
static int sja1105_bridge_join(struct dsa_switch *ds, int port,
			       struct net_device *br)
{
	return sja1105_bridge_member(ds, port, br, true);
}
1685
/* DSA .port_bridge_leave operation: tear down forwarding between this port
 * and the remaining ports of the bridge it is leaving.
 */
static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
				 struct net_device *br)
{
	sja1105_bridge_member(ds, port, br, false);
}
1691
1692#define BYTES_PER_KBIT (1000LL / 8)
1693
1694static int sja1105_find_cbs_shaper(struct sja1105_private *priv,
1695				   int port, int prio)
1696{
1697	int i;
1698
1699	for (i = 0; i < priv->info->num_cbs_shapers; i++)
1700		if (priv->cbs[i].port == port && priv->cbs[i].prio == prio)
1701			return i;
1702
1703	return -1;
1704}
1705
1706static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
1707{
1708	int i;
1709
1710	for (i = 0; i < priv->info->num_cbs_shapers; i++)
1711		if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
1712			return i;
1713
1714	return -1;
1715}
1716
1717static int sja1105_delete_cbs_shaper(struct sja1105_private *priv, int port,
1718				     int prio)
1719{
1720	int i;
1721
1722	for (i = 0; i < priv->info->num_cbs_shapers; i++) {
1723		struct sja1105_cbs_entry *cbs = &priv->cbs[i];
1724
1725		if (cbs->port == port && cbs->prio == prio) {
1726			memset(cbs, 0, sizeof(*cbs));
1727			return sja1105_dynamic_config_write(priv, BLK_IDX_CBS,
1728							    i, cbs, true);
1729		}
1730	}
1731
1732	return 0;
1733}
1734
1735static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
1736				struct tc_cbs_qopt_offload *offload)
1737{
1738	struct sja1105_private *priv = ds->priv;
1739	struct sja1105_cbs_entry *cbs;
1740	s64 port_transmit_rate_kbps;
1741	int index;
1742
1743	if (!offload->enable)
1744		return sja1105_delete_cbs_shaper(priv, port, offload->queue);
1745
1746	/* The user may be replacing an existing shaper */
1747	index = sja1105_find_cbs_shaper(priv, port, offload->queue);
1748	if (index < 0) {
1749		/* That isn't the case - see if we can allocate a new one */
1750		index = sja1105_find_unused_cbs_shaper(priv);
1751		if (index < 0)
1752			return -ENOSPC;
1753	}
1754
1755	cbs = &priv->cbs[index];
1756	cbs->port = port;
1757	cbs->prio = offload->queue;
1758	/* locredit and sendslope are negative by definition. In hardware,
1759	 * positive values must be provided, and the negative sign is implicit.
1760	 */
1761	cbs->credit_hi = offload->hicredit;
1762	cbs->credit_lo = abs(offload->locredit);
1763	/* User space is in kbits/sec, while the hardware in bytes/sec times
1764	 * link speed. Since the given offload->sendslope is good only for the
1765	 * current link speed anyway, and user space is likely to reprogram it
1766	 * when that changes, don't even bother to track the port's link speed,
1767	 * but deduce the port transmit rate from idleslope - sendslope.
1768	 */
1769	port_transmit_rate_kbps = offload->idleslope - offload->sendslope;
1770	cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT,
1771				  port_transmit_rate_kbps);
1772	cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT),
1773				  port_transmit_rate_kbps);
1774	/* Convert the negative values from 64-bit 2's complement
1775	 * to 32-bit 2's complement (for the case of 0x80000000 whose
1776	 * negative is still negative).
1777	 */
1778	cbs->credit_lo &= GENMASK_ULL(31, 0);
1779	cbs->send_slope &= GENMASK_ULL(31, 0);
1780
1781	return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, index, cbs,
1782					    true);
1783}
1784
1785static int sja1105_reload_cbs(struct sja1105_private *priv)
1786{
1787	int rc = 0, i;
1788
1789	/* The credit based shapers are only allocated if
1790	 * CONFIG_NET_SCH_CBS is enabled.
1791	 */
1792	if (!priv->cbs)
1793		return 0;
1794
1795	for (i = 0; i < priv->info->num_cbs_shapers; i++) {
1796		struct sja1105_cbs_entry *cbs = &priv->cbs[i];
1797
1798		if (!cbs->idle_slope && !cbs->send_slope)
1799			continue;
1800
1801		rc = sja1105_dynamic_config_write(priv, BLK_IDX_CBS, i, cbs,
1802						  true);
1803		if (rc)
1804			break;
1805	}
1806
1807	return rc;
1808}
1809
/* Human-readable descriptions of the reset reasons, indexed by
 * enum sja1105_reset_reason, for the log message printed on reload.
 */
static const char * const sja1105_reset_reasons[] = {
	[SJA1105_VLAN_FILTERING] = "VLAN filtering",
	[SJA1105_RX_HWTSTAMPING] = "RX timestamping",
	[SJA1105_AGEING_TIME] = "Ageing time",
	[SJA1105_SCHEDULING] = "Time-aware scheduling",
	[SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
	[SJA1105_VIRTUAL_LINKS] = "Virtual links",
};
1818
1819/* For situations where we need to change a setting at runtime that is only
1820 * available through the static configuration, resetting the switch in order
1821 * to upload the new static config is unavoidable. Back up the settings we
1822 * modify at runtime (currently only MAC) and restore them after uploading,
1823 * such that this operation is relatively seamless.
1824 */
int sja1105_static_config_reload(struct sja1105_private *priv,
				 enum sja1105_reset_reason reason)
{
	struct ptp_system_timestamp ptp_sts_before;
	struct ptp_system_timestamp ptp_sts_after;
	struct sja1105_mac_config_entry *mac;
	int speed_mbps[SJA1105_NUM_PORTS];
	struct dsa_switch *ds = priv->ds;
	s64 t1, t2, t3, t4;
	s64 t12, t34;
	u16 bmcr = 0;
	int rc, i;
	s64 now;

	mutex_lock(&priv->mgmt_lock);

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* Back up the dynamic link speed changed by sja1105_adjust_port_config
	 * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
	 * switch wants to see in the static config in order to allow us to
	 * change it through the dynamic interface later.
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		speed_mbps[i] = sja1105_speed[mac[i].speed];
		mac[i].speed = SJA1105_SPEED_AUTO;
	}

	/* Back up the SGMII PCS control register, so that AN settings can be
	 * restored after the reset.
	 */
	if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT))
		bmcr = sja1105_sgmii_read(priv, MII_BMCR);

	/* No PTP operations can run right now */
	mutex_lock(&priv->ptp_data.lock);

	/* Sample the PTP clock before the reset, bracketed by system
	 * timestamps, so that it can be restored afterwards.
	 */
	rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
	if (rc < 0)
		goto out_unlock_ptp;

	/* Reset switch and send updated static configuration */
	rc = sja1105_static_config_upload(priv);
	if (rc < 0)
		goto out_unlock_ptp;

	rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
	if (rc < 0)
		goto out_unlock_ptp;

	t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
	t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
	t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
	t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
	/* Mid point, corresponds to pre-reset PTPCLKVAL */
	t12 = t1 + (t2 - t1) / 2;
	/* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
	t34 = t3 + (t4 - t3) / 2;
	/* Advance PTPCLKVAL by the time it took since its readout */
	now += (t34 - t12);

	__sja1105_ptp_adjtime(ds, now);

out_unlock_ptp:
	mutex_unlock(&priv->ptp_data.lock);

	dev_info(priv->ds->dev,
		 "Reset switch and programmed static config. Reason: %s\n",
		 sja1105_reset_reasons[reason]);

	/* Configure the CGU (PLLs) for MII and RMII PHYs.
	 * For these interfaces there is no dynamic configuration
	 * needed, since PLLs have same settings at all speeds.
	 */
	rc = sja1105_clocking_setup(priv);
	if (rc < 0)
		goto out;

	/* Restore the per-port speeds that were backed up above */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
		if (rc < 0)
			goto out;
	}

	/* Restore the SGMII PCS configuration from the saved BMCR */
	if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) {
		bool an_enabled = !!(bmcr & BMCR_ANENABLE);

		sja1105_sgmii_pcs_config(priv, an_enabled, false);

		if (!an_enabled) {
			int speed = SPEED_UNKNOWN;

			if (bmcr & BMCR_SPEED1000)
				speed = SPEED_1000;
			else if (bmcr & BMCR_SPEED100)
				speed = SPEED_100;
			else
				speed = SPEED_10;

			sja1105_sgmii_pcs_force_speed(priv, speed);
		}
	}

	rc = sja1105_reload_cbs(priv);
	if (rc < 0)
		goto out;
out:
	mutex_unlock(&priv->mgmt_lock);

	return rc;
}
1933
/* Set the port's default VLAN ID (pvid) in the MAC Configuration table
 * and commit it through dynamic reconfiguration.
 */
static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
{
	struct sja1105_mac_config_entry *mac;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	mac[port].vlanid = pvid;

	return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					   &mac[port], true);
}
1945
/* Called when port @other_port of another switch in the DSA tree joins
 * bridge @br: set up dsa_8021q cross-chip links, in both directions,
 * between it and every local user port under the same bridge. Switches
 * that aren't sja1105 are skipped.
 */
static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
					 int tree_index, int sw_index,
					 int other_port, struct net_device *br)
{
	struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
	struct sja1105_private *other_priv = other_ds->priv;
	struct sja1105_private *priv = ds->priv;
	int port, rc;

	if (other_ds->ops != &sja1105_switch_ops)
		return 0;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_user_port(ds, port))
			continue;
		if (dsa_to_port(ds, port)->bridge_dev != br)
			continue;

		rc = dsa_8021q_crosschip_bridge_join(priv->dsa_8021q_ctx,
						     port,
						     other_priv->dsa_8021q_ctx,
						     other_port);
		if (rc)
			return rc;

		/* And the reverse direction, from the other switch to us */
		rc = dsa_8021q_crosschip_bridge_join(other_priv->dsa_8021q_ctx,
						     other_port,
						     priv->dsa_8021q_ctx,
						     port);
		if (rc)
			return rc;
	}

	return 0;
}
1981
/* Counterpart of sja1105_crosschip_bridge_join(): tear down the cross-chip
 * VLAN links in both directions for every one of our user ports that was
 * bridged with @other_port on the other switch.
 */
static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
					   int tree_index, int sw_index,
					   int other_port,
					   struct net_device *br)
{
	struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
	struct sja1105_private *other_priv = other_ds->priv;
	struct sja1105_private *priv = ds->priv;
	int port;

	/* Only sja1105 switches participate in these cross-chip links */
	if (other_ds->ops != &sja1105_switch_ops)
		return;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_user_port(ds, port))
			continue;
		if (dsa_to_port(ds, port)->bridge_dev != br)
			continue;

		/* Remove our link towards the other switch... */
		dsa_8021q_crosschip_bridge_leave(priv->dsa_8021q_ctx, port,
						 other_priv->dsa_8021q_ctx,
						 other_port);

		/* ...and its link towards us */
		dsa_8021q_crosschip_bridge_leave(other_priv->dsa_8021q_ctx,
						 other_port,
						 priv->dsa_8021q_ctx, port);
	}
}
2010
2011static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
2012{
2013	struct sja1105_private *priv = ds->priv;
2014	int rc;
2015
2016	rc = dsa_8021q_setup(priv->dsa_8021q_ctx, enabled);
2017	if (rc)
2018		return rc;
2019
2020	dev_info(ds->dev, "%s switch tagging\n",
2021		 enabled ? "Enabled" : "Disabled");
2022	return 0;
2023}
2024
/* Report the tagging protocol to the DSA core; this driver only supports
 * its dedicated sja1105 tagger, regardless of @port or the master's
 * preference @mp.
 */
static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
			 enum dsa_tag_protocol mp)
{
	return DSA_TAG_PROTO_SJA1105;
}
2031
2032static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
2033{
2034	int subvlan;
2035
2036	if (pvid)
2037		return 0;
2038
2039	for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2040		if (subvlan_map[subvlan] == VLAN_N_VID)
2041			return subvlan;
2042
2043	return -1;
2044}
2045
2046static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid)
2047{
2048	int subvlan;
2049
2050	for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2051		if (subvlan_map[subvlan] == vid)
2052			return subvlan;
2053
2054	return -1;
2055}
2056
2057static int sja1105_find_committed_subvlan(struct sja1105_private *priv,
2058					  int port, u16 vid)
2059{
2060	struct sja1105_port *sp = &priv->ports[port];
2061
2062	return sja1105_find_subvlan(sp->subvlan_map, vid);
2063}
2064
2065static void sja1105_init_subvlan_map(u16 *subvlan_map)
2066{
2067	int subvlan;
2068
2069	for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2070		subvlan_map[subvlan] = VLAN_N_VID;
2071}
2072
2073static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port,
2074				       u16 *subvlan_map)
2075{
2076	struct sja1105_port *sp = &priv->ports[port];
2077	int subvlan;
2078
2079	for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2080		sp->subvlan_map[subvlan] = subvlan_map[subvlan];
2081}
2082
2083static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
2084{
2085	struct sja1105_vlan_lookup_entry *vlan;
2086	int count, i;
2087
2088	vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
2089	count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
2090
2091	for (i = 0; i < count; i++)
2092		if (vlan[i].vlanid == vid)
2093			return i;
2094
2095	/* Return an invalid entry index if not found */
2096	return -1;
2097}
2098
2099static int
2100sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging,
2101			     int count, int from_port, u16 from_vid,
2102			     u16 to_vid)
2103{
2104	int i;
2105
2106	for (i = 0; i < count; i++)
2107		if (retagging[i].ing_port == BIT(from_port) &&
2108		    retagging[i].vlan_ing == from_vid &&
2109		    retagging[i].vlan_egr == to_vid)
2110			return i;
2111
2112	/* Return an invalid entry index if not found */
2113	return -1;
2114}
2115
2116static int sja1105_commit_vlans(struct sja1105_private *priv,
2117				struct sja1105_vlan_lookup_entry *new_vlan,
2118				struct sja1105_retagging_entry *new_retagging,
2119				int num_retagging)
2120{
2121	struct sja1105_retagging_entry *retagging;
2122	struct sja1105_vlan_lookup_entry *vlan;
2123	struct sja1105_table *table;
2124	int num_vlans = 0;
2125	int rc, i, k = 0;
2126
2127	/* VLAN table */
2128	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2129	vlan = table->entries;
2130
2131	for (i = 0; i < VLAN_N_VID; i++) {
2132		int match = sja1105_is_vlan_configured(priv, i);
2133
2134		if (new_vlan[i].vlanid != VLAN_N_VID)
2135			num_vlans++;
2136
2137		if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) {
2138			/* Was there before, no longer is. Delete */
2139			dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i);
2140			rc = sja1105_dynamic_config_write(priv,
2141							  BLK_IDX_VLAN_LOOKUP,
2142							  i, &vlan[match], false);
2143			if (rc < 0)
2144				return rc;
2145		} else if (new_vlan[i].vlanid != VLAN_N_VID) {
2146			/* Nothing changed, don't do anything */
2147			if (match >= 0 &&
2148			    vlan[match].vlanid == new_vlan[i].vlanid &&
2149			    vlan[match].tag_port == new_vlan[i].tag_port &&
2150			    vlan[match].vlan_bc == new_vlan[i].vlan_bc &&
2151			    vlan[match].vmemb_port == new_vlan[i].vmemb_port)
2152				continue;
2153			/* Update entry */
2154			dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i);
2155			rc = sja1105_dynamic_config_write(priv,
2156							  BLK_IDX_VLAN_LOOKUP,
2157							  i, &new_vlan[i],
2158							  true);
2159			if (rc < 0)
2160				return rc;
2161		}
2162	}
2163
2164	if (table->entry_count)
2165		kfree(table->entries);
2166
2167	table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size,
2168				 GFP_KERNEL);
2169	if (!table->entries)
2170		return -ENOMEM;
2171
2172	table->entry_count = num_vlans;
2173	vlan = table->entries;
2174
2175	for (i = 0; i < VLAN_N_VID; i++) {
2176		if (new_vlan[i].vlanid == VLAN_N_VID)
2177			continue;
2178		vlan[k++] = new_vlan[i];
2179	}
2180
2181	/* VLAN Retagging Table */
2182	table = &priv->static_config.tables[BLK_IDX_RETAGGING];
2183	retagging = table->entries;
2184
2185	for (i = 0; i < table->entry_count; i++) {
2186		rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
2187						  i, &retagging[i], false);
2188		if (rc)
2189			return rc;
2190	}
2191
2192	if (table->entry_count)
2193		kfree(table->entries);
2194
2195	table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size,
2196				 GFP_KERNEL);
2197	if (!table->entries)
2198		return -ENOMEM;
2199
2200	table->entry_count = num_retagging;
2201	retagging = table->entries;
2202
2203	for (i = 0; i < num_retagging; i++) {
2204		retagging[i] = new_retagging[i];
2205
2206		/* Update entry */
2207		rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
2208						  i, &retagging[i], true);
2209		if (rc < 0)
2210			return rc;
2211	}
2212
2213	return 0;
2214}
2215
/* Temporary bookkeeping used by sja1105_build_crosschip_subvlans(): a VLAN
 * present both on a port of a neighbour switch and on one of our own ports,
 * for which a reverse retagging rule must be installed on our CPU port.
 */
struct sja1105_crosschip_vlan {
	struct list_head list;
	u16 vid;		/* The original (bridge) VLAN ID */
	bool untagged;		/* Egress-untagged on the front-panel port */
	int port;		/* Our local port sharing this VLAN */
	int other_port;		/* Port on the neighbour switch */
	struct dsa_8021q_context *other_ctx; /* Neighbour's dsa_8021q context */
};
2224
/* Temporary list node identifying one distinct neighbour switch that must be
 * notified to rebuild its VLAN table (see
 * sja1105_notify_crosschip_switches()).
 */
struct sja1105_crosschip_switch {
	struct list_head list;
	struct dsa_8021q_context *other_ctx;
};
2229
2230static int sja1105_commit_pvid(struct sja1105_private *priv)
2231{
2232	struct sja1105_bridge_vlan *v;
2233	struct list_head *vlan_list;
2234	int rc = 0;
2235
2236	if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2237		vlan_list = &priv->bridge_vlans;
2238	else
2239		vlan_list = &priv->dsa_8021q_vlans;
2240
2241	list_for_each_entry(v, vlan_list, list) {
2242		if (v->pvid) {
2243			rc = sja1105_pvid_apply(priv, v->port, v->vid);
2244			if (rc)
2245				break;
2246		}
2247	}
2248
2249	return rc;
2250}
2251
2252static int
2253sja1105_build_bridge_vlans(struct sja1105_private *priv,
2254			   struct sja1105_vlan_lookup_entry *new_vlan)
2255{
2256	struct sja1105_bridge_vlan *v;
2257
2258	if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
2259		return 0;
2260
2261	list_for_each_entry(v, &priv->bridge_vlans, list) {
2262		int match = v->vid;
2263
2264		new_vlan[match].vlanid = v->vid;
2265		new_vlan[match].vmemb_port |= BIT(v->port);
2266		new_vlan[match].vlan_bc |= BIT(v->port);
2267		if (!v->untagged)
2268			new_vlan[match].tag_port |= BIT(v->port);
2269	}
2270
2271	return 0;
2272}
2273
2274static int
2275sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv,
2276			      struct sja1105_vlan_lookup_entry *new_vlan)
2277{
2278	struct sja1105_bridge_vlan *v;
2279
2280	if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2281		return 0;
2282
2283	list_for_each_entry(v, &priv->dsa_8021q_vlans, list) {
2284		int match = v->vid;
2285
2286		new_vlan[match].vlanid = v->vid;
2287		new_vlan[match].vmemb_port |= BIT(v->port);
2288		new_vlan[match].vlan_bc |= BIT(v->port);
2289		if (!v->untagged)
2290			new_vlan[match].tag_port |= BIT(v->port);
2291	}
2292
2293	return 0;
2294}
2295
2296static int sja1105_build_subvlans(struct sja1105_private *priv,
2297				  u16 subvlan_map[][DSA_8021Q_N_SUBVLAN],
2298				  struct sja1105_vlan_lookup_entry *new_vlan,
2299				  struct sja1105_retagging_entry *new_retagging,
2300				  int *num_retagging)
2301{
2302	struct sja1105_bridge_vlan *v;
2303	int k = *num_retagging;
2304
2305	if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
2306		return 0;
2307
2308	list_for_each_entry(v, &priv->bridge_vlans, list) {
2309		int upstream = dsa_upstream_port(priv->ds, v->port);
2310		int match, subvlan;
2311		u16 rx_vid;
2312
2313		/* Only sub-VLANs on user ports need to be applied.
2314		 * Bridge VLANs also include VLANs added automatically
2315		 * by DSA on the CPU port.
2316		 */
2317		if (!dsa_is_user_port(priv->ds, v->port))
2318			continue;
2319
2320		subvlan = sja1105_find_subvlan(subvlan_map[v->port],
2321					       v->vid);
2322		if (subvlan < 0) {
2323			subvlan = sja1105_find_free_subvlan(subvlan_map[v->port],
2324							    v->pvid);
2325			if (subvlan < 0) {
2326				dev_err(priv->ds->dev, "No more free subvlans\n");
2327				return -ENOSPC;
2328			}
2329		}
2330
2331		rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan);
2332
2333		/* @v->vid on @v->port needs to be retagged to @rx_vid
2334		 * on @upstream. Assume @v->vid on @v->port and on
2335		 * @upstream was already configured by the previous
2336		 * iteration over bridge_vlans.
2337		 */
2338		match = rx_vid;
2339		new_vlan[match].vlanid = rx_vid;
2340		new_vlan[match].vmemb_port |= BIT(v->port);
2341		new_vlan[match].vmemb_port |= BIT(upstream);
2342		new_vlan[match].vlan_bc |= BIT(v->port);
2343		new_vlan[match].vlan_bc |= BIT(upstream);
2344		/* The "untagged" flag is set the same as for the
2345		 * original VLAN
2346		 */
2347		if (!v->untagged)
2348			new_vlan[match].tag_port |= BIT(v->port);
2349		/* But it's always tagged towards the CPU */
2350		new_vlan[match].tag_port |= BIT(upstream);
2351
2352		/* The Retagging Table generates packet *clones* with
2353		 * the new VLAN. This is a very odd hardware quirk
2354		 * which we need to suppress by dropping the original
2355		 * packet.
2356		 * Deny egress of the original VLAN towards the CPU
2357		 * port. This will force the switch to drop it, and
2358		 * we'll see only the retagged packets.
2359		 */
2360		match = v->vid;
2361		new_vlan[match].vlan_bc &= ~BIT(upstream);
2362
2363		/* And the retagging itself */
2364		new_retagging[k].vlan_ing = v->vid;
2365		new_retagging[k].vlan_egr = rx_vid;
2366		new_retagging[k].ing_port = BIT(v->port);
2367		new_retagging[k].egr_port = BIT(upstream);
2368		if (k++ == SJA1105_MAX_RETAGGING_COUNT) {
2369			dev_err(priv->ds->dev, "No more retagging rules\n");
2370			return -ENOSPC;
2371		}
2372
2373		subvlan_map[v->port][subvlan] = v->vid;
2374	}
2375
2376	*num_retagging = k;
2377
2378	return 0;
2379}
2380
2381/* Sadly, in crosschip scenarios where the CPU port is also the link to another
2382 * switch, we should retag backwards (the dsa_8021q vid to the original vid) on
2383 * the CPU port of neighbour switches.
2384 */
/* Two passes:
 * 1. Collect, deduplicated into @crosschip_vlans, every non-pvid VLAN that
 *    exists both on a neighbour switch's user port and on our own user port
 *    at the other end of a cross-chip link.
 * 2. For each such VLAN, install the neighbour's per-subvlan RX VID on our
 *    ports and add a reverse retagging rule (RX VID -> original VID) on our
 *    CPU port, merging rules with the same ingress VLAN where possible.
 * Returns 0 on success or a negative error code; the temporary list is
 * always freed before returning.
 */
static int
sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
				 struct sja1105_vlan_lookup_entry *new_vlan,
				 struct sja1105_retagging_entry *new_retagging,
				 int *num_retagging)
{
	struct sja1105_crosschip_vlan *tmp, *pos;
	struct dsa_8021q_crosschip_link *c;
	struct sja1105_bridge_vlan *v, *w;
	struct list_head crosschip_vlans;
	int k = *num_retagging;
	int rc = 0;

	if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
		return 0;

	INIT_LIST_HEAD(&crosschip_vlans);

	/* Pass 1: build the deduplicated crosschip_vlans list */
	list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
		struct sja1105_private *other_priv = c->other_ctx->ds->priv;

		/* Neighbours in full filtering mode don't use sub-VLANs */
		if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
			continue;

		/* Crosschip links are also added to the CPU ports.
		 * Ignore those.
		 */
		if (!dsa_is_user_port(priv->ds, c->port))
			continue;
		if (!dsa_is_user_port(c->other_ctx->ds, c->other_port))
			continue;

		/* Search for VLANs on the remote port */
		list_for_each_entry(v, &other_priv->bridge_vlans, list) {
			bool already_added = false;
			bool we_have_it = false;

			if (v->port != c->other_port)
				continue;

			/* If @v is a pvid on @other_ds, it does not need
			 * re-retagging, because its SVL field is 0 and we
			 * already allow that, via the dsa_8021q crosschip
			 * links.
			 */
			if (v->pvid)
				continue;

			/* Search for the VLAN on our local port */
			list_for_each_entry(w, &priv->bridge_vlans, list) {
				if (w->port == c->port && w->vid == v->vid) {
					we_have_it = true;
					break;
				}
			}

			if (!we_have_it)
				continue;

			/* Deduplicate against entries already collected */
			list_for_each_entry(tmp, &crosschip_vlans, list) {
				if (tmp->vid == v->vid &&
				    tmp->untagged == v->untagged &&
				    tmp->port == c->port &&
				    tmp->other_port == v->port &&
				    tmp->other_ctx == c->other_ctx) {
					already_added = true;
					break;
				}
			}

			if (already_added)
				continue;

			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
			if (!tmp) {
				dev_err(priv->ds->dev, "Failed to allocate memory\n");
				rc = -ENOMEM;
				goto out;
			}
			tmp->vid = v->vid;
			tmp->port = c->port;
			tmp->other_port = v->port;
			tmp->other_ctx = c->other_ctx;
			tmp->untagged = v->untagged;
			list_add(&tmp->list, &crosschip_vlans);
		}
	}

	/* Pass 2: emit VLAN entries and reverse retagging rules */
	list_for_each_entry(tmp, &crosschip_vlans, list) {
		struct sja1105_private *other_priv = tmp->other_ctx->ds->priv;
		int upstream = dsa_upstream_port(priv->ds, tmp->port);
		int match, subvlan;
		u16 rx_vid;

		subvlan = sja1105_find_committed_subvlan(other_priv,
							 tmp->other_port,
							 tmp->vid);
		/* If this happens, it's a bug. The neighbour switch does not
		 * have a subvlan for tmp->vid on tmp->other_port, but it
		 * should, since we already checked for its vlan_state.
		 */
		if (WARN_ON(subvlan < 0)) {
			rc = -EINVAL;
			goto out;
		}

		rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ctx->ds,
						  tmp->other_port,
						  subvlan);

		/* The @rx_vid retagged from @tmp->vid on
		 * {@tmp->other_ds, @tmp->other_port} needs to be
		 * re-retagged to @tmp->vid on the way back to us.
		 *
		 * Assume the original @tmp->vid is already configured
		 * on this local switch, otherwise we wouldn't be
		 * retagging its subvlan on the other switch in the
		 * first place. We just need to add a reverse retagging
		 * rule for @rx_vid and install @rx_vid on our ports.
		 */
		match = rx_vid;
		new_vlan[match].vlanid = rx_vid;
		new_vlan[match].vmemb_port |= BIT(tmp->port);
		new_vlan[match].vmemb_port |= BIT(upstream);
		/* The "untagged" flag is set the same as for the
		 * original VLAN. And towards the CPU, it doesn't
		 * really matter, because @rx_vid will only receive
		 * traffic on that port. For consistency with other dsa_8021q
		 * VLANs, we'll keep the CPU port tagged.
		 */
		if (!tmp->untagged)
			new_vlan[match].tag_port |= BIT(tmp->port);
		new_vlan[match].tag_port |= BIT(upstream);
		/* Deny egress of @rx_vid towards our front-panel port.
		 * This will force the switch to drop it, and we'll see
		 * only the re-retagged packets (having the original,
		 * pre-initial-retagging, VLAN @tmp->vid).
		 */
		new_vlan[match].vlan_bc &= ~BIT(tmp->port);

		/* On reverse retagging, the same ingress VLAN goes to multiple
		 * ports. So we have an opportunity to create composite rules
		 * to not waste the limited space in the retagging table.
		 */
		k = sja1105_find_retagging_entry(new_retagging, *num_retagging,
						 upstream, rx_vid, tmp->vid);
		if (k < 0) {
			if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) {
				dev_err(priv->ds->dev, "No more retagging rules\n");
				rc = -ENOSPC;
				goto out;
			}
			k = (*num_retagging)++;
		}
		/* And the retagging itself */
		new_retagging[k].vlan_ing = rx_vid;
		new_retagging[k].vlan_egr = tmp->vid;
		new_retagging[k].ing_port = BIT(upstream);
		new_retagging[k].egr_port |= BIT(tmp->port);
	}

out:
	list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) {
		list_del(&tmp->list);
		kfree(tmp);
	}

	return rc;
}
2554
/* Forward declaration: needed by sja1105_notify_crosschip_switches() below */
static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify);
2556
2557static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
2558{
2559	struct sja1105_crosschip_switch *s, *pos;
2560	struct list_head crosschip_switches;
2561	struct dsa_8021q_crosschip_link *c;
2562	int rc = 0;
2563
2564	INIT_LIST_HEAD(&crosschip_switches);
2565
2566	list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
2567		bool already_added = false;
2568
2569		list_for_each_entry(s, &crosschip_switches, list) {
2570			if (s->other_ctx == c->other_ctx) {
2571				already_added = true;
2572				break;
2573			}
2574		}
2575
2576		if (already_added)
2577			continue;
2578
2579		s = kzalloc(sizeof(*s), GFP_KERNEL);
2580		if (!s) {
2581			dev_err(priv->ds->dev, "Failed to allocate memory\n");
2582			rc = -ENOMEM;
2583			goto out;
2584		}
2585		s->other_ctx = c->other_ctx;
2586		list_add(&s->list, &crosschip_switches);
2587	}
2588
2589	list_for_each_entry(s, &crosschip_switches, list) {
2590		struct sja1105_private *other_priv = s->other_ctx->ds->priv;
2591
2592		rc = sja1105_build_vlan_table(other_priv, false);
2593		if (rc)
2594			goto out;
2595	}
2596
2597out:
2598	list_for_each_entry_safe(s, pos, &crosschip_switches, list) {
2599		list_del(&s->list);
2600		kfree(s);
2601	}
2602
2603	return rc;
2604}
2605
2606static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify)
2607{
2608	u16 subvlan_map[SJA1105_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
2609	struct sja1105_retagging_entry *new_retagging;
2610	struct sja1105_vlan_lookup_entry *new_vlan;
2611	struct sja1105_table *table;
2612	int i, num_retagging = 0;
2613	int rc;
2614
2615	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2616	new_vlan = kcalloc(VLAN_N_VID,
2617			   table->ops->unpacked_entry_size, GFP_KERNEL);
2618	if (!new_vlan)
2619		return -ENOMEM;
2620
2621	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2622	new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT,
2623				table->ops->unpacked_entry_size, GFP_KERNEL);
2624	if (!new_retagging) {
2625		kfree(new_vlan);
2626		return -ENOMEM;
2627	}
2628
2629	for (i = 0; i < VLAN_N_VID; i++)
2630		new_vlan[i].vlanid = VLAN_N_VID;
2631
2632	for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++)
2633		new_retagging[i].vlan_ing = VLAN_N_VID;
2634
2635	for (i = 0; i < priv->ds->num_ports; i++)
2636		sja1105_init_subvlan_map(subvlan_map[i]);
2637
2638	/* Bridge VLANs */
2639	rc = sja1105_build_bridge_vlans(priv, new_vlan);
2640	if (rc)
2641		goto out;
2642
2643	/* VLANs necessary for dsa_8021q operation, given to us by tag_8021q.c:
2644	 * - RX VLANs
2645	 * - TX VLANs
2646	 * - Crosschip links
2647	 */
2648	rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan);
2649	if (rc)
2650		goto out;
2651
2652	/* Private VLANs necessary for dsa_8021q operation, which we need to
2653	 * determine on our own:
2654	 * - Sub-VLANs
2655	 * - Sub-VLANs of crosschip switches
2656	 */
2657	rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging,
2658				    &num_retagging);
2659	if (rc)
2660		goto out;
2661
2662	rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging,
2663					      &num_retagging);
2664	if (rc)
2665		goto out;
2666
2667	rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging);
2668	if (rc)
2669		goto out;
2670
2671	rc = sja1105_commit_pvid(priv);
2672	if (rc)
2673		goto out;
2674
2675	for (i = 0; i < priv->ds->num_ports; i++)
2676		sja1105_commit_subvlan_map(priv, i, subvlan_map[i]);
2677
2678	if (notify) {
2679		rc = sja1105_notify_crosschip_switches(priv);
2680		if (rc)
2681			goto out;
2682	}
2683
2684out:
2685	kfree(new_vlan);
2686	kfree(new_retagging);
2687
2688	return rc;
2689}
2690
2691static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
2692				const struct switchdev_obj_port_vlan *vlan)
2693{
2694	struct sja1105_private *priv = ds->priv;
2695	u16 vid;
2696
2697	if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2698		return 0;
2699
2700	/* If the user wants best-effort VLAN filtering (aka vlan_filtering
2701	 * bridge plus tagging), be sure to at least deny alterations to the
2702	 * configuration done by dsa_8021q.
2703	 */
2704	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2705		if (vid_is_dsa_8021q(vid)) {
2706			dev_err(ds->dev, "Range 1024-3071 reserved for dsa_8021q operation\n");
2707			return -EBUSY;
2708		}
2709	}
2710
2711	return 0;
2712}
2713
2714/* The TPID setting belongs to the General Parameters table,
2715 * which can only be partially reconfigured at runtime (and not the TPID).
2716 * So a switch reset is required.
2717 */
int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
			   struct switchdev_trans *trans)
{
	struct sja1105_l2_lookup_params_entry *l2_lookup_params;
	struct sja1105_general_params_entry *general_params;
	struct sja1105_private *priv = ds->priv;
	enum sja1105_vlan_state state;
	struct sja1105_table *table;
	struct sja1105_rule *rule;
	bool want_tagging;
	u16 tpid, tpid2;
	int rc;

	/* Prepare phase: only validate that no virtual-link rules exist,
	 * since those depend on the current VLAN awareness state.
	 */
	if (switchdev_trans_ph_prepare(trans)) {
		list_for_each_entry(rule, &priv->flow_block.rules, list) {
			if (rule->type == SJA1105_RULE_VL) {
				dev_err(ds->dev,
					"Cannot change VLAN filtering with active VL rules\n");
				return -EBUSY;
			}
		}

		return 0;
	}

	if (enabled) {
		/* Enable VLAN filtering. */
		tpid  = ETH_P_8021Q;
		tpid2 = ETH_P_8021AD;
	} else {
		/* Disable VLAN filtering. */
		tpid  = ETH_P_SJA1105;
		tpid2 = ETH_P_SJA1105;
	}

	/* Select the TPID the tagger will put on transmitted frames */
	for (port = 0; port < ds->num_ports; port++) {
		struct sja1105_port *sp = &priv->ports[port];

		if (enabled)
			sp->xmit_tpid = priv->info->qinq_tpid;
		else
			sp->xmit_tpid = ETH_P_SJA1105;
	}

	if (!enabled)
		state = SJA1105_VLAN_UNAWARE;
	else if (priv->best_effort_vlan_filtering)
		state = SJA1105_VLAN_BEST_EFFORT;
	else
		state = SJA1105_VLAN_FILTERING_FULL;

	/* No state transition: nothing to reconfigure */
	if (priv->vlan_state == state)
		return 0;

	priv->vlan_state = state;
	/* NOTE(review): this assignment is dead — want_tagging is
	 * unconditionally recomputed below before its first use.
	 */
	want_tagging = (state == SJA1105_VLAN_UNAWARE ||
			state == SJA1105_VLAN_BEST_EFFORT);

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
	general_params = table->entries;
	/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
	general_params->tpid = tpid;
	/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
	general_params->tpid2 = tpid2;
	/* When VLAN filtering is on, we need to at least be able to
	 * decode management traffic through the "backup plan".
	 */
	general_params->incl_srcpt1 = enabled;
	general_params->incl_srcpt0 = enabled;

	want_tagging = priv->best_effort_vlan_filtering || !enabled;

	/* VLAN filtering => independent VLAN learning.
	 * No VLAN filtering (or best effort) => shared VLAN learning.
	 *
	 * In shared VLAN learning mode, untagged traffic still gets
	 * pvid-tagged, and the FDB table gets populated with entries
	 * containing the "real" (pvid or from VLAN tag) VLAN ID.
	 * However the switch performs a masked L2 lookup in the FDB,
	 * effectively only looking up a frame's DMAC (and not VID) for the
	 * forwarding decision.
	 *
	 * This is extremely convenient for us, because in modes with
	 * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
	 * each front panel port. This is good for identification but breaks
	 * learning badly - the VID of the learnt FDB entry is unique, aka
	 * no frames coming from any other port are going to have it. So
	 * for forwarding purposes, this is as though learning was broken
	 * (all frames get flooded).
	 */
	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
	l2_lookup_params = table->entries;
	l2_lookup_params->shared_learn = want_tagging;

	/* NOTE(review): any return value of this call is ignored here —
	 * confirm whether a failure should abort the state change.
	 */
	sja1105_frame_memory_partitioning(priv);

	rc = sja1105_build_vlan_table(priv, false);
	if (rc)
		return rc;

	/* The TPID cannot be changed at runtime, so reset the switch and
	 * reload the static config with the new General Parameters.
	 */
	rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
	if (rc)
		dev_err(ds->dev, "Failed to change VLAN Ethertype\n");

	/* Switch port identification based on 802.1Q is only passable
	 * if we are not under a vlan_filtering bridge. So make sure
	 * the two configurations are mutually exclusive (of course, the
	 * user may know better, i.e. best_effort_vlan_filtering).
	 */
	return sja1105_setup_8021q_tagging(ds, want_tagging);
}
2829
2830/* Returns number of VLANs added (0 or 1) on success,
2831 * or a negative error code.
2832 */
2833static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
2834				u16 flags, struct list_head *vlan_list)
2835{
2836	bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
2837	bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
2838	struct sja1105_bridge_vlan *v;
2839
2840	list_for_each_entry(v, vlan_list, list) {
2841		if (v->port == port && v->vid == vid) {
2842			/* Already added */
2843			if (v->untagged == untagged && v->pvid == pvid)
2844				/* Nothing changed */
2845				return 0;
2846
2847			/* It's the same VLAN, but some of the flags changed
2848			 * and the user did not bother to delete it first.
2849			 * Update it and trigger sja1105_build_vlan_table.
2850			 */
2851			v->untagged = untagged;
2852			v->pvid = pvid;
2853			return 1;
2854		}
2855	}
2856
2857	v = kzalloc(sizeof(*v), GFP_KERNEL);
2858	if (!v) {
2859		dev_err(ds->dev, "Out of memory while storing VLAN\n");
2860		return -ENOMEM;
2861	}
2862
2863	v->port = port;
2864	v->vid = vid;
2865	v->untagged = untagged;
2866	v->pvid = pvid;
2867	list_add(&v->list, vlan_list);
2868
2869	return 1;
2870}
2871
2872/* Returns number of VLANs deleted (0 or 1) */
2873static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid,
2874				struct list_head *vlan_list)
2875{
2876	struct sja1105_bridge_vlan *v, *n;
2877
2878	list_for_each_entry_safe(v, n, vlan_list, list) {
2879		if (v->port == port && v->vid == vid) {
2880			list_del(&v->list);
2881			kfree(v);
2882			return 1;
2883		}
2884	}
2885
2886	return 0;
2887}
2888
2889static void sja1105_vlan_add(struct dsa_switch *ds, int port,
2890			     const struct switchdev_obj_port_vlan *vlan)
2891{
2892	struct sja1105_private *priv = ds->priv;
2893	bool vlan_table_changed = false;
2894	u16 vid;
2895	int rc;
2896
2897	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2898		rc = sja1105_vlan_add_one(ds, port, vid, vlan->flags,
2899					  &priv->bridge_vlans);
2900		if (rc < 0)
2901			return;
2902		if (rc > 0)
2903			vlan_table_changed = true;
2904	}
2905
2906	if (!vlan_table_changed)
2907		return;
2908
2909	rc = sja1105_build_vlan_table(priv, true);
2910	if (rc)
2911		dev_err(ds->dev, "Failed to build VLAN table: %d\n", rc);
2912}
2913
2914static int sja1105_vlan_del(struct dsa_switch *ds, int port,
2915			    const struct switchdev_obj_port_vlan *vlan)
2916{
2917	struct sja1105_private *priv = ds->priv;
2918	bool vlan_table_changed = false;
2919	u16 vid;
2920	int rc;
2921
2922	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2923		rc = sja1105_vlan_del_one(ds, port, vid, &priv->bridge_vlans);
2924		if (rc > 0)
2925			vlan_table_changed = true;
2926	}
2927
2928	if (!vlan_table_changed)
2929		return 0;
2930
2931	return sja1105_build_vlan_table(priv, true);
2932}
2933
2934static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
2935				      u16 flags)
2936{
2937	struct sja1105_private *priv = ds->priv;
2938	int rc;
2939
2940	rc = sja1105_vlan_add_one(ds, port, vid, flags, &priv->dsa_8021q_vlans);
2941	if (rc <= 0)
2942		return rc;
2943
2944	return sja1105_build_vlan_table(priv, true);
2945}
2946
2947static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
2948{
2949	struct sja1105_private *priv = ds->priv;
2950	int rc;
2951
2952	rc = sja1105_vlan_del_one(ds, port, vid, &priv->dsa_8021q_vlans);
2953	if (!rc)
2954		return 0;
2955
2956	return sja1105_build_vlan_table(priv, true);
2957}
2958
/* Callbacks through which tag_8021q.c installs/removes its VLANs on us */
static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = {
	.vlan_add	= sja1105_dsa_8021q_vlan_add,
	.vlan_del	= sja1105_dsa_8021q_vlan_del,
};
2963
2964/* The programming model for the SJA1105 switch is "all-at-once" via static
2965 * configuration tables. Some of these can be dynamically modified at runtime,
2966 * but not the xMII mode parameters table.
 * Furthermore, some PHYs may not have crystals for generating their clocks
2968 * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
2969 * ref_clk pin. So port clocking needs to be initialized early, before
2970 * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
2971 * Setting correct PHY link speed does not matter now.
2972 * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
2973 * bindings are not yet parsed by DSA core. We need to parse early so that we
2974 * can populate the xMII mode parameters table.
2975 */
2976static int sja1105_setup(struct dsa_switch *ds)
2977{
2978	struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
2979	struct sja1105_private *priv = ds->priv;
2980	int rc;
2981
2982	rc = sja1105_parse_dt(priv, ports);
2983	if (rc < 0) {
2984		dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
2985		return rc;
2986	}
2987
2988	/* Error out early if internal delays are required through DT
2989	 * and we can't apply them.
2990	 */
2991	rc = sja1105_parse_rgmii_delays(priv, ports);
2992	if (rc < 0) {
2993		dev_err(ds->dev, "RGMII delay not supported\n");
2994		return rc;
2995	}
2996
2997	rc = sja1105_ptp_clock_register(ds);
2998	if (rc < 0) {
2999		dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
3000		return rc;
3001	}
3002	/* Create and send configuration down to device */
3003	rc = sja1105_static_config_load(priv, ports);
3004	if (rc < 0) {
3005		dev_err(ds->dev, "Failed to load static config: %d\n", rc);
3006		goto out_ptp_clock_unregister;
3007	}
3008	/* Configure the CGU (PHY link modes and speeds) */
3009	rc = sja1105_clocking_setup(priv);
3010	if (rc < 0) {
3011		dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
3012		goto out_static_config_free;
3013	}
3014	/* On SJA1105, VLAN filtering per se is always enabled in hardware.
3015	 * The only thing we can do to disable it is lie about what the 802.1Q
3016	 * EtherType is.
3017	 * So it will still try to apply VLAN filtering, but all ingress
3018	 * traffic (except frames received with EtherType of ETH_P_SJA1105)
3019	 * will be internally tagged with a distorted VLAN header where the
3020	 * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
3021	 */
3022	ds->vlan_filtering_is_global = true;
3023
3024	/* Advertise the 8 egress queues */
3025	ds->num_tx_queues = SJA1105_NUM_TC;
3026
3027	ds->mtu_enforcement_ingress = true;
3028
3029	ds->configure_vlan_while_not_filtering = true;
3030
3031	rc = sja1105_devlink_setup(ds);
3032	if (rc < 0)
3033		goto out_static_config_free;
3034
3035	/* The DSA/switchdev model brings up switch ports in standalone mode by
3036	 * default, and that means vlan_filtering is 0 since they're not under
3037	 * a bridge, so it's safe to set up switch tagging at this time.
3038	 */
3039	rtnl_lock();
3040	rc = sja1105_setup_8021q_tagging(ds, true);
3041	rtnl_unlock();
3042	if (rc)
3043		goto out_devlink_teardown;
3044
3045	return 0;
3046
3047out_devlink_teardown:
3048	sja1105_devlink_teardown(ds);
3049out_ptp_clock_unregister:
3050	sja1105_ptp_clock_unregister(ds);
3051out_static_config_free:
3052	sja1105_static_config_free(&priv->static_config);
3053
3054	return rc;
3055}
3056
3057static void sja1105_teardown(struct dsa_switch *ds)
3058{
3059	struct sja1105_private *priv = ds->priv;
3060	struct sja1105_bridge_vlan *v, *n;
3061	int port;
3062
3063	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
3064		struct sja1105_port *sp = &priv->ports[port];
3065
3066		if (!dsa_is_user_port(ds, port))
3067			continue;
3068
3069		if (sp->xmit_worker)
3070			kthread_destroy_worker(sp->xmit_worker);
3071	}
3072
3073	sja1105_devlink_teardown(ds);
3074	sja1105_flower_teardown(ds);
3075	sja1105_tas_teardown(ds);
3076	sja1105_ptp_clock_unregister(ds);
3077	sja1105_static_config_free(&priv->static_config);
3078
3079	list_for_each_entry_safe(v, n, &priv->dsa_8021q_vlans, list) {
3080		list_del(&v->list);
3081		kfree(v);
3082	}
3083
3084	list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) {
3085		list_del(&v->list);
3086		kfree(v);
3087	}
3088}
3089
3090static int sja1105_port_enable(struct dsa_switch *ds, int port,
3091			       struct phy_device *phy)
3092{
3093	struct net_device *slave;
3094
3095	if (!dsa_is_user_port(ds, port))
3096		return 0;
3097
3098	slave = dsa_to_port(ds, port)->slave;
3099
3100	slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
3101
3102	return 0;
3103}
3104
3105static void sja1105_port_disable(struct dsa_switch *ds, int port)
3106{
3107	struct sja1105_private *priv = ds->priv;
3108	struct sja1105_port *sp = &priv->ports[port];
3109
3110	if (!dsa_is_user_port(ds, port))
3111		return;
3112
3113	kthread_cancel_work_sync(&sp->xmit_work);
3114	skb_queue_purge(&sp->xmit_queue);
3115}
3116
/* Transmit @skb through management route @slot of @port: program a one-shot
 * route over SPI, hand the frame to the CPU port, then busy-poll until the
 * switch confirms the match by clearing ENFPORT. Always returns NETDEV_TX_OK
 * once the skb has been handed off (it is consumed either way); returns a
 * negative error only if the route could not be programmed at all.
 */
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
			     struct sk_buff *skb, bool takets)
{
	struct sja1105_mgmt_entry mgmt_route = {0};
	struct sja1105_private *priv = ds->priv;
	struct ethhdr *hdr;
	int timeout = 10;
	int rc;

	hdr = eth_hdr(skb);

	/* Match on the frame's destination MAC, force egress only through
	 * @port, and optionally request a TX timestamp (in tsreg 0).
	 */
	mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
	mgmt_route.destports = BIT(port);
	mgmt_route.enfport = 1;
	mgmt_route.tsreg = 0;
	mgmt_route.takets = takets;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					  slot, &mgmt_route, true);
	if (rc < 0) {
		/* Route setup failed: the frame cannot be sent, free it */
		kfree_skb(skb);
		return rc;
	}

	/* Transfer skb to the host port. */
	dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);

	/* Wait until the switch has processed the frame */
	do {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
						 slot, &mgmt_route);
		if (rc < 0) {
			dev_err_ratelimited(priv->ds->dev,
					    "failed to poll for mgmt route\n");
			/* continue re-evaluates the loop condition, so the
			 * timeout still counts down on SPI read errors
			 */
			continue;
		}

		/* UM10944: The ENFPORT flag of the respective entry is
		 * cleared when a match is found. The host can use this
		 * flag as an acknowledgment.
		 */
		cpu_relax();
	} while (mgmt_route.enfport && --timeout);

	if (!timeout) {
		/* Clean up the management route so that a follow-up
		 * frame may not match on it by mistake.
		 * This is only hardware supported on P/Q/R/S - on E/T it is
		 * a no-op and we are silently discarding the -EOPNOTSUPP.
		 */
		sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					     slot, &mgmt_route, false);
		dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
	}

	return NETDEV_TX_OK;
}
3174
/* Recover the sja1105_port that embeds the given deferred xmit work item */
#define work_to_port(work) \
		container_of((work), struct sja1105_port, xmit_work)
/* Recover the driver private structure that embeds the tagger data */
#define tagger_to_sja1105(t) \
		container_of((t), struct sja1105_private, tagger_data)
3179
/* Deferred work is unfortunately necessary because setting up the management
 * route cannot be done from atomic context (SPI transfer takes a sleepable
 * lock on the bus)
 */
3184static void sja1105_port_deferred_xmit(struct kthread_work *work)
3185{
3186	struct sja1105_port *sp = work_to_port(work);
3187	struct sja1105_tagger_data *tagger_data = sp->data;
3188	struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
3189	int port = sp - priv->ports;
3190	struct sk_buff *skb;
3191
3192	while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
3193		struct sk_buff *clone = DSA_SKB_CB(skb)->clone;
3194
3195		mutex_lock(&priv->mgmt_lock);
3196
3197		sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);
3198
3199		/* The clone, if there, was made by dsa_skb_tx_timestamp */
3200		if (clone)
3201			sja1105_ptp_txtstamp_skb(priv->ds, port, clone);
3202
3203		mutex_unlock(&priv->mgmt_lock);
3204	}
3205}
3206
3207/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
3208 * which cannot be reconfigured at runtime. So a switch reset is required.
3209 */
3210static int sja1105_set_ageing_time(struct dsa_switch *ds,
3211				   unsigned int ageing_time)
3212{
3213	struct sja1105_l2_lookup_params_entry *l2_lookup_params;
3214	struct sja1105_private *priv = ds->priv;
3215	struct sja1105_table *table;
3216	unsigned int maxage;
3217
3218	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
3219	l2_lookup_params = table->entries;
3220
3221	maxage = SJA1105_AGEING_TIME_MS(ageing_time);
3222
3223	if (l2_lookup_params->maxage == maxage)
3224		return 0;
3225
3226	l2_lookup_params->maxage = maxage;
3227
3228	return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
3229}
3230
3231static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
3232{
3233	struct sja1105_l2_policing_entry *policing;
3234	struct sja1105_private *priv = ds->priv;
3235
3236	new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
3237
3238	if (dsa_is_cpu_port(ds, port))
3239		new_mtu += VLAN_HLEN;
3240
3241	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3242
3243	if (policing[port].maxlen == new_mtu)
3244		return 0;
3245
3246	policing[port].maxlen = new_mtu;
3247
3248	return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3249}
3250
/* Report the largest L3 MTU a port supports: 2043 bytes looks like the
 * maximum L2 frame length the hardware accepts (NOTE(review): confirm
 * against the policing MAXLEN field limit in UM10944), minus the VLAN-tagged
 * Ethernet header and FCS that sja1105_change_mtu() adds back on top.
 */
static int sja1105_get_max_mtu(struct dsa_switch *ds, int port)
{
	return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN;
}
3255
3256static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
3257				 enum tc_setup_type type,
3258				 void *type_data)
3259{
3260	switch (type) {
3261	case TC_SETUP_QDISC_TAPRIO:
3262		return sja1105_setup_tc_taprio(ds, port, type_data);
3263	case TC_SETUP_QDISC_CBS:
3264		return sja1105_setup_tc_cbs(ds, port, type_data);
3265	default:
3266		return -EOPNOTSUPP;
3267	}
3268}
3269
/* We have a single mirror (@to) port, but can configure ingress and egress
 * mirroring on all other (@from) ports.
 * We need to allow mirroring rules only as long as the @to port is always the
 * same, and we need to unset the @to port from mirr_port only when there is no
 * mirroring rule that references it.
 */
static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
				bool ingress, bool enabled)
{
	struct sja1105_general_params_entry *general_params;
	struct sja1105_mac_config_entry *mac;
	struct sja1105_table *table;
	bool already_enabled;
	u64 new_mirr_port;
	int rc;

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
	general_params = table->entries;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* mirr_port == SJA1105_NUM_PORTS (an invalid port) encodes
	 * "mirroring disabled"
	 */
	already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS);
	if (already_enabled && enabled && general_params->mirr_port != to) {
		dev_err(priv->ds->dev,
			"Delete mirroring rules towards port %llu first\n",
			general_params->mirr_port);
		return -EBUSY;
	}

	new_mirr_port = to;
	if (!enabled) {
		bool keep = false;
		int port;

		/* Anybody still referencing mirr_port? */
		for (port = 0; port < SJA1105_NUM_PORTS; port++) {
			if (mac[port].ing_mirr || mac[port].egr_mirr) {
				keep = true;
				break;
			}
		}
		/* Unset already_enabled for next time */
		if (!keep)
			new_mirr_port = SJA1105_NUM_PORTS;
	}
	/* Commit the mirror destination only when it actually changed */
	if (new_mirr_port != general_params->mirr_port) {
		general_params->mirr_port = new_mirr_port;

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
						  0, general_params, true);
		if (rc < 0)
			return rc;
	}

	/* Flag the @from port as mirrored in the requested direction */
	if (ingress)
		mac[from].ing_mirr = enabled;
	else
		mac[from].egr_mirr = enabled;

	return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
					    &mac[from], true);
}
3332
3333static int sja1105_mirror_add(struct dsa_switch *ds, int port,
3334			      struct dsa_mall_mirror_tc_entry *mirror,
3335			      bool ingress)
3336{
3337	return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
3338				    ingress, true);
3339}
3340
3341static void sja1105_mirror_del(struct dsa_switch *ds, int port,
3342			       struct dsa_mall_mirror_tc_entry *mirror)
3343{
3344	sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
3345			     mirror->ingress, false);
3346}
3347
3348static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
3349				    struct dsa_mall_policer_tc_entry *policer)
3350{
3351	struct sja1105_l2_policing_entry *policing;
3352	struct sja1105_private *priv = ds->priv;
3353
3354	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3355
3356	/* In hardware, every 8 microseconds the credit level is incremented by
3357	 * the value of RATE bytes divided by 64, up to a maximum of SMAX
3358	 * bytes.
3359	 */
3360	policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
3361				      1000000);
3362	policing[port].smax = policer->burst;
3363
3364	return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3365}
3366
3367static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
3368{
3369	struct sja1105_l2_policing_entry *policing;
3370	struct sja1105_private *priv = ds->priv;
3371
3372	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3373
3374	policing[port].rate = SJA1105_RATE_MBPS(1000);
3375	policing[port].smax = 65535;
3376
3377	sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3378}
3379
/* DSA switch operations implemented by this driver; registered with the
 * DSA core through ds->ops in sja1105_probe().
 */
static const struct dsa_switch_ops sja1105_switch_ops = {
	.get_tag_protocol	= sja1105_get_tag_protocol,
	.setup			= sja1105_setup,
	.teardown		= sja1105_teardown,
	.set_ageing_time	= sja1105_set_ageing_time,
	.port_change_mtu	= sja1105_change_mtu,
	.port_max_mtu		= sja1105_get_max_mtu,
	.phylink_validate	= sja1105_phylink_validate,
	.phylink_mac_link_state	= sja1105_mac_pcs_get_state,
	.phylink_mac_config	= sja1105_mac_config,
	.phylink_mac_link_up	= sja1105_mac_link_up,
	.phylink_mac_link_down	= sja1105_mac_link_down,
	.get_strings		= sja1105_get_strings,
	.get_ethtool_stats	= sja1105_get_ethtool_stats,
	.get_sset_count		= sja1105_get_sset_count,
	.get_ts_info		= sja1105_get_ts_info,
	.port_enable		= sja1105_port_enable,
	.port_disable		= sja1105_port_disable,
	.port_fdb_dump		= sja1105_fdb_dump,
	.port_fdb_add		= sja1105_fdb_add,
	.port_fdb_del		= sja1105_fdb_del,
	.port_bridge_join	= sja1105_bridge_join,
	.port_bridge_leave	= sja1105_bridge_leave,
	.port_stp_state_set	= sja1105_bridge_stp_state_set,
	.port_vlan_prepare	= sja1105_vlan_prepare,
	.port_vlan_filtering	= sja1105_vlan_filtering,
	.port_vlan_add		= sja1105_vlan_add,
	.port_vlan_del		= sja1105_vlan_del,
	.port_mdb_prepare	= sja1105_mdb_prepare,
	.port_mdb_add		= sja1105_mdb_add,
	.port_mdb_del		= sja1105_mdb_del,
	.port_hwtstamp_get	= sja1105_hwtstamp_get,
	.port_hwtstamp_set	= sja1105_hwtstamp_set,
	.port_rxtstamp		= sja1105_port_rxtstamp,
	.port_txtstamp		= sja1105_port_txtstamp,
	.port_setup_tc		= sja1105_port_setup_tc,
	.port_mirror_add	= sja1105_mirror_add,
	.port_mirror_del	= sja1105_mirror_del,
	.port_policer_add	= sja1105_port_policer_add,
	.port_policer_del	= sja1105_port_policer_del,
	.cls_flower_add		= sja1105_cls_flower_add,
	.cls_flower_del		= sja1105_cls_flower_del,
	.cls_flower_stats	= sja1105_cls_flower_stats,
	.crosschip_bridge_join	= sja1105_crosschip_bridge_join,
	.crosschip_bridge_leave	= sja1105_crosschip_bridge_leave,
	.devlink_param_get	= sja1105_devlink_param_get,
	.devlink_param_set	= sja1105_devlink_param_set,
	.devlink_info_get	= sja1105_devlink_info_get,
};
3429
3430static const struct of_device_id sja1105_dt_ids[];
3431
/* Read the device ID and part number over SPI and cross-check them against
 * the match table. If the silicon differs from the variant named in the
 * device tree but is still a known chip, adopt the probed chip's info and
 * warn. Returns 0 on success, -ENODEV for an unknown chip, or a negative
 * SPI transfer error.
 */
static int sja1105_check_device_id(struct sja1105_private *priv)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
	struct device *dev = &priv->spidev->dev;
	const struct of_device_id *match;
	u32 device_id;
	u64 part_no;
	int rc;

	rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
			      NULL);
	if (rc < 0)
		return rc;

	rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
			      SJA1105_SIZE_DEVICE_ID);
	if (rc < 0)
		return rc;

	/* Extract the PART_NR field from the product ID register */
	sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);

	for (match = sja1105_dt_ids; match->compatible[0]; match++) {
		const struct sja1105_info *info = match->data;

		/* Is what's been probed in our match table at all? */
		if (info->device_id != device_id || info->part_no != part_no)
			continue;

		/* But is it what's in the device tree? */
		if (priv->info->device_id != device_id ||
		    priv->info->part_no != part_no) {
			dev_warn(dev, "Device tree specifies chip %s but found %s, please fix it!\n",
				 priv->info->name, info->name);
			/* It isn't. No problem, pick that up. */
			priv->info = info;
		}

		return 0;
	}

	dev_err(dev, "Unexpected {device ID, part number}: 0x%x 0x%llx\n",
		device_id, part_no);

	return -ENODEV;
}
3478
/* SPI probe: reset the chip, verify its identity, register the DSA switch
 * and wire up the per-port state used by the tagger (deferred xmit workers,
 * queues, subvlan maps). Returns 0 or a negative error with everything
 * acquired so far released.
 */
static int sja1105_probe(struct spi_device *spi)
{
	struct sja1105_tagger_data *tagger_data;
	struct device *dev = &spi->dev;
	struct sja1105_private *priv;
	struct dsa_switch *ds;
	int rc, port;

	if (!dev->of_node) {
		dev_err(dev, "No DTS bindings for SJA1105 driver\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Configure the optional reset pin and bring up switch */
	priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(priv->reset_gpio))
		dev_dbg(dev, "reset-gpios not defined, ignoring\n");
	else
		sja1105_hw_reset(priv->reset_gpio, 1, 1);

	/* Populate our driver private structure (priv) based on
	 * the device tree node that was probed (spi)
	 */
	priv->spidev = spi;
	spi_set_drvdata(spi, priv);

	/* Configure the SPI bus */
	spi->bits_per_word = 8;
	rc = spi_setup(spi);
	if (rc < 0) {
		dev_err(dev, "Could not init SPI\n");
		return rc;
	}

	priv->info = of_device_get_match_data(dev);

	/* Detect hardware device; may replace priv->info if the DT named
	 * the wrong chip variant
	 */
	rc = sja1105_check_device_id(priv);
	if (rc < 0) {
		dev_err(dev, "Device ID check failed: %d\n", rc);
		return rc;
	}

	dev_info(dev, "Probed switch chip: %s\n", priv->info->name);

	ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return -ENOMEM;

	ds->dev = dev;
	ds->num_ports = SJA1105_NUM_PORTS;
	ds->ops = &sja1105_switch_ops;
	ds->priv = priv;
	priv->ds = ds;

	tagger_data = &priv->tagger_data;

	mutex_init(&priv->ptp_data.lock);
	mutex_init(&priv->mgmt_lock);

	/* Context for the dsa_8021q library (VLAN-based port isolation
	 * and cross-chip links)
	 */
	priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx),
					   GFP_KERNEL);
	if (!priv->dsa_8021q_ctx)
		return -ENOMEM;

	priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops;
	priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q);
	priv->dsa_8021q_ctx->ds = ds;

	INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links);
	INIT_LIST_HEAD(&priv->bridge_vlans);
	INIT_LIST_HEAD(&priv->dsa_8021q_vlans);

	sja1105_tas_setup(ds);
	sja1105_flower_setup(ds);

	/* Registration invokes sja1105_setup() via the DSA core */
	rc = dsa_register_switch(priv->ds);
	if (rc)
		return rc;

	if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
		priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
					 sizeof(struct sja1105_cbs_entry),
					 GFP_KERNEL);
		if (!priv->cbs) {
			rc = -ENOMEM;
			goto out_unregister_switch;
		}
	}

	/* Connections between dsa_port and sja1105_port */
	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		struct sja1105_port *sp = &priv->ports[port];
		struct dsa_port *dp = dsa_to_port(ds, port);
		struct net_device *slave;
		int subvlan;

		if (!dsa_is_user_port(ds, port))
			continue;

		dp->priv = sp;
		sp->dp = dp;
		sp->data = tagger_data;
		slave = dp->slave;
		kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
		sp->xmit_worker = kthread_create_worker(0, "%s_xmit",
							slave->name);
		if (IS_ERR(sp->xmit_worker)) {
			rc = PTR_ERR(sp->xmit_worker);
			dev_err(ds->dev,
				"failed to create deferred xmit thread: %d\n",
				rc);
			goto out_destroy_workers;
		}
		skb_queue_head_init(&sp->xmit_queue);
		sp->xmit_tpid = ETH_P_SJA1105;

		/* Mark all subvlan slots as unused (VLAN_N_VID is invalid) */
		for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
			sp->subvlan_map[subvlan] = VLAN_N_VID;
	}

	return 0;

out_destroy_workers:
	/* Destroy only the workers created so far (ports below @port) */
	while (port-- > 0) {
		struct sja1105_port *sp = &priv->ports[port];

		if (!dsa_is_user_port(ds, port))
			continue;

		kthread_destroy_worker(sp->xmit_worker);
	}

out_unregister_switch:
	dsa_unregister_switch(ds);

	return rc;
}
3621
3622static int sja1105_remove(struct spi_device *spi)
3623{
3624	struct sja1105_private *priv = spi_get_drvdata(spi);
3625
3626	dsa_unregister_switch(priv->ds);
3627	return 0;
3628}
3629
/* All chip variants this driver supports; .data carries the per-chip
 * sja1105_info, which sja1105_check_device_id() may later override if the
 * probed silicon differs from the device tree.
 */
static const struct of_device_id sja1105_dt_ids[] = {
	{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
	{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
	{ .compatible = "nxp,sja1105p", .data = &sja1105p_info },
	{ .compatible = "nxp,sja1105q", .data = &sja1105q_info },
	{ .compatible = "nxp,sja1105r", .data = &sja1105r_info },
	{ .compatible = "nxp,sja1105s", .data = &sja1105s_info },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
3640
3641static struct spi_driver sja1105_driver = {
3642	.driver = {
3643		.name  = "sja1105",
3644		.owner = THIS_MODULE,
3645		.of_match_table = of_match_ptr(sja1105_dt_ids),
3646	},
3647	.probe  = sja1105_probe,
3648	.remove = sja1105_remove,
3649};
3650
3651module_spi_driver(sja1105_driver);
3652
3653MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
3654MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
3655MODULE_DESCRIPTION("SJA1105 Driver");
3656MODULE_LICENSE("GPL v2");
3657