1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Broadcom Starfighter 2 DSA switch driver
4 *
5 * Copyright (C) 2014, Broadcom Corporation
6 */
7
8#include <linux/list.h>
9#include <linux/module.h>
10#include <linux/netdevice.h>
11#include <linux/interrupt.h>
12#include <linux/platform_device.h>
13#include <linux/phy.h>
14#include <linux/phy_fixed.h>
15#include <linux/phylink.h>
16#include <linux/mii.h>
17#include <linux/clk.h>
18#include <linux/of.h>
19#include <linux/of_irq.h>
20#include <linux/of_address.h>
21#include <linux/of_net.h>
22#include <linux/of_mdio.h>
23#include <net/dsa.h>
24#include <linux/ethtool.h>
25#include <linux/if_bridge.h>
26#include <linux/brcmphy.h>
27#include <linux/etherdevice.h>
28#include <linux/platform_data/b53.h>
29
30#include "bcm_sf2.h"
31#include "bcm_sf2_regs.h"
32#include "b53/b53_priv.h"
33#include "b53/b53_regs.h"
34
35/* Return the number of active ports, not counting the IMP (CPU) port */
36static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
37{
38	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
39	unsigned int port, count = 0;
40
41	for (port = 0; port < ds->num_ports; port++) {
42		if (dsa_is_cpu_port(ds, port))
43			continue;
44		if (priv->port_sts[port].enabled)
45			count++;
46	}
47
48	return count;
49}
50
51static void bcm_sf2_recalc_clock(struct dsa_switch *ds)
52{
53	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
54	unsigned long new_rate;
55	unsigned int ports_active;
56	/* Frequency in Hz */
57	static const unsigned long rate_table[] = {
58		59220000,
59		60820000,
60		62500000,
61		62500000,
62	};
63
64	ports_active = bcm_sf2_num_active_ports(ds);
65	if (ports_active == 0 || !priv->clk_mdiv)
66		return;
67
68	/* If we overflow our table, just use the recommended operational
69	 * frequency
70	 */
71	if (ports_active > ARRAY_SIZE(rate_table))
72		new_rate = 90000000;
73	else
74		new_rate = rate_table[ports_active - 1];
75	clk_set_rate(priv->clk_mdiv, new_rate);
76}
77
78static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
79{
80	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
81	unsigned int i;
82	u32 reg, offset;
83
84	/* Enable the port memories */
85	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
86	reg &= ~P_TXQ_PSM_VDD(port);
87	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
88
89	/* Enable forwarding */
90	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
91
92	/* Enable IMP port in dumb mode */
93	reg = core_readl(priv, CORE_SWITCH_CTRL);
94	reg |= MII_DUMB_FWDG_EN;
95	core_writel(priv, reg, CORE_SWITCH_CTRL);
96
97	/* Configure Traffic Class to QoS mapping, allow each priority to map
98	 * to a different queue number
99	 */
100	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
101	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
102		reg |= i << (PRT_TO_QID_SHIFT * i);
103	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
104
105	b53_brcm_hdr_setup(ds, port);
106
107	if (port == 8) {
108		if (priv->type == BCM7445_DEVICE_ID)
109			offset = CORE_STS_OVERRIDE_IMP;
110		else
111			offset = CORE_STS_OVERRIDE_IMP2;
112
113		/* Force link status for IMP port */
114		reg = core_readl(priv, offset);
115		reg |= (MII_SW_OR | LINK_STS);
116		reg &= ~GMII_SPEED_UP_2G;
117		core_writel(priv, reg, offset);
118
119		/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
120		reg = core_readl(priv, CORE_IMP_CTL);
121		reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
122		reg &= ~(RX_DIS | TX_DIS);
123		core_writel(priv, reg, CORE_IMP_CTL);
124	} else {
125		reg = core_readl(priv, CORE_G_PCTL_PORT(port));
126		reg &= ~(RX_DIS | TX_DIS);
127		core_writel(priv, reg, CORE_G_PCTL_PORT(port));
128	}
129
130	priv->port_sts[port].enabled = true;
131}
132
133static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
134{
135	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
136	u32 reg;
137
138	reg = reg_readl(priv, REG_SPHY_CNTRL);
139	if (enable) {
140		reg |= PHY_RESET;
141		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
142		reg_writel(priv, reg, REG_SPHY_CNTRL);
143		udelay(21);
144		reg = reg_readl(priv, REG_SPHY_CNTRL);
145		reg &= ~PHY_RESET;
146	} else {
147		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
148		reg_writel(priv, reg, REG_SPHY_CNTRL);
149		mdelay(1);
150		reg |= CK25_DIS;
151	}
152	reg_writel(priv, reg, REG_SPHY_CNTRL);
153
154	/* Use PHY-driven LED signaling */
155	if (!enable) {
156		reg = reg_readl(priv, REG_LED_CNTRL(0));
157		reg |= SPDLNK_SRC_SEL;
158		reg_writel(priv, reg, REG_LED_CNTRL(0));
159	}
160}
161
162static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
163					    int port)
164{
165	unsigned int off;
166
167	switch (port) {
168	case 7:
169		off = P7_IRQ_OFF;
170		break;
171	case 0:
172		/* Port 0 interrupts are located on the first bank */
173		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
174		return;
175	default:
176		off = P_IRQ_OFF(port);
177		break;
178	}
179
180	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
181}
182
183static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
184					     int port)
185{
186	unsigned int off;
187
188	switch (port) {
189	case 7:
190		off = P7_IRQ_OFF;
191		break;
192	case 0:
193		/* Port 0 interrupts are located on the first bank */
194		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
195		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
196		return;
197	default:
198		off = P_IRQ_OFF(port);
199		break;
200	}
201
202	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
203	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
204}
205
206static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
207			      struct phy_device *phy)
208{
209	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
210	unsigned int i;
211	u32 reg;
212
213	if (!dsa_is_user_port(ds, port))
214		return 0;
215
216	priv->port_sts[port].enabled = true;
217
218	bcm_sf2_recalc_clock(ds);
219
220	/* Clear the memory power down */
221	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
222	reg &= ~P_TXQ_PSM_VDD(port);
223	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
224
225	/* Enable Broadcom tags for that port if requested */
226	if (priv->brcm_tag_mask & BIT(port))
227		b53_brcm_hdr_setup(ds, port);
228
229	/* Configure Traffic Class to QoS mapping, allow each priority to map
230	 * to a different queue number
231	 */
232	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
233	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
234		reg |= i << (PRT_TO_QID_SHIFT * i);
235	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
236
237	/* Re-enable the GPHY and re-apply workarounds */
238	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
239		bcm_sf2_gphy_enable_set(ds, true);
240		if (phy) {
241			/* If phy_stop() has been called before, the PHY
242			 * will be in the halted state and phy_start()
243			 * will call resume.
244			 *
245			 * The resume path does not restore the autoneg
246			 * settings, and since we hard-reset the PHY
247			 * manually here, we need to reset the state
248			 * machine as well.
249			 */
250			phy->state = PHY_READY;
251			phy_init_hw(phy);
252		}
253	}
254
255	/* Enable MoCA port interrupts so we get notified of link changes */
256	if (port == priv->moca_port)
257		bcm_sf2_port_intr_enable(priv, port);
258
259	/* Set per-queue pause threshold to 32 */
260	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));
261
262	/* Set ACB threshold to 24 */
263	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
264		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
265						    SF2_NUM_EGRESS_QUEUES + i));
266		reg &= ~XOFF_THRESHOLD_MASK;
267		reg |= 24;
268		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
269						    SF2_NUM_EGRESS_QUEUES + i));
270	}
271
272	return b53_enable_port(ds, port, phy);
273}
274
275static void bcm_sf2_port_disable(struct dsa_switch *ds, int port)
276{
277	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
278	u32 reg;
279
280	/* Disable learning while in WoL mode */
281	if (priv->wol_ports_mask & (1 << port)) {
282		reg = core_readl(priv, CORE_DIS_LEARN);
283		reg |= BIT(port);
284		core_writel(priv, reg, CORE_DIS_LEARN);
285		return;
286	}
287
288	if (port == priv->moca_port)
289		bcm_sf2_port_intr_disable(priv, port);
290
291	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
292		bcm_sf2_gphy_enable_set(ds, false);
293
294	b53_disable_port(ds, port);
295
296	/* Power down the port memory */
297	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
298	reg |= P_TXQ_PSM_VDD(port);
299	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
300
301	priv->port_sts[port].enabled = false;
302
303	bcm_sf2_recalc_clock(ds);
304}
305
306
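/* Indirect access to the integrated pseudo-PHY through the SWITCH_CORE
 * register space; a non-zero 'op' performs a read, zero performs a write.
 */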
307static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
308			       int regnum, u16 val)
309{
310	int ret = 0;
311	u32 reg;
312
313	reg = reg_readl(priv, REG_SWITCH_CNTRL);
314	reg |= MDIO_MASTER_SEL;
315	reg_writel(priv, reg, REG_SWITCH_CNTRL);
316
317	/* Page << 8 | offset */
318	reg = 0x70;
319	reg <<= 2;
320	core_writel(priv, addr, reg);
321
322	/* Page << 8 | offset */
323	reg = 0x80 << 8 | regnum << 1;
324	reg <<= 2;
325
326	if (op)
327		ret = core_readl(priv, reg);
328	else
329		core_writel(priv, val, reg);
330
331	reg = reg_readl(priv, REG_SWITCH_CNTRL);
332	reg &= ~MDIO_MASTER_SEL;
333	reg_writel(priv, reg, REG_SWITCH_CNTRL);
334
335	return ret & 0xffff;
336}
337
338static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
339{
340	struct bcm_sf2_priv *priv = bus->priv;
341
342	/* Intercept reads from the Broadcom pseudo-PHY address; otherwise,
343	 * send them to our master MDIO bus controller
344	 */
345	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
346		return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
347	else
348		return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
349}
350
351static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
352				 u16 val)
353{
354	struct bcm_sf2_priv *priv = bus->priv;
355
356	/* Intercept writes to the Broadcom pseudo-PHY address; otherwise,
357	 * send them to our master MDIO bus controller
358	 */
359	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
360		return bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
361	else
362		return mdiobus_write_nested(priv->master_mii_bus, addr,
363				regnum, val);
364}
365
366static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
367{
368	struct dsa_switch *ds = dev_id;
369	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
370
371	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
372				~priv->irq0_mask;
373	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
374
375	return IRQ_HANDLED;
376}
377
378static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
379{
380	struct dsa_switch *ds = dev_id;
381	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
382
383	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
384				~priv->irq1_mask;
385	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
386
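	/* Link interrupts are only enabled for the MoCA port; relay port 7
	 * link transitions to phylink.
	 */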
387	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) {
388		priv->port_sts[7].link = true;
389		dsa_port_phylink_mac_change(ds, 7, true);
390	}
391	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) {
392		priv->port_sts[7].link = false;
393		dsa_port_phylink_mac_change(ds, 7, false);
394	}
395
396	return IRQ_HANDLED;
397}
398
399static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
400{
401	unsigned int timeout = 1000;
402	u32 reg;
403	int ret;
404
405	/* The watchdog reset does not work on 7278; we need to hit the
406	 * "external" reset line through the reset controller.
407	 */
408	if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev)) {
409		ret = reset_control_assert(priv->rcdev);
410		if (ret)
411			return ret;
412
413		return reset_control_deassert(priv->rcdev);
414	}
415
416	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
417	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
418	core_writel(priv, reg, CORE_WATCHDOG_CTRL);
419
420	do {
421		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
422		if (!(reg & SOFTWARE_RESET))
423			break;
424
425		usleep_range(1000, 2000);
426	} while (timeout-- > 0);
427
428	if (timeout == 0)
429		return -ETIMEDOUT;
430
431	return 0;
432}
433
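/* Mask and acknowledge all interrupt sources on both INTRL2 banks */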
434static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
435{
436	intrl2_0_mask_set(priv, 0xffffffff);
437	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
438	intrl2_1_mask_set(priv, 0xffffffff);
439	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
440}
441
442static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
443				   struct device_node *dn)
444{
445	struct device_node *port;
446	unsigned int port_num;
447	struct property *prop;
448	phy_interface_t mode;
449	int err;
450
451	priv->moca_port = -1;
452
453	for_each_available_child_of_node(dn, port) {
454		if (of_property_read_u32(port, "reg", &port_num))
455			continue;
456
457		/* Internal PHYs get assigned a specific 'phy-mode' property
458		 * value: "internal" to help flag them before MDIO probing
459		 * has completed, since they might be turned off at that
460		 * time
461		 */
462		err = of_get_phy_mode(port, &mode);
463		if (err)
464			continue;
465
466		if (mode == PHY_INTERFACE_MODE_INTERNAL)
467			priv->int_phy_mask |= 1 << port_num;
468
469		if (mode == PHY_INTERFACE_MODE_MOCA)
470			priv->moca_port = port_num;
471
472		if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
473			priv->brcm_tag_mask |= 1 << port_num;
474
475		/* Ensure that port 5 is not picked up as a DSA CPU port
476		 * flavour but a regular port instead. We should be using
477		 * devlink to be able to set the port flavour.
478		 */
479		if (port_num == 5 && priv->type == BCM7278_DEVICE_ID) {
480			prop = of_find_property(port, "ethernet", NULL);
481			if (prop)
482				of_remove_property(port, prop);
483		}
484	}
485}
486
487static int bcm_sf2_mdio_register(struct dsa_switch *ds)
488{
489	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
490	struct device_node *dn, *child;
491	struct phy_device *phydev;
492	struct property *prop;
493	static int index;
494	int err, reg;
495
496	/* Find our integrated MDIO bus node */
497	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
498	priv->master_mii_bus = of_mdio_find_bus(dn);
499	if (!priv->master_mii_bus) {
500		err = -EPROBE_DEFER;
501		goto err_of_node_put;
502	}
503
504	priv->master_mii_dn = dn;
505
506	priv->slave_mii_bus = mdiobus_alloc();
507	if (!priv->slave_mii_bus) {
508		err = -ENOMEM;
509		goto err_put_master_mii_bus_dev;
510	}
511
512	priv->slave_mii_bus->priv = priv;
513	priv->slave_mii_bus->name = "sf2 slave mii";
514	priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
515	priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
516	snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
517		 index++);
518	priv->slave_mii_bus->dev.of_node = dn;
519
520	/* Include the pseudo-PHY address to divert reads towards our
521	 * workaround. This is only required for 7445D0, since 7445E0
522	 * disconnects the internal switch pseudo-PHY such that we can use the
523	 * regular SWITCH_MDIO master controller instead.
524	 *
525	 * Here we flag the pseudo-PHY as needing special treatment; all
526	 * other PHY reads/writes otherwise go to the master MDIO bus
527	 * controller that comes with this switch, backed by the
528	 * "mdio-unimac" driver.
529	 */
530	if (of_machine_is_compatible("brcm,bcm7445d0"))
531		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0);
532	else
533		priv->indir_phy_mask = 0;
534
535	ds->phys_mii_mask = priv->indir_phy_mask;
536	ds->slave_mii_bus = priv->slave_mii_bus;
537	priv->slave_mii_bus->parent = ds->dev->parent;
538	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
539
540	/* We need to make sure that of_phy_connect() will not work by
541	 * removing the 'phandle' and 'linux,phandle' properties and
542	 * unregistering the existing PHY device that was already registered.
543	 */
544	for_each_available_child_of_node(dn, child) {
545		if (of_property_read_u32(child, "reg", &reg) ||
546		    reg >= PHY_MAX_ADDR)
547			continue;
548
549		if (!(priv->indir_phy_mask & BIT(reg)))
550			continue;
551
552		prop = of_find_property(child, "phandle", NULL);
553		if (prop)
554			of_remove_property(child, prop);
555
556		prop = of_find_property(child, "linux,phandle", NULL);
557		if (prop)
558			of_remove_property(child, prop);
559
560		phydev = of_phy_find_device(child);
561		if (phydev) {
562			phy_device_remove(phydev);
563			phy_device_free(phydev);
564		}
565	}
566
567	err = mdiobus_register(priv->slave_mii_bus);
568	if (err && dn)
569		goto err_free_slave_mii_bus;
570
571	return 0;
572
573err_free_slave_mii_bus:
574	mdiobus_free(priv->slave_mii_bus);
575err_put_master_mii_bus_dev:
576	put_device(&priv->master_mii_bus->dev);
577err_of_node_put:
578	of_node_put(dn);
579	return err;
580}
581
582static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
583{
584	mdiobus_unregister(priv->slave_mii_bus);
585	mdiobus_free(priv->slave_mii_bus);
586	put_device(&priv->master_mii_bus->dev);
587	of_node_put(priv->master_mii_dn);
588}
589
590static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
591{
592	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
593
594	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
595	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
596	 * the REG_PHY_REVISION register layout is.
597	 */
598	if (priv->int_phy_mask & BIT(port))
599		return priv->hw_params.gphy_rev;
600	else
601		return 0;
602}
603
604static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
605				unsigned long *supported,
606				struct phylink_link_state *state)
607{
608	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
609	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
610
611	if (!phy_interface_mode_is_rgmii(state->interface) &&
612	    state->interface != PHY_INTERFACE_MODE_MII &&
613	    state->interface != PHY_INTERFACE_MODE_REVMII &&
614	    state->interface != PHY_INTERFACE_MODE_GMII &&
615	    state->interface != PHY_INTERFACE_MODE_INTERNAL &&
616	    state->interface != PHY_INTERFACE_MODE_MOCA) {
617		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
618		if (port != core_readl(priv, CORE_IMP0_PRT_ID))
619			dev_err(ds->dev,
620				"Unsupported interface: %d for port %d\n",
621				state->interface, port);
622		return;
623	}
624
625	/* Allow all the expected bits */
626	phylink_set(mask, Autoneg);
627	phylink_set_port_modes(mask);
628	phylink_set(mask, Pause);
629	phylink_set(mask, Asym_Pause);
630
631	/* With the exclusion of MII and Reverse MII, we support Gigabit,
632	 * including Half duplex
633	 */
634	if (state->interface != PHY_INTERFACE_MODE_MII &&
635	    state->interface != PHY_INTERFACE_MODE_REVMII) {
636		phylink_set(mask, 1000baseT_Full);
637		phylink_set(mask, 1000baseT_Half);
638	}
639
640	phylink_set(mask, 10baseT_Half);
641	phylink_set(mask, 10baseT_Full);
642	phylink_set(mask, 100baseT_Half);
643	phylink_set(mask, 100baseT_Full);
644
645	bitmap_and(supported, supported, mask,
646		   __ETHTOOL_LINK_MODE_MASK_NBITS);
647	bitmap_and(state->advertising, state->advertising, mask,
648		   __ETHTOOL_LINK_MODE_MASK_NBITS);
649}
650
651static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
652				  unsigned int mode,
653				  const struct phylink_link_state *state)
654{
655	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
656	u32 id_mode_dis = 0, port_mode;
657	u32 reg;
658
659	if (port == core_readl(priv, CORE_IMP0_PRT_ID))
660		return;
661
662	switch (state->interface) {
663	case PHY_INTERFACE_MODE_RGMII:
664		id_mode_dis = 1;
665		fallthrough;
666	case PHY_INTERFACE_MODE_RGMII_TXID:
667		port_mode = EXT_GPHY;
668		break;
669	case PHY_INTERFACE_MODE_MII:
670		port_mode = EXT_EPHY;
671		break;
672	case PHY_INTERFACE_MODE_REVMII:
673		port_mode = EXT_REVMII;
674		break;
675	default:
676		/* Nothing required for all other PHYs: internal and MoCA */
677		return;
678	}
679
680	/* Clear the id_mode_dis bit and the existing port mode; let
681	 * RGMII_MODE_EN be set by mac_link_{up,down}
682	 */
683	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
684	reg &= ~ID_MODE_DIS;
685	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
686
687	reg |= port_mode;
688	if (id_mode_dis)
689		reg |= ID_MODE_DIS;
690
691	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
692}
693
694static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
695				    phy_interface_t interface, bool link)
696{
697	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
698	u32 reg;
699
700	if (!phy_interface_mode_is_rgmii(interface) &&
701	    interface != PHY_INTERFACE_MODE_MII &&
702	    interface != PHY_INTERFACE_MODE_REVMII)
703		return;
704
705	/* If the link is down, just disable the interface to conserve power */
706	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
707	if (link)
708		reg |= RGMII_MODE_EN;
709	else
710		reg &= ~RGMII_MODE_EN;
711	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
712}
713
714static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
715				     unsigned int mode,
716				     phy_interface_t interface)
717{
718	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
719	u32 reg, offset;
720
721	if (priv->wol_ports_mask & BIT(port))
722		return;
723
724	if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
725		if (priv->type == BCM7445_DEVICE_ID)
726			offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
727		else
728			offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
729
730		reg = core_readl(priv, offset);
731		reg &= ~LINK_STS;
732		core_writel(priv, reg, offset);
733	}
734
735	bcm_sf2_sw_mac_link_set(ds, port, interface, false);
736}
737
738static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
739				   unsigned int mode,
740				   phy_interface_t interface,
741				   struct phy_device *phydev,
742				   int speed, int duplex,
743				   bool tx_pause, bool rx_pause)
744{
745	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
746	struct ethtool_eee *p = &priv->dev->ports[port].eee;
747	u32 reg, offset;
748
749	bcm_sf2_sw_mac_link_set(ds, port, interface, true);
750
751	if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
752		if (priv->type == BCM7445_DEVICE_ID)
753			offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
754		else
755			offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
756
757		if (interface == PHY_INTERFACE_MODE_RGMII ||
758		    interface == PHY_INTERFACE_MODE_RGMII_TXID ||
759		    interface == PHY_INTERFACE_MODE_MII ||
760		    interface == PHY_INTERFACE_MODE_REVMII) {
761			reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
762			reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
763
764			if (tx_pause)
765				reg |= TX_PAUSE_EN;
766			if (rx_pause)
767				reg |= RX_PAUSE_EN;
768
769			reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
770		}
771
772		reg = SW_OVERRIDE | LINK_STS;
773		switch (speed) {
774		case SPEED_1000:
775			reg |= SPDSTS_1000 << SPEED_SHIFT;
776			break;
777		case SPEED_100:
778			reg |= SPDSTS_100 << SPEED_SHIFT;
779			break;
780		}
781
782		if (duplex == DUPLEX_FULL)
783			reg |= DUPLX_MODE;
784
785		if (tx_pause)
786			reg |= TXFLOW_CNTL;
787		if (rx_pause)
788			reg |= RXFLOW_CNTL;
789
790		core_writel(priv, reg, offset);
791	}
792
793	if (mode == MLO_AN_PHY && phydev)
794		p->eee_enabled = b53_eee_init(ds, port, phydev);
795}
796
797static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
798				   struct phylink_link_state *status)
799{
800	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
801
802	status->link = false;
803
804	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
805	 * which means that we need to force the link at the port override
806	 * level to get the data to flow. We use what the interrupt handler
807	 * determined earlier.
808	 *
809	 * For the other ports, we just force the link status, since this is
810	 * a fixed PHY device.
811	 */
812	if (port == priv->moca_port) {
813		status->link = priv->port_sts[port].link;
814		/* For MoCA interfaces, also force a link down notification
815		 * since some versions of the user-space daemon (mocad) use
816		 * cmd->autoneg to force the link, which messes up the PHY
817		 * state machine and makes it go into the PHY_FORCING state instead.
818		 */
819		if (!status->link)
820			netif_carrier_off(dsa_to_port(ds, port)->slave);
821		status->duplex = DUPLEX_FULL;
822	} else {
823		status->link = true;
824	}
825}
826
827static void bcm_sf2_enable_acb(struct dsa_switch *ds)
828{
829	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
830	u32 reg;
831
832	/* Enable ACB globally */
833	reg = acb_readl(priv, ACB_CONTROL);
834	reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
835	acb_writel(priv, reg, ACB_CONTROL);
836	reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
837	reg |= ACB_EN | ACB_ALGORITHM;
838	acb_writel(priv, reg, ACB_CONTROL);
839}
840
841static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
842{
843	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
844	unsigned int port;
845
846	bcm_sf2_intr_disable(priv);
847
848	/* Disable all physically present ports, including the IMP
849	 * port; the other ones have already been disabled during
850	 * bcm_sf2_sw_setup()
851	 */
852	for (port = 0; port < ds->num_ports; port++) {
853		if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
854			bcm_sf2_port_disable(ds, port);
855	}
856
857	if (!priv->wol_ports_mask)
858		clk_disable_unprepare(priv->clk);
859
860	return 0;
861}
862
863static int bcm_sf2_sw_resume(struct dsa_switch *ds)
864{
865	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
866	int ret;
867
868	if (!priv->wol_ports_mask)
869		clk_prepare_enable(priv->clk);
870
871	ret = bcm_sf2_sw_rst(priv);
872	if (ret) {
873		pr_err("%s: failed to software reset switch\n", __func__);
874		return ret;
875	}
876
877	ret = bcm_sf2_cfp_resume(ds);
878	if (ret)
879		return ret;
880
881	if (priv->hw_params.num_gphy == 1)
882		bcm_sf2_gphy_enable_set(ds, true);
883
884	ds->ops->setup(ds);
885
886	return 0;
887}
888
889static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
890			       struct ethtool_wolinfo *wol)
891{
892	struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
893	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
894	struct ethtool_wolinfo pwol = { };
895
896	/* Get the parent device WoL settings */
897	if (p->ethtool_ops->get_wol)
898		p->ethtool_ops->get_wol(p, &pwol);
899
900	/* Advertise the parent device supported settings */
901	wol->supported = pwol.supported;
902	memset(&wol->sopass, 0, sizeof(wol->sopass));
903
904	if (pwol.wolopts & WAKE_MAGICSECURE)
905		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));
906
907	if (priv->wol_ports_mask & (1 << port))
908		wol->wolopts = pwol.wolopts;
909	else
910		wol->wolopts = 0;
911}
912
913static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
914			      struct ethtool_wolinfo *wol)
915{
916	struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
917	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
918	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
919	struct ethtool_wolinfo pwol =  { };
920
921	if (p->ethtool_ops->get_wol)
922		p->ethtool_ops->get_wol(p, &pwol);
923	if (wol->wolopts & ~pwol.supported)
924		return -EINVAL;
925
926	if (wol->wolopts)
927		priv->wol_ports_mask |= (1 << port);
928	else
929		priv->wol_ports_mask &= ~(1 << port);
930
931	/* If we have at least one port enabled, make sure the CPU port
932	 * is also enabled. If the CPU port is the last one enabled, we disable
933	 * it since this configuration does not make sense.
934	 */
935	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
936		priv->wol_ports_mask |= (1 << cpu_port);
937	else
938		priv->wol_ports_mask &= ~(1 << cpu_port);
939
940	return p->ethtool_ops->set_wol(p, wol);
941}
942
943static int bcm_sf2_sw_setup(struct dsa_switch *ds)
944{
945	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
946	unsigned int port;
947
948	/* Enable all valid ports and disable those unused */
949	for (port = 0; port < priv->hw_params.num_ports; port++) {
950		/* IMP port receives special treatment */
951		if (dsa_is_user_port(ds, port))
952			bcm_sf2_port_setup(ds, port, NULL);
953		else if (dsa_is_cpu_port(ds, port))
954			bcm_sf2_imp_setup(ds, port);
955		else
956			bcm_sf2_port_disable(ds, port);
957	}
958
959	b53_configure_vlan(ds);
960	bcm_sf2_enable_acb(ds);
961
962	return b53_setup_devlink_resources(ds);
963}
964
965static void bcm_sf2_sw_teardown(struct dsa_switch *ds)
966{
967	dsa_devlink_resources_unregister(ds);
968}
969
970/* The SWITCH_CORE register space is managed by b53 but operates on a page +
971 * register basis so we need to translate that into an address that the
972 * bus-glue understands.
973 */
974#define SF2_PAGE_REG_MKADDR(page, reg)	((page) << 10 | (reg) << 2)
975
976static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
977			      u8 *val)
978{
979	struct bcm_sf2_priv *priv = dev->priv;
980
981	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
982
983	return 0;
984}
985
986static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
987			       u16 *val)
988{
989	struct bcm_sf2_priv *priv = dev->priv;
990
991	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
992
993	return 0;
994}
995
996static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
997			       u32 *val)
998{
999	struct bcm_sf2_priv *priv = dev->priv;
1000
1001	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
1002
1003	return 0;
1004}
1005
1006static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
1007			       u64 *val)
1008{
1009	struct bcm_sf2_priv *priv = dev->priv;
1010
1011	*val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));
1012
1013	return 0;
1014}
1015
1016static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
1017			       u8 value)
1018{
1019	struct bcm_sf2_priv *priv = dev->priv;
1020
1021	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
1022
1023	return 0;
1024}
1025
1026static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
1027				u16 value)
1028{
1029	struct bcm_sf2_priv *priv = dev->priv;
1030
1031	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
1032
1033	return 0;
1034}
1035
1036static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
1037				u32 value)
1038{
1039	struct bcm_sf2_priv *priv = dev->priv;
1040
1041	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
1042
1043	return 0;
1044}
1045
1046static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
1047				u64 value)
1048{
1049	struct bcm_sf2_priv *priv = dev->priv;
1050
1051	core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
1052
1053	return 0;
1054}
1055
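/* MMIO-backed register accessors for b53; 48-bit accesses reuse the 64-bit
 * helpers since there is no native 48-bit MMIO access.
 */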
1056static const struct b53_io_ops bcm_sf2_io_ops = {
1057	.read8	= bcm_sf2_core_read8,
1058	.read16	= bcm_sf2_core_read16,
1059	.read32	= bcm_sf2_core_read32,
1060	.read48	= bcm_sf2_core_read64,
1061	.read64	= bcm_sf2_core_read64,
1062	.write8	= bcm_sf2_core_write8,
1063	.write16 = bcm_sf2_core_write16,
1064	.write32 = bcm_sf2_core_write32,
1065	.write48 = bcm_sf2_core_write64,
1066	.write64 = bcm_sf2_core_write64,
1067};
1068
1069static void bcm_sf2_sw_get_strings(struct dsa_switch *ds, int port,
1070				   u32 stringset, uint8_t *data)
1071{
1072	int cnt = b53_get_sset_count(ds, port, stringset);
1073
1074	b53_get_strings(ds, port, stringset, data);
1075	bcm_sf2_cfp_get_strings(ds, port, stringset,
1076				data + cnt * ETH_GSTRING_LEN);
1077}
1078
1079static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, int port,
1080					 uint64_t *data)
1081{
1082	int cnt = b53_get_sset_count(ds, port, ETH_SS_STATS);
1083
1084	b53_get_ethtool_stats(ds, port, data);
1085	bcm_sf2_cfp_get_ethtool_stats(ds, port, data + cnt);
1086}
1087
1088static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds, int port,
1089				     int sset)
1090{
1091	int cnt = b53_get_sset_count(ds, port, sset);
1092
1093	if (cnt < 0)
1094		return cnt;
1095
1096	cnt += bcm_sf2_cfp_get_sset_count(ds, port, sset);
1097
1098	return cnt;
1099}
1100
1101static const struct dsa_switch_ops bcm_sf2_ops = {
1102	.get_tag_protocol	= b53_get_tag_protocol,
1103	.setup			= bcm_sf2_sw_setup,
1104	.teardown		= bcm_sf2_sw_teardown,
1105	.get_strings		= bcm_sf2_sw_get_strings,
1106	.get_ethtool_stats	= bcm_sf2_sw_get_ethtool_stats,
1107	.get_sset_count		= bcm_sf2_sw_get_sset_count,
1108	.get_ethtool_phy_stats	= b53_get_ethtool_phy_stats,
1109	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
1110	.phylink_validate	= bcm_sf2_sw_validate,
1111	.phylink_mac_config	= bcm_sf2_sw_mac_config,
1112	.phylink_mac_link_down	= bcm_sf2_sw_mac_link_down,
1113	.phylink_mac_link_up	= bcm_sf2_sw_mac_link_up,
1114	.phylink_fixed_state	= bcm_sf2_sw_fixed_state,
1115	.suspend		= bcm_sf2_sw_suspend,
1116	.resume			= bcm_sf2_sw_resume,
1117	.get_wol		= bcm_sf2_sw_get_wol,
1118	.set_wol		= bcm_sf2_sw_set_wol,
1119	.port_enable		= bcm_sf2_port_setup,
1120	.port_disable		= bcm_sf2_port_disable,
1121	.get_mac_eee		= b53_get_mac_eee,
1122	.set_mac_eee		= b53_set_mac_eee,
1123	.port_bridge_join	= b53_br_join,
1124	.port_bridge_leave	= b53_br_leave,
1125	.port_stp_state_set	= b53_br_set_stp_state,
1126	.port_fast_age		= b53_br_fast_age,
1127	.port_vlan_filtering	= b53_vlan_filtering,
1128	.port_vlan_prepare	= b53_vlan_prepare,
1129	.port_vlan_add		= b53_vlan_add,
1130	.port_vlan_del		= b53_vlan_del,
1131	.port_fdb_dump		= b53_fdb_dump,
1132	.port_fdb_add		= b53_fdb_add,
1133	.port_fdb_del		= b53_fdb_del,
1134	.get_rxnfc		= bcm_sf2_get_rxnfc,
1135	.set_rxnfc		= bcm_sf2_set_rxnfc,
1136	.port_mirror_add	= b53_mirror_add,
1137	.port_mirror_del	= b53_mirror_del,
1138	.port_mdb_prepare	= b53_mdb_prepare,
1139	.port_mdb_add		= b53_mdb_add,
1140	.port_mdb_del		= b53_mdb_del,
1141};
1142
1143struct bcm_sf2_of_data {
1144	u32 type;
1145	const u16 *reg_offsets;
1146	unsigned int core_reg_align;
1147	unsigned int num_cfp_rules;
1148};
1149
1150/* Register offsets for the SWITCH_REG_* block */
1151static const u16 bcm_sf2_7445_reg_offsets[] = {
1152	[REG_SWITCH_CNTRL]	= 0x00,
1153	[REG_SWITCH_STATUS]	= 0x04,
1154	[REG_DIR_DATA_WRITE]	= 0x08,
1155	[REG_DIR_DATA_READ]	= 0x0C,
1156	[REG_SWITCH_REVISION]	= 0x18,
1157	[REG_PHY_REVISION]	= 0x1C,
1158	[REG_SPHY_CNTRL]	= 0x2C,
1159	[REG_RGMII_0_CNTRL]	= 0x34,
1160	[REG_RGMII_1_CNTRL]	= 0x40,
1161	[REG_RGMII_2_CNTRL]	= 0x4c,
1162	[REG_LED_0_CNTRL]	= 0x90,
1163	[REG_LED_1_CNTRL]	= 0x94,
1164	[REG_LED_2_CNTRL]	= 0x98,
1165};
1166
1167static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
1168	.type		= BCM7445_DEVICE_ID,
1169	.core_reg_align	= 0,
1170	.reg_offsets	= bcm_sf2_7445_reg_offsets,
1171	.num_cfp_rules	= 256,
1172};
1173
1174static const u16 bcm_sf2_7278_reg_offsets[] = {
1175	[REG_SWITCH_CNTRL]	= 0x00,
1176	[REG_SWITCH_STATUS]	= 0x04,
1177	[REG_DIR_DATA_WRITE]	= 0x08,
1178	[REG_DIR_DATA_READ]	= 0x0c,
1179	[REG_SWITCH_REVISION]	= 0x10,
1180	[REG_PHY_REVISION]	= 0x14,
1181	[REG_SPHY_CNTRL]	= 0x24,
1182	[REG_RGMII_0_CNTRL]	= 0xe0,
1183	[REG_RGMII_1_CNTRL]	= 0xec,
1184	[REG_RGMII_2_CNTRL]	= 0xf8,
1185	[REG_LED_0_CNTRL]	= 0x40,
1186	[REG_LED_1_CNTRL]	= 0x4c,
1187	[REG_LED_2_CNTRL]	= 0x58,
1188};
1189
1190static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
1191	.type		= BCM7278_DEVICE_ID,
1192	.core_reg_align	= 1,
1193	.reg_offsets	= bcm_sf2_7278_reg_offsets,
1194	.num_cfp_rules	= 128,
1195};
1196
1197static const struct of_device_id bcm_sf2_of_match[] = {
1198	{ .compatible = "brcm,bcm7445-switch-v4.0",
1199	  .data = &bcm_sf2_7445_data
1200	},
1201	{ .compatible = "brcm,bcm7278-switch-v4.0",
1202	  .data = &bcm_sf2_7278_data
1203	},
1204	{ .compatible = "brcm,bcm7278-switch-v4.8",
1205	  .data = &bcm_sf2_7278_data
1206	},
1207	{ /* sentinel */ },
1208};
1209MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
1210
1211static int bcm_sf2_sw_probe(struct platform_device *pdev)
1212{
1213	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
1214	struct device_node *dn = pdev->dev.of_node;
1215	const struct of_device_id *of_id = NULL;
1216	const struct bcm_sf2_of_data *data;
1217	struct b53_platform_data *pdata;
1218	struct dsa_switch_ops *ops;
1219	struct device_node *ports;
1220	struct bcm_sf2_priv *priv;
1221	struct b53_device *dev;
1222	struct dsa_switch *ds;
1223	void __iomem **base;
1224	unsigned int i;
1225	u32 reg, rev;
1226	int ret;
1227
1228	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
1229	if (!priv)
1230		return -ENOMEM;
1231
1232	ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
1233	if (!ops)
1234		return -ENOMEM;
1235
1236	dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
1237	if (!dev)
1238		return -ENOMEM;
1239
1240	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1241	if (!pdata)
1242		return -ENOMEM;
1243
1244	of_id = of_match_node(bcm_sf2_of_match, dn);
1245	if (!of_id || !of_id->data)
1246		return -EINVAL;
1247
1248	data = of_id->data;
1249
1250	/* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
1251	priv->type = data->type;
1252	priv->reg_offsets = data->reg_offsets;
1253	priv->core_reg_align = data->core_reg_align;
1254	priv->num_cfp_rules = data->num_cfp_rules;
1255
1256	priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev,
1257								"switch");
1258	if (PTR_ERR(priv->rcdev) == -EPROBE_DEFER)
1259		return PTR_ERR(priv->rcdev);
1260
1261	/* Auto-detection using standard registers will not work, so
1262	 * provide an indication of what kind of device we are for
1263	 * b53_common to work with
1264	 */
1265	pdata->chip_id = priv->type;
1266	dev->pdata = pdata;
1267
1268	priv->dev = dev;
1269	ds = dev->ds;
1270	ds->ops = &bcm_sf2_ops;
1271
1272	/* Advertise the 8 egress queues */
1273	ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;
1274
1275	dev_set_drvdata(&pdev->dev, priv);
1276
1277	spin_lock_init(&priv->indir_lock);
1278	mutex_init(&priv->cfp.lock);
1279	INIT_LIST_HEAD(&priv->cfp.rules_list);
1280
1281	/* CFP rule #0 cannot be used for specific classifications; flag it as
1282	 * permanently used
1283	 */
1284	set_bit(0, priv->cfp.used);
1285	set_bit(0, priv->cfp.unique);
1286
1287	/* Balance of_node_put() done by of_find_node_by_name() */
1288	of_node_get(dn);
1289	ports = of_find_node_by_name(dn, "ports");
1290	if (ports) {
1291		bcm_sf2_identify_ports(priv, ports);
1292		of_node_put(ports);
1293	}
1294
1295	priv->irq0 = irq_of_parse_and_map(dn, 0);
1296	priv->irq1 = irq_of_parse_and_map(dn, 1);
1297
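	/* Map each register range in order; this relies on the iomem pointers
	 * being laid out consecutively in struct bcm_sf2_priv, starting at
	 * 'core', in the same order as BCM_SF2_REGS_NAME.
	 */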
1298	base = &priv->core;
1299	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
1300		*base = devm_platform_ioremap_resource(pdev, i);
1301		if (IS_ERR(*base)) {
1302			pr_err("unable to find register: %s\n", reg_names[i]);
1303			return PTR_ERR(*base);
1304		}
1305		base++;
1306	}
1307
1308	priv->clk = devm_clk_get_optional(&pdev->dev, "sw_switch");
1309	if (IS_ERR(priv->clk))
1310		return PTR_ERR(priv->clk);
1311
1312	ret = clk_prepare_enable(priv->clk);
1313	if (ret)
1314		return ret;
1315
1316	priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv");
1317	if (IS_ERR(priv->clk_mdiv)) {
1318		ret = PTR_ERR(priv->clk_mdiv);
1319		goto out_clk;
1320	}
1321
1322	ret = clk_prepare_enable(priv->clk_mdiv);
1323	if (ret)
1324		goto out_clk;
1325
1326	ret = bcm_sf2_sw_rst(priv);
1327	if (ret) {
1328		pr_err("unable to software reset switch: %d\n", ret);
1329		goto out_clk_mdiv;
1330	}
1331
1332	bcm_sf2_gphy_enable_set(priv->dev->ds, true);
1333
1334	ret = bcm_sf2_mdio_register(ds);
1335	if (ret) {
1336		pr_err("failed to register MDIO bus\n");
1337		goto out_clk_mdiv;
1338	}
1339
1340	bcm_sf2_gphy_enable_set(priv->dev->ds, false);
1341
1342	ret = bcm_sf2_cfp_rst(priv);
1343	if (ret) {
1344		pr_err("failed to reset CFP\n");
1345		goto out_mdio;
1346	}
1347
1348	/* Disable all interrupts and request them */
1349	bcm_sf2_intr_disable(priv);
1350
1351	ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
1352			       "switch_0", ds);
1353	if (ret < 0) {
1354		pr_err("failed to request switch_0 IRQ\n");
1355		goto out_mdio;
1356	}
1357
1358	ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
1359			       "switch_1", ds);
1360	if (ret < 0) {
1361		pr_err("failed to request switch_1 IRQ\n");
1362		goto out_mdio;
1363	}
1364
1365	/* Reset the MIB counters */
1366	reg = core_readl(priv, CORE_GMNCFGCFG);
1367	reg |= RST_MIB_CNT;
1368	core_writel(priv, reg, CORE_GMNCFGCFG);
1369	reg &= ~RST_MIB_CNT;
1370	core_writel(priv, reg, CORE_GMNCFGCFG);
1371
1372	/* Get the maximum number of ports for this switch */
1373	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
1374	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
1375		priv->hw_params.num_ports = DSA_MAX_PORTS;
1376
1377	/* Assume a single GPHY setup if we can't read that property */
1378	if (of_property_read_u32(dn, "brcm,num-gphy",
1379				 &priv->hw_params.num_gphy))
1380		priv->hw_params.num_gphy = 1;
1381
1382	rev = reg_readl(priv, REG_SWITCH_REVISION);
1383	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
1384					SWITCH_TOP_REV_MASK;
1385	priv->hw_params.core_rev = (rev & SF2_REV_MASK);
1386
1387	rev = reg_readl(priv, REG_PHY_REVISION);
1388	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
1389
1390	ret = b53_switch_register(dev);
1391	if (ret)
1392		goto out_mdio;
1393
1394	dev_info(&pdev->dev,
1395		 "Starfighter 2 top: %x.%02x, core: %x.%02x, IRQs: %d, %d\n",
1396		 priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
1397		 priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
1398		 priv->irq0, priv->irq1);
1399
1400	return 0;
1401
1402out_mdio:
1403	bcm_sf2_mdio_unregister(priv);
1404out_clk_mdiv:
1405	clk_disable_unprepare(priv->clk_mdiv);
1406out_clk:
1407	clk_disable_unprepare(priv->clk);
1408	return ret;
1409}
1410
1411static int bcm_sf2_sw_remove(struct platform_device *pdev)
1412{
1413	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
1414
1415	priv->wol_ports_mask = 0;
1416	/* Disable interrupts */
1417	bcm_sf2_intr_disable(priv);
1418	dsa_unregister_switch(priv->dev->ds);
1419	bcm_sf2_cfp_exit(priv->dev->ds);
1420	bcm_sf2_mdio_unregister(priv);
1421	clk_disable_unprepare(priv->clk_mdiv);
1422	clk_disable_unprepare(priv->clk);
1423	if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev))
1424		reset_control_assert(priv->rcdev);
1425
1426	return 0;
1427}
1428
1429static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
1430{
1431	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
1432
1433	/* For a kernel about to be kexec'd we want to keep the GPHY on for a
1434	 * successful MDIO bus scan to occur. If we did turn off the GPHY
1435	 * before (e.g: port_disable), this will also power it back on.
1436	 *
1437	 * Do not rely on kexec_in_progress, just power the PHY on.
1438	 */
1439	if (priv->hw_params.num_gphy == 1)
1440		bcm_sf2_gphy_enable_set(priv->dev->ds, true);
1441}
1442
1443#ifdef CONFIG_PM_SLEEP
1444static int bcm_sf2_suspend(struct device *dev)
1445{
1446	struct bcm_sf2_priv *priv = dev_get_drvdata(dev);
1447
1448	return dsa_switch_suspend(priv->dev->ds);
1449}
1450
1451static int bcm_sf2_resume(struct device *dev)
1452{
1453	struct bcm_sf2_priv *priv = dev_get_drvdata(dev);
1454
1455	return dsa_switch_resume(priv->dev->ds);
1456}
1457#endif /* CONFIG_PM_SLEEP */
1458
1459static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
1460			 bcm_sf2_suspend, bcm_sf2_resume);
1461
1462
1463static struct platform_driver bcm_sf2_driver = {
1464	.probe	= bcm_sf2_sw_probe,
1465	.remove	= bcm_sf2_sw_remove,
1466	.shutdown = bcm_sf2_sw_shutdown,
1467	.driver = {
1468		.name = "brcm-sf2",
1469		.of_match_table = bcm_sf2_of_match,
1470		.pm = &bcm_sf2_pm_ops,
1471	},
1472};
1473module_platform_driver(bcm_sf2_driver);
1474
1475MODULE_AUTHOR("Broadcom Corporation");
1476MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
1477MODULE_LICENSE("GPL");
1478MODULE_ALIAS("platform:brcm-sf2");
1479