// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2019 Texas Instruments
 */

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/delay.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>

#include <net/page_pool.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/devlink.h>

#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "cpsw_sl.h"
#include "cpsw_switchdev.h"
#include "cpts.h"
#include "davinci_cpdma.h"

static int debug_level;
static int ale_ageout = CPSW_ALE_AGEOUT_DEFAULT;
static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;

struct cpsw_devlink {
	struct cpsw_common *cpsw;
};

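/* Driver-specific devlink parameter IDs must not collide with the generic
 * parameter ID space, so they start at DEVLINK_PARAM_GENERIC_ID_MAX.
 */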
enum cpsw_devlink_param_id {
	CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	CPSW_DL_PARAM_SWITCH_MODE,
	CPSW_DL_PARAM_ALE_BYPASS,
};

/* The struct cpsw_common argument is not needed here; it is kept for
 * compatibility with the old driver.
 */
static int cpsw_slave_index_priv(struct cpsw_common *cpsw,
				 struct cpsw_priv *priv)
{
	if (priv->emac_port == HOST_PORT_NUM)
		return -1;

	return priv->emac_port - 1;
}

static bool cpsw_is_switch_en(struct cpsw_common *cpsw)
{
	return !cpsw->data.dual_emac;
}

static void cpsw_set_promiscuous(struct net_device *ndev, bool enable)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	bool enable_uni = false;
	int i;

	if (cpsw_is_switch_en(cpsw))
		return;

	/* Enabling promiscuous mode on one interface affects both
	 * interfaces, as they share the same hardware resource.
	 */
	for (i = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].ndev &&
		    (cpsw->slaves[i].ndev->flags & IFF_PROMISC))
			enable_uni = true;

	if (!enable && enable_uni) {
		enable = enable_uni;
		dev_dbg(cpsw->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
	}

	if (enable) {
		/* Enable unknown unicast, reg/unreg mcast */
		cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
				     ALE_P0_UNI_FLOOD, 1);

		dev_dbg(cpsw->dev, "promiscuity enabled\n");
	} else {
		/* Disable unknown unicast */
		cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
				     ALE_P0_UNI_FLOOD, 0);
		dev_dbg(cpsw->dev, "promiscuity disabled\n");
	}
}

/**
 * cpsw_set_mc - add a multicast entry to the ALE table if not present,
 * or delete it if present
 * @ndev: device to sync
 * @addr: address to be added or deleted
 * @vid: vlan id, if vid < 0 set/unset address for the real device
 * @add: add the address if the flag is set, remove it otherwise
 */
static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
		       int vid, int add)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int mask, flags, ret, slave_no;

	slave_no = cpsw_slave_index(cpsw, priv);
	if (vid < 0)
		vid = cpsw->slaves[slave_no].port_vlan;

	mask = ALE_PORT_HOST;
	flags = vid ? ALE_VLAN : 0;

	if (add)
		ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
	else
		ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);

	return ret;
}

static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0, ret = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (found)
		sync_ctx->consumed++;

	if (sync_ctx->flush) {
		if (!found)
			cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
		return 0;
	}

	if (found)
		ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);

	return ret;
}

static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;
	int ret;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 0;

	ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num && !ret)
		ret = cpsw_set_mc(ndev, addr, -1, 1);

	return ret;
}

static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 1;

	vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed == num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (!found)
		return 0;

	sync_ctx->consumed++;
	cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
	return 0;
}

static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.consumed = 0;

	vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cpsw_set_promiscuous(ndev, true);
		cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port);
		return;
	}

	/* Disable promiscuous mode */
	cpsw_set_promiscuous(ndev, false);

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(cpsw->ale,
			      ndev->flags & IFF_ALLMULTI, priv->emac_port);

	/* add/remove mcast address either for real netdev or for vlan */
	__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
			       cpsw_del_mc_addr);
}

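/* A worked sizing example (a sketch: the exact constants live in
 * cpsw_priv.h): for the default CPSW_MAX_PACKET_SIZE of 1522 bytes, the
 * total is CPSW_HEADROOM + 1522 + SKB_DATA_ALIGN(sizeof(struct
 * skb_shared_info)), rounded up with SKB_DATA_ALIGN() so that build_skb()
 * can place the skb_shared_info right behind the packet data in the
 * same page.
 */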
static unsigned int cpsw_rxbuf_total_len(unsigned int len)
{
	len += CPSW_HEADROOM;
	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return SKB_DATA_ALIGN(len);
}

static void cpsw_rx_handler(void *token, int len, int status)
{
	struct page *new_page, *page = token;
	void *pa = page_address(page);
	int headroom = CPSW_HEADROOM;
	struct cpsw_meta_xdp *xmeta;
	struct cpsw_common *cpsw;
	struct net_device *ndev;
	int port, ch, pkt_size;
	struct cpsw_priv *priv;
	struct page_pool *pool;
	struct sk_buff *skb;
	struct xdp_buff xdp;
	int ret = 0;
	dma_addr_t dma;

	xmeta = pa + CPSW_XMETA_OFFSET;
	cpsw = ndev_to_cpsw(xmeta->ndev);
	ndev = xmeta->ndev;
	pkt_size = cpsw->rx_packet_max;
	ch = xmeta->ch;

	if (status >= 0) {
		port = CPDMA_RX_SOURCE_PORT(status);
		if (port)
			ndev = cpsw->slaves[--port].ndev;
	}

	priv = netdev_priv(ndev);
	pool = cpsw->page_pool[ch];

	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
		/* In dual emac mode check for all interfaces */
		if (cpsw->usage_count && status >= 0) {
			/* The packet was received for an interface which is
			 * already down while the other interface is up and
			 * running. Instead of freeing the page, which would
			 * reduce the number of RX descriptors available to
			 * the DMA engine, requeue it back to cpdma.
			 */
			new_page = page;
			goto requeue;
		}

		/* the interface is going down, pages are purged */
		page_pool_recycle_direct(pool, page);
		return;
	}

	new_page = page_pool_dev_alloc_pages(pool);
	if (unlikely(!new_page)) {
		new_page = page;
		ndev->stats.rx_dropped++;
		goto requeue;
	}

	if (priv->xdp_prog) {
		if (status & CPDMA_RX_VLAN_ENCAP) {
			xdp.data = pa + CPSW_HEADROOM +
				   CPSW_RX_VLAN_ENCAP_HDR_SIZE;
			xdp.data_end = xdp.data + len -
				       CPSW_RX_VLAN_ENCAP_HDR_SIZE;
		} else {
			xdp.data = pa + CPSW_HEADROOM;
			xdp.data_end = xdp.data + len;
		}

		xdp_set_data_meta_invalid(&xdp);

		xdp.data_hard_start = pa;
		xdp.rxq = &priv->xdp_rxq[ch];
		xdp.frame_sz = PAGE_SIZE;

		ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port);
		if (ret != CPSW_XDP_PASS)
			goto requeue;

		/* XDP prog might have changed packet data and boundaries */
		len = xdp.data_end - xdp.data;
		headroom = xdp.data - xdp.data_hard_start;

		/* XDP prog can modify vlan tag, so can't use encap header */
		status &= ~CPDMA_RX_VLAN_ENCAP;
	}

	/* pass skb to netstack if no XDP prog or returned XDP_PASS */
	skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
	if (!skb) {
		ndev->stats.rx_dropped++;
		page_pool_recycle_direct(pool, page);
		goto requeue;
	}

	skb->offload_fwd_mark = priv->offload_fwd_mark;
	skb_reserve(skb, headroom);
	skb_put(skb, len);
	skb->dev = ndev;
	if (status & CPDMA_RX_VLAN_ENCAP)
		cpsw_rx_vlan_encap(skb);
	if (priv->rx_ts_enabled)
		cpts_rx_timestamp(cpsw->cpts, skb);
	skb->protocol = eth_type_trans(skb, ndev);

	/* unmap the page as the netstack does not recycle skb pages */
	page_pool_release_page(pool, page);
	netif_receive_skb(skb);

	ndev->stats.rx_bytes += len;
	ndev->stats.rx_packets++;

requeue:
	xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
	xmeta->ndev = ndev;
	xmeta->ch = ch;

	dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
	ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
				       pkt_size, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOMEM);
		page_pool_recycle_direct(pool, new_page);
	}
}

static int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
				   unsigned short vid)
{
	struct cpsw_common *cpsw = priv->cpsw;
	int unreg_mcast_mask = 0;
	int mcast_mask;
	u32 port_mask;
	int ret;

	port_mask = (1 << priv->emac_port) | ALE_PORT_HOST;

	mcast_mask = ALE_PORT_HOST;
	if (priv->ndev->flags & IFF_ALLMULTI)
		unreg_mcast_mask = mcast_mask;

	ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
				unreg_mcast_mask);
	if (ret != 0)
		return ret;

	ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
				 HOST_PORT_NUM, ALE_VLAN, vid);
	if (ret != 0)
		goto clean_vid;

	ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				 mcast_mask, ALE_VLAN, vid, 0);
	if (ret != 0)
		goto clean_vlan_ucast;
	return 0;

clean_vlan_ucast:
	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN, vid);
clean_vid:
	cpsw_ale_del_vlan(cpsw->ale, vid, 0);
	return ret;
}

static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret, i;

	if (cpsw_is_switch_en(cpsw)) {
		dev_dbg(cpsw->dev, ".ndo_vlan_rx_add_vid called in switch mode\n");
		return 0;
	}

	if (vid == cpsw->data.default_vlan)
		return 0;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	/* In dual EMAC, reserved VLAN id should not be used for
	 * creating VLAN interfaces as this can break the dual
	 * EMAC port separation
	 */
	for (i = 0; i < cpsw->data.slaves; i++) {
		if (cpsw->slaves[i].ndev &&
		    vid == cpsw->slaves[i].port_vlan) {
			ret = -EINVAL;
			goto err;
		}
	}

	dev_dbg(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
	ret = cpsw_add_vlan_ale_entry(priv, vid);
err:
	pm_runtime_put(cpsw->dev);
	return ret;
}

static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
{
	struct cpsw_priv *priv = arg;

	if (!vdev || !vid)
		return 0;

	cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
	return 0;
}

/* restore resources after port reset */
static void cpsw_restore(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;

	/* restore vlan configurations */
	vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);

	/* restore MQPRIO offload */
	cpsw_mqprio_resume(&cpsw->slaves[priv->emac_port - 1], priv);

	/* restore CBS offload */
	cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv);
}

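/* 01:80:c2:00:00:00 is the IEEE 802.1D Bridge Group Address to which STP
 * BPDUs are sent; the ALE entry below forwards it to the host port while
 * blocking hardware learning/forwarding for it.
 */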
static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw)
{
	char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0};

	cpsw_ale_add_mcast(cpsw->ale, stpa,
			   ALE_PORT_HOST, ALE_SUPER, 0,
			   ALE_MCAST_BLOCK_LEARN_FWD);
}

static void cpsw_init_host_port_switch(struct cpsw_common *cpsw)
{
	int vlan = cpsw->data.default_vlan;

	writel(CPSW_FIFO_NORMAL_MODE, &cpsw->host_port_regs->tx_in_ctl);

	writel(vlan, &cpsw->host_port_regs->port_vlan);

	cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
			  ALE_ALL_PORTS, ALE_ALL_PORTS,
			  ALE_PORT_1 | ALE_PORT_2);

	cpsw_init_stp_ale_entry(cpsw);

	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
	dev_dbg(cpsw->dev, "Set P0_UNI_FLOOD\n");
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
}

static void cpsw_init_host_port_dual_mac(struct cpsw_common *cpsw)
{
	int vlan = cpsw->data.default_vlan;

	writel(CPSW_FIFO_DUAL_MAC_MODE, &cpsw->host_port_regs->tx_in_ctl);

	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
	dev_dbg(cpsw->dev, "unset P0_UNI_FLOOD\n");

	writel(vlan, &cpsw->host_port_regs->port_vlan);

	cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
	/* learning makes no sense in dual_mac mode */
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
}

static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 control_reg;

	/* soft reset the controller and initialize ale */
	soft_reset("cpsw", &cpsw->regs->soft_reset);
	cpsw_ale_start(cpsw->ale);

	/* switch to vlan aware mode */
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
			     CPSW_ALE_VLAN_AWARE);
	control_reg = readl(&cpsw->regs->control);
	control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
	writel(control_reg, &cpsw->regs->control);

	/* setup host port priority mapping */
	writel_relaxed(CPDMA_TX_PRIORITY_MAP,
		       &cpsw->host_port_regs->cpdma_tx_pri_map);
	writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);

	/* disable priority elevation */
	writel_relaxed(0, &cpsw->regs->ptype);

	/* enable statistics collection on all ports */
	writel_relaxed(0x7, &cpsw->regs->stat_port_en);

	/* Enable internal fifo flow control */
	writel(0x7, &cpsw->regs->flow_control);

	if (cpsw_is_switch_en(cpsw))
		cpsw_init_host_port_switch(cpsw);
	else
		cpsw_init_host_port_dual_mac(cpsw);

	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
}

static void cpsw_port_add_dual_emac_def_ale_entries(struct cpsw_priv *priv,
						    struct cpsw_slave *slave)
{
	u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
	struct cpsw_common *cpsw = priv->cpsw;
	u32 reg;

	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
	       CPSW2_PORT_VLAN;
	slave_write(slave, slave->port_vlan, reg);

	cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
			  port_mask, port_mask, 0);
	cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
			   ALE_PORT_HOST, ALE_VLAN, slave->port_vlan,
			   ALE_MCAST_FWD);
	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN |
			   ALE_SECURE, slave->port_vlan);
	cpsw_ale_control_set(cpsw->ale, priv->emac_port,
			     ALE_PORT_DROP_UNKNOWN_VLAN, 1);
	/* learning makes no sense in dual_mac mode */
	cpsw_ale_control_set(cpsw->ale, priv->emac_port,
			     ALE_PORT_NOLEARN, 1);
}

static void cpsw_port_add_switch_def_ale_entries(struct cpsw_priv *priv,
						 struct cpsw_slave *slave)
{
	u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
	struct cpsw_common *cpsw = priv->cpsw;
	u32 reg;

	cpsw_ale_control_set(cpsw->ale, priv->emac_port,
			     ALE_PORT_DROP_UNKNOWN_VLAN, 0);
	cpsw_ale_control_set(cpsw->ale, priv->emac_port,
			     ALE_PORT_NOLEARN, 0);
	/* Disabling SA_UPDATE is required to make STP work; without it,
	 * host MAC addresses will jump between ports.
	 * As per the TRM, a MAC address can be defined as unicast
	 * supervisory (super) by setting both (ALE_BLOCKED | ALE_SECURE),
	 * which should prevent SA_UPDATE, but the HW seems to work
	 * incorrectly and setting ALE_SECURE causes STP packets to be
	 * dropped due to the ingress filter:
	 *	if (source address found) and (secure) and
	 *	   (receive port number != port_number)
	 *	   then discard the packet
	 */
	cpsw_ale_control_set(cpsw->ale, priv->emac_port,
			     ALE_PORT_NO_SA_UPDATE, 1);

	cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
			   port_mask, ALE_VLAN, slave->port_vlan,
			   ALE_MCAST_FWD_2);
	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN, slave->port_vlan);

	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
	       CPSW2_PORT_VLAN;
	slave_write(slave, slave->port_vlan, reg);
}

static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	struct phy_device *phy;
	u32 mac_control = 0;

	slave = &cpsw->slaves[priv->emac_port - 1];
	phy = slave->phy;

	if (!phy)
		return;

	if (phy->link) {
		mac_control = CPSW_SL_CTL_GMII_EN;

		if (phy->speed == 1000)
			mac_control |= CPSW_SL_CTL_GIG;
		if (phy->duplex)
			mac_control |= CPSW_SL_CTL_FULLDUPLEX;

		/* set speed_in input in case RMII mode is used at 100Mbps */
		if (phy->speed == 100)
			mac_control |= CPSW_SL_CTL_IFCTL_A;
		/* in-band mode only works at 10Mbps with RGMII */
		else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
			mac_control |= CPSW_SL_CTL_EXT_EN; /* in-band mode */

		if (priv->rx_pause)
			mac_control |= CPSW_SL_CTL_RX_FLOW_EN;

		if (priv->tx_pause)
			mac_control |= CPSW_SL_CTL_TX_FLOW_EN;

		if (mac_control != slave->mac_control)
			cpsw_sl_ctl_set(slave->mac_sl, mac_control);

		/* enable forwarding */
		cpsw_ale_control_set(cpsw->ale, priv->emac_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		netif_tx_wake_all_queues(ndev);

		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed &&
		    !cpsw_shp_is_off(priv))
			dev_warn(priv->dev, "Speed was changed, CBS shaper speeds are changed!");
	} else {
		netif_tx_stop_all_queues(ndev);

		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(cpsw->ale, priv->emac_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

		cpsw_sl_wait_for_idle(slave->mac_sl, 100);

		cpsw_sl_ctl_reset(slave->mac_sl);
	}

	if (mac_control != slave->mac_control)
		phy_print_status(phy);

	slave->mac_control = mac_control;

	if (phy->link && cpsw_need_resplit(cpsw))
		cpsw_split_res(cpsw);
}

static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct phy_device *phy;

	cpsw_sl_reset(slave->mac_sl, 100);
	cpsw_sl_ctl_reset(slave->mac_sl);

	/* setup priority mapping */
	cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
			  RX_PRIORITY_MAPPING);

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 to support full-duplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 to support full-duplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
		break;
	}

	/* setup max packet size, and mac address */
	cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
			  cpsw->rx_packet_max);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	if (cpsw_is_switch_en(cpsw))
		cpsw_port_add_switch_def_ale_entries(priv, slave);
	else
		cpsw_port_add_dual_emac_def_ale_entries(priv, slave);

	if (!slave->data->phy_node)
		dev_err(priv->dev, "no phy found on slave %d\n",
			slave->slave_num);
	phy = of_phy_connect(priv->ndev, slave->data->phy_node,
			     &cpsw_adjust_link, 0, slave->data->phy_if);
	if (!phy) {
		dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
			slave->data->phy_node,
			slave->slave_num);
		return;
	}
	slave->phy = phy;

	phy_attached_info(slave->phy);

	phy_start(slave->phy);

	/* Configure GMII_SEL register */
	phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
			 slave->data->phy_if);
}

static int cpsw_ndo_stop(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;

	cpsw_info(priv, ifdown, "shutting down ndev\n");
	slave = &cpsw->slaves[priv->emac_port - 1];
	if (slave->phy)
		phy_stop(slave->phy);

	netif_tx_stop_all_queues(priv->ndev);

	if (slave->phy) {
		phy_disconnect(slave->phy);
		slave->phy = NULL;
	}

	__hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);

	if (cpsw->usage_count <= 1) {
		napi_disable(&cpsw->napi_rx);
		napi_disable(&cpsw->napi_tx);
		cpts_unregister(cpsw->cpts);
		cpsw_intr_disable(cpsw);
		cpdma_ctlr_stop(cpsw->dma);
		cpsw_ale_stop(cpsw->ale);
		cpsw_destroy_xdp_rxqs(cpsw);
	}

	if (cpsw_need_resplit(cpsw))
		cpsw_split_res(cpsw);

	cpsw->usage_count--;
	pm_runtime_put_sync(cpsw->dev);
	return 0;
}

static int cpsw_ndo_open(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	dev_info(priv->dev, "starting ndev. mode: %s\n",
		 cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac");
	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of tx queues\n");
		goto pm_cleanup;
	}

	ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of rx queues\n");
		goto pm_cleanup;
	}

	/* Initialize host and slave ports */
	if (!cpsw->usage_count)
		cpsw_init_host_port(priv);
	cpsw_slave_open(&cpsw->slaves[priv->emac_port - 1], priv);

	/* initialize shared resources for every ndev */
	if (!cpsw->usage_count) {
		/* create rxqs for both interfaces in dual_mac mode, as they
		 * use the same pool and must be destroyed together when
		 * there are no more users.
		 */
		ret = cpsw_create_xdp_rxqs(cpsw);
		if (ret < 0)
			goto err_cleanup;

		ret = cpsw_fill_rx_channels(priv);
		if (ret < 0)
			goto err_cleanup;

		if (cpsw->cpts) {
			if (cpts_register(cpsw->cpts))
				dev_err(priv->dev, "error registering cpts device\n");
			else
				writel(0x10, &cpsw->wr_regs->misc_en);
		}

		napi_enable(&cpsw->napi_rx);
		napi_enable(&cpsw->napi_tx);

		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}

		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	cpsw_restore(priv);

	/* Enable Interrupt pacing if configured */
	if (cpsw->coal_intvl != 0) {
		struct ethtool_coalesce coal;

		coal.rx_coalesce_usecs = cpsw->coal_intvl;
		cpsw_set_coalesce(ndev, &coal);
	}

	cpdma_ctlr_start(cpsw->dma);
	cpsw_intr_enable(cpsw);
	cpsw->usage_count++;

	return 0;

err_cleanup:
	cpsw_ndo_stop(ndev);

pm_cleanup:
	pm_runtime_put_sync(cpsw->dev);
	return ret;
}

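/* TX path. Frames shorter than tx_packet_min are padded, and the standard
 * stop-queue/memory-barrier/re-check pattern below closes the race with
 * the TX completion handler freeing descriptors on another CPU.
 */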
static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpts *cpts = cpsw->cpts;
	struct netdev_queue *txq;
	struct cpdma_chan *txch;
	int ret, q_idx;

	if (skb_put_padto(skb, READ_ONCE(priv->tx_packet_min))) {
		cpsw_err(priv, tx_err, "packet pad failed\n");
		ndev->stats.tx_dropped++;
		return NET_XMIT_DROP;
	}

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	q_idx = skb_get_queue_mapping(skb);
	if (q_idx >= cpsw->tx_ch_num)
		q_idx = q_idx % cpsw->tx_ch_num;

	txch = cpsw->txv[q_idx].ch;
	txq = netdev_get_tx_queue(ndev, q_idx);
	skb_tx_timestamp(skb);
	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
				priv->emac_port);
	if (unlikely(ret != 0)) {
		cpsw_err(priv, tx_err, "desc submit failed\n");
		goto fail;
	}

	/* If there are no free TX descriptors left, tell the
	 * kernel to stop sending us tx frames.
	 */
	if (unlikely(!cpdma_check_free_tx_desc(txch))) {
		netif_tx_stop_queue(txq);

		/* Barrier, so that stop_queue is visible to other cpus */
		smp_mb__after_atomic();

		if (cpdma_check_free_tx_desc(txch))
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
fail:
	ndev->stats.tx_dropped++;
	netif_tx_stop_queue(txq);

	/* Barrier, so that stop_queue is visible to other cpus */
	smp_mb__after_atomic();

	if (cpdma_check_free_tx_desc(txch))
		netif_tx_wake_queue(txq);

	return NETDEV_TX_BUSY;
}

static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = (struct sockaddr *)p;
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret, slave_no;
	int flags = 0;
	u16 vid = 0;

	slave_no = cpsw_slave_index(cpsw, priv);
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	vid = cpsw->slaves[slave_no].port_vlan;
	flags = ALE_VLAN | ALE_SECURE;

	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
			   flags, vid);
	cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
			   flags, vid);

	ether_addr_copy(priv->mac_addr, addr->sa_data);
	ether_addr_copy(ndev->dev_addr, priv->mac_addr);
	cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv);

	pm_runtime_put(cpsw->dev);

	return 0;
}

static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
				     __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;
	int i;

	if (cpsw_is_switch_en(cpsw)) {
		dev_dbg(cpsw->dev, "ndo del vlan is called in switch mode\n");
		return 0;
	}

	if (vid == cpsw->data.default_vlan)
		return 0;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	/* reset the return code, as pm_runtime_get_sync() can return
	 * non-zero values as well.
	 */
	ret = 0;
	for (i = 0; i < cpsw->data.slaves; i++) {
		if (cpsw->slaves[i].ndev &&
		    vid == cpsw->slaves[i].port_vlan) {
			ret = -EINVAL;
			goto err;
		}
	}

	dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid);
	ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
	if (ret)
		dev_err(priv->dev, "cpsw_ale_del_vlan() failed: ret %d\n", ret);
	ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
				 HOST_PORT_NUM, ALE_VLAN, vid);
	if (ret)
		dev_err(priv->dev, "cpsw_ale_del_ucast() failed: ret %d\n",
			ret);
	ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
				 0, ALE_VLAN, vid);
	if (ret)
		dev_err(priv->dev, "cpsw_ale_del_mcast failed. ret %d\n",
			ret);
	cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
	ret = 0;
err:
	pm_runtime_put(cpsw->dev);
	return ret;
}

static int cpsw_ndo_get_phys_port_name(struct net_device *ndev, char *name,
				       size_t len)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int err;

	err = snprintf(name, len, "p%d", priv->emac_port);

	if (err >= len)
		return -EINVAL;

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	cpsw_intr_disable(cpsw);
	cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
	cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
	cpsw_intr_enable(cpsw);
}
#endif

static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
			     struct xdp_frame **frames, u32 flags)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct xdp_frame *xdpf;
	int i, drops = 0;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		xdpf = frames[i];
		if (xdpf->len < READ_ONCE(priv->tx_packet_min)) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
			continue;
		}

		if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
			drops++;
	}

	return n - drops;
}

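/* switchdev uses this ID to tell which ports belong to the same physical
 * switch; both cpsw ports report the shared, randomly generated base_mac
 * (see cpsw_probe()), so the bridge offload logic can group them.
 */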
static int cpsw_get_port_parent_id(struct net_device *ndev,
				   struct netdev_phys_item_id *ppid)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	ppid->id_len = sizeof(cpsw->base_mac);
	memcpy(&ppid->id, &cpsw->base_mac, ppid->id_len);

	return 0;
}

static const struct net_device_ops cpsw_netdev_ops = {
	.ndo_open		= cpsw_ndo_open,
	.ndo_stop		= cpsw_ndo_stop,
	.ndo_start_xmit		= cpsw_ndo_start_xmit,
	.ndo_set_mac_address	= cpsw_ndo_set_mac_address,
	.ndo_do_ioctl		= cpsw_ndo_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
	.ndo_set_rx_mode	= cpsw_ndo_set_rx_mode,
	.ndo_set_tx_maxrate	= cpsw_ndo_set_tx_maxrate,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cpsw_ndo_poll_controller,
#endif
	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
	.ndo_setup_tc           = cpsw_ndo_setup_tc,
	.ndo_get_phys_port_name = cpsw_ndo_get_phys_port_name,
	.ndo_bpf		= cpsw_ndo_bpf,
	.ndo_xdp_xmit		= cpsw_ndo_xdp_xmit,
	.ndo_get_port_parent_id	= cpsw_get_port_parent_id,
};

static void cpsw_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct platform_device *pdev;

	pdev = to_platform_device(cpsw->dev);
	strlcpy(info->driver, "cpsw-switch", sizeof(info->driver));
	strlcpy(info->version, "2.0", sizeof(info->version));
	strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}

static int cpsw_set_pauseparam(struct net_device *ndev,
			       struct ethtool_pauseparam *pause)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_priv *priv = netdev_priv(ndev);
	int slave_no;

	slave_no = cpsw_slave_index(cpsw, priv);
	if (!cpsw->slaves[slave_no].phy)
		return -EINVAL;

	if (!phy_validate_pause(cpsw->slaves[slave_no].phy, pause))
		return -EINVAL;

	priv->rx_pause = pause->rx_pause ? true : false;
	priv->tx_pause = pause->tx_pause ? true : false;

	phy_set_asym_pause(cpsw->slaves[slave_no].phy,
			   priv->rx_pause, priv->tx_pause);

	return 0;
}

static int cpsw_set_channels(struct net_device *ndev,
			     struct ethtool_channels *chs)
{
	return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
}

static const struct ethtool_ops cpsw_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
	.get_drvinfo		= cpsw_get_drvinfo,
	.get_msglevel		= cpsw_get_msglevel,
	.set_msglevel		= cpsw_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= cpsw_get_ts_info,
	.get_coalesce		= cpsw_get_coalesce,
	.set_coalesce		= cpsw_set_coalesce,
	.get_sset_count		= cpsw_get_sset_count,
	.get_strings		= cpsw_get_strings,
	.get_ethtool_stats	= cpsw_get_ethtool_stats,
	.get_pauseparam		= cpsw_get_pauseparam,
	.set_pauseparam		= cpsw_set_pauseparam,
	.get_wol		= cpsw_get_wol,
	.set_wol		= cpsw_set_wol,
	.get_regs_len		= cpsw_get_regs_len,
	.get_regs		= cpsw_get_regs,
	.begin			= cpsw_ethtool_op_begin,
	.complete		= cpsw_ethtool_op_complete,
	.get_channels		= cpsw_get_channels,
	.set_channels		= cpsw_set_channels,
	.get_link_ksettings	= cpsw_get_link_ksettings,
	.set_link_ksettings	= cpsw_set_link_ksettings,
	.get_eee		= cpsw_get_eee,
	.set_eee		= cpsw_set_eee,
	.nway_reset		= cpsw_nway_reset,
	.get_ringparam		= cpsw_get_ringparam,
	.set_ringparam		= cpsw_set_ringparam,
};

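/* Expected "ethernet-ports" layout, a sketch based on the ti,cpsw-switch
 * DT binding (node names and the phy handle below are illustrative):
 *
 *	ethernet-ports {
 *		port@1 {
 *			reg = <1>;
 *			phy-handle = <&ethphy0>;
 *			phy-mode = "rgmii";
 *			ti,dual-emac-pvid = <1>;
 *		};
 *		port@2 { ... };
 *	};
 */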
static int cpsw_probe_dt(struct cpsw_common *cpsw)
{
	struct device_node *node = cpsw->dev->of_node, *tmp_node, *port_np;
	struct cpsw_platform_data *data = &cpsw->data;
	struct device *dev = cpsw->dev;
	int ret;
	u32 prop;

	if (!node)
		return -EINVAL;

	tmp_node = of_get_child_by_name(node, "ethernet-ports");
	if (!tmp_node)
		return -ENOENT;
	data->slaves = of_get_child_count(tmp_node);
	if (data->slaves != CPSW_SLAVE_PORTS_NUM) {
		of_node_put(tmp_node);
		return -ENOENT;
	}

	data->active_slave = 0;
	data->channels = CPSW_MAX_QUEUES;
	data->dual_emac = true;
	data->bd_ram_size = CPSW_BD_RAM_SIZE;
	data->mac_control = 0;

	data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM,
					sizeof(struct cpsw_slave_data),
					GFP_KERNEL);
	if (!data->slave_data) {
		of_node_put(tmp_node);
		return -ENOMEM;
	}

	/* Populate all the child nodes here...
	 */
	ret = devm_of_platform_populate(dev);
	/* We do not want to force this, as some platforms may not have
	 * any child nodes.
	 */
	if (ret)
		dev_warn(dev, "Doesn't have any child node\n");

	for_each_child_of_node(tmp_node, port_np) {
		struct cpsw_slave_data *slave_data;
		const void *mac_addr;
		u32 port_id;

		ret = of_property_read_u32(port_np, "reg", &port_id);
		if (ret < 0) {
			dev_err(dev, "%pOF error reading port_id %d\n",
				port_np, ret);
			goto err_node_put;
		}

		if (!port_id || port_id > CPSW_SLAVE_PORTS_NUM) {
			dev_err(dev, "%pOF has invalid port_id %u\n",
				port_np, port_id);
			ret = -EINVAL;
			goto err_node_put;
		}

		slave_data = &data->slave_data[port_id - 1];

		slave_data->disabled = !of_device_is_available(port_np);
		if (slave_data->disabled)
			continue;

		slave_data->slave_node = port_np;
		slave_data->ifphy = devm_of_phy_get(dev, port_np, NULL);
		if (IS_ERR(slave_data->ifphy)) {
			ret = PTR_ERR(slave_data->ifphy);
			dev_err(dev, "%pOF: Error retrieving port phy: %d\n",
				port_np, ret);
			goto err_node_put;
		}

		if (of_phy_is_fixed_link(port_np)) {
			ret = of_phy_register_fixed_link(port_np);
			if (ret) {
				if (ret != -EPROBE_DEFER)
					dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
						port_np, ret);
				goto err_node_put;
			}
			slave_data->phy_node = of_node_get(port_np);
		} else {
			slave_data->phy_node =
				of_parse_phandle(port_np, "phy-handle", 0);
		}

		if (!slave_data->phy_node) {
			dev_err(dev, "%pOF no phy found\n", port_np);
			ret = -ENODEV;
			goto err_node_put;
		}

		ret = of_get_phy_mode(port_np, &slave_data->phy_if);
		if (ret) {
			dev_err(dev, "%pOF read phy-mode err %d\n",
				port_np, ret);
			goto err_node_put;
		}

		mac_addr = of_get_mac_address(port_np);
		if (!IS_ERR(mac_addr)) {
			ether_addr_copy(slave_data->mac_addr, mac_addr);
		} else {
			ret = ti_cm_get_macid(dev, port_id - 1,
					      slave_data->mac_addr);
			if (ret)
				goto err_node_put;
		}

		if (of_property_read_u32(port_np, "ti,dual-emac-pvid",
					 &prop)) {
			dev_err(dev, "%pOF missing \"ti,dual-emac-pvid\" property in DT\n",
				port_np);
			slave_data->dual_emac_res_vlan = port_id;
			dev_err(dev, "%pOF using %d as reserved VLAN\n",
				port_np, slave_data->dual_emac_res_vlan);
		} else {
			slave_data->dual_emac_res_vlan = prop;
		}
	}

	of_node_put(tmp_node);
	return 0;

err_node_put:
	of_node_put(port_np);
	of_node_put(tmp_node);
	return ret;
}

static void cpsw_remove_dt(struct cpsw_common *cpsw)
{
	struct cpsw_platform_data *data = &cpsw->data;
	int i = 0;

	for (i = 0; i < cpsw->data.slaves; i++) {
		struct cpsw_slave_data *slave_data = &data->slave_data[i];
		struct device_node *port_np = slave_data->phy_node;

		if (port_np) {
			if (of_phy_is_fixed_link(port_np))
				of_phy_deregister_fixed_link(port_np);

			of_node_put(port_np);
		}
	}
}

static int cpsw_create_ports(struct cpsw_common *cpsw)
{
	struct cpsw_platform_data *data = &cpsw->data;
	struct net_device *ndev, *napi_ndev = NULL;
	struct device *dev = cpsw->dev;
	struct cpsw_priv *priv;
	int ret = 0, i = 0;

	for (i = 0; i < cpsw->data.slaves; i++) {
		struct cpsw_slave_data *slave_data = &data->slave_data[i];

		if (slave_data->disabled)
			continue;

		ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
					       CPSW_MAX_QUEUES,
					       CPSW_MAX_QUEUES);
		if (!ndev) {
			dev_err(dev, "error allocating net_device\n");
			return -ENOMEM;
		}

		priv = netdev_priv(ndev);
		priv->cpsw = cpsw;
		priv->ndev = ndev;
		priv->dev  = dev;
		priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
		priv->emac_port = i + 1;
		priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;

		if (is_valid_ether_addr(slave_data->mac_addr)) {
			ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
			dev_info(cpsw->dev, "Detected MACID = %pM\n",
				 priv->mac_addr);
		} else {
			eth_random_addr(slave_data->mac_addr);
			dev_info(cpsw->dev, "Random MACID = %pM\n",
				 slave_data->mac_addr);
		}
		ether_addr_copy(ndev->dev_addr, slave_data->mac_addr);
		ether_addr_copy(priv->mac_addr, slave_data->mac_addr);

		cpsw->slaves[i].ndev = ndev;

		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
				  NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL;

		ndev->netdev_ops = &cpsw_netdev_ops;
		ndev->ethtool_ops = &cpsw_ethtool_ops;
		SET_NETDEV_DEV(ndev, dev);

		if (!napi_ndev) {
			/* The CPSW host port CPDMA interface is shared
			 * between ports, and there is only one TX and one
			 * RX IRQ available for all possible TX and RX
			 * channels respectively.
			 */
			netif_napi_add(ndev, &cpsw->napi_rx,
				       cpsw->quirk_irq ?
				       cpsw_rx_poll : cpsw_rx_mq_poll,
				       CPSW_POLL_WEIGHT);
			netif_tx_napi_add(ndev, &cpsw->napi_tx,
					  cpsw->quirk_irq ?
					  cpsw_tx_poll : cpsw_tx_mq_poll,
					  CPSW_POLL_WEIGHT);
		}

		napi_ndev = ndev;
	}

	return ret;
}

static void cpsw_unregister_ports(struct cpsw_common *cpsw)
{
	int i = 0;

	for (i = 0; i < cpsw->data.slaves; i++) {
		if (!cpsw->slaves[i].ndev)
			continue;

		unregister_netdev(cpsw->slaves[i].ndev);
	}
}

static int cpsw_register_ports(struct cpsw_common *cpsw)
{
	int ret = 0, i = 0;

	for (i = 0; i < cpsw->data.slaves; i++) {
		if (!cpsw->slaves[i].ndev)
			continue;

		/* register the network device */
		ret = register_netdev(cpsw->slaves[i].ndev);
		if (ret) {
			dev_err(cpsw->dev,
				"cpsw: error registering net device %d\n", i);
			cpsw->slaves[i].ndev = NULL;
			break;
		}
	}

	if (ret)
		cpsw_unregister_ports(cpsw);
	return ret;
}

bool cpsw_port_dev_check(const struct net_device *ndev)
{
	if (ndev->netdev_ops == &cpsw_netdev_ops) {
		struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

		return !cpsw->data.dual_emac;
	}

	return false;
}

static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw)
{
	int set_val = 0;
	int i;

	if (!cpsw->ale_bypass &&
	    (cpsw->br_members == (ALE_PORT_1 | ALE_PORT_2)))
		set_val = 1;

	dev_dbg(cpsw->dev, "set offload_fwd_mark %d\n", set_val);

	for (i = 0; i < cpsw->data.slaves; i++) {
		struct net_device *sl_ndev = cpsw->slaves[i].ndev;
		struct cpsw_priv *priv;

		/* disabled ports have no ndev */
		if (!sl_ndev)
			continue;

		priv = netdev_priv(sl_ndev);
		priv->offload_fwd_mark = set_val;
	}
}

static int cpsw_netdevice_port_link(struct net_device *ndev,
				    struct net_device *br_ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	if (!cpsw->br_members) {
		cpsw->hw_bridge_dev = br_ndev;
	} else {
		/* Adding the port to a second bridge is not supported */
		if (cpsw->hw_bridge_dev != br_ndev)
			return -EOPNOTSUPP;
	}

	cpsw->br_members |= BIT(priv->emac_port);

	cpsw_port_offload_fwd_mark_update(cpsw);

	return NOTIFY_DONE;
}

static void cpsw_netdevice_port_unlink(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	cpsw->br_members &= ~BIT(priv->emac_port);

	cpsw_port_offload_fwd_mark_update(cpsw);

	if (!cpsw->br_members)
		cpsw->hw_bridge_dev = NULL;
}

/* netdev notifier */
static int cpsw_netdevice_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	int ret = NOTIFY_DONE;

	if (!cpsw_port_dev_check(ndev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;

		if (netif_is_bridge_master(info->upper_dev)) {
			if (info->linking)
				ret = cpsw_netdevice_port_link(ndev,
							       info->upper_dev);
			else
				cpsw_netdevice_port_unlink(ndev);
		}
		break;
	default:
		return NOTIFY_DONE;
	}

	return notifier_from_errno(ret);
}

static struct notifier_block cpsw_netdevice_nb __read_mostly = {
	.notifier_call = cpsw_netdevice_event,
};

static int cpsw_register_notifiers(struct cpsw_common *cpsw)
{
	int ret = 0;

	ret = register_netdevice_notifier(&cpsw_netdevice_nb);
	if (ret) {
		dev_err(cpsw->dev, "can't register netdevice notifier\n");
		return ret;
	}

	ret = cpsw_switchdev_register_notifiers(cpsw);
	if (ret)
		unregister_netdevice_notifier(&cpsw_netdevice_nb);

	return ret;
}

static void cpsw_unregister_notifiers(struct cpsw_common *cpsw)
{
	cpsw_switchdev_unregister_notifiers(cpsw);
	unregister_netdevice_notifier(&cpsw_netdevice_nb);
}

static const struct devlink_ops cpsw_devlink_ops = {
};

static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
				   struct devlink_param_gset_ctx *ctx)
{
	struct cpsw_devlink *dl_priv = devlink_priv(dl);
	struct cpsw_common *cpsw = dl_priv->cpsw;

	dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);

	if (id != CPSW_DL_PARAM_SWITCH_MODE)
		return -EOPNOTSUPP;

	ctx->val.vbool = !cpsw->data.dual_emac;

	return 0;
}

static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
				   struct devlink_param_gset_ctx *ctx)
{
	struct cpsw_devlink *dl_priv = devlink_priv(dl);
	struct cpsw_common *cpsw = dl_priv->cpsw;
	int vlan = cpsw->data.default_vlan;
	bool switch_en = ctx->val.vbool;
	bool if_running = false;
	int i;

	dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);

	if (id != CPSW_DL_PARAM_SWITCH_MODE)
		return -EOPNOTSUPP;

	if (switch_en == !cpsw->data.dual_emac)
		return 0;

	if (!switch_en && cpsw->br_members) {
		dev_err(cpsw->dev, "Remove ports from BR before disabling switch mode\n");
		return -EINVAL;
	}

	rtnl_lock();

	for (i = 0; i < cpsw->data.slaves; i++) {
		struct cpsw_slave *slave = &cpsw->slaves[i];
		struct net_device *sl_ndev = slave->ndev;

		if (!sl_ndev || !netif_running(sl_ndev))
			continue;

		if_running = true;
	}

	if (!if_running) {
		/* all ndevs are down */
		cpsw->data.dual_emac = !switch_en;
		for (i = 0; i < cpsw->data.slaves; i++) {
			struct cpsw_slave *slave = &cpsw->slaves[i];
			struct net_device *sl_ndev = slave->ndev;

			if (!sl_ndev)
				continue;

			if (switch_en)
				vlan = cpsw->data.default_vlan;
			else
				vlan = slave->data->dual_emac_res_vlan;
			slave->port_vlan = vlan;
		}
		goto exit;
	}

	if (switch_en) {
		dev_info(cpsw->dev, "Enable switch mode\n");

		/* enable bypass - no forwarding; all traffic goes to Host */
		cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);

		/* clean up ALE table */
		cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
		cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);

		cpsw_init_host_port_switch(cpsw);

		for (i = 0; i < cpsw->data.slaves; i++) {
			struct cpsw_slave *slave = &cpsw->slaves[i];
			struct net_device *sl_ndev = slave->ndev;
			struct cpsw_priv *priv;

			if (!sl_ndev)
				continue;

			priv = netdev_priv(sl_ndev);
			slave->port_vlan = vlan;
			WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE_VLAN);
			if (netif_running(sl_ndev))
				cpsw_port_add_switch_def_ale_entries(priv,
								     slave);
		}

		cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
		cpsw->data.dual_emac = false;
	} else {
		dev_info(cpsw->dev, "Disable switch mode\n");

		/* enable bypass - no forwarding; all traffic goes to Host */
		cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);

		cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
		cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);

		cpsw_init_host_port_dual_mac(cpsw);

		for (i = 0; i < cpsw->data.slaves; i++) {
			struct cpsw_slave *slave = &cpsw->slaves[i];
			struct net_device *sl_ndev = slave->ndev;
			struct cpsw_priv *priv;

			if (!sl_ndev)
				continue;

			priv = netdev_priv(slave->ndev);
			slave->port_vlan = slave->data->dual_emac_res_vlan;
			WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE);
			cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
		}

		cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
		cpsw->data.dual_emac = true;
	}
exit:
	rtnl_unlock();

	return 0;
}

static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
				struct devlink_param_gset_ctx *ctx)
{
	struct cpsw_devlink *dl_priv = devlink_priv(dl);
	struct cpsw_common *cpsw = dl_priv->cpsw;

	dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);

	switch (id) {
	case CPSW_DL_PARAM_ALE_BYPASS:
		ctx->val.vbool = cpsw_ale_control_get(cpsw->ale, 0, ALE_BYPASS);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id,
				struct devlink_param_gset_ctx *ctx)
{
	struct cpsw_devlink *dl_priv = devlink_priv(dl);
	struct cpsw_common *cpsw = dl_priv->cpsw;
	int ret = -EOPNOTSUPP;

	dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);

	switch (id) {
	case CPSW_DL_PARAM_ALE_BYPASS:
		ret = cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS,
					   ctx->val.vbool);
		if (!ret) {
			cpsw->ale_bypass = ctx->val.vbool;
			cpsw_port_offload_fwd_mark_update(cpsw);
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

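/* Runtime-writable devlink parameters. Example usage with the iproute2
 * devlink tool (the device handle below is illustrative; use the one
 * reported by "devlink dev" on your board):
 *
 *   devlink dev param set platform/4a100000.switch \
 *           name switch_mode value true cmode runtime
 */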
static const struct devlink_param cpsw_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_SWITCH_MODE,
			     "switch_mode", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     cpsw_dl_switch_mode_get, cpsw_dl_switch_mode_set,
			     NULL),
	DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_ALE_BYPASS,
			     "ale_bypass", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     cpsw_dl_ale_ctrl_get, cpsw_dl_ale_ctrl_set, NULL),
};

static int cpsw_register_devlink(struct cpsw_common *cpsw)
{
	struct device *dev = cpsw->dev;
	struct cpsw_devlink *dl_priv;
	int ret = 0;

	cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv));
	if (!cpsw->devlink)
		return -ENOMEM;

	dl_priv = devlink_priv(cpsw->devlink);
	dl_priv->cpsw = cpsw;

	ret = devlink_register(cpsw->devlink, dev);
	if (ret) {
		dev_err(dev, "DL reg fail ret:%d\n", ret);
		goto dl_free;
	}

	ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params,
				      ARRAY_SIZE(cpsw_devlink_params));
	if (ret) {
		dev_err(dev, "DL params reg fail ret:%d\n", ret);
		goto dl_unreg;
	}

	devlink_params_publish(cpsw->devlink);
	return ret;

dl_unreg:
	devlink_unregister(cpsw->devlink);
dl_free:
	devlink_free(cpsw->devlink);
	return ret;
}

static void cpsw_unregister_devlink(struct cpsw_common *cpsw)
{
	devlink_params_unpublish(cpsw->devlink);
	devlink_params_unregister(cpsw->devlink, cpsw_devlink_params,
				  ARRAY_SIZE(cpsw_devlink_params));
	devlink_unregister(cpsw->devlink);
	devlink_free(cpsw->devlink);
}

static const struct of_device_id cpsw_of_mtable[] = {
	{ .compatible = "ti,cpsw-switch"},
	{ .compatible = "ti,am335x-cpsw-switch"},
	{ .compatible = "ti,am4372-cpsw-switch"},
	{ .compatible = "ti,dra7-cpsw-switch"},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cpsw_of_mtable);

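/* AM33xx ES1.0 suffers from an IRQ quirk; when matched, quirk_irq makes the
 * driver fall back to the single-channel cpsw_rx_poll/cpsw_tx_poll NAPI
 * handlers and to TX channel 0 (see cpsw_probe() below).
 */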
static const struct soc_device_attribute cpsw_soc_devices[] = {
	{ .family = "AM33xx", .revision = "ES1.0"},
	{ /* sentinel */ }
};

static int cpsw_probe(struct platform_device *pdev)
{
	const struct soc_device_attribute *soc;
	struct device *dev = &pdev->dev;
	struct cpsw_common *cpsw;
	struct resource *ss_res;
	struct gpio_descs *mode;
	void __iomem *ss_regs;
	int ret = 0, ch;
	struct clk *clk;
	int irq;

	cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
	if (!cpsw)
		return -ENOMEM;

	cpsw_slave_index = cpsw_slave_index_priv;

	cpsw->dev = dev;

	cpsw->slaves = devm_kcalloc(dev,
				    CPSW_SLAVE_PORTS_NUM,
				    sizeof(struct cpsw_slave),
				    GFP_KERNEL);
	if (!cpsw->slaves)
		return -ENOMEM;

	mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
	if (IS_ERR(mode)) {
		ret = PTR_ERR(mode);
		dev_err(dev, "gpio request failed, ret %d\n", ret);
		return ret;
	}

	clk = devm_clk_get(dev, "fck");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(dev, "fck is not found %d\n", ret);
		return ret;
	}
	cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;

	ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ss_regs = devm_ioremap_resource(dev, ss_res);
	if (IS_ERR(ss_regs)) {
		ret = PTR_ERR(ss_regs);
		return ret;
	}
	cpsw->regs = ss_regs;

	irq = platform_get_irq_byname(pdev, "rx");
	if (irq < 0)
		return irq;
	cpsw->irqs_table[0] = irq;

	irq = platform_get_irq_byname(pdev, "tx");
	if (irq < 0)
		return irq;
	cpsw->irqs_table[1] = irq;

	irq = platform_get_irq_byname(pdev, "misc");
	if (irq <= 0)
		return irq;
	cpsw->misc_irq = irq;

	platform_set_drvdata(pdev, cpsw);
	/* This may be required here for child devices. */
	pm_runtime_enable(dev);

	/* Need to enable clocks with runtime PM api to access module
	 * registers
	 */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
		return ret;
	}

	ret = cpsw_probe_dt(cpsw);
	if (ret)
		goto clean_dt_ret;

	soc = soc_device_match(cpsw_soc_devices);
	if (soc)
		cpsw->quirk_irq = true;

	cpsw->rx_packet_max = rx_packet_max;
	cpsw->descs_pool_size = descs_pool_size;
	eth_random_addr(cpsw->base_mac);

	ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
			       (u32 __force)ss_res->start + CPSW2_BD_OFFSET,
			       descs_pool_size);
	if (ret)
		goto clean_dt_ret;

	cpsw->wr_regs = cpsw->version == CPSW_VERSION_1 ?
			ss_regs + CPSW1_WR_OFFSET :
			ss_regs + CPSW2_WR_OFFSET;

	ch = cpsw->quirk_irq ? 0 : 7;
	cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
	if (IS_ERR(cpsw->txv[0].ch)) {
		dev_err(dev, "error initializing tx dma channel\n");
		ret = PTR_ERR(cpsw->txv[0].ch);
		goto clean_cpts;
	}

	cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
	if (IS_ERR(cpsw->rxv[0].ch)) {
		dev_err(dev, "error initializing rx dma channel\n");
		ret = PTR_ERR(cpsw->rxv[0].ch);
		goto clean_cpts;
	}
	cpsw_split_res(cpsw);

	/* setup netdevs */
	ret = cpsw_create_ports(cpsw);
	if (ret)
		goto clean_unregister_netdev;

	/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
	 * MISC IRQs which are always kept disabled with this driver so
	 * we will not request them.
	 *
	 * If anyone wants to implement support for those, make sure to
	 * first request and append them to irqs_table array.
	 */

	ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
			       0, dev_name(dev), cpsw);
	if (ret < 0) {
		dev_err(dev, "error attaching irq (%d)\n", ret);
		goto clean_unregister_netdev;
	}

	ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
			       0, dev_name(dev), cpsw);
	if (ret < 0) {
		dev_err(dev, "error attaching irq (%d)\n", ret);
		goto clean_unregister_netdev;
	}

	if (!cpsw->cpts)
		goto skip_cpts;

	ret = devm_request_irq(dev, cpsw->misc_irq, cpsw_misc_interrupt,
			       0, dev_name(&pdev->dev), cpsw);
	if (ret < 0) {
		dev_err(dev, "error attaching misc irq (%d)\n", ret);
		goto clean_unregister_netdev;
	}

	/* Enable misc CPTS evnt_pend IRQ */
	cpts_set_irqpoll(cpsw->cpts, false);

skip_cpts:
	ret = cpsw_register_notifiers(cpsw);
	if (ret)
		goto clean_unregister_netdev;

	ret = cpsw_register_devlink(cpsw);
	if (ret)
		goto clean_unregister_notifiers;

	ret = cpsw_register_ports(cpsw);
	if (ret)
		goto clean_unregister_notifiers;

	dev_notice(dev, "initialized (regs %pa, pool size %d) hw_ver:%08X %d.%d (%d)\n",
		   &ss_res->start, descs_pool_size,
		   cpsw->version, CPSW_MAJOR_VERSION(cpsw->version),
		   CPSW_MINOR_VERSION(cpsw->version),
		   CPSW_RTL_VERSION(cpsw->version));

	pm_runtime_put(dev);

	return 0;

clean_unregister_notifiers:
	cpsw_unregister_notifiers(cpsw);
clean_unregister_netdev:
	cpsw_unregister_ports(cpsw);
clean_cpts:
	cpts_release(cpsw->cpts);
	cpdma_ctlr_destroy(cpsw->dma);
clean_dt_ret:
	cpsw_remove_dt(cpsw);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}

static int cpsw_remove(struct platform_device *pdev)
{
	struct cpsw_common *cpsw = platform_get_drvdata(pdev);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}

	cpsw_unregister_notifiers(cpsw);
	cpsw_unregister_devlink(cpsw);
	cpsw_unregister_ports(cpsw);

	cpts_release(cpsw->cpts);
	cpdma_ctlr_destroy(cpsw->dma);
	cpsw_remove_dt(cpsw);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static int __maybe_unused cpsw_suspend(struct device *dev)
{
	struct cpsw_common *cpsw = dev_get_drvdata(dev);
	int i;

	rtnl_lock();

	for (i = 0; i < cpsw->data.slaves; i++) {
		struct net_device *ndev = cpsw->slaves[i].ndev;

		if (!(ndev && netif_running(ndev)))
			continue;

		cpsw_ndo_stop(ndev);
	}

	rtnl_unlock();

	/* Select sleep pin state */
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int __maybe_unused cpsw_resume(struct device *dev)
{
	struct cpsw_common *cpsw = dev_get_drvdata(dev);
	int i;

	/* Select default pin state */
	pinctrl_pm_select_default_state(dev);

	/* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
	rtnl_lock();

	for (i = 0; i < cpsw->data.slaves; i++) {
		struct net_device *ndev = cpsw->slaves[i].ndev;

		if (!(ndev && netif_running(ndev)))
			continue;

		cpsw_ndo_open(ndev);
	}

	rtnl_unlock();

	return 0;
}

static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);

static struct platform_driver cpsw_driver = {
	.driver = {
		.name	 = "cpsw-switch",
		.pm	 = &cpsw_pm_ops,
		.of_match_table = cpsw_of_mtable,
	},
	.probe = cpsw_probe,
	.remove = cpsw_remove,
};

module_platform_driver(cpsw_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TI CPSW switchdev Ethernet driver");