1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2020 MediaTek Corporation
4 * Copyright (c) 2020 BayLibre SAS
5 *
6 * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
7 */
8
9#include <linux/bits.h>
10#include <linux/clk.h>
11#include <linux/compiler.h>
12#include <linux/dma-mapping.h>
13#include <linux/etherdevice.h>
14#include <linux/kernel.h>
15#include <linux/mfd/syscon.h>
16#include <linux/mii.h>
17#include <linux/module.h>
18#include <linux/netdevice.h>
19#include <linux/of.h>
20#include <linux/of_mdio.h>
21#include <linux/of_net.h>
22#include <linux/platform_device.h>
23#include <linux/pm.h>
24#include <linux/regmap.h>
25#include <linux/skbuff.h>
26#include <linux/spinlock.h>
27
#define MTK_STAR_DRVNAME			"mtk_star_emac"

/* Polling timeout in microseconds - used as the timeout_us argument of
 * regmap_read_poll_timeout_atomic() in the hash-table helpers.
 */
#define MTK_STAR_WAIT_TIMEOUT			300
#define MTK_STAR_MAX_FRAME_SIZE			1514
#define MTK_STAR_SKB_ALIGNMENT			16
#define MTK_STAR_HASHTABLE_MC_LIMIT		256
#define MTK_STAR_HASHTABLE_SIZE_MAX		512
#define MTK_STAR_DESC_NEEDED			(MAX_SKB_FRAGS + 4)

/* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
 * work for this controller.
 */
#define MTK_STAR_IP_ALIGN			2

/* Clocks consumed by this controller; the array feeds clk_bulk_data. */
static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_NCLKS ARRAY_SIZE(mtk_star_clk_names)
44
/* Register map. Naming convention: REG_* are register offsets, BIT_* are
 * single-bit flags, MSK_* are multi-bit field masks, OFF_* are field shifts
 * and VAL_* are values to be placed in the corresponding field.
 */

/* PHY Control Register 0 */
#define MTK_STAR_REG_PHY_CTRL0			0x0000
#define MTK_STAR_BIT_PHY_CTRL0_WTCMD		BIT(13)
#define MTK_STAR_BIT_PHY_CTRL0_RDCMD		BIT(14)
#define MTK_STAR_BIT_PHY_CTRL0_RWOK		BIT(15)
#define MTK_STAR_MSK_PHY_CTRL0_PREG		GENMASK(12, 8)
#define MTK_STAR_OFF_PHY_CTRL0_PREG		8
#define MTK_STAR_MSK_PHY_CTRL0_RWDATA		GENMASK(31, 16)
#define MTK_STAR_OFF_PHY_CTRL0_RWDATA		16

/* PHY Control Register 1 */
#define MTK_STAR_REG_PHY_CTRL1			0x0004
#define MTK_STAR_BIT_PHY_CTRL1_LINK_ST		BIT(0)
#define MTK_STAR_BIT_PHY_CTRL1_AN_EN		BIT(8)
#define MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD	9
#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M	0x00
#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M	0x01
#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M	0x02
#define MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX	BIT(11)
#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX	BIT(12)
#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX	BIT(13)

/* MAC Configuration Register */
#define MTK_STAR_REG_MAC_CFG			0x0008
#define MTK_STAR_OFF_MAC_CFG_IPG		10
#define MTK_STAR_VAL_MAC_CFG_IPG_96BIT		GENMASK(4, 0)
#define MTK_STAR_BIT_MAC_CFG_MAXLEN_1522	BIT(16)
#define MTK_STAR_BIT_MAC_CFG_AUTO_PAD		BIT(19)
#define MTK_STAR_BIT_MAC_CFG_CRC_STRIP		BIT(20)
#define MTK_STAR_BIT_MAC_CFG_VLAN_STRIP		BIT(22)
#define MTK_STAR_BIT_MAC_CFG_NIC_PD		BIT(31)

/* Flow-Control Configuration Register */
#define MTK_STAR_REG_FC_CFG			0x000c
#define MTK_STAR_BIT_FC_CFG_BP_EN		BIT(7)
#define MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR	BIT(8)
#define MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH	16
#define MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH	GENMASK(27, 16)
#define MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K	0x800

/* ARL Configuration Register */
#define MTK_STAR_REG_ARL_CFG			0x0010
#define MTK_STAR_BIT_ARL_CFG_HASH_ALG		BIT(0)
#define MTK_STAR_BIT_ARL_CFG_MISC_MODE		BIT(4)

/* MAC High and Low Bytes Registers */
#define MTK_STAR_REG_MY_MAC_H			0x0014
#define MTK_STAR_REG_MY_MAC_L			0x0018

/* Hash Table Control Register */
#define MTK_STAR_REG_HASH_CTRL			0x001c
#define MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR	GENMASK(8, 0)
#define MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA	BIT(12)
#define MTK_STAR_BIT_HASH_CTRL_ACC_CMD		BIT(13)
#define MTK_STAR_BIT_HASH_CTRL_CMD_START	BIT(14)
#define MTK_STAR_BIT_HASH_CTRL_BIST_OK		BIT(16)
#define MTK_STAR_BIT_HASH_CTRL_BIST_DONE	BIT(17)
#define MTK_STAR_BIT_HASH_CTRL_BIST_EN		BIT(31)

/* TX DMA Control Register */
#define MTK_STAR_REG_TX_DMA_CTRL		0x0034
#define MTK_STAR_BIT_TX_DMA_CTRL_START		BIT(0)
#define MTK_STAR_BIT_TX_DMA_CTRL_STOP		BIT(1)
#define MTK_STAR_BIT_TX_DMA_CTRL_RESUME		BIT(2)

/* RX DMA Control Register */
#define MTK_STAR_REG_RX_DMA_CTRL		0x0038
#define MTK_STAR_BIT_RX_DMA_CTRL_START		BIT(0)
#define MTK_STAR_BIT_RX_DMA_CTRL_STOP		BIT(1)
#define MTK_STAR_BIT_RX_DMA_CTRL_RESUME		BIT(2)

/* DMA Address Registers */
#define MTK_STAR_REG_TX_DPTR			0x003c
#define MTK_STAR_REG_RX_DPTR			0x0040
#define MTK_STAR_REG_TX_BASE_ADDR		0x0044
#define MTK_STAR_REG_RX_BASE_ADDR		0x0048

/* Interrupt Status Register */
#define MTK_STAR_REG_INT_STS			0x0050
#define MTK_STAR_REG_INT_STS_PORT_STS_CHG	BIT(2)
#define MTK_STAR_REG_INT_STS_MIB_CNT_TH		BIT(3)
#define MTK_STAR_BIT_INT_STS_FNRC		BIT(6)
#define MTK_STAR_BIT_INT_STS_TNTC		BIT(8)

/* Interrupt Mask Register - a set bit masks (disables) the interrupt. */
#define MTK_STAR_REG_INT_MASK			0x0054
#define MTK_STAR_BIT_INT_MASK_FNRC		BIT(6)

/* Delay-Macro Register */
#define MTK_STAR_REG_TEST0			0x0058
#define MTK_STAR_BIT_INV_RX_CLK			BIT(30)
#define MTK_STAR_BIT_INV_TX_CLK			BIT(31)

/* Misc. Config Register */
#define MTK_STAR_REG_TEST1			0x005c
#define MTK_STAR_BIT_TEST1_RST_HASH_MBIST	BIT(31)

/* Extended Configuration Register */
#define MTK_STAR_REG_EXT_CFG			0x0060
#define MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS	16
#define MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS	GENMASK(26, 16)
#define MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K	0x400

/* EthSys Configuration Register */
#define MTK_STAR_REG_SYS_CONF			0x0094
#define MTK_STAR_BIT_MII_PAD_OUT_ENABLE		BIT(0)
#define MTK_STAR_BIT_EXT_MDC_MODE		BIT(1)
#define MTK_STAR_BIT_SWC_MII_MODE		BIT(2)

/* MAC Clock Configuration Register */
#define MTK_STAR_REG_MAC_CLK_CONF		0x00ac
#define MTK_STAR_MSK_MAC_CLK_CONF		GENMASK(7, 0)
#define MTK_STAR_BIT_CLK_DIV_10			0x0a
#define MTK_STAR_BIT_CLK_DIV_50			0x32

/* Counter registers. */
#define MTK_STAR_REG_C_RXOKPKT			0x0100
#define MTK_STAR_REG_C_RXOKBYTE			0x0104
#define MTK_STAR_REG_C_RXRUNT			0x0108
#define MTK_STAR_REG_C_RXLONG			0x010c
#define MTK_STAR_REG_C_RXDROP			0x0110
#define MTK_STAR_REG_C_RXCRC			0x0114
#define MTK_STAR_REG_C_RXARLDROP		0x0118
#define MTK_STAR_REG_C_RXVLANDROP		0x011c
#define MTK_STAR_REG_C_RXCSERR			0x0120
#define MTK_STAR_REG_C_RXPAUSE			0x0124
#define MTK_STAR_REG_C_TXOKPKT			0x0128
#define MTK_STAR_REG_C_TXOKBYTE			0x012c
#define MTK_STAR_REG_C_TXPAUSECOL		0x0130
#define MTK_STAR_REG_C_TXRTY			0x0134
#define MTK_STAR_REG_C_TXSKIP			0x0138
#define MTK_STAR_REG_C_TX_ARP			0x013c
#define MTK_STAR_REG_C_RX_RERR			0x01d8
#define MTK_STAR_REG_C_RX_UNI			0x01dc
#define MTK_STAR_REG_C_RX_MULTI			0x01e0
#define MTK_STAR_REG_C_RX_BROAD			0x01e4
#define MTK_STAR_REG_C_RX_ALIGNERR		0x01e8
#define MTK_STAR_REG_C_TX_UNI			0x01ec
#define MTK_STAR_REG_C_TX_MULTI			0x01f0
#define MTK_STAR_REG_C_TX_BROAD			0x01f4
#define MTK_STAR_REG_C_TX_TIMEOUT		0x01f8
#define MTK_STAR_REG_C_TX_LATECOL		0x01fc
#define MTK_STAR_REG_C_RX_LENGTHERR		0x0214
#define MTK_STAR_REG_C_RX_TWIST			0x0218

/* Ethernet CFG Control (registers in the pericfg syscon, not in priv->regs) */
#define MTK_PERICFG_REG_NIC_CFG0_CON		0x03c4
#define MTK_PERICFG_REG_NIC_CFG1_CON		0x03c8
#define MTK_PERICFG_REG_NIC_CFG_CON_V2		0x0c10
#define MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF	GENMASK(3, 0)
#define MTK_PERICFG_BIT_NIC_CFG_CON_MII		0
#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII	1
#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK		BIT(0)
#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2	BIT(8)
199
/* Represents the actual structure of descriptors used by the MAC. We can
 * reuse the same structure for both TX and RX - the layout is the same, only
 * the flags differ slightly.
 */
struct mtk_star_ring_desc {
	/* Contains both the status flags as well as packet length. */
	u32 status;
	/* DMA address of the packet buffer. */
	u32 data_ptr;
	u32 vtag;
	u32 reserved;
};

/* Layout of mtk_star_ring_desc::status: length in the low 16 bits, flag
 * bits above. COWN = CPU ownership, EOR = end of ring (wrap marker),
 * FS/LS = first/last segment, INT = raise interrupt on completion.
 */
#define MTK_STAR_DESC_MSK_LEN			GENMASK(15, 0)
#define MTK_STAR_DESC_BIT_RX_CRCE		BIT(24)
#define MTK_STAR_DESC_BIT_RX_OSIZE		BIT(25)
#define MTK_STAR_DESC_BIT_INT			BIT(27)
#define MTK_STAR_DESC_BIT_LS			BIT(28)
#define MTK_STAR_DESC_BIT_FS			BIT(29)
#define MTK_STAR_DESC_BIT_EOR			BIT(30)
#define MTK_STAR_DESC_BIT_COWN			BIT(31)
220
/* Helper structure for storing data read from/written to descriptors in order
 * to limit reads from/writes to DMA memory.
 */
struct mtk_star_ring_desc_data {
	unsigned int len;
	unsigned int flags;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

/* Ring geometry: 512 descriptors per direction, TX and RX rings allocated
 * back-to-back in one DMA region (TX first, then RX - see
 * mtk_star_dma_init()).
 */
#define MTK_STAR_RING_NUM_DESCS			512
#define MTK_STAR_TX_THRESH			(MTK_STAR_RING_NUM_DESCS / 4)
#define MTK_STAR_NUM_TX_DESCS			MTK_STAR_RING_NUM_DESCS
#define MTK_STAR_NUM_RX_DESCS			MTK_STAR_RING_NUM_DESCS
#define MTK_STAR_NUM_DESCS_TOTAL		(MTK_STAR_RING_NUM_DESCS * 2)
#define MTK_STAR_DMA_SIZE \
		(MTK_STAR_NUM_DESCS_TOTAL * sizeof(struct mtk_star_ring_desc))
238
/* Driver-side state of one DMA ring: the mapped descriptor array plus
 * shadow copies of each descriptor's skb and DMA handle (kept here to
 * avoid reading them back from DMA memory), and producer (head) /
 * consumer (tail) indices.
 */
struct mtk_star_ring {
	struct mtk_star_ring_desc *descs;
	struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS];
	dma_addr_t dma_addrs[MTK_STAR_RING_NUM_DESCS];
	unsigned int head;
	unsigned int tail;
};
246
/* Per-SoC data: hook for configuring the MII interface mode and the value
 * programmed into the MAC_CLK_CONF divider field (see
 * mtk_star_init_config()).
 */
struct mtk_star_compat {
	int (*set_interface_mode)(struct net_device *ndev);
	unsigned char bit_clk_div;
};
251
/* Driver private state, embedded in the net_device. */
struct mtk_star_priv {
	struct net_device *ndev;

	/* MAC register space and the pericfg syscon regmap. */
	struct regmap *regs;
	struct regmap *pericfg;

	struct clk_bulk_data clks[MTK_STAR_NCLKS];

	/* One DMA-coherent region holding both rings: TX descriptors first,
	 * then RX (see mtk_star_dma_init()).
	 */
	void *ring_base;
	struct mtk_star_ring_desc *descs_base;
	dma_addr_t dma_addr;
	struct mtk_star_ring tx_ring;
	struct mtk_star_ring rx_ring;

	struct mii_bus *mii;
	struct napi_struct tx_napi;
	struct napi_struct rx_napi;

	/* PHY handling; link/speed/pause cache the last state seen by
	 * mtk_star_adjust_link().
	 */
	struct device_node *phy_node;
	phy_interface_t phy_intf;
	struct phy_device *phydev;
	unsigned int link;
	int speed;
	int duplex;
	int pause;
	/* RMII clock options - presumably set from DT; not used in this
	 * part of the file.
	 */
	bool rmii_rxc;
	bool rx_inv;
	bool tx_inv;

	const struct mtk_star_compat *compat_data;

	/* Protects against concurrent descriptor access. */
	spinlock_t lock;

	struct rtnl_link_stats64 stats;
};
288
289static struct device *mtk_star_get_dev(struct mtk_star_priv *priv)
290{
291	return priv->ndev->dev.parent;
292}
293
/* All MAC registers are 32-bit at a 4-byte stride. Regmap's internal
 * locking is disabled; call sites that need serialization use priv->lock
 * (see the IRQ path) - NOTE(review): confirm all unlocked accesses are safe.
 */
static const struct regmap_config mtk_star_regmap_config = {
	.reg_bits		= 32,
	.val_bits		= 32,
	.reg_stride		= 4,
	.disable_locking	= true,
};
300
301static void mtk_star_ring_init(struct mtk_star_ring *ring,
302			       struct mtk_star_ring_desc *descs)
303{
304	memset(ring, 0, sizeof(*ring));
305	ring->descs = descs;
306	ring->head = 0;
307	ring->tail = 0;
308}
309
/* Reclaim the descriptor at the ring's tail if the hardware has handed it
 * back (COWN set). On success the packet length, flag bits, DMA handle and
 * skb are returned through @desc_data, the descriptor is reset (keeping
 * only COWN and the EOR wrap marker) and the tail index advances.
 *
 * Returns 0 on success, -1 if the descriptor is still owned by hardware.
 */
static int mtk_star_ring_pop_tail(struct mtk_star_ring *ring,
				  struct mtk_star_ring_desc_data *desc_data)
{
	struct mtk_star_ring_desc *desc = &ring->descs[ring->tail];
	unsigned int status;

	status = READ_ONCE(desc->status);
	dma_rmb(); /* Make sure we read the status bits before checking it. */

	if (!(status & MTK_STAR_DESC_BIT_COWN))
		return -1;

	/* Low 16 bits carry the length, the rest are flag bits. */
	desc_data->len = status & MTK_STAR_DESC_MSK_LEN;
	desc_data->flags = status & ~MTK_STAR_DESC_MSK_LEN;
	desc_data->dma_addr = ring->dma_addrs[ring->tail];
	desc_data->skb = ring->skbs[ring->tail];

	ring->dma_addrs[ring->tail] = 0;
	ring->skbs[ring->tail] = NULL;

	status &= MTK_STAR_DESC_BIT_COWN | MTK_STAR_DESC_BIT_EOR;

	WRITE_ONCE(desc->data_ptr, 0);
	WRITE_ONCE(desc->status, status);

	ring->tail = (ring->tail + 1) % MTK_STAR_RING_NUM_DESCS;

	return 0;
}
339
/* Fill the descriptor at the ring's head with @desc_data (buffer address,
 * length and optional extra status @flags) and hand it to the hardware.
 *
 * The status word is written twice on purpose: first with COWN still set
 * (descriptor stays CPU-owned, hardware ignores it), then - after a
 * dma_wmb() ensuring all prior descriptor writes are visible - with COWN
 * cleared, which transfers ownership to the hardware.
 */
static void mtk_star_ring_push_head(struct mtk_star_ring *ring,
				    struct mtk_star_ring_desc_data *desc_data,
				    unsigned int flags)
{
	struct mtk_star_ring_desc *desc = &ring->descs[ring->head];
	unsigned int status;

	status = READ_ONCE(desc->status);

	ring->skbs[ring->head] = desc_data->skb;
	ring->dma_addrs[ring->head] = desc_data->dma_addr;

	status |= desc_data->len;
	if (flags)
		status |= flags;

	WRITE_ONCE(desc->data_ptr, desc_data->dma_addr);
	WRITE_ONCE(desc->status, status);
	status &= ~MTK_STAR_DESC_BIT_COWN;
	/* Flush previous modifications before ownership change. */
	dma_wmb();
	WRITE_ONCE(desc->status, status);

	ring->head = (ring->head + 1) % MTK_STAR_RING_NUM_DESCS;
}
365
/* Hand an RX buffer to the hardware - RX descriptors need no extra flags. */
static void
mtk_star_ring_push_head_rx(struct mtk_star_ring *ring,
			   struct mtk_star_ring_desc_data *desc_data)
{
	mtk_star_ring_push_head(ring, desc_data, 0);
}
372
/* Queue a TX frame: each descriptor carries a complete frame (first and
 * last segment) and requests a completion interrupt.
 */
static void
mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
			   struct mtk_star_ring_desc_data *desc_data)
{
	static const unsigned int flags = MTK_STAR_DESC_BIT_FS |
					  MTK_STAR_DESC_BIT_LS |
					  MTK_STAR_DESC_BIT_INT;

	mtk_star_ring_push_head(ring, desc_data, flags);
}
383
384static unsigned int mtk_star_tx_ring_avail(struct mtk_star_ring *ring)
385{
386	u32 avail;
387
388	if (ring->tail > ring->head)
389		avail = ring->tail - ring->head - 1;
390	else
391		avail = MTK_STAR_RING_NUM_DESCS - ring->head + ring->tail - 1;
392
393	return avail;
394}
395
/* Map an RX skb's buffer for device writes. The buffer handed to the
 * controller starts 2 bytes before the tail pointer so the address meets
 * the 4N + 2 requirement (which in turn IP-aligns the payload, see
 * MTK_STAR_IP_ALIGN).
 */
static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
				      struct sk_buff *skb)
{
	struct device *dev = mtk_star_get_dev(priv);

	/* Data pointer for the RX DMA descriptor must be aligned to 4N + 2. */
	return dma_map_single(dev, skb_tail_pointer(skb) - 2,
			      skb_tailroom(skb), DMA_FROM_DEVICE);
}
405
/* Undo mtk_star_dma_map_rx() for a received buffer. The size must match
 * the mapping (skb_tailroom() of the same skb).
 */
static void mtk_star_dma_unmap_rx(struct mtk_star_priv *priv,
				  struct mtk_star_ring_desc_data *desc_data)
{
	struct device *dev = mtk_star_get_dev(priv);

	dma_unmap_single(dev, desc_data->dma_addr,
			 skb_tailroom(desc_data->skb), DMA_FROM_DEVICE);
}
414
/* Map a TX skb's linear data (head only - no fragments) for device reads. */
static dma_addr_t mtk_star_dma_map_tx(struct mtk_star_priv *priv,
				      struct sk_buff *skb)
{
	struct device *dev = mtk_star_get_dev(priv);

	return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
}
422
423static void mtk_star_dma_unmap_tx(struct mtk_star_priv *priv,
424				  struct mtk_star_ring_desc_data *desc_data)
425{
426	struct device *dev = mtk_star_get_dev(priv);
427
428	return dma_unmap_single(dev, desc_data->dma_addr,
429				skb_headlen(desc_data->skb), DMA_TO_DEVICE);
430}
431
/* Clear the NIC power-down (NIC_PD) bit in MAC_CFG. */
static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
{
	regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
			  MTK_STAR_BIT_MAC_CFG_NIC_PD);
}
437
438static void mtk_star_enable_dma_irq(struct mtk_star_priv *priv,
439				    bool rx, bool tx)
440{
441	u32 value;
442
443	regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
444
445	if (tx)
446		value &= ~MTK_STAR_BIT_INT_STS_TNTC;
447	if (rx)
448		value &= ~MTK_STAR_BIT_INT_STS_FNRC;
449
450	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
451}
452
453static void mtk_star_disable_dma_irq(struct mtk_star_priv *priv,
454				     bool rx, bool tx)
455{
456	u32 value;
457
458	regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
459
460	if (tx)
461		value |= MTK_STAR_BIT_INT_STS_TNTC;
462	if (rx)
463		value |= MTK_STAR_BIT_INT_STS_FNRC;
464
465	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
466}
467
/* Unmask the three interrupts we care about (TX complete, RX complete and
 * MIB counter threshold), mask all others.
 */
static void mtk_star_intr_enable(struct mtk_star_priv *priv)
{
	unsigned int val = MTK_STAR_BIT_INT_STS_TNTC |
			   MTK_STAR_BIT_INT_STS_FNRC |
			   MTK_STAR_REG_INT_STS_MIB_CNT_TH;

	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~val);
}
477
/* Mask all interrupts. */
static void mtk_star_intr_disable(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
}
482
/* Read the raw interrupt status and write it back to acknowledge all
 * pending interrupts (the status bits appear to be write-1-to-clear).
 * Returns the status that was pending.
 */
static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
{
	unsigned int val;

	regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
	regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);

	return val;
}
492
/* Initialize both descriptor rings in the shared DMA region (TX first,
 * then RX) and program the ring base/current pointers into the hardware.
 * All descriptors start CPU-owned (COWN); the last descriptor of each ring
 * gets the EOR wrap marker.
 */
static void mtk_star_dma_init(struct mtk_star_priv *priv)
{
	struct mtk_star_ring_desc *desc;
	unsigned int val;
	int i;

	priv->descs_base = (struct mtk_star_ring_desc *)priv->ring_base;

	for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) {
		desc = &priv->descs_base[i];

		memset(desc, 0, sizeof(*desc));
		desc->status = MTK_STAR_DESC_BIT_COWN;
		if ((i == MTK_STAR_NUM_TX_DESCS - 1) ||
		    (i == MTK_STAR_NUM_DESCS_TOTAL - 1))
			desc->status |= MTK_STAR_DESC_BIT_EOR;
	}

	mtk_star_ring_init(&priv->tx_ring, priv->descs_base);
	mtk_star_ring_init(&priv->rx_ring,
			   priv->descs_base + MTK_STAR_NUM_TX_DESCS);

	/* Set DMA pointers. */
	val = (unsigned int)priv->dma_addr;
	regmap_write(priv->regs, MTK_STAR_REG_TX_BASE_ADDR, val);
	regmap_write(priv->regs, MTK_STAR_REG_TX_DPTR, val);

	/* RX ring follows the TX descriptors in the same DMA allocation. */
	val += sizeof(struct mtk_star_ring_desc) * MTK_STAR_NUM_TX_DESCS;
	regmap_write(priv->regs, MTK_STAR_REG_RX_BASE_ADDR, val);
	regmap_write(priv->regs, MTK_STAR_REG_RX_DPTR, val);
}
524
/* Start both DMA engines. */
static void mtk_star_dma_start(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
			MTK_STAR_BIT_TX_DMA_CTRL_START);
	regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
			MTK_STAR_BIT_RX_DMA_CTRL_START);
}
532
/* Stop both DMA engines (plain write - clears START/RESUME as well). */
static void mtk_star_dma_stop(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
		     MTK_STAR_BIT_TX_DMA_CTRL_STOP);
	regmap_write(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
		     MTK_STAR_BIT_RX_DMA_CTRL_STOP);
}
540
/* Stop DMA and reclaim ownership of every descriptor by setting COWN, so
 * the hardware can no longer touch them.
 */
static void mtk_star_dma_disable(struct mtk_star_priv *priv)
{
	int i;

	mtk_star_dma_stop(priv);

	/* Take back all descriptors. */
	for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++)
		priv->descs_base[i].status |= MTK_STAR_DESC_BIT_COWN;
}
551
/* Kick the RX DMA engine after it suspended (e.g. ran out of descriptors). */
static void mtk_star_dma_resume_rx(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
			MTK_STAR_BIT_RX_DMA_CTRL_RESUME);
}
557
/* Kick the TX DMA engine after it suspended. */
static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
			MTK_STAR_BIT_TX_DMA_CTRL_RESUME);
}
563
564static void mtk_star_set_mac_addr(struct net_device *ndev)
565{
566	struct mtk_star_priv *priv = netdev_priv(ndev);
567	const u8 *mac_addr = ndev->dev_addr;
568	unsigned int high, low;
569
570	high = mac_addr[0] << 8 | mac_addr[1] << 0;
571	low = mac_addr[2] << 24 | mac_addr[3] << 16 |
572	      mac_addr[4] << 8 | mac_addr[5];
573
574	regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_H, high);
575	regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_L, low);
576}
577
/* Reset all hardware MIB counters by reading each one - presumably the
 * counters are clear-on-read (the read values are discarded); TODO(review):
 * confirm against the datasheet.
 */
static void mtk_star_reset_counters(struct mtk_star_priv *priv)
{
	static const unsigned int counter_regs[] = {
		MTK_STAR_REG_C_RXOKPKT,
		MTK_STAR_REG_C_RXOKBYTE,
		MTK_STAR_REG_C_RXRUNT,
		MTK_STAR_REG_C_RXLONG,
		MTK_STAR_REG_C_RXDROP,
		MTK_STAR_REG_C_RXCRC,
		MTK_STAR_REG_C_RXARLDROP,
		MTK_STAR_REG_C_RXVLANDROP,
		MTK_STAR_REG_C_RXCSERR,
		MTK_STAR_REG_C_RXPAUSE,
		MTK_STAR_REG_C_TXOKPKT,
		MTK_STAR_REG_C_TXOKBYTE,
		MTK_STAR_REG_C_TXPAUSECOL,
		MTK_STAR_REG_C_TXRTY,
		MTK_STAR_REG_C_TXSKIP,
		MTK_STAR_REG_C_TX_ARP,
		MTK_STAR_REG_C_RX_RERR,
		MTK_STAR_REG_C_RX_UNI,
		MTK_STAR_REG_C_RX_MULTI,
		MTK_STAR_REG_C_RX_BROAD,
		MTK_STAR_REG_C_RX_ALIGNERR,
		MTK_STAR_REG_C_TX_UNI,
		MTK_STAR_REG_C_TX_MULTI,
		MTK_STAR_REG_C_TX_BROAD,
		MTK_STAR_REG_C_TX_TIMEOUT,
		MTK_STAR_REG_C_TX_LATECOL,
		MTK_STAR_REG_C_RX_LENGTHERR,
		MTK_STAR_REG_C_RX_TWIST,
	};

	unsigned int i, val;

	for (i = 0; i < ARRAY_SIZE(counter_regs); i++)
		regmap_read(priv->regs, counter_regs[i], &val);
}
616
/* Read one hardware counter and accumulate its value into *stat. */
static void mtk_star_update_stat(struct mtk_star_priv *priv,
				 unsigned int reg, u64 *stat)
{
	unsigned int val;

	regmap_read(priv->regs, reg, &val);
	*stat += val;
}
625
/* Try to get as many stats as possible from the internal registers instead
 * of tracking them ourselves. Values are accumulated into priv->stats.
 */
static void mtk_star_update_stats(struct mtk_star_priv *priv)
{
	struct rtnl_link_stats64 *stats = &priv->stats;

	/* OK packets and bytes. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKPKT, &stats->rx_packets);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKPKT, &stats->tx_packets);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKBYTE, &stats->rx_bytes);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKBYTE, &stats->tx_bytes);

	/* RX & TX multicast - both directions accumulate into the single
	 * 'multicast' counter.
	 */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_MULTI, &stats->multicast);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_MULTI, &stats->multicast);

	/* Collisions. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXPAUSECOL,
			     &stats->collisions);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_LATECOL,
			     &stats->collisions);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXRUNT, &stats->collisions);

	/* RX Errors. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_LENGTHERR,
			     &stats->rx_length_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXLONG,
			     &stats->rx_over_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXCRC, &stats->rx_crc_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_ALIGNERR,
			     &stats->rx_frame_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXDROP,
			     &stats->rx_fifo_errors);
	/* Sum of the general RX error counter + all of the above. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_RERR, &stats->rx_errors);
	stats->rx_errors += stats->rx_length_errors;
	stats->rx_errors += stats->rx_over_errors;
	stats->rx_errors += stats->rx_crc_errors;
	stats->rx_errors += stats->rx_frame_errors;
	stats->rx_errors += stats->rx_fifo_errors;
}
668
669static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev)
670{
671	uintptr_t tail, offset;
672	struct sk_buff *skb;
673
674	skb = dev_alloc_skb(MTK_STAR_MAX_FRAME_SIZE);
675	if (!skb)
676		return NULL;
677
678	/* Align to 16 bytes. */
679	tail = (uintptr_t)skb_tail_pointer(skb);
680	if (tail & (MTK_STAR_SKB_ALIGNMENT - 1)) {
681		offset = tail & (MTK_STAR_SKB_ALIGNMENT - 1);
682		skb_reserve(skb, MTK_STAR_SKB_ALIGNMENT - offset);
683	}
684
685	/* Ensure 16-byte alignment of the skb pointer: eth_type_trans() will
686	 * extract the Ethernet header (14 bytes) so we need two more bytes.
687	 */
688	skb_reserve(skb, MTK_STAR_IP_ALIGN);
689
690	return skb;
691}
692
/* Allocate and DMA-map one skb per RX descriptor and hand every descriptor
 * to the hardware (clear COWN). Returns 0 or -ENOMEM.
 *
 * NOTE(review): on mid-loop failure the entries already set up are NOT
 * released here - the caller is responsible for cleaning them up (e.g. via
 * mtk_star_free_rx_skbs(), which safely skips unused slots).
 */
static int mtk_star_prepare_rx_skbs(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct mtk_star_ring *ring = &priv->rx_ring;
	struct device *dev = mtk_star_get_dev(priv);
	struct mtk_star_ring_desc *desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int i;

	for (i = 0; i < MTK_STAR_NUM_RX_DESCS; i++) {
		skb = mtk_star_alloc_skb(ndev);
		if (!skb)
			return -ENOMEM;

		dma_addr = mtk_star_dma_map_rx(priv, skb);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb(skb);
			return -ENOMEM;
		}

		desc = &ring->descs[i];
		desc->data_ptr = dma_addr;
		/* Buffer size in the length field; clearing COWN gives the
		 * descriptor to the hardware.
		 */
		desc->status |= skb_tailroom(skb) & MTK_STAR_DESC_MSK_LEN;
		desc->status &= ~MTK_STAR_DESC_BIT_COWN;
		ring->skbs[i] = skb;
		ring->dma_addrs[i] = dma_addr;
	}

	return 0;
}
724
/* Unmap and free every skb still attached to a ring, using the supplied
 * direction-specific unmap callback. Slots with a zero DMA handle were
 * never populated (or were already reclaimed) and are skipped.
 */
static void
mtk_star_ring_free_skbs(struct mtk_star_priv *priv, struct mtk_star_ring *ring,
			void (*unmap_func)(struct mtk_star_priv *,
					   struct mtk_star_ring_desc_data *))
{
	struct mtk_star_ring_desc_data desc_data;
	int i;

	for (i = 0; i < MTK_STAR_RING_NUM_DESCS; i++) {
		if (!ring->dma_addrs[i])
			continue;

		desc_data.dma_addr = ring->dma_addrs[i];
		desc_data.skb = ring->skbs[i];

		unmap_func(priv, &desc_data);
		dev_kfree_skb(desc_data.skb);
	}
}
744
/* Release all RX buffers still held by the RX ring. */
static void mtk_star_free_rx_skbs(struct mtk_star_priv *priv)
{
	struct mtk_star_ring *ring = &priv->rx_ring;

	mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_rx);
}
751
/* Release all TX buffers still held by the TX ring. */
static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
{
	struct mtk_star_ring *ring = &priv->tx_ring;

	mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
}
758
759/**
760 * mtk_star_handle_irq - Interrupt Handler.
761 * @irq: interrupt number.
762 * @data: pointer to a network interface device structure.
763 * Description : this is the driver interrupt service routine.
764 * it mainly handles:
765 *  1. tx complete interrupt for frame transmission.
766 *  2. rx complete interrupt for frame reception.
767 *  3. MAC Management Counter interrupt to avoid counter overflow.
768 **/
769static irqreturn_t mtk_star_handle_irq(int irq, void *data)
770{
771	struct net_device *ndev = data;
772	struct mtk_star_priv *priv = netdev_priv(ndev);
773	unsigned int intr_status = mtk_star_intr_ack_all(priv);
774	bool rx, tx;
775
776	rx = (intr_status & MTK_STAR_BIT_INT_STS_FNRC) &&
777	     napi_schedule_prep(&priv->rx_napi);
778	tx = (intr_status & MTK_STAR_BIT_INT_STS_TNTC) &&
779	     napi_schedule_prep(&priv->tx_napi);
780
781	if (rx || tx) {
782		spin_lock(&priv->lock);
783		/* mask Rx and TX Complete interrupt */
784		mtk_star_disable_dma_irq(priv, rx, tx);
785		spin_unlock(&priv->lock);
786
787		if (rx)
788			__napi_schedule(&priv->rx_napi);
789		if (tx)
790			__napi_schedule(&priv->tx_napi);
791	}
792
793	/* interrupt is triggered once any counters reach 0x8000000 */
794	if (intr_status & MTK_STAR_REG_INT_STS_MIB_CNT_TH) {
795		mtk_star_update_stats(priv);
796		mtk_star_reset_counters(priv);
797	}
798
799	return IRQ_HANDLED;
800}
801
/* Wait for the completion of any previous command - CMD_START bit must be
 * cleared by hardware. Polls every 10us for up to MTK_STAR_WAIT_TIMEOUT us;
 * returns 0 or -ETIMEDOUT.
 */
static int mtk_star_hash_wait_cmd_start(struct mtk_star_priv *priv)
{
	unsigned int val;

	return regmap_read_poll_timeout_atomic(priv->regs,
				MTK_STAR_REG_HASH_CTRL, val,
				!(val & MTK_STAR_BIT_HASH_CTRL_CMD_START),
				10, MTK_STAR_WAIT_TIMEOUT);
}
814
/* Wait for a hash-table operation to finish (BIST_DONE), then verify it
 * succeeded (BIST_OK). Returns 0, -ETIMEDOUT or -EIO.
 */
static int mtk_star_hash_wait_ok(struct mtk_star_priv *priv)
{
	unsigned int val;
	int ret;

	/* Wait for BIST_DONE bit. */
	ret = regmap_read_poll_timeout_atomic(priv->regs,
					MTK_STAR_REG_HASH_CTRL, val,
					val & MTK_STAR_BIT_HASH_CTRL_BIST_DONE,
					10, MTK_STAR_WAIT_TIMEOUT);
	if (ret)
		return ret;

	/* Check the BIST_OK bit. */
	if (!regmap_test_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
			      MTK_STAR_BIT_HASH_CTRL_BIST_OK))
		return -EIO;

	return 0;
}
835
/* Set (write 1 to) the hash-table bit at @hash_addr: wait for the previous
 * command to complete, issue a write command with HASH_BIT_DATA set, then
 * wait for the operation to succeed.
 */
static int mtk_star_set_hashbit(struct mtk_star_priv *priv,
				unsigned int hash_addr)
{
	unsigned int val;
	int ret;

	ret = mtk_star_hash_wait_cmd_start(priv);
	if (ret)
		return ret;

	val = hash_addr & MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR;
	val |= MTK_STAR_BIT_HASH_CTRL_ACC_CMD;
	val |= MTK_STAR_BIT_HASH_CTRL_CMD_START;
	val |= MTK_STAR_BIT_HASH_CTRL_BIST_EN;
	val |= MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA;
	regmap_write(priv->regs, MTK_STAR_REG_HASH_CTRL, val);

	return mtk_star_hash_wait_ok(priv);
}
855
/* Clear the whole multicast hash table via the hash-MBIST reset and wait
 * for it to complete successfully.
 */
static int mtk_star_reset_hash_table(struct mtk_star_priv *priv)
{
	int ret;

	ret = mtk_star_hash_wait_cmd_start(priv);
	if (ret)
		return ret;

	regmap_set_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
			MTK_STAR_BIT_HASH_CTRL_BIST_EN);
	regmap_set_bits(priv->regs, MTK_STAR_REG_TEST1,
			MTK_STAR_BIT_TEST1_RST_HASH_MBIST);

	return mtk_star_hash_wait_ok(priv);
}
871
/* Program PHY_CTRL1 and the flow-control registers from the link state
 * cached in priv (speed/pause) by mtk_star_adjust_link().
 */
static void mtk_star_phy_config(struct mtk_star_priv *priv)
{
	unsigned int val;

	if (priv->speed == SPEED_1000)
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M;
	else if (priv->speed == SPEED_100)
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M;
	else
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M;
	val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;

	val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
	if (priv->pause) {
		val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
		val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
		val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
	} else {
		/* No-op: these bits were never set in the freshly built
		 * value; the branch is kept for symmetry.
		 */
		val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
		val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
		val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
	}
	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);

	/* Send a pause frame when the RX FIFO crosses the 2K threshold... */
	val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
	val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
	val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
	regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
			   MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
			   MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);

	/* ...and release it at the 1K threshold. */
	val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
	val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
	regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
			   MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
}
908
909static void mtk_star_adjust_link(struct net_device *ndev)
910{
911	struct mtk_star_priv *priv = netdev_priv(ndev);
912	struct phy_device *phydev = priv->phydev;
913	bool new_state = false;
914
915	if (phydev->link) {
916		if (!priv->link) {
917			priv->link = phydev->link;
918			new_state = true;
919		}
920
921		if (priv->speed != phydev->speed) {
922			priv->speed = phydev->speed;
923			new_state = true;
924		}
925
926		if (priv->pause != phydev->pause) {
927			priv->pause = phydev->pause;
928			new_state = true;
929		}
930	} else {
931		if (priv->link) {
932			priv->link = phydev->link;
933			new_state = true;
934		}
935	}
936
937	if (new_state) {
938		if (phydev->link)
939			mtk_star_phy_config(priv);
940
941		phy_print_status(ndev->phydev);
942	}
943}
944
/* One-time MAC setup: enable the MII pads/modes in SYS_CONF and program
 * the SoC-specific MAC clock divider.
 */
static void mtk_star_init_config(struct mtk_star_priv *priv)
{
	unsigned int val;

	val = (MTK_STAR_BIT_MII_PAD_OUT_ENABLE |
	       MTK_STAR_BIT_EXT_MDC_MODE |
	       MTK_STAR_BIT_SWC_MII_MODE);

	regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
	regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
			   MTK_STAR_MSK_MAC_CLK_CONF,
			   priv->compat_data->bit_clk_div);
}
958
/* Bring the MAC fully up: program the MAC config registers, init DMA,
 * populate the RX ring, request the interrupt and attach/start the PHY.
 * Shared by ndo_open and the resume path.  Returns 0 or a negative errno;
 * on failure everything acquired so far is released in reverse order.
 */
static int mtk_star_enable(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	unsigned int val;
	int ret;

	/* Take the NIC out of power-down and quiesce it (interrupts masked,
	 * DMA stopped) before reprogramming.
	 */
	mtk_star_nic_disable_pd(priv);
	mtk_star_intr_disable(priv);
	mtk_star_dma_stop(priv);

	mtk_star_set_mac_addr(ndev);

	/* Configure the MAC: 96-bit IPG, 1522-byte max frame, auto padding
	 * and CRC stripping on receive.
	 */
	val = MTK_STAR_VAL_MAC_CFG_IPG_96BIT;
	val <<= MTK_STAR_OFF_MAC_CFG_IPG;
	val |= MTK_STAR_BIT_MAC_CFG_MAXLEN_1522;
	val |= MTK_STAR_BIT_MAC_CFG_AUTO_PAD;
	val |= MTK_STAR_BIT_MAC_CFG_CRC_STRIP;
	regmap_write(priv->regs, MTK_STAR_REG_MAC_CFG, val);

	/* Enable Hash Table BIST and reset it */
	ret = mtk_star_reset_hash_table(priv);
	if (ret)
		return ret;

	/* Setup the hashing algorithm */
	regmap_clear_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
			  MTK_STAR_BIT_ARL_CFG_HASH_ALG |
			  MTK_STAR_BIT_ARL_CFG_MISC_MODE);

	/* Don't strip VLAN tags */
	regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
			  MTK_STAR_BIT_MAC_CFG_VLAN_STRIP);

	/* Setup DMA */
	mtk_star_dma_init(priv);

	ret = mtk_star_prepare_rx_skbs(ndev);
	if (ret)
		goto err_out;

	/* Request the interrupt */
	ret = request_irq(ndev->irq, mtk_star_handle_irq,
			  IRQF_TRIGGER_NONE, ndev->name, ndev);
	if (ret)
		goto err_free_skbs;

	napi_enable(&priv->tx_napi);
	napi_enable(&priv->rx_napi);

	/* Drop any stale pending events before unmasking interrupts. */
	mtk_star_intr_ack_all(priv);
	mtk_star_intr_enable(priv);

	/* Connect to and start PHY */
	priv->phydev = of_phy_connect(ndev, priv->phy_node,
				      mtk_star_adjust_link, 0, priv->phy_intf);
	if (!priv->phydev) {
		netdev_err(ndev, "failed to connect to PHY\n");
		ret = -ENODEV;
		goto err_free_irq;
	}

	mtk_star_dma_start(priv);
	phy_start(priv->phydev);
	netif_start_queue(ndev);

	return 0;

err_free_irq:
	napi_disable(&priv->rx_napi);
	napi_disable(&priv->tx_napi);
	free_irq(ndev->irq, ndev);
err_free_skbs:
	mtk_star_free_rx_skbs(priv);
err_out:
	return ret;
}
1036
/* Tear down what mtk_star_enable() set up, in reverse order: stop the TX
 * queue and both NAPI contexts, mask/ack interrupts, stop DMA, stop and
 * detach the PHY, release the IRQ and free all ring buffers.
 */
static void mtk_star_disable(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->tx_napi);
	napi_disable(&priv->rx_napi);
	mtk_star_intr_disable(priv);
	mtk_star_dma_disable(priv);
	/* Ack leftover events so nothing fires spuriously on re-enable. */
	mtk_star_intr_ack_all(priv);
	phy_stop(priv->phydev);
	phy_disconnect(priv->phydev);
	free_irq(ndev->irq, ndev);
	mtk_star_free_rx_skbs(priv);
	mtk_star_free_tx_skbs(priv);
}
1053
/* ndo_open callback: delegate to the common enable path shared with
 * system resume.
 */
static int mtk_star_netdev_open(struct net_device *ndev)
{
	int ret;

	ret = mtk_star_enable(ndev);

	return ret;
}
1058
/* ndo_stop callback: delegate to the common disable path shared with
 * system suspend.  Always succeeds.
 */
static int mtk_star_netdev_stop(struct net_device *ndev)
{
	mtk_star_disable(ndev);
	return 0;
}
1065
1066static int mtk_star_netdev_ioctl(struct net_device *ndev,
1067				 struct ifreq *req, int cmd)
1068{
1069	if (!netif_running(ndev))
1070		return -EINVAL;
1071
1072	return phy_mii_ioctl(ndev->phydev, req, cmd);
1073}
1074
/* Slow path of mtk_star_maybe_stop_tx(): the ring looked too full, so
 * stop the queue and re-check under a full memory barrier.  The barrier
 * orders the queue-stop against the availability re-read so that either
 * we observe descriptors freed by the completion path, or the completion
 * path (mtk_star_tx_poll) observes the stopped queue and wakes it -
 * avoiding a permanently stalled queue.  Returns -EBUSY if still full,
 * 0 after restarting the queue.
 */
static int __mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
{
	netif_stop_queue(priv->ndev);

	/* Might race with mtk_star_tx_poll, check again */
	smp_mb();
	if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) < size))
		return -EBUSY;

	netif_start_queue(priv->ndev);

	return 0;
}
1088
1089static inline int mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
1090{
1091	if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) >= size))
1092		return 0;
1093
1094	return __mtk_star_maybe_stop_tx(priv, size);
1095}
1096
/* ndo_start_xmit: map the skb for DMA as a single buffer, push it onto
 * the TX ring, account it in BQL and kick the DMA engine.  nfrags is only
 * used for the ring-space check here (assumes frames arrive linearized -
 * TODO confirm the feature flags set elsewhere in the file).
 */
static netdev_tx_t mtk_star_netdev_start_xmit(struct sk_buff *skb,
					      struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct mtk_star_ring *ring = &priv->tx_ring;
	struct device *dev = mtk_star_get_dev(priv);
	struct mtk_star_ring_desc_data desc_data;
	int nfrags = skb_shinfo(skb)->nr_frags;

	if (unlikely(mtk_star_tx_ring_avail(ring) < nfrags + 1)) {
		if (!netif_queue_stopped(ndev)) {
			netif_stop_queue(ndev);
			/* This is a hard error, log it. */
			pr_err_ratelimited("Tx ring full when queue awake\n");
		}
		return NETDEV_TX_BUSY;
	}

	desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
	if (dma_mapping_error(dev, desc_data.dma_addr))
		goto err_drop_packet;

	desc_data.skb = skb;
	desc_data.len = skb->len;
	mtk_star_ring_push_head_tx(ring, &desc_data);

	/* BQL accounting; paired with netdev_completed_queue() in
	 * mtk_star_tx_poll().
	 */
	netdev_sent_queue(ndev, skb->len);

	/* Stop the queue early if a full worst-case frame no longer fits. */
	mtk_star_maybe_stop_tx(priv, MTK_STAR_DESC_NEEDED);

	mtk_star_dma_resume_tx(priv);

	return NETDEV_TX_OK;

err_drop_packet:
	dev_kfree_skb(skb);
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
1136
1137/* Returns the number of bytes sent or a negative number on the first
1138 * descriptor owned by DMA.
1139 */
1140static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
1141{
1142	struct mtk_star_ring *ring = &priv->tx_ring;
1143	struct mtk_star_ring_desc_data desc_data;
1144	int ret;
1145
1146	ret = mtk_star_ring_pop_tail(ring, &desc_data);
1147	if (ret)
1148		return ret;
1149
1150	mtk_star_dma_unmap_tx(priv, &desc_data);
1151	ret = desc_data.skb->len;
1152	dev_kfree_skb_irq(desc_data.skb);
1153
1154	return ret;
1155}
1156
/* TX completion NAPI poll: walk the TX ring from tail towards head,
 * reclaiming descriptors already released by the DMA engine, report the
 * completed work to BQL, wake the queue once enough space is free, and
 * finally re-enable the TX DMA interrupt that the hard IRQ handler
 * masked.  Always reports zero work to the NAPI core.
 */
static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
{
	struct mtk_star_priv *priv = container_of(napi, struct mtk_star_priv,
						  tx_napi);
	int ret = 0, pkts_compl = 0, bytes_compl = 0, count = 0;
	struct mtk_star_ring *ring = &priv->tx_ring;
	struct net_device *ndev = priv->ndev;
	unsigned int head = ring->head;
	unsigned int entry = ring->tail;

	/* Stop when we catch up with the producer, after one full ring's
	 * worth of entries, or at the first descriptor still owned by DMA
	 * (negative return from mtk_star_tx_complete_one()).
	 */
	while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
		ret = mtk_star_tx_complete_one(priv);
		if (ret < 0)
			break;

		count++;
		pkts_compl++;
		bytes_compl += ret;
		entry = ring->tail;
	}

	/* BQL: paired with netdev_sent_queue() in the xmit path. */
	netdev_completed_queue(ndev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(ndev)) &&
	    (mtk_star_tx_ring_avail(ring) > MTK_STAR_TX_THRESH))
		netif_wake_queue(ndev);

	/* Unmask the TX interrupt only if NAPI actually completed. */
	if (napi_complete(napi)) {
		spin_lock(&priv->lock);
		mtk_star_enable_dma_irq(priv, false, true);
		spin_unlock(&priv->lock);
	}

	return 0;
}
1192
1193static void mtk_star_netdev_get_stats64(struct net_device *ndev,
1194					struct rtnl_link_stats64 *stats)
1195{
1196	struct mtk_star_priv *priv = netdev_priv(ndev);
1197
1198	mtk_star_update_stats(priv);
1199
1200	memcpy(stats, &priv->stats, sizeof(*stats));
1201}
1202
1203static void mtk_star_set_rx_mode(struct net_device *ndev)
1204{
1205	struct mtk_star_priv *priv = netdev_priv(ndev);
1206	struct netdev_hw_addr *hw_addr;
1207	unsigned int hash_addr, i;
1208	int ret;
1209
1210	if (ndev->flags & IFF_PROMISC) {
1211		regmap_set_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
1212				MTK_STAR_BIT_ARL_CFG_MISC_MODE);
1213	} else if (netdev_mc_count(ndev) > MTK_STAR_HASHTABLE_MC_LIMIT ||
1214		   ndev->flags & IFF_ALLMULTI) {
1215		for (i = 0; i < MTK_STAR_HASHTABLE_SIZE_MAX; i++) {
1216			ret = mtk_star_set_hashbit(priv, i);
1217			if (ret)
1218				goto hash_fail;
1219		}
1220	} else {
1221		/* Clear previous settings. */
1222		ret = mtk_star_reset_hash_table(priv);
1223		if (ret)
1224			goto hash_fail;
1225
1226		netdev_for_each_mc_addr(hw_addr, ndev) {
1227			hash_addr = (hw_addr->addr[0] & 0x01) << 8;
1228			hash_addr += hw_addr->addr[5];
1229			ret = mtk_star_set_hashbit(priv, hash_addr);
1230			if (ret)
1231				goto hash_fail;
1232		}
1233	}
1234
1235	return;
1236
1237hash_fail:
1238	if (ret == -ETIMEDOUT)
1239		netdev_err(ndev, "setting hash bit timed out\n");
1240	else
1241		/* Should be -EIO */
1242		netdev_err(ndev, "unable to set hash bit");
1243}
1244
/* Net device operations; MAC address handling uses the generic ethernet
 * helpers since the address is simply written to registers on open.
 */
static const struct net_device_ops mtk_star_netdev_ops = {
	.ndo_open		= mtk_star_netdev_open,
	.ndo_stop		= mtk_star_netdev_stop,
	.ndo_start_xmit		= mtk_star_netdev_start_xmit,
	.ndo_get_stats64	= mtk_star_netdev_get_stats64,
	.ndo_set_rx_mode	= mtk_star_set_rx_mode,
	.ndo_eth_ioctl		= mtk_star_netdev_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
1255
/* ethtool get_drvinfo: only the driver name is reported. */
static void mtk_star_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strscpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
}
1261
/* TODO Add ethtool stats. */
/* Link state and settings are delegated to the generic phylib helpers. */
static const struct ethtool_ops mtk_star_ethtool_ops = {
	.get_drvinfo		= mtk_star_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
1269
/* Receive up to @budget frames from the RX ring.  For every good frame a
 * replacement skb is allocated and DMA-mapped *before* the received one
 * is handed to the stack, so the ring always stays fully populated; on
 * any failure the frame is dropped and its skb recycled back into the
 * ring.  Returns the number of frames processed, or -1 as soon as no
 * completed descriptor is found (NOTE(review): the negative value
 * propagates to the NAPI poll return - verify the caller handles it).
 */
static int mtk_star_rx(struct mtk_star_priv *priv, int budget)
{
	struct mtk_star_ring *ring = &priv->rx_ring;
	struct device *dev = mtk_star_get_dev(priv);
	struct mtk_star_ring_desc_data desc_data;
	struct net_device *ndev = priv->ndev;
	struct sk_buff *curr_skb, *new_skb;
	dma_addr_t new_dma_addr;
	int ret, count = 0;

	while (count < budget) {
		ret = mtk_star_ring_pop_tail(ring, &desc_data);
		if (ret)
			return -1;

		curr_skb = desc_data.skb;

		/* CRC or oversize errors flagged by the descriptor. */
		if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
		    (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
			/* Error packet -> drop and reuse skb. */
			new_skb = curr_skb;
			goto push_new_skb;
		}

		/* Prepare new skb before receiving the current one.
		 * Reuse the current skb if we fail at any point.
		 */
		new_skb = mtk_star_alloc_skb(ndev);
		if (!new_skb) {
			ndev->stats.rx_dropped++;
			new_skb = curr_skb;
			goto push_new_skb;
		}

		new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
		if (dma_mapping_error(dev, new_dma_addr)) {
			ndev->stats.rx_dropped++;
			dev_kfree_skb(new_skb);
			new_skb = curr_skb;
			netdev_err(ndev, "DMA mapping error of RX descriptor\n");
			goto push_new_skb;
		}

		/* We can't fail anymore at this point:
		 * it's safe to unmap the skb.
		 */
		mtk_star_dma_unmap_rx(priv, &desc_data);

		skb_put(desc_data.skb, desc_data.len);
		desc_data.skb->ip_summed = CHECKSUM_NONE;
		desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
		desc_data.skb->dev = ndev;
		netif_receive_skb(desc_data.skb);

		/* update dma_addr for new skb */
		desc_data.dma_addr = new_dma_addr;

push_new_skb:

		count++;

		/* Refill the slot we just consumed with the (new or
		 * recycled) skb.
		 */
		desc_data.len = skb_tailroom(new_skb);
		desc_data.skb = new_skb;
		mtk_star_ring_push_head_rx(ring, &desc_data);
	}

	mtk_star_dma_resume_rx(priv);

	return count;
}
1340
1341static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
1342{
1343	struct mtk_star_priv *priv;
1344	int work_done = 0;
1345
1346	priv = container_of(napi, struct mtk_star_priv, rx_napi);
1347
1348	work_done = mtk_star_rx(priv, budget);
1349	if (work_done < budget) {
1350		napi_complete_done(napi, work_done);
1351		spin_lock(&priv->lock);
1352		mtk_star_enable_dma_irq(priv, true, false);
1353		spin_unlock(&priv->lock);
1354	}
1355
1356	return work_done;
1357}
1358
/* Clear the MDIO transfer-done latch (RWOK is write-1-to-clear -
 * presumably, judging by the write below; confirm with the datasheet)
 * before starting a new transfer.
 */
static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0,
		     MTK_STAR_BIT_PHY_CTRL0_RWOK);
}
1364
/* Poll PHY_CTRL0 until RWOK signals that the pending MDIO transfer has
 * finished.  Polls every 10us; returns 0 on success or -ETIMEDOUT after
 * MTK_STAR_WAIT_TIMEOUT microseconds.
 */
static int mtk_star_mdio_rwok_wait(struct mtk_star_priv *priv)
{
	unsigned int val;

	return regmap_read_poll_timeout(priv->regs, MTK_STAR_REG_PHY_CTRL0,
					val, val & MTK_STAR_BIT_PHY_CTRL0_RWOK,
					10, MTK_STAR_WAIT_TIMEOUT);
}
1373
/* MDIO read through PHY_CTRL0: issue a read command for @regnum and poll
 * for completion.  Note that @phy_id is never programmed into the
 * register - the controller appears to address a single PHY only (TODO
 * confirm against the datasheet).  Returns the 16-bit register value or
 * a negative errno on timeout.
 */
static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum)
{
	struct mtk_star_priv *priv = mii->priv;
	unsigned int val, data;
	int ret;

	/* Clear the completion latch left over from the last transfer. */
	mtk_star_mdio_rwok_clear(priv);

	val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG);
	val &= MTK_STAR_MSK_PHY_CTRL0_PREG;
	val |= MTK_STAR_BIT_PHY_CTRL0_RDCMD;

	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);

	ret = mtk_star_mdio_rwok_wait(priv);
	if (ret)
		return ret;

	regmap_read(priv->regs, MTK_STAR_REG_PHY_CTRL0, &data);

	/* The result lives in the upper 16 bits of the same register. */
	data &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
	data >>= MTK_STAR_OFF_PHY_CTRL0_RWDATA;

	return data;
}
1399
/* MDIO write through PHY_CTRL0: load the data and register-number fields,
 * issue the write command and wait for completion.  As in the read path,
 * @phy_id is not used by the register interface (single-PHY controller,
 * presumably - confirm against the datasheet).  Returns 0 or -ETIMEDOUT.
 */
static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id,
			       int regnum, u16 data)
{
	struct mtk_star_priv *priv = mii->priv;
	unsigned int val;

	/* Clear the completion latch left over from the last transfer. */
	mtk_star_mdio_rwok_clear(priv);

	val = data;
	val <<= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
	val &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
	regnum <<= MTK_STAR_OFF_PHY_CTRL0_PREG;
	regnum &= MTK_STAR_MSK_PHY_CTRL0_PREG;
	val |= regnum;
	val |= MTK_STAR_BIT_PHY_CTRL0_WTCMD;

	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);

	return mtk_star_mdio_rwok_wait(priv);
}
1420
1421static int mtk_star_mdio_init(struct net_device *ndev)
1422{
1423	struct mtk_star_priv *priv = netdev_priv(ndev);
1424	struct device *dev = mtk_star_get_dev(priv);
1425	struct device_node *of_node, *mdio_node;
1426	int ret;
1427
1428	of_node = dev->of_node;
1429
1430	mdio_node = of_get_child_by_name(of_node, "mdio");
1431	if (!mdio_node)
1432		return -ENODEV;
1433
1434	if (!of_device_is_available(mdio_node)) {
1435		ret = -ENODEV;
1436		goto out_put_node;
1437	}
1438
1439	priv->mii = devm_mdiobus_alloc(dev);
1440	if (!priv->mii) {
1441		ret = -ENOMEM;
1442		goto out_put_node;
1443	}
1444
1445	snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
1446	priv->mii->name = "mtk-mac-mdio";
1447	priv->mii->parent = dev;
1448	priv->mii->read = mtk_star_mdio_read;
1449	priv->mii->write = mtk_star_mdio_write;
1450	priv->mii->priv = priv;
1451
1452	ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node);
1453
1454out_put_node:
1455	of_node_put(mdio_node);
1456	return ret;
1457}
1458
1459static __maybe_unused int mtk_star_suspend(struct device *dev)
1460{
1461	struct mtk_star_priv *priv;
1462	struct net_device *ndev;
1463
1464	ndev = dev_get_drvdata(dev);
1465	priv = netdev_priv(ndev);
1466
1467	if (netif_running(ndev))
1468		mtk_star_disable(ndev);
1469
1470	clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
1471
1472	return 0;
1473}
1474
1475static __maybe_unused int mtk_star_resume(struct device *dev)
1476{
1477	struct mtk_star_priv *priv;
1478	struct net_device *ndev;
1479	int ret;
1480
1481	ndev = dev_get_drvdata(dev);
1482	priv = netdev_priv(ndev);
1483
1484	ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
1485	if (ret)
1486		return ret;
1487
1488	if (netif_running(ndev)) {
1489		ret = mtk_star_enable(ndev);
1490		if (ret)
1491			clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
1492	}
1493
1494	return ret;
1495}
1496
/* devm action callback: release the bulk clocks acquired in probe. */
static void mtk_star_clk_disable_unprepare(void *data)
{
	struct mtk_star_priv *priv = data;

	clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
1503
1504static int mtk_star_set_timing(struct mtk_star_priv *priv)
1505{
1506	struct device *dev = mtk_star_get_dev(priv);
1507	unsigned int delay_val = 0;
1508
1509	switch (priv->phy_intf) {
1510	case PHY_INTERFACE_MODE_MII:
1511	case PHY_INTERFACE_MODE_RMII:
1512		delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_RX_CLK, priv->rx_inv);
1513		delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_TX_CLK, priv->tx_inv);
1514		break;
1515	default:
1516		dev_err(dev, "This interface not supported\n");
1517		return -EINVAL;
1518	}
1519
1520	return regmap_write(priv->regs, MTK_STAR_REG_TEST0, delay_val);
1521}
1522
/* Probe: map the registers, acquire clocks and syscon handles, parse the
 * DT PHY properties, program the SoC-level interface mux, allocate DMA
 * ring memory, register the MDIO bus and finally the net device.  Every
 * resource is devm-managed, so the driver needs no .remove callback.
 */
static int mtk_star_probe(struct platform_device *pdev)
{
	struct device_node *of_node;
	struct mtk_star_priv *priv;
	struct net_device *ndev;
	struct device *dev;
	void __iomem *base;
	int ret, i;

	dev = &pdev->dev;
	of_node = dev->of_node;

	ndev = devm_alloc_etherdev(dev, sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->compat_data = of_device_get_match_data(&pdev->dev);
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, ndev);

	ndev->min_mtu = ETH_ZLEN;
	ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE;

	spin_lock_init(&priv->lock);

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* We won't be checking the return values of regmap read & write
	 * functions. They can only fail for mmio if there's a clock attached
	 * to regmap which is not the case here.
	 */
	priv->regs = devm_regmap_init_mmio(dev, base,
					   &mtk_star_regmap_config);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	/* PERICFG syscon is needed for the SoC-level interface mux. */
	priv->pericfg = syscon_regmap_lookup_by_phandle(of_node,
							"mediatek,pericfg");
	if (IS_ERR(priv->pericfg)) {
		dev_err(dev, "Failed to lookup the PERICFG syscon\n");
		return PTR_ERR(priv->pericfg);
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0)
		return ndev->irq;

	for (i = 0; i < MTK_STAR_NCLKS; i++)
		priv->clks[i].id = mtk_star_clk_names[i];
	ret = devm_clk_bulk_get(dev, MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	/* Make sure the clocks are gated again on any later failure and
	 * on driver unbind.
	 */
	ret = devm_add_action_or_reset(dev,
				       mtk_star_clk_disable_unprepare, priv);
	if (ret)
		return ret;

	ret = of_get_phy_mode(of_node, &priv->phy_intf);
	if (ret) {
		return ret;
	} else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII &&
		   priv->phy_intf != PHY_INTERFACE_MODE_MII) {
		dev_err(dev, "unsupported phy mode: %s\n",
			phy_modes(priv->phy_intf));
		return -EINVAL;
	}

	priv->phy_node = of_parse_phandle(of_node, "phy-handle", 0);
	if (!priv->phy_node) {
		dev_err(dev, "failed to retrieve the phy handle from device tree\n");
		return -ENODEV;
	}

	priv->rmii_rxc = of_property_read_bool(of_node, "mediatek,rmii-rxc");
	priv->rx_inv = of_property_read_bool(of_node, "mediatek,rxc-inverse");
	priv->tx_inv = of_property_read_bool(of_node, "mediatek,txc-inverse");

	/* SoC-specific PERICFG interface-mux programming, if provided. */
	if (priv->compat_data->set_interface_mode) {
		ret = priv->compat_data->set_interface_mode(ndev);
		if (ret) {
			dev_err(dev, "Failed to set phy interface, err = %d\n", ret);
			return -EINVAL;
		}
	}

	ret = mtk_star_set_timing(priv);
	if (ret) {
		dev_err(dev, "Failed to set timing, err = %d\n", ret);
		return -EINVAL;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "unsupported DMA mask\n");
		return ret;
	}

	priv->ring_base = dmam_alloc_coherent(dev, MTK_STAR_DMA_SIZE,
					      &priv->dma_addr,
					      GFP_KERNEL | GFP_DMA);
	if (!priv->ring_base)
		return -ENOMEM;

	mtk_star_nic_disable_pd(priv);
	mtk_star_init_config(priv);

	ret = mtk_star_mdio_init(ndev);
	if (ret)
		return ret;

	/* Fall back to a random MAC address when none is provided. */
	ret = platform_get_ethdev_address(dev, ndev);
	if (ret || !is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	ndev->netdev_ops = &mtk_star_netdev_ops;
	ndev->ethtool_ops = &mtk_star_ethtool_ops;

	netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll);
	netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);

	return devm_register_netdev(dev, ndev);
}
1654
1655#ifdef CONFIG_OF
1656static int mt8516_set_interface_mode(struct net_device *ndev)
1657{
1658	struct mtk_star_priv *priv = netdev_priv(ndev);
1659	struct device *dev = mtk_star_get_dev(priv);
1660	unsigned int intf_val, ret, rmii_rxc;
1661
1662	switch (priv->phy_intf) {
1663	case PHY_INTERFACE_MODE_MII:
1664		intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
1665		rmii_rxc = 0;
1666		break;
1667	case PHY_INTERFACE_MODE_RMII:
1668		intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
1669		rmii_rxc = priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK;
1670		break;
1671	default:
1672		dev_err(dev, "This interface not supported\n");
1673		return -EINVAL;
1674	}
1675
1676	ret = regmap_update_bits(priv->pericfg,
1677				 MTK_PERICFG_REG_NIC_CFG1_CON,
1678				 MTK_PERICFG_BIT_NIC_CFG_CON_CLK,
1679				 rmii_rxc);
1680	if (ret)
1681		return ret;
1682
1683	return regmap_update_bits(priv->pericfg,
1684				  MTK_PERICFG_REG_NIC_CFG0_CON,
1685				  MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF,
1686				  intf_val);
1687}
1688
1689static int mt8365_set_interface_mode(struct net_device *ndev)
1690{
1691	struct mtk_star_priv *priv = netdev_priv(ndev);
1692	struct device *dev = mtk_star_get_dev(priv);
1693	unsigned int intf_val;
1694
1695	switch (priv->phy_intf) {
1696	case PHY_INTERFACE_MODE_MII:
1697		intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
1698		break;
1699	case PHY_INTERFACE_MODE_RMII:
1700		intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
1701		intf_val |= priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2;
1702		break;
1703	default:
1704		dev_err(dev, "This interface not supported\n");
1705		return -EINVAL;
1706	}
1707
1708	return regmap_update_bits(priv->pericfg,
1709				  MTK_PERICFG_REG_NIC_CFG_CON_V2,
1710				  MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF |
1711				  MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2,
1712				  intf_val);
1713}
1714
/* Per-SoC data: PERICFG interface-mode hook and MAC clock divider. */
static const struct mtk_star_compat mtk_star_mt8516_compat = {
	.set_interface_mode = mt8516_set_interface_mode,
	.bit_clk_div = MTK_STAR_BIT_CLK_DIV_10,
};

/* MT8365 uses the v2 PERICFG register layout and a different divider. */
static const struct mtk_star_compat mtk_star_mt8365_compat = {
	.set_interface_mode = mt8365_set_interface_mode,
	.bit_clk_div = MTK_STAR_BIT_CLK_DIV_50,
};
1724
1725static const struct of_device_id mtk_star_of_match[] = {
1726	{ .compatible = "mediatek,mt8516-eth",
1727	  .data = &mtk_star_mt8516_compat },
1728	{ .compatible = "mediatek,mt8518-eth",
1729	  .data = &mtk_star_mt8516_compat },
1730	{ .compatible = "mediatek,mt8175-eth",
1731	  .data = &mtk_star_mt8516_compat },
1732	{ .compatible = "mediatek,mt8365-eth",
1733	  .data = &mtk_star_mt8365_compat },
1734	{ }
1735};
1736MODULE_DEVICE_TABLE(of, mtk_star_of_match);
1737#endif
1738
/* System sleep hooks only; no runtime PM is implemented. */
static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
			 mtk_star_suspend, mtk_star_resume);

/* No .remove callback: all resources are devm-managed in probe. */
static struct platform_driver mtk_star_driver = {
	.driver = {
		.name = MTK_STAR_DRVNAME,
		.pm = &mtk_star_pm_ops,
		.of_match_table = of_match_ptr(mtk_star_of_match),
	},
	.probe = mtk_star_probe,
};
module_platform_driver(mtk_star_driver);

MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
MODULE_DESCRIPTION("Mediatek STAR Ethernet MAC Driver");
MODULE_LICENSE("GPL");
1755