// SPDX-License-Identifier: GPL-2.0
/*  Atheros AR71xx built-in ethernet mac driver
 *
 *  Copyright (C) 2019 Oleksij Rempel <o.rempel@pengutronix.de>
 *
 *  List of authors who contributed to this driver before mainlining:
 *  Alexander Couzens <lynxis@fe80.eu>
 *  Christian Lamparter <chunkeey@gmail.com>
 *  Chuanhong Guo <gch981213@gmail.com>
 *  Daniel F. Dickinson <cshored@thecshore.com>
 *  David Bauer <mail@david-bauer.net>
 *  Felix Fietkau <nbd@nbd.name>
 *  Gabor Juhos <juhosg@freemail.hu>
 *  Hauke Mehrtens <hauke@hauke-m.de>
 *  Johann Neuhauser <johann@it-neuhauser.de>
 *  John Crispin <john@phrozen.org>
 *  Jo-Philipp Wich <jo@mein.io>
 *  Koen Vandeputte <koen.vandeputte@ncentric.com>
 *  Lucian Cristian <lucian.cristian@gmail.com>
 *  Matt Merhar <mattmerhar@protonmail.com>
 *  Milan Krstic <milan.krstic@gmail.com>
 *  Petr Štetiar <ynezz@true.cz>
 *  Rosen Penev <rosenp@gmail.com>
 *  Stephen Walker <stephendwalker+github@gmail.com>
 *  Vittorio Gambaletta <openwrt@vittgam.net>
 *  Weijie Gao <hackpascal@gmail.com>
 *  Imre Kaloz <kaloz@openwrt.org>
 */

#include <linux/if_vlan.h>
#include <linux/mfd/syscon.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phylink.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/io.h>

/* For our NAPI weight, bigger does *NOT* mean better - it means more
 * D-cache misses and lots more wasted cycles than we'll ever
 * possibly gain from saving instructions.
 */
#define AG71XX_NAPI_WEIGHT	32
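/* Retry interval of the RX out-of-memory timer: 1 + HZ / 10 jiffies,
 * i.e. roughly 100 ms regardless of the configured HZ.
 */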
#define AG71XX_OOM_REFILL	(1 + HZ / 10)

#define AG71XX_INT_ERR	(AG71XX_INT_RX_BE | AG71XX_INT_TX_BE)
#define AG71XX_INT_TX	(AG71XX_INT_TX_PS)
#define AG71XX_INT_RX	(AG71XX_INT_RX_PR | AG71XX_INT_RX_OF)

#define AG71XX_INT_POLL	(AG71XX_INT_RX | AG71XX_INT_TX)
#define AG71XX_INT_INIT	(AG71XX_INT_ERR | AG71XX_INT_POLL)

#define AG71XX_TX_MTU_LEN	1540

#define AG71XX_TX_RING_SPLIT		512
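/* Worst-case number of descriptors needed per packet when the TX ring
 * uses split descriptors: DIV_ROUND_UP(1540, 512) = 4.
 */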
#define AG71XX_TX_RING_DS_PER_PKT	DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \
					     AG71XX_TX_RING_SPLIT)
#define AG71XX_TX_RING_SIZE_DEFAULT	128
#define AG71XX_RX_RING_SIZE_DEFAULT	256

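/* MDIO polling budget: up to 1000 retries with two 5 us delays each,
 * i.e. about 10 ms before an MDIO access is reported as timed out.
 * The MDC clock itself is capped at 5 MHz.
 */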
#define AG71XX_MDIO_RETRY	1000
#define AG71XX_MDIO_DELAY	5
#define AG71XX_MDIO_MAX_CLK	5000000

/* Register offsets */
#define AG71XX_REG_MAC_CFG1	0x0000
#define MAC_CFG1_TXE		BIT(0)	/* Tx Enable */
#define MAC_CFG1_STX		BIT(1)	/* Synchronize Tx Enable */
#define MAC_CFG1_RXE		BIT(2)	/* Rx Enable */
#define MAC_CFG1_SRX		BIT(3)	/* Synchronize Rx Enable */
#define MAC_CFG1_TFC		BIT(4)	/* Tx Flow Control Enable */
#define MAC_CFG1_RFC		BIT(5)	/* Rx Flow Control Enable */
#define MAC_CFG1_SR		BIT(31)	/* Soft Reset */
#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define AG71XX_REG_MAC_CFG2	0x0004
#define MAC_CFG2_FDX		BIT(0)
#define MAC_CFG2_PAD_CRC_EN	BIT(2)
#define MAC_CFG2_LEN_CHECK	BIT(4)
#define MAC_CFG2_IF_1000	BIT(9)
#define MAC_CFG2_IF_10_100	BIT(8)

#define AG71XX_REG_MAC_MFL	0x0010

#define AG71XX_REG_MII_CFG	0x0020
#define MII_CFG_CLK_DIV_4	0
#define MII_CFG_CLK_DIV_6	2
#define MII_CFG_CLK_DIV_8	3
#define MII_CFG_CLK_DIV_10	4
#define MII_CFG_CLK_DIV_14	5
#define MII_CFG_CLK_DIV_20	6
#define MII_CFG_CLK_DIV_28	7
#define MII_CFG_CLK_DIV_34	8
#define MII_CFG_CLK_DIV_42	9
#define MII_CFG_CLK_DIV_50	10
#define MII_CFG_CLK_DIV_58	11
#define MII_CFG_CLK_DIV_66	12
#define MII_CFG_CLK_DIV_74	13
#define MII_CFG_CLK_DIV_82	14
#define MII_CFG_CLK_DIV_98	15
#define MII_CFG_RESET		BIT(31)

#define AG71XX_REG_MII_CMD	0x0024
#define MII_CMD_READ		BIT(0)

#define AG71XX_REG_MII_ADDR	0x0028
#define MII_ADDR_SHIFT		8

#define AG71XX_REG_MII_CTRL	0x002c
#define AG71XX_REG_MII_STATUS	0x0030
#define AG71XX_REG_MII_IND	0x0034
#define MII_IND_BUSY		BIT(0)
#define MII_IND_INVALID		BIT(2)

#define AG71XX_REG_MAC_IFCTL	0x0038
#define MAC_IFCTL_SPEED		BIT(16)

#define AG71XX_REG_MAC_ADDR1	0x0040
#define AG71XX_REG_MAC_ADDR2	0x0044
#define AG71XX_REG_FIFO_CFG0	0x0048
#define FIFO_CFG0_WTM		BIT(0)	/* Watermark Module */
#define FIFO_CFG0_RXS		BIT(1)	/* Rx System Module */
#define FIFO_CFG0_RXF		BIT(2)	/* Rx Fabric Module */
#define FIFO_CFG0_TXS		BIT(3)	/* Tx System Module */
#define FIFO_CFG0_TXF		BIT(4)	/* Tx Fabric Module */
#define FIFO_CFG0_ALL	(FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \
			| FIFO_CFG0_TXS | FIFO_CFG0_TXF)
#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG0_ENABLE_SHIFT	8

#define AG71XX_REG_FIFO_CFG1	0x004c
#define AG71XX_REG_FIFO_CFG2	0x0050
#define AG71XX_REG_FIFO_CFG3	0x0054
#define AG71XX_REG_FIFO_CFG4	0x0058
#define FIFO_CFG4_DE		BIT(0)	/* Drop Event */
#define FIFO_CFG4_DV		BIT(1)	/* RX_DV Event */
#define FIFO_CFG4_FC		BIT(2)	/* False Carrier */
#define FIFO_CFG4_CE		BIT(3)	/* Code Error */
#define FIFO_CFG4_CR		BIT(4)	/* CRC error */
#define FIFO_CFG4_LM		BIT(5)	/* Length Mismatch */
#define FIFO_CFG4_LO		BIT(6)	/* Length out of range */
#define FIFO_CFG4_OK		BIT(7)	/* Packet is OK */
#define FIFO_CFG4_MC		BIT(8)	/* Multicast Packet */
#define FIFO_CFG4_BC		BIT(9)	/* Broadcast Packet */
#define FIFO_CFG4_DR		BIT(10)	/* Dribble */
#define FIFO_CFG4_LE		BIT(11)	/* Long Event */
#define FIFO_CFG4_CF		BIT(12)	/* Control Frame */
#define FIFO_CFG4_PF		BIT(13)	/* Pause Frame */
#define FIFO_CFG4_UO		BIT(14)	/* Unsupported Opcode */
#define FIFO_CFG4_VT		BIT(15)	/* VLAN tag detected */
#define FIFO_CFG4_FT		BIT(16)	/* Frame Truncated */
#define FIFO_CFG4_UC		BIT(17)	/* Unicast Packet */
#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define AG71XX_REG_FIFO_CFG5	0x005c
#define FIFO_CFG5_DE		BIT(0)	/* Drop Event */
#define FIFO_CFG5_DV		BIT(1)	/* RX_DV Event */
#define FIFO_CFG5_FC		BIT(2)	/* False Carrier */
#define FIFO_CFG5_CE		BIT(3)	/* Code Error */
#define FIFO_CFG5_LM		BIT(4)	/* Length Mismatch */
#define FIFO_CFG5_LO		BIT(5)	/* Length Out of Range */
#define FIFO_CFG5_OK		BIT(6)	/* Packet is OK */
#define FIFO_CFG5_MC		BIT(7)	/* Multicast Packet */
#define FIFO_CFG5_BC		BIT(8)	/* Broadcast Packet */
#define FIFO_CFG5_DR		BIT(9)	/* Dribble */
#define FIFO_CFG5_CF		BIT(10)	/* Control Frame */
#define FIFO_CFG5_PF		BIT(11)	/* Pause Frame */
#define FIFO_CFG5_UO		BIT(12)	/* Unsupported Opcode */
#define FIFO_CFG5_VT		BIT(13)	/* VLAN tag detected */
#define FIFO_CFG5_LE		BIT(14)	/* Long Event */
#define FIFO_CFG5_FT		BIT(15)	/* Frame Truncated */
#define FIFO_CFG5_16		BIT(16)	/* unknown */
#define FIFO_CFG5_17		BIT(17)	/* unknown */
#define FIFO_CFG5_SF		BIT(18)	/* Short Frame */
#define FIFO_CFG5_BM		BIT(19)	/* Byte Mode */
#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)

#define AG71XX_REG_TX_CTRL	0x0180
#define TX_CTRL_TXE		BIT(0)	/* Tx Enable */

#define AG71XX_REG_TX_DESC	0x0184
#define AG71XX_REG_TX_STATUS	0x0188
#define TX_STATUS_PS		BIT(0)	/* Packet Sent */
#define TX_STATUS_UR		BIT(1)	/* Tx Underrun */
#define TX_STATUS_BE		BIT(3)	/* Bus Error */

#define AG71XX_REG_RX_CTRL	0x018c
#define RX_CTRL_RXE		BIT(0)	/* Rx Enable */

#define AG71XX_DMA_RETRY	10
#define AG71XX_DMA_DELAY	1

#define AG71XX_REG_RX_DESC	0x0190
#define AG71XX_REG_RX_STATUS	0x0194
#define RX_STATUS_PR		BIT(0)	/* Packet Received */
#define RX_STATUS_OF		BIT(2)	/* Rx Overflow */
#define RX_STATUS_BE		BIT(3)	/* Bus Error */

#define AG71XX_REG_INT_ENABLE	0x0198
#define AG71XX_REG_INT_STATUS	0x019c
#define AG71XX_INT_TX_PS	BIT(0)
#define AG71XX_INT_TX_UR	BIT(1)
#define AG71XX_INT_TX_BE	BIT(3)
#define AG71XX_INT_RX_PR	BIT(4)
#define AG71XX_INT_RX_OF	BIT(6)
#define AG71XX_INT_RX_BE	BIT(7)

#define AG71XX_REG_FIFO_DEPTH	0x01a8
#define AG71XX_REG_RX_SM	0x01b0
#define AG71XX_REG_TX_SM	0x01b4

#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	| NETIF_MSG_PROBE		\
	| NETIF_MSG_LINK		\
	| NETIF_MSG_TIMER		\
	| NETIF_MSG_IFDOWN		\
	| NETIF_MSG_IFUP		\
	| NETIF_MSG_RX_ERR		\
	| NETIF_MSG_TX_ERR)

struct ag71xx_statistic {
	unsigned short offset;
	u32 mask;
	const char name[ETH_GSTRING_LEN];
};

static const struct ag71xx_statistic ag71xx_statistics[] = {
	{ 0x0080, GENMASK(17, 0), "Tx/Rx 64 Byte", },
	{ 0x0084, GENMASK(17, 0), "Tx/Rx 65-127 Byte", },
	{ 0x0088, GENMASK(17, 0), "Tx/Rx 128-255 Byte", },
	{ 0x008C, GENMASK(17, 0), "Tx/Rx 256-511 Byte", },
	{ 0x0090, GENMASK(17, 0), "Tx/Rx 512-1023 Byte", },
	{ 0x0094, GENMASK(17, 0), "Tx/Rx 1024-1518 Byte", },
	{ 0x0098, GENMASK(17, 0), "Tx/Rx 1519-1522 Byte VLAN", },
	{ 0x009C, GENMASK(23, 0), "Rx Byte", },
	{ 0x00A0, GENMASK(17, 0), "Rx Packet", },
	{ 0x00A4, GENMASK(11, 0), "Rx FCS Error", },
	{ 0x00A8, GENMASK(17, 0), "Rx Multicast Packet", },
	{ 0x00AC, GENMASK(21, 0), "Rx Broadcast Packet", },
	{ 0x00B0, GENMASK(17, 0), "Rx Control Frame Packet", },
	{ 0x00B4, GENMASK(11, 0), "Rx Pause Frame Packet", },
	{ 0x00B8, GENMASK(11, 0), "Rx Unknown OPCode Packet", },
	{ 0x00BC, GENMASK(11, 0), "Rx Alignment Error", },
	{ 0x00C0, GENMASK(15, 0), "Rx Frame Length Error", },
	{ 0x00C4, GENMASK(11, 0), "Rx Code Error", },
	{ 0x00C8, GENMASK(11, 0), "Rx Carrier Sense Error", },
	{ 0x00CC, GENMASK(11, 0), "Rx Undersize Packet", },
	{ 0x00D0, GENMASK(11, 0), "Rx Oversize Packet", },
	{ 0x00D4, GENMASK(11, 0), "Rx Fragments", },
	{ 0x00D8, GENMASK(11, 0), "Rx Jabber", },
	{ 0x00DC, GENMASK(11, 0), "Rx Dropped Packet", },
	{ 0x00E0, GENMASK(23, 0), "Tx Byte", },
	{ 0x00E4, GENMASK(17, 0), "Tx Packet", },
	{ 0x00E8, GENMASK(17, 0), "Tx Multicast Packet", },
	{ 0x00EC, GENMASK(17, 0), "Tx Broadcast Packet", },
	{ 0x00F0, GENMASK(11, 0), "Tx Pause Control Frame", },
	{ 0x00F4, GENMASK(11, 0), "Tx Deferral Packet", },
	{ 0x00F8, GENMASK(11, 0), "Tx Excessive Deferral Packet", },
	{ 0x00FC, GENMASK(11, 0), "Tx Single Collision Packet", },
	{ 0x0100, GENMASK(11, 0), "Tx Multiple Collision", },
	{ 0x0104, GENMASK(11, 0), "Tx Late Collision Packet", },
	{ 0x0108, GENMASK(11, 0), "Tx Excessive Collision Packet", },
	{ 0x010C, GENMASK(12, 0), "Tx Total Collision", },
	{ 0x0110, GENMASK(11, 0), "Tx Pause Frames Honored", },
	{ 0x0114, GENMASK(11, 0), "Tx Drop Frame", },
	{ 0x0118, GENMASK(11, 0), "Tx Jabber Frame", },
	{ 0x011C, GENMASK(11, 0), "Tx FCS Error", },
	{ 0x0120, GENMASK(11, 0), "Tx Control Frame", },
	{ 0x0124, GENMASK(11, 0), "Tx Oversize Frame", },
	{ 0x0128, GENMASK(11, 0), "Tx Undersize Frame", },
	{ 0x012C, GENMASK(11, 0), "Tx Fragment", },
};

#define DESC_EMPTY		BIT(31)
#define DESC_MORE		BIT(24)
#define DESC_PKTLEN_M		0xfff
struct ag71xx_desc {
	u32 data;
	u32 ctrl;
	u32 next;
	u32 pad;
} __aligned(4);

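/* Descriptors are padded out to a full L1 cache line so that no two
 * descriptors share a line; e.g. with the 32-byte cache lines typical
 * of these MIPS cores, each 16-byte descriptor occupies a 32-byte slot.
 */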
#define AG71XX_DESC_SIZE	roundup(sizeof(struct ag71xx_desc), \
					L1_CACHE_BYTES)

struct ag71xx_buf {
	union {
		struct {
			struct sk_buff *skb;
			unsigned int len;
		} tx;
		struct {
			dma_addr_t dma_addr;
			void *rx_buf;
		} rx;
	};
};

struct ag71xx_ring {
	/* "Hot" fields in the data path. */
	unsigned int curr;
	unsigned int dirty;

	/* "Cold" fields - not used in the data path. */
	struct ag71xx_buf *buf;
	u16 order;
	u16 desc_split;
	dma_addr_t descs_dma;
	u8 *descs_cpu;
};

enum ag71xx_type {
	AR7100,
	AR7240,
	AR9130,
	AR9330,
	AR9340,
	QCA9530,
	QCA9550,
};

struct ag71xx_dcfg {
	u32 max_frame_len;
	const u32 *fifodata;
	u16 desc_pktlen_mask;
	bool tx_hang_workaround;
	enum ag71xx_type type;
};

struct ag71xx {
	/* Critical data related to the per-packet data path are clustered
	 * early in this structure to help improve the D-cache footprint.
	 */
	struct ag71xx_ring rx_ring ____cacheline_aligned;
	struct ag71xx_ring tx_ring ____cacheline_aligned;

	u16 rx_buf_size;
	u8 rx_buf_offset;

	struct net_device *ndev;
	struct platform_device *pdev;
	struct napi_struct napi;
	u32 msg_enable;
	const struct ag71xx_dcfg *dcfg;

	/* From this point onwards we're not looking at per-packet fields. */
	void __iomem *mac_base;

	struct ag71xx_desc *stop_desc;
	dma_addr_t stop_desc_dma;

	phy_interface_t phy_if_mode;
	struct phylink *phylink;
	struct phylink_config phylink_config;

	struct delayed_work restart_work;
	struct timer_list oom_timer;

	struct reset_control *mac_reset;

	u32 fifodata[3];
	int mac_idx;

	struct reset_control *mdio_reset;
	struct mii_bus *mii_bus;
	struct clk *clk_mdio;
	struct clk *clk_eth;
};

static int ag71xx_desc_empty(struct ag71xx_desc *desc)
{
	return (desc->ctrl & DESC_EMPTY) != 0;
}

static struct ag71xx_desc *ag71xx_ring_desc(struct ag71xx_ring *ring, int idx)
{
	return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE];
}

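/* Ring sizes are powers of two, stored as the exponent;
 * e.g. a ring of 256 entries has order fls(256 - 1) = 8.
 */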
static int ag71xx_ring_size_order(int size)
{
	return fls(size - 1);
}

static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type)
{
	return ag->dcfg->type == type;
}

static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
{
	iowrite32(value, ag->mac_base + reg);
	/* flush write */
	(void)ioread32(ag->mac_base + reg);
}

static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
{
	return ioread32(ag->mac_base + reg);
}

static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;

	r = ag->mac_base + reg;
	iowrite32(ioread32(r) | mask, r);
	/* flush write */
	(void)ioread32(r);
}

static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;

	r = ag->mac_base + reg;
	iowrite32(ioread32(r) & ~mask, r);
	/* flush write */
	(void)ioread32(r);
}

static void ag71xx_int_enable(struct ag71xx *ag, u32 ints)
{
	ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints);
}

static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
{
	ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
}

static void ag71xx_get_drvinfo(struct net_device *ndev,
			       struct ethtool_drvinfo *info)
{
	struct ag71xx *ag = netdev_priv(ndev);

	strlcpy(info->driver, "ag71xx", sizeof(info->driver));
	strlcpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
		sizeof(info->bus_info));
}

static int ag71xx_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *kset)
{
	struct ag71xx *ag = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(ag->phylink, kset);
}

static int ag71xx_set_link_ksettings(struct net_device *ndev,
				   const struct ethtool_link_ksettings *kset)
{
	struct ag71xx *ag = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(ag->phylink, kset);
}

static int ag71xx_ethtool_nway_reset(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);

	return phylink_ethtool_nway_reset(ag->phylink);
}

static void ag71xx_ethtool_get_pauseparam(struct net_device *ndev,
					  struct ethtool_pauseparam *pause)
{
	struct ag71xx *ag = netdev_priv(ndev);

	phylink_ethtool_get_pauseparam(ag->phylink, pause);
}

static int ag71xx_ethtool_set_pauseparam(struct net_device *ndev,
					 struct ethtool_pauseparam *pause)
{
	struct ag71xx *ag = netdev_priv(ndev);

	return phylink_ethtool_set_pauseparam(ag->phylink, pause);
}

static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       ag71xx_statistics[i].name, ETH_GSTRING_LEN);
	}
}

static void ag71xx_ethtool_get_stats(struct net_device *ndev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct ag71xx *ag = netdev_priv(ndev);
	int i;

	for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
		*data++ = ag71xx_rr(ag, ag71xx_statistics[i].offset)
				& ag71xx_statistics[i].mask;
}

static int ag71xx_ethtool_get_sset_count(struct net_device *ndev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(ag71xx_statistics);
	return -EOPNOTSUPP;
}

static const struct ethtool_ops ag71xx_ethtool_ops = {
	.get_drvinfo			= ag71xx_get_drvinfo,
	.get_link			= ethtool_op_get_link,
	.get_ts_info			= ethtool_op_get_ts_info,
	.get_link_ksettings		= ag71xx_get_link_ksettings,
	.set_link_ksettings		= ag71xx_set_link_ksettings,
	.nway_reset			= ag71xx_ethtool_nway_reset,
	.get_pauseparam			= ag71xx_ethtool_get_pauseparam,
	.set_pauseparam			= ag71xx_ethtool_set_pauseparam,
	.get_strings			= ag71xx_ethtool_get_strings,
	.get_ethtool_stats		= ag71xx_ethtool_get_stats,
	.get_sset_count			= ag71xx_ethtool_get_sset_count,
};

static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;

	for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
		u32 busy;

		udelay(AG71XX_MDIO_DELAY);

		busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
		if (!busy)
			return 0;

		udelay(AG71XX_MDIO_DELAY);
	}

	netif_err(ag, link, ndev, "MDIO operation timed out\n");

	return -ETIMEDOUT;
}

static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
{
	struct ag71xx *ag = bus->priv;
	int err, val;

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	/* enable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
	/* disable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);

	netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);

	return val;
}

static int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg,
				 u16 val)
{
	struct ag71xx *ag = bus->priv;

	netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);

	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val);

	return ag71xx_mdio_wait_busy(ag);
}

static const u32 ar71xx_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28,
};

static const u32 ar7240_mdio_div_table[] = {
	2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96,
};

static const u32 ar933x_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98,
};

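/* Pick the smallest divider that keeps the MDC clock at or below
 * AG71XX_MDIO_MAX_CLK. For example, with a 100 MHz reference clock on
 * AR9330, 100 MHz / 20 = 5 MHz is the first rate that fits, so index 6
 * (MII_CFG_CLK_DIV_20) is selected.
 */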
static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div)
{
	unsigned long ref_clock;
	const u32 *table;
	int ndivs, i;

	ref_clock = clk_get_rate(ag->clk_mdio);
	if (!ref_clock)
		return -EINVAL;

	if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) {
		table = ar933x_mdio_div_table;
		ndivs = ARRAY_SIZE(ar933x_mdio_div_table);
	} else if (ag71xx_is(ag, AR7240)) {
		table = ar7240_mdio_div_table;
		ndivs = ARRAY_SIZE(ar7240_mdio_div_table);
	} else {
		table = ar71xx_mdio_div_table;
		ndivs = ARRAY_SIZE(ar71xx_mdio_div_table);
	}

	for (i = 0; i < ndivs; i++) {
		unsigned long t;

		t = ref_clock / table[i];
		if (t <= AG71XX_MDIO_MAX_CLK) {
			*div = i;
			return 0;
		}
	}

	return -ENOENT;
}

static int ag71xx_mdio_reset(struct mii_bus *bus)
{
	struct ag71xx *ag = bus->priv;
	int err;
	u32 t;

	err = ag71xx_mdio_get_divider(ag, &t);
	if (err)
		return err;

	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
	usleep_range(100, 200);

	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
	usleep_range(100, 200);

	return 0;
}

static int ag71xx_mdio_probe(struct ag71xx *ag)
{
	struct device *dev = &ag->pdev->dev;
	struct net_device *ndev = ag->ndev;
	struct mii_bus *mii_bus;
	struct device_node *np, *mnp;
	int err;

	np = dev->of_node;
	ag->mii_bus = NULL;

	ag->clk_mdio = devm_clk_get(dev, "mdio");
	if (IS_ERR(ag->clk_mdio)) {
		netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
		return PTR_ERR(ag->clk_mdio);
	}

	err = clk_prepare_enable(ag->clk_mdio);
	if (err) {
		netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
		return err;
	}

	mii_bus = devm_mdiobus_alloc(dev);
	if (!mii_bus) {
		err = -ENOMEM;
		goto mdio_err_put_clk;
	}

	ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
	if (IS_ERR(ag->mdio_reset)) {
		netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
		err = PTR_ERR(ag->mdio_reset);
		goto mdio_err_put_clk;
	}

	mii_bus->name = "ag71xx_mdio";
	mii_bus->read = ag71xx_mdio_mii_read;
	mii_bus->write = ag71xx_mdio_mii_write;
	mii_bus->reset = ag71xx_mdio_reset;
	mii_bus->priv = ag;
	mii_bus->parent = dev;
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);

	/* the error path above guarantees a valid reset handle here */
	reset_control_assert(ag->mdio_reset);
	msleep(100);
	reset_control_deassert(ag->mdio_reset);
	msleep(200);

	mnp = of_get_child_by_name(np, "mdio");
	err = of_mdiobus_register(mii_bus, mnp);
	of_node_put(mnp);
	if (err)
		goto mdio_err_put_clk;

	ag->mii_bus = mii_bus;

	return 0;

mdio_err_put_clk:
	clk_disable_unprepare(ag->clk_mdio);
	return err;
}

static void ag71xx_mdio_remove(struct ag71xx *ag)
{
	if (ag->mii_bus)
		mdiobus_unregister(ag->mii_bus);
	clk_disable_unprepare(ag->clk_mdio);
}

static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}

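/* Heuristic for the tx_hang_workaround: if the TX queue has made no
 * progress for ~100 ms while the link is up, the RX/TX state machines
 * are compared against known-bad signatures. The magic values below
 * appear to come from the vendor driver and are treated as opaque
 * indications of a wedged DMA engine.
 */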
static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;

	timestamp = netdev_get_tx_queue(ag->ndev, 0)->trans_start;
	if (likely(time_before(jiffies, timestamp + HZ / 10)))
		return false;

	if (!netif_carrier_ok(ag->ndev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int sent = 0, bytes_compl = 0, n = 0;
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size;
	bool dma_stuck = false;

	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	netif_dbg(ag, tx_queued, ndev, "processing TX ring\n");

	while (ring->dirty + n != ring->curr) {
		struct ag71xx_desc *desc;
		struct sk_buff *skb;
		unsigned int i;

		i = (ring->dirty + n) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);
		skb = ring->buf[i].tx.skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (ag->dcfg->tx_hang_workaround &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work,
						      HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].tx.skb = NULL;

		bytes_compl += ring->buf[i].tx.len;

		sent++;
		ring->dirty += n;

		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent);

	if (!sent)
		return 0;

	ag->ndev->stats.tx_bytes += bytes_compl;
	ag->ndev->stats.tx_packets += sent;

	netdev_completed_queue(ag->ndev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->ndev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}

static void ag71xx_dma_wait_stop(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;

	for (i = 0; i < AG71XX_DMA_RETRY; i++) {
		u32 rx, tx;

		mdelay(AG71XX_DMA_DELAY);

		rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
		tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
		if (!rx && !tx)
			return;
	}

	netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
}

static void ag71xx_dma_reset(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	u32 val;
	int i;

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/* give the hardware some time to really stop all rx/tx activity;
	 * clearing the descriptors too early causes random memory corruption
	 */
	ag71xx_dma_wait_stop(ag);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n",
			  val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n",
			  val);
}

static void ag71xx_hw_setup(struct ag71xx *ag)
{
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}

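/* Total on-wire frame length for a given MTU, e.g. for an MTU of 1500:
 * 14 (Ethernet header) + 4 (VLAN tag) + 1500 + 4 (FCS) = 1522 bytes.
 */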
static unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}

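/* The MAC address registers hold the low-order octets in ADDR1 and the
 * remaining two in the top half of ADDR2; e.g. 00:11:22:33:44:55 is
 * written as ADDR1 = 0x55443322 and ADDR2 = 0x11000000.
 */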
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
	  | (((u32)mac[3]) << 8) | ((u32)mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}

static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct net_device *dev = ag->ndev;
	u32 rx_ds;
	u32 mii_reg;

	ag71xx_hw_stop(ag);

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true);

	reset_control_assert(ag->mac_reset);
	usleep_range(10, 20);
	reset_control_deassert(ag->mac_reset);
	usleep_range(10, 20);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->ndev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->ndev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}

static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->ndev);
}

static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));

	if (phylink_autoneg_inband(mode))
		return;

	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
		ag71xx_fast_reset(ag);

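	/* On SoCs using split TX descriptors, the upper halfword of
	 * FIFO_CFG3 presumably holds a TX FIFO threshold in 32-bit words;
	 * with the default split of 512 this is (2048 - 512) / 4 = 384.
	 */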
	if (ag->tx_ring.desc_split) {
		ag->fifodata[2] &= 0xffff;
		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
}

static void ag71xx_mac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	switch (state->interface) {
	case PHY_INTERFACE_MODE_NA:
		break;
	case PHY_INTERFACE_MODE_MII:
		if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
		    ag71xx_is(ag, AR9340) ||
		    ag71xx_is(ag, QCA9530) ||
		    (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
			break;
		goto unsupported;
	case PHY_INTERFACE_MODE_GMII:
		if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
		    (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
		    (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
			break;
		goto unsupported;
	case PHY_INTERFACE_MODE_SGMII:
		if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
			break;
		goto unsupported;
	case PHY_INTERFACE_MODE_RMII:
		if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
			break;
		goto unsupported;
	case PHY_INTERFACE_MODE_RGMII:
		if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
		    (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
			break;
		goto unsupported;
	default:
		goto unsupported;
	}

	phylink_set(mask, MII);

	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);
	phylink_set(mask, Autoneg);
	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	if (state->interface == PHY_INTERFACE_MODE_NA ||
	    state->interface == PHY_INTERFACE_MODE_SGMII ||
	    state->interface == PHY_INTERFACE_MODE_RGMII ||
	    state->interface == PHY_INTERFACE_MODE_GMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	}

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);

	return;
unsupported:
	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
				     struct phylink_link_state *state)
{
	state->link = 0;
}

static void ag71xx_mac_an_restart(struct phylink_config *config)
{
	/* Not Supported */
}

static void ag71xx_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));

	ag71xx_hw_stop(ag);
}

static void ag71xx_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
	u32 cfg1, cfg2;
	u32 ifctl;
	u32 fifo5;

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= duplex ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		return;
	}

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	cfg1 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG1);
	cfg1 &= ~(MAC_CFG1_TFC | MAC_CFG1_RFC);
	if (tx_pause)
		cfg1 |= MAC_CFG1_TFC;

	if (rx_pause)
		cfg1 |= MAC_CFG1_RFC;
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, cfg1);

	ag71xx_hw_start(ag);
}

static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
	.validate = ag71xx_mac_validate,
	.mac_pcs_get_state = ag71xx_mac_pcs_get_state,
	.mac_an_restart = ag71xx_mac_an_restart,
	.mac_config = ag71xx_mac_config,
	.mac_link_down = ag71xx_mac_link_down,
	.mac_link_up = ag71xx_mac_link_up,
};

static int ag71xx_phylink_setup(struct ag71xx *ag)
{
	struct phylink *phylink;

	ag->phylink_config.dev = &ag->ndev->dev;
	ag->phylink_config.type = PHYLINK_NETDEV;

	phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
				 ag->phy_if_mode, &ag71xx_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	ag->phylink = phylink;
	return 0;
}

static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;
	struct net_device *ndev = ag->ndev;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			ndev->stats.tx_errors++;
		}

		if (ring->buf[i].tx.skb) {
			bytes_compl += ring->buf[i].tx.len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].tx.skb);
		}
		ring->buf[i].tx.skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
}

static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = ring_size - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].tx.skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->ndev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx.rx_buf) {
			dma_unmap_single(&ag->pdev->dev,
					 ring->buf[i].rx.dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx.rx_buf);
		}
}

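/* Size of the whole RX buffer as handed to build_skb(): the DMA area
 * plus room for the struct skb_shared_info that build_skb() places at
 * the end, so received fragments can be wrapped without copying.
 */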
static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc;
	void *data;

	desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx.rx_buf = data;
	buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
					  DMA_FROM_DEVICE);
	desc->data = (u32)buf->rx.dma_addr + offset;
	return true;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct net_device *ndev = ag->ndev;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	unsigned int i;
	int ret;

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n",
			  desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}

static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int offset = ag->rx_buf_offset;
	unsigned int count;

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx.rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n",
		  count);

	return count;
}

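/* TX and RX rings share a single coherent allocation: TX descriptors
 * first, then RX descriptors. The buf bookkeeping array is laid out
 * the same way, with rx->buf aliasing the tail of tx->buf.
 */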
static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size, tx_size;

	ring_size = BIT(tx->order) + BIT(rx->order);
	tx_size = BIT(tx->order);

	tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
					   ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_KERNEL);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	rx->buf = &tx->buf[tx_size];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}

static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size;

	ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->ndev);
}

static void ag71xx_hw_init(struct ag71xx *ag)
{
	ag71xx_hw_stop(ag);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	usleep_range(20, 30);

	reset_control_assert(ag->mac_reset);
	msleep(100);
	reset_control_deassert(ag->mac_reset);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}

static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->ndev);

	return 0;
}

static void ag71xx_hw_disable(struct ag71xx *ag)
{
	netif_stop_queue(ag->ndev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	ag71xx_rings_cleanup(ag);
}

static int ag71xx_open(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);
	unsigned int max_frame_len;
	int ret;

	ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0);
	if (ret) {
		netif_err(ag, link, ndev, "phylink_of_phy_connect failed with err: %i\n",
			  ret);
		return ret;
	}

	max_frame_len = ag71xx_max_frame_len(ndev->mtu);
	ag->rx_buf_size =
		SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, ndev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	phylink_start(ag->phylink);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	phylink_disconnect_phy(ag->phylink);
	return ret;
}

static int ag71xx_stop(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);

	phylink_stop(ag->phylink);
	phylink_disconnect_phy(ag->phylink);
	ag71xx_hw_disable(ag);

	return 0;
}

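/* Chop a TX buffer into ring descriptors. With splitting enabled a
 * packet spans several descriptors, e.g. split = 512 and len = 1026
 * yields segments of 512, 508 and 6 bytes - the second segment is
 * shortened by 4 bytes so the final DMA transfer stays above 4 bytes.
 */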
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i, ring_mask, ndesc, split;
	struct ag71xx_desc *desc;

	ring_mask = BIT(ring->order) - 1;
	ndesc = 0;
	split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/* TX will hang if DMA transfers <= 4 bytes,
			 * make sure the next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}

static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *ndev)
{
	int i, n, ring_min, ring_mask, ring_size;
	struct ag71xx *ag = netdev_priv(ndev);
	struct ag71xx_ring *ring;
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;

	ring = &ag->tx_ring;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	if (skb->len <= 4) {
		netif_dbg(ag, tx_err, ndev, "packet len is too small\n");
		goto err_drop;
	}

	dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32)dma_addr,
				 skb->len & ag->dcfg->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].tx.len = skb->len;
	ring->buf[i].tx.skb = skb;

	netdev_sent_queue(ndev, skb->len);

	skb_tx_timestamp(skb);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		netif_dbg(ag, tx_err, ndev, "tx queue full\n");
		netif_stop_queue(ndev);
	}

	netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n");

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	ndev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ag71xx_oom_timer_handler(struct timer_list *t)
{
	struct ag71xx *ag = from_timer(ag, t, oom_timer);

	napi_schedule(&ag->napi);
}

static void ag71xx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct ag71xx *ag = netdev_priv(ndev);

	netif_err(ag, tx_err, ndev, "tx timeout\n");

	schedule_delayed_work(&ag->restart_work, 1);
}

static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx,
					 restart_work.work);

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);

	phylink_stop(ag->phylink);
	phylink_start(ag->phylink);

	rtnl_unlock();
}

static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size, done = 0;
	unsigned int pktlen_mask, offset;
	struct sk_buff *next, *skb;
	struct ag71xx_ring *ring;
	struct list_head rx_list;

	ring = &ag->rx_ring;
	pktlen_mask = ag->dcfg->desc_pktlen_mask;
	offset = ag->rx_buf_offset;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
		  limit, ring->curr, ring->dirty);

	INIT_LIST_HEAD(&rx_list);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			WARN_ONCE(1, "RX out of ring");
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx.rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		skb->dev = ndev;
		skb->ip_summed = CHECKSUM_NONE;
		list_add_tail(&skb->list, &rx_list);

next:
		ring->buf[i].rx.rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	list_for_each_entry_safe(skb, next, &rx_list, list)
		skb->protocol = eth_type_trans(skb, ndev);
	netif_receive_skb_list(&rx_list);

	netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
		  ring->curr, ring->dirty, done);

	return done;
}

static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	struct net_device *ndev = ag->ndev;
	int tx_done, rx_done;
	u32 status;

	tx_done = ag71xx_tx_packets(ag, false);

	netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
	rx_done = ag71xx_rx_packets(ag, limit);

	if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		ndev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d,limit=%d\n",
			  rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		return rx_done;
	}

more:
	netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n",
		  rx_done, tx_done, limit);
	return limit;

oom:
	netif_err(ag, rx_err, ndev, "out of memory\n");

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}

static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ag71xx *ag;
	u32 status;

	ag = netdev_priv(ndev);
	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			netif_err(ag, intr, ndev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			netif_err(ag, intr, ndev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		netif_dbg(ag, intr, ndev, "enable polling mode\n");
		napi_schedule(&ag->napi);
	}

	return IRQ_HANDLED;
}

static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(ndev);

	ndev->mtu = new_mtu;
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ndev->mtu));

	return 0;
}

static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= phy_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static const u32 ar71xx_addr_ar7100[] = {
	0x19000000, 0x1a000000,
};

static int ag71xx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct ag71xx_dcfg *dcfg;
	struct net_device *ndev;
	struct resource *res;
	const void *mac_addr;
	int tx_size, err, i;
	struct ag71xx *ag;

	if (!np)
		return -ENODEV;

	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
	if (!ndev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	dcfg = of_device_get_match_data(&pdev->dev);
	if (!dcfg)
		return -EINVAL;

	ag = netdev_priv(ndev);
	ag->mac_idx = -1;
	for (i = 0; i < ARRAY_SIZE(ar71xx_addr_ar7100); i++) {
		if (ar71xx_addr_ar7100[i] == res->start)
			ag->mac_idx = i;
	}

	if (ag->mac_idx < 0) {
		netif_err(ag, probe, ndev, "unknown mac idx\n");
		return -EINVAL;
	}

	ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
	if (IS_ERR(ag->clk_eth)) {
		netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
		return PTR_ERR(ag->clk_eth);
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	ag->pdev = pdev;
	ag->ndev = ndev;
	ag->dcfg = dcfg;
	ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE);
	memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));

	ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
	if (IS_ERR(ag->mac_reset)) {
		netif_err(ag, probe, ndev, "missing mac reset\n");
		return PTR_ERR(ag->mac_reset);
	}

	ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!ag->mac_base)
		return -ENOMEM;

	ndev->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
			       0x0, dev_name(&pdev->dev), ndev);
	if (err) {
		netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
			  ndev->irq);
		return err;
	}

	ndev->netdev_ops = &ag71xx_netdev_ops;
	ndev->ethtool_ops = &ag71xx_ethtool_ops;

	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
	timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	ndev->min_mtu = 68;
	ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0);

	ag->rx_buf_offset = NET_SKB_PAD;
	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
		ag->rx_buf_offset += NET_IP_ALIGN;

	if (ag71xx_is(ag, AR7100)) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

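	/* The stop descriptor links back to itself; parking the DMA engine
	 * on it during resets presumably keeps the hardware from walking
	 * descriptors that are being torn down.
	 */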
	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
					    sizeof(struct ag71xx_desc),
					    &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc)
		return -ENOMEM;

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32)ag->stop_desc_dma;

	mac_addr = of_get_mac_address(np);
	if (!IS_ERR(mac_addr))
		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
	if (IS_ERR(mac_addr) || !is_valid_ether_addr(ndev->dev_addr)) {
		netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
		eth_random_addr(ndev->dev_addr);
	}

	err = of_get_phy_mode(np, &ag->phy_if_mode);
	if (err) {
		netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
		return err;
	}

	netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	err = clk_prepare_enable(ag->clk_eth);
	if (err) {
		netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
		return err;
	}

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);

	ag71xx_hw_init(ag);

	err = ag71xx_mdio_probe(ag);
	if (err)
		goto err_put_clk;

	platform_set_drvdata(pdev, ndev);

	err = ag71xx_phylink_setup(ag);
	if (err) {
		netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
		goto err_mdio_remove;
	}

	err = register_netdev(ndev);
	if (err) {
		netif_err(ag, probe, ndev, "unable to register net device\n");
		platform_set_drvdata(pdev, NULL);
		goto err_mdio_remove;
	}

	netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
		   (unsigned long)ag->mac_base, ndev->irq,
		   phy_modes(ag->phy_if_mode));

	return 0;

err_mdio_remove:
	ag71xx_mdio_remove(ag);
err_put_clk:
	clk_disable_unprepare(ag->clk_eth);
	return err;
}

static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ag71xx *ag;

	if (!ndev)
		return 0;

	ag = netdev_priv(ndev);
	unregister_netdev(ndev);
	ag71xx_mdio_remove(ag);
	clk_disable_unprepare(ag->clk_eth);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static const u32 ar71xx_fifo_ar7100[] = {
	0x0fff0000, 0x00001fff, 0x00780fff,
};

static const u32 ar71xx_fifo_ar9130[] = {
	0x0fff0000, 0x00001fff, 0x008001ff,
};

static const u32 ar71xx_fifo_ar9330[] = {
	0x0010ffff, 0x015500aa, 0x01f00140,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar7100 = {
	.type = AR7100,
	.fifodata = ar71xx_fifo_ar7100,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar7240 = {
	.type = AR7240,
	.fifodata = ar71xx_fifo_ar7100,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9130 = {
	.type = AR9130,
	.fifodata = ar71xx_fifo_ar9130,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9330 = {
	.type = AR9330,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9340 = {
	.type = AR9340,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = SZ_16K - 1,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9530 = {
	.type = QCA9530,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = SZ_16K - 1,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9550 = {
	.type = QCA9550,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct of_device_id ag71xx_match[] = {
	{ .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
	{ .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
	{ .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
	{ .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
	{ .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
	{ .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
	{ .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
	{}
};

static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name	= "ag71xx",
		.of_match_table = ag71xx_match,
	}
};

module_platform_driver(ag71xx_driver);
MODULE_LICENSE("GPL v2");
