1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
4 *
5 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
6 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
7 * Copyright (c) a lot of people too. Please respect their work.
8 *
9 * See MAINTAINERS file for support contact information.
10 */
11
12#include <linux/module.h>
13#include <linux/pci.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/clk.h>
17#include <linux/delay.h>
18#include <linux/ethtool.h>
19#include <linux/phy.h>
20#include <linux/if_vlan.h>
21#include <linux/in.h>
22#include <linux/io.h>
23#include <linux/ip.h>
24#include <linux/tcp.h>
25#include <linux/interrupt.h>
26#include <linux/dma-mapping.h>
27#include <linux/pm_runtime.h>
28#include <linux/bitfield.h>
29#include <linux/prefetch.h>
30#include <linux/ipv6.h>
31#include <asm/unaligned.h>
32#include <net/ip6_checksum.h>
33#include <net/netdev_queues.h>
34
35#include "r8169.h"
36#include "r8169_firmware.h"
37
38#define FIRMWARE_8168D_1	"rtl_nic/rtl8168d-1.fw"
39#define FIRMWARE_8168D_2	"rtl_nic/rtl8168d-2.fw"
40#define FIRMWARE_8168E_1	"rtl_nic/rtl8168e-1.fw"
41#define FIRMWARE_8168E_2	"rtl_nic/rtl8168e-2.fw"
42#define FIRMWARE_8168E_3	"rtl_nic/rtl8168e-3.fw"
43#define FIRMWARE_8168F_1	"rtl_nic/rtl8168f-1.fw"
44#define FIRMWARE_8168F_2	"rtl_nic/rtl8168f-2.fw"
45#define FIRMWARE_8105E_1	"rtl_nic/rtl8105e-1.fw"
46#define FIRMWARE_8402_1		"rtl_nic/rtl8402-1.fw"
47#define FIRMWARE_8411_1		"rtl_nic/rtl8411-1.fw"
48#define FIRMWARE_8411_2		"rtl_nic/rtl8411-2.fw"
49#define FIRMWARE_8106E_1	"rtl_nic/rtl8106e-1.fw"
50#define FIRMWARE_8106E_2	"rtl_nic/rtl8106e-2.fw"
51#define FIRMWARE_8168G_2	"rtl_nic/rtl8168g-2.fw"
52#define FIRMWARE_8168G_3	"rtl_nic/rtl8168g-3.fw"
53#define FIRMWARE_8168H_2	"rtl_nic/rtl8168h-2.fw"
54#define FIRMWARE_8168FP_3	"rtl_nic/rtl8168fp-3.fw"
55#define FIRMWARE_8107E_2	"rtl_nic/rtl8107e-2.fw"
56#define FIRMWARE_8125A_3	"rtl_nic/rtl8125a-3.fw"
57#define FIRMWARE_8125B_2	"rtl_nic/rtl8125b-2.fw"
58
59/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
60   The RTL chips use a 64 element hash table based on the Ethernet CRC. */
61#define	MC_FILTER_LIMIT	32
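
/*
 * Illustrative sketch (not used by the driver as-is): with the hash scheme
 * described above, a multicast address selects one of the 64 filter bits
 * via the upper six bits of its Ethernet CRC. ether_crc() comes from
 * <linux/etherdevice.h>, which is already included.
 */
static inline unsigned int rtl_example_mc_hash_bit(unsigned char *addr)
{
	return ether_crc(ETH_ALEN, addr) >> 26;	/* bit index 0..63 */
}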
62
63#define TX_DMA_BURST	7	/* Maximum PCI burst, '7' is unlimited */
64#define InterFrameGap	0x03	/* 3 means InterFrameGap = the shortest one */
65
66#define R8169_REGS_SIZE		256
67#define R8169_RX_BUF_SIZE	(SZ_16K - 1)
68#define NUM_TX_DESC	256	/* Number of Tx descriptor registers */
69#define NUM_RX_DESC	256	/* Number of Rx descriptor registers */
70#define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
71#define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))
72#define R8169_TX_STOP_THRS	(MAX_SKB_FRAGS + 1)
73#define R8169_TX_START_THRS	(2 * R8169_TX_STOP_THRS)
74
75#define OCP_STD_PHY_BASE	0xa400
76
77#define RTL_CFG_NO_GBIT	1
78
79/* write/read MMIO register */
80#define RTL_W8(tp, reg, val8)	writeb((val8), tp->mmio_addr + (reg))
81#define RTL_W16(tp, reg, val16)	writew((val16), tp->mmio_addr + (reg))
82#define RTL_W32(tp, reg, val32)	writel((val32), tp->mmio_addr + (reg))
83#define RTL_R8(tp, reg)		readb(tp->mmio_addr + (reg))
84#define RTL_R16(tp, reg)		readw(tp->mmio_addr + (reg))
85#define RTL_R32(tp, reg)		readl(tp->mmio_addr + (reg))
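
/*
 * Example (illustrative): a read-modify-write with these accessors looks like
 *
 *	RTL_W8(tp, Config3, RTL_R8(tp, Config3) | MagicPacket);
 *
 * Note that the Config registers are write-protected and must first be
 * unlocked through Cfg9346 (see rtl_unlock_config_regs()).
 */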
86
87#define JUMBO_4K	(4 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
88#define JUMBO_6K	(6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
89#define JUMBO_7K	(7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
90#define JUMBO_9K	(9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
91
92static const struct {
93	const char *name;
94	const char *fw_name;
95} rtl_chip_infos[] = {
96	/* PCI devices. */
97	[RTL_GIGA_MAC_VER_02] = {"RTL8169s"				},
98	[RTL_GIGA_MAC_VER_03] = {"RTL8110s"				},
99	[RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb"			},
100	[RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc"			},
101	[RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc"			},
102	/* PCI-E devices. */
103	[RTL_GIGA_MAC_VER_07] = {"RTL8102e"				},
104	[RTL_GIGA_MAC_VER_08] = {"RTL8102e"				},
105	[RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e"			},
106	[RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e"			},
107	[RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b"			},
108	[RTL_GIGA_MAC_VER_14] = {"RTL8401"				},
109	[RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b"			},
110	[RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp"			},
111	[RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c"			},
112	[RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c"			},
113	[RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c"			},
114	[RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c"			},
115	[RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp"			},
116	[RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp"			},
117	[RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d",	FIRMWARE_8168D_1},
118	[RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d",	FIRMWARE_8168D_2},
119	[RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp"			},
120	[RTL_GIGA_MAC_VER_29] = {"RTL8105e",		FIRMWARE_8105E_1},
121	[RTL_GIGA_MAC_VER_30] = {"RTL8105e",		FIRMWARE_8105E_1},
122	[RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp"			},
123	[RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e",	FIRMWARE_8168E_1},
124	[RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e",	FIRMWARE_8168E_2},
125	[RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl",	FIRMWARE_8168E_3},
126	[RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f",	FIRMWARE_8168F_1},
127	[RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f",	FIRMWARE_8168F_2},
128	[RTL_GIGA_MAC_VER_37] = {"RTL8402",		FIRMWARE_8402_1 },
129	[RTL_GIGA_MAC_VER_38] = {"RTL8411",		FIRMWARE_8411_1 },
130	[RTL_GIGA_MAC_VER_39] = {"RTL8106e",		FIRMWARE_8106E_1},
131	[RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g",	FIRMWARE_8168G_2},
132	[RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu",	FIRMWARE_8168G_3},
133	[RTL_GIGA_MAC_VER_43] = {"RTL8106eus",		FIRMWARE_8106E_2},
134	[RTL_GIGA_MAC_VER_44] = {"RTL8411b",		FIRMWARE_8411_2 },
135	[RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h",	FIRMWARE_8168H_2},
136	[RTL_GIGA_MAC_VER_48] = {"RTL8107e",		FIRMWARE_8107E_2},
137	[RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep"			},
138	[RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117",  FIRMWARE_8168FP_3},
139	[RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117",			},
140	[RTL_GIGA_MAC_VER_61] = {"RTL8125A",		FIRMWARE_8125A_3},
141	/* reserve 62 for CFG_METHOD_4 in the vendor driver */
142	[RTL_GIGA_MAC_VER_63] = {"RTL8125B",		FIRMWARE_8125B_2},
143};
144
145static const struct pci_device_id rtl8169_pci_tbl[] = {
146	{ PCI_VDEVICE(REALTEK,	0x2502) },
147	{ PCI_VDEVICE(REALTEK,	0x2600) },
148	{ PCI_VDEVICE(REALTEK,	0x8129) },
149	{ PCI_VDEVICE(REALTEK,	0x8136), RTL_CFG_NO_GBIT },
150	{ PCI_VDEVICE(REALTEK,	0x8161) },
151	{ PCI_VDEVICE(REALTEK,	0x8162) },
152	{ PCI_VDEVICE(REALTEK,	0x8167) },
153	{ PCI_VDEVICE(REALTEK,	0x8168) },
154	{ PCI_VDEVICE(NCUBE,	0x8168) },
155	{ PCI_VDEVICE(REALTEK,	0x8169) },
156	{ PCI_VENDOR_ID_DLINK,	0x4300,
157		PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 },
158	{ PCI_VDEVICE(DLINK,	0x4300) },
159	{ PCI_VDEVICE(DLINK,	0x4302) },
160	{ PCI_VDEVICE(AT,	0xc107) },
161	{ PCI_VDEVICE(USR,	0x0116) },
162	{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 },
163	{ 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
164	{ PCI_VDEVICE(REALTEK,	0x8125) },
165	{ PCI_VDEVICE(REALTEK,	0x3000) },
166	{}
167};
168
169MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
170
171enum rtl_registers {
172	MAC0		= 0,	/* Ethernet hardware address. */
173	MAC4		= 4,
174	MAR0		= 8,	/* Multicast filter. */
175	CounterAddrLow		= 0x10,
176	CounterAddrHigh		= 0x14,
177	TxDescStartAddrLow	= 0x20,
178	TxDescStartAddrHigh	= 0x24,
179	TxHDescStartAddrLow	= 0x28,
180	TxHDescStartAddrHigh	= 0x2c,
181	FLASH		= 0x30,
182	ERSR		= 0x36,
183	ChipCmd		= 0x37,
184	TxPoll		= 0x38,
185	IntrMask	= 0x3c,
186	IntrStatus	= 0x3e,
187
188	TxConfig	= 0x40,
189#define	TXCFG_AUTO_FIFO			(1 << 7)	/* 8111e-vl */
190#define	TXCFG_EMPTY			(1 << 11)	/* 8111e-vl */
191
192	RxConfig	= 0x44,
193#define	RX128_INT_EN			(1 << 15)	/* 8111c and later */
194#define	RX_MULTI_EN			(1 << 14)	/* 8111c only */
195#define	RXCFG_FIFO_SHIFT		13
196					/* No threshold before first PCI xfer */
197#define	RX_FIFO_THRESH			(7 << RXCFG_FIFO_SHIFT)
198#define	RX_EARLY_OFF			(1 << 11)
199#define	RX_PAUSE_SLOT_ON		(1 << 11)	/* 8125b and later */
200#define	RXCFG_DMA_SHIFT			8
201					/* Unlimited maximum PCI burst. */
202#define	RX_DMA_BURST			(7 << RXCFG_DMA_SHIFT)
203
204	Cfg9346		= 0x50,
205	Config0		= 0x51,
206	Config1		= 0x52,
207	Config2		= 0x53,
208#define PME_SIGNAL			(1 << 5)	/* 8168c and later */
209
210	Config3		= 0x54,
211	Config4		= 0x55,
212	Config5		= 0x56,
213	PHYAR		= 0x60,
214	PHYstatus	= 0x6c,
215	RxMaxSize	= 0xda,
216	CPlusCmd	= 0xe0,
217	IntrMitigate	= 0xe2,
218
219#define RTL_COALESCE_TX_USECS	GENMASK(15, 12)
220#define RTL_COALESCE_TX_FRAMES	GENMASK(11, 8)
221#define RTL_COALESCE_RX_USECS	GENMASK(7, 4)
222#define RTL_COALESCE_RX_FRAMES	GENMASK(3, 0)
223
224#define RTL_COALESCE_T_MAX	0x0fU
225#define RTL_COALESCE_FRAME_MAX	(RTL_COALESCE_T_MAX * 4)
226
227	RxDescAddrLow	= 0xe4,
228	RxDescAddrHigh	= 0xe8,
229	EarlyTxThres	= 0xec,	/* 8169. Unit of 32 bytes. */
230
231#define NoEarlyTx	0x3f	/* Max value : no early transmit. */
232
233	MaxTxPacketSize	= 0xec,	/* 8101/8168. Unit of 128 bytes. */
234
235#define TxPacketMax	(8064 >> 7)
236#define EarlySize	0x27
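/* Worked example for the two values above: TxPacketMax = 8064 >> 7 = 63
 * units of 128 bytes (8064 bytes); EarlySize 0x27 = 39 units, i.e. 4992
 * bytes, assuming the same 128-byte granularity.
 */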
237
238	FuncEvent	= 0xf0,
239	FuncEventMask	= 0xf4,
240	FuncPresetState	= 0xf8,
241	IBCR0           = 0xf8,
242	IBCR2           = 0xf9,
243	IBIMR0          = 0xfa,
244	IBISR0          = 0xfb,
245	FuncForceEvent	= 0xfc,
246};
247
248enum rtl8168_8101_registers {
249	CSIDR			= 0x64,
250	CSIAR			= 0x68,
251#define	CSIAR_FLAG			0x80000000
252#define	CSIAR_WRITE_CMD			0x80000000
253#define	CSIAR_BYTE_ENABLE		0x0000f000
254#define	CSIAR_ADDR_MASK			0x00000fff
255	PMCH			= 0x6f,
256#define D3COLD_NO_PLL_DOWN		BIT(7)
257#define D3HOT_NO_PLL_DOWN		BIT(6)
258#define D3_NO_PLL_DOWN			(BIT(7) | BIT(6))
259	EPHYAR			= 0x80,
260#define	EPHYAR_FLAG			0x80000000
261#define	EPHYAR_WRITE_CMD		0x80000000
262#define	EPHYAR_REG_MASK			0x1f
263#define	EPHYAR_REG_SHIFT		16
264#define	EPHYAR_DATA_MASK		0xffff
265	DLLPR			= 0xd0,
266#define	PFM_EN				(1 << 6)
267#define	TX_10M_PS_EN			(1 << 7)
268	DBG_REG			= 0xd1,
269#define	FIX_NAK_1			(1 << 4)
270#define	FIX_NAK_2			(1 << 3)
271	TWSI			= 0xd2,
272	MCU			= 0xd3,
273#define	NOW_IS_OOB			(1 << 7)
274#define	TX_EMPTY			(1 << 5)
275#define	RX_EMPTY			(1 << 4)
276#define	RXTX_EMPTY			(TX_EMPTY | RX_EMPTY)
277#define	EN_NDP				(1 << 3)
278#define	EN_OOB_RESET			(1 << 2)
279#define	LINK_LIST_RDY			(1 << 1)
280	EFUSEAR			= 0xdc,
281#define	EFUSEAR_FLAG			0x80000000
282#define	EFUSEAR_WRITE_CMD		0x80000000
283#define	EFUSEAR_READ_CMD		0x00000000
284#define	EFUSEAR_REG_MASK		0x03ff
285#define	EFUSEAR_REG_SHIFT		8
286#define	EFUSEAR_DATA_MASK		0xff
287	MISC_1			= 0xf2,
288#define	PFM_D3COLD_EN			(1 << 6)
289};
290
291enum rtl8168_registers {
292	LED_FREQ		= 0x1a,
293	EEE_LED			= 0x1b,
294	ERIDR			= 0x70,
295	ERIAR			= 0x74,
296#define ERIAR_FLAG			0x80000000
297#define ERIAR_WRITE_CMD			0x80000000
298#define ERIAR_READ_CMD			0x00000000
299#define ERIAR_ADDR_BYTE_ALIGN		4
300#define ERIAR_TYPE_SHIFT		16
301#define ERIAR_EXGMAC			(0x00 << ERIAR_TYPE_SHIFT)
302#define ERIAR_MSIX			(0x01 << ERIAR_TYPE_SHIFT)
303#define ERIAR_ASF			(0x02 << ERIAR_TYPE_SHIFT)
304#define ERIAR_OOB			(0x02 << ERIAR_TYPE_SHIFT)
305#define ERIAR_MASK_SHIFT		12
306#define ERIAR_MASK_0001			(0x1 << ERIAR_MASK_SHIFT)
307#define ERIAR_MASK_0011			(0x3 << ERIAR_MASK_SHIFT)
308#define ERIAR_MASK_0100			(0x4 << ERIAR_MASK_SHIFT)
309#define ERIAR_MASK_0101			(0x5 << ERIAR_MASK_SHIFT)
310#define ERIAR_MASK_1111			(0xf << ERIAR_MASK_SHIFT)
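/* The ERIAR_MASK_* nibble above acts as a per-byte enable for the 32-bit
 * ERI data word; e.g. ERIAR_MASK_0011 is used when only the two low data
 * bytes are meaningful (see the rtl_eri_write() callers below).
 */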
311	EPHY_RXER_NUM		= 0x7c,
312	OCPDR			= 0xb0,	/* OCP GPHY access */
313#define OCPDR_WRITE_CMD			0x80000000
314#define OCPDR_READ_CMD			0x00000000
315#define OCPDR_REG_MASK			0x7f
316#define OCPDR_GPHY_REG_SHIFT		16
317#define OCPDR_DATA_MASK			0xffff
318	OCPAR			= 0xb4,
319#define OCPAR_FLAG			0x80000000
320#define OCPAR_GPHY_WRITE_CMD		0x8000f060
321#define OCPAR_GPHY_READ_CMD		0x0000f060
322	GPHY_OCP		= 0xb8,
323	RDSAR1			= 0xd0,	/* 8168c only. Undocumented on 8168dp */
324	MISC			= 0xf0,	/* 8168e only. */
325#define TXPLA_RST			(1 << 29)
326#define DISABLE_LAN_EN			(1 << 23) /* Enable GPIO pin */
327#define PWM_EN				(1 << 22)
328#define RXDV_GATED_EN			(1 << 19)
329#define EARLY_TALLY_EN			(1 << 16)
330};
331
332enum rtl8125_registers {
333	IntrMask_8125		= 0x38,
334	IntrStatus_8125		= 0x3c,
335	TxPoll_8125		= 0x90,
336	MAC0_BKP		= 0x19e0,
337	EEE_TXIDLE_TIMER_8125	= 0x6048,
338};
339
340#define RX_VLAN_INNER_8125	BIT(22)
341#define RX_VLAN_OUTER_8125	BIT(23)
342#define RX_VLAN_8125		(RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125)
343
344#define RX_FETCH_DFLT_8125	(8 << 27)
345
346enum rtl_register_content {
347	/* InterruptStatusBits */
348	SYSErr		= 0x8000,
349	PCSTimeout	= 0x4000,
350	SWInt		= 0x0100,
351	TxDescUnavail	= 0x0080,
352	RxFIFOOver	= 0x0040,
353	LinkChg		= 0x0020,
354	RxOverflow	= 0x0010,
355	TxErr		= 0x0008,
356	TxOK		= 0x0004,
357	RxErr		= 0x0002,
358	RxOK		= 0x0001,
359
360	/* RxStatusDesc */
361	RxRWT	= (1 << 22),
362	RxRES	= (1 << 21),
363	RxRUNT	= (1 << 20),
364	RxCRC	= (1 << 19),
365
366	/* ChipCmdBits */
367	StopReq		= 0x80,
368	CmdReset	= 0x10,
369	CmdRxEnb	= 0x08,
370	CmdTxEnb	= 0x04,
371	RxBufEmpty	= 0x01,
372
373	/* TXPoll register p.5 */
374	HPQ		= 0x80,		/* Poll cmd on the high prio queue */
375	NPQ		= 0x40,		/* Poll cmd on the low prio queue */
376	FSWInt		= 0x01,		/* Forced software interrupt */
377
378	/* Cfg9346Bits */
379	Cfg9346_Lock	= 0x00,
380	Cfg9346_Unlock	= 0xc0,
381
382	/* rx_mode_bits */
383	AcceptErr	= 0x20,
384	AcceptRunt	= 0x10,
385#define RX_CONFIG_ACCEPT_ERR_MASK	0x30
386	AcceptBroadcast	= 0x08,
387	AcceptMulticast	= 0x04,
388	AcceptMyPhys	= 0x02,
389	AcceptAllPhys	= 0x01,
390#define RX_CONFIG_ACCEPT_OK_MASK	0x0f
391#define RX_CONFIG_ACCEPT_MASK		0x3f
392
393	/* TxConfigBits */
394	TxInterFrameGapShift = 24,
	TxDMAShift = 8,	/* DMA burst value (0-7) is shifted by this many bits */
396
397	/* Config1 register p.24 */
398	LEDS1		= (1 << 7),
399	LEDS0		= (1 << 6),
400	Speed_down	= (1 << 4),
401	MEMMAP		= (1 << 3),
402	IOMAP		= (1 << 2),
403	VPD		= (1 << 1),
404	PMEnable	= (1 << 0),	/* Power Management Enable */
405
406	/* Config2 register p. 25 */
407	ClkReqEn	= (1 << 7),	/* Clock Request Enable */
408	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */
409	PCI_Clock_66MHz = 0x01,
410	PCI_Clock_33MHz = 0x00,
411
412	/* Config3 register p.25 */
	MagicPacket	= (1 << 5),	/* Wake up when a Magic Packet is received */
414	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
415	Jumbo_En0	= (1 << 2),	/* 8168 only. Reserved in the 8168b */
416	Rdy_to_L23	= (1 << 1),	/* L23 Enable */
417	Beacon_en	= (1 << 0),	/* 8168 only. Reserved in the 8168b */
418
419	/* Config4 register */
420	Jumbo_En1	= (1 << 1),	/* 8168 only. Reserved in the 8168b */
421
422	/* Config5 register p.27 */
423	BWF		= (1 << 6),	/* Accept Broadcast wakeup frame */
424	MWF		= (1 << 5),	/* Accept Multicast wakeup frame */
425	UWF		= (1 << 4),	/* Accept Unicast wakeup frame */
426	Spi_en		= (1 << 3),
427	LanWake		= (1 << 1),	/* LanWake enable/disable */
428	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */
429	ASPM_en		= (1 << 0),	/* ASPM enable */
430
431	/* CPlusCmd p.31 */
432	EnableBist	= (1 << 15),	// 8168 8101
433	Mac_dbgo_oe	= (1 << 14),	// 8168 8101
434	EnAnaPLL	= (1 << 14),	// 8169
435	Normal_mode	= (1 << 13),	// unused
436	Force_half_dup	= (1 << 12),	// 8168 8101
437	Force_rxflow_en	= (1 << 11),	// 8168 8101
438	Force_txflow_en	= (1 << 10),	// 8168 8101
439	Cxpl_dbg_sel	= (1 << 9),	// 8168 8101
440	ASF		= (1 << 8),	// 8168 8101
441	PktCntrDisable	= (1 << 7),	// 8168 8101
442	Mac_dbgo_sel	= 0x001c,	// 8168
443	RxVlan		= (1 << 6),
444	RxChkSum	= (1 << 5),
445	PCIDAC		= (1 << 4),
446	PCIMulRW	= (1 << 3),
447#define INTT_MASK	GENMASK(1, 0)
448#define CPCMD_MASK	(Normal_mode | RxVlan | RxChkSum | INTT_MASK)
449
450	/* rtl8169_PHYstatus */
451	TBI_Enable	= 0x80,
452	TxFlowCtrl	= 0x40,
453	RxFlowCtrl	= 0x20,
454	_1000bpsF	= 0x10,
455	_100bps		= 0x08,
456	_10bps		= 0x04,
457	LinkStatus	= 0x02,
458	FullDup		= 0x01,
459
460	/* ResetCounterCommand */
461	CounterReset	= 0x1,
462
463	/* DumpCounterCommand */
464	CounterDump	= 0x8,
465
466	/* magic enable v2 */
	MagicPacket_v2	= (1 << 16),	/* Wake up when a Magic Packet is received */
468};
469
470enum rtl_desc_bit {
471	/* First doubleword. */
472	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
473	RingEnd		= (1 << 30), /* End of descriptor ring */
474	FirstFrag	= (1 << 29), /* First segment of a packet */
475	LastFrag	= (1 << 28), /* Final segment of a packet */
476};
477
478/* Generic case. */
479enum rtl_tx_desc_bit {
480	/* First doubleword. */
481	TD_LSO		= (1 << 27),		/* Large Send Offload */
482#define TD_MSS_MAX			0x07ffu	/* MSS value */
483
484	/* Second doubleword. */
485	TxVlanTag	= (1 << 17),		/* Add VLAN tag */
486};
487
488/* 8169, 8168b and 810x except 8102e. */
489enum rtl_tx_desc_bit_0 {
490	/* First doubleword. */
491#define TD0_MSS_SHIFT			16	/* MSS position (11 bits) */
492	TD0_TCP_CS	= (1 << 16),		/* Calculate TCP/IP checksum */
493	TD0_UDP_CS	= (1 << 17),		/* Calculate UDP/IP checksum */
494	TD0_IP_CS	= (1 << 18),		/* Calculate IP checksum */
495};
496
497/* 8102e, 8168c and beyond. */
498enum rtl_tx_desc_bit_1 {
499	/* First doubleword. */
500	TD1_GTSENV4	= (1 << 26),		/* Giant Send for IPv4 */
501	TD1_GTSENV6	= (1 << 25),		/* Giant Send for IPv6 */
502#define GTTCPHO_SHIFT			18
503#define GTTCPHO_MAX			0x7f
504
505	/* Second doubleword. */
506#define TCPHO_SHIFT			18
507#define TCPHO_MAX			0x3ff
508#define TD1_MSS_SHIFT			18	/* MSS position (11 bits) */
509	TD1_IPv6_CS	= (1 << 28),		/* Calculate IPv6 checksum */
510	TD1_IPv4_CS	= (1 << 29),		/* Calculate IPv4 checksum */
511	TD1_TCP_CS	= (1 << 30),		/* Calculate TCP/IP checksum */
512	TD1_UDP_CS	= (1 << 31),		/* Calculate UDP/IP checksum */
513};
514
515enum rtl_rx_desc_bit {
516	/* Rx private */
517	PID1		= (1 << 18), /* Protocol ID bit 1/2 */
518	PID0		= (1 << 17), /* Protocol ID bit 0/2 */
519
520#define RxProtoUDP	(PID1)
521#define RxProtoTCP	(PID0)
522#define RxProtoIP	(PID1 | PID0)
523#define RxProtoMask	RxProtoIP
524
525	IPFail		= (1 << 16), /* IP checksum failed */
526	UDPFail		= (1 << 15), /* UDP/IP checksum failed */
527	TCPFail		= (1 << 14), /* TCP/IP checksum failed */
528
529#define RxCSFailMask	(IPFail | UDPFail | TCPFail)
530
531	RxVlanTag	= (1 << 16), /* VLAN tag available */
532};
533
534#define RTL_GSO_MAX_SIZE_V1	32000
535#define RTL_GSO_MAX_SEGS_V1	24
536#define RTL_GSO_MAX_SIZE_V2	64000
537#define RTL_GSO_MAX_SEGS_V2	64
538
539struct TxDesc {
540	__le32 opts1;
541	__le32 opts2;
542	__le64 addr;
543};
544
545struct RxDesc {
546	__le32 opts1;
547	__le32 opts2;
548	__le64 addr;
549};
550
551struct ring_info {
552	struct sk_buff	*skb;
553	u32		len;
554};
555
556struct rtl8169_counters {
557	__le64	tx_packets;
558	__le64	rx_packets;
559	__le64	tx_errors;
560	__le32	rx_errors;
561	__le16	rx_missed;
562	__le16	align_errors;
563	__le32	tx_one_collision;
564	__le32	tx_multi_collision;
565	__le64	rx_unicast;
566	__le64	rx_broadcast;
567	__le32	rx_multicast;
568	__le16	tx_aborted;
569	__le16	tx_underun;
570};
571
572struct rtl8169_tc_offsets {
573	bool	inited;
574	__le64	tx_errors;
575	__le32	tx_multi_collision;
576	__le16	tx_aborted;
577	__le16	rx_missed;
578};
579
580enum rtl_flag {
581	RTL_FLAG_TASK_ENABLED = 0,
582	RTL_FLAG_TASK_RESET_PENDING,
583	RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
584	RTL_FLAG_TASK_TX_TIMEOUT,
585	RTL_FLAG_MAX
586};
587
588enum rtl_dash_type {
589	RTL_DASH_NONE,
590	RTL_DASH_DP,
591	RTL_DASH_EP,
592};
593
594struct rtl8169_private {
	void __iomem *mmio_addr;	/* memory-mapped I/O base address */
596	struct pci_dev *pci_dev;
597	struct net_device *dev;
598	struct phy_device *phydev;
599	struct napi_struct napi;
600	enum mac_version mac_version;
601	enum rtl_dash_type dash_type;
602	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
	u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
604	u32 dirty_tx;
605	struct TxDesc *TxDescArray;	/* 256-aligned Tx descriptor ring */
606	struct RxDesc *RxDescArray;	/* 256-aligned Rx descriptor ring */
607	dma_addr_t TxPhyAddr;
608	dma_addr_t RxPhyAddr;
609	struct page *Rx_databuff[NUM_RX_DESC];	/* Rx data buffers */
610	struct ring_info tx_skb[NUM_TX_DESC];	/* Tx data buffers */
611	u16 cp_cmd;
612	u32 irq_mask;
613	int irq;
614	struct clk *clk;
615
616	struct {
617		DECLARE_BITMAP(flags, RTL_FLAG_MAX);
618		struct work_struct work;
619	} wk;
620
621	raw_spinlock_t config25_lock;
622	raw_spinlock_t mac_ocp_lock;
623
624	raw_spinlock_t cfg9346_usage_lock;
625	int cfg9346_usage_count;
626
627	unsigned supports_gmii:1;
628	unsigned aspm_manageable:1;
629	unsigned dash_enabled:1;
630	dma_addr_t counters_phys_addr;
631	struct rtl8169_counters *counters;
632	struct rtl8169_tc_offsets tc_offset;
633	u32 saved_wolopts;
634	int eee_adv;
635
636	const char *fw_name;
637	struct rtl_fw *rtl_fw;
638
639	u32 ocp_base;
640};
641
642typedef void (*rtl_generic_fct)(struct rtl8169_private *tp);
643
644MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
645MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
646MODULE_SOFTDEP("pre: realtek");
647MODULE_LICENSE("GPL");
648MODULE_FIRMWARE(FIRMWARE_8168D_1);
649MODULE_FIRMWARE(FIRMWARE_8168D_2);
650MODULE_FIRMWARE(FIRMWARE_8168E_1);
651MODULE_FIRMWARE(FIRMWARE_8168E_2);
652MODULE_FIRMWARE(FIRMWARE_8168E_3);
653MODULE_FIRMWARE(FIRMWARE_8105E_1);
654MODULE_FIRMWARE(FIRMWARE_8168F_1);
655MODULE_FIRMWARE(FIRMWARE_8168F_2);
656MODULE_FIRMWARE(FIRMWARE_8402_1);
657MODULE_FIRMWARE(FIRMWARE_8411_1);
658MODULE_FIRMWARE(FIRMWARE_8411_2);
659MODULE_FIRMWARE(FIRMWARE_8106E_1);
660MODULE_FIRMWARE(FIRMWARE_8106E_2);
661MODULE_FIRMWARE(FIRMWARE_8168G_2);
662MODULE_FIRMWARE(FIRMWARE_8168G_3);
663MODULE_FIRMWARE(FIRMWARE_8168H_2);
664MODULE_FIRMWARE(FIRMWARE_8168FP_3);
665MODULE_FIRMWARE(FIRMWARE_8107E_2);
666MODULE_FIRMWARE(FIRMWARE_8125A_3);
667MODULE_FIRMWARE(FIRMWARE_8125B_2);
668
669static inline struct device *tp_to_dev(struct rtl8169_private *tp)
670{
671	return &tp->pci_dev->dev;
672}
673
674static void rtl_lock_config_regs(struct rtl8169_private *tp)
675{
676	unsigned long flags;
677
678	raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
679	if (!--tp->cfg9346_usage_count)
680		RTL_W8(tp, Cfg9346, Cfg9346_Lock);
681	raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
682}
683
684static void rtl_unlock_config_regs(struct rtl8169_private *tp)
685{
686	unsigned long flags;
687
688	raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
689	if (!tp->cfg9346_usage_count++)
690		RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
691	raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
692}
693
694static void rtl_pci_commit(struct rtl8169_private *tp)
695{
696	/* Read an arbitrary register to commit a preceding PCI write */
697	RTL_R8(tp, ChipCmd);
698}
699
700static void rtl_mod_config2(struct rtl8169_private *tp, u8 clear, u8 set)
701{
702	unsigned long flags;
703	u8 val;
704
705	raw_spin_lock_irqsave(&tp->config25_lock, flags);
706	val = RTL_R8(tp, Config2);
707	RTL_W8(tp, Config2, (val & ~clear) | set);
708	raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
709}
710
711static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set)
712{
713	unsigned long flags;
714	u8 val;
715
716	raw_spin_lock_irqsave(&tp->config25_lock, flags);
717	val = RTL_R8(tp, Config5);
718	RTL_W8(tp, Config5, (val & ~clear) | set);
719	raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
720}
721
722static bool rtl_is_8125(struct rtl8169_private *tp)
723{
724	return tp->mac_version >= RTL_GIGA_MAC_VER_61;
725}
726
727static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
728{
729	return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
730	       tp->mac_version != RTL_GIGA_MAC_VER_39 &&
731	       tp->mac_version <= RTL_GIGA_MAC_VER_53;
732}
733
734static bool rtl_supports_eee(struct rtl8169_private *tp)
735{
736	return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
737	       tp->mac_version != RTL_GIGA_MAC_VER_37 &&
738	       tp->mac_version != RTL_GIGA_MAC_VER_39;
739}
740
741static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg)
742{
743	int i;
744
745	for (i = 0; i < ETH_ALEN; i++)
746		mac[i] = RTL_R8(tp, reg + i);
747}
748
749struct rtl_cond {
750	bool (*check)(struct rtl8169_private *);
751	const char *msg;
752};
753
754static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
755			  unsigned long usecs, int n, bool high)
756{
757	int i;
758
759	for (i = 0; i < n; i++) {
760		if (c->check(tp) == high)
761			return true;
762		fsleep(usecs);
763	}
764
765	if (net_ratelimit())
766		netdev_err(tp->dev, "%s == %d (loop: %d, delay: %lu).\n",
767			   c->msg, !high, n, usecs);
768	return false;
769}
770
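/*
 * rtl_loop_wait() above polls a condition with a fixed sleep between
 * attempts; the two wrappers below only select which polarity is waited
 * for. Worked example (illustrative): a (delay, loop count) pair of
 * (100, 100) sleeps 100 us per attempt for at most 100 attempts, i.e.
 * roughly 10 ms worst case.
 */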
771static bool rtl_loop_wait_high(struct rtl8169_private *tp,
772			       const struct rtl_cond *c,
773			       unsigned long d, int n)
774{
775	return rtl_loop_wait(tp, c, d, n, true);
776}
777
778static bool rtl_loop_wait_low(struct rtl8169_private *tp,
779			      const struct rtl_cond *c,
780			      unsigned long d, int n)
781{
782	return rtl_loop_wait(tp, c, d, n, false);
783}
784
785#define DECLARE_RTL_COND(name)				\
786static bool name ## _check(struct rtl8169_private *);	\
787							\
788static const struct rtl_cond name = {			\
789	.check	= name ## _check,			\
790	.msg	= #name					\
791};							\
792							\
793static bool name ## _check(struct rtl8169_private *tp)
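
/*
 * Usage sketch for the macro above (illustrative; rtl_foo_cond, SOME_REG and
 * SOME_FLAG are placeholders):
 *
 *	DECLARE_RTL_COND(rtl_foo_cond)
 *	{
 *		return RTL_R32(tp, SOME_REG) & SOME_FLAG;
 *	}
 *
 * This forward-declares rtl_foo_cond_check(), defines a struct rtl_cond named
 * rtl_foo_cond pointing to it, and makes the brace-enclosed body the check
 * function. The struct is then passed to rtl_loop_wait_high()/_low().
 */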
794
795static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
796{
797	/* based on RTL8168FP_OOBMAC_BASE in vendor driver */
798	if (type == ERIAR_OOB &&
799	    (tp->mac_version == RTL_GIGA_MAC_VER_52 ||
800	     tp->mac_version == RTL_GIGA_MAC_VER_53))
801		*cmd |= 0xf70 << 18;
802}
803
804DECLARE_RTL_COND(rtl_eriar_cond)
805{
806	return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
807}
808
809static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
810			   u32 val, int type)
811{
812	u32 cmd = ERIAR_WRITE_CMD | type | mask | addr;
813
814	if (WARN(addr & 3 || !mask, "addr: 0x%x, mask: 0x%08x\n", addr, mask))
815		return;
816
817	RTL_W32(tp, ERIDR, val);
818	r8168fp_adjust_ocp_cmd(tp, &cmd, type);
819	RTL_W32(tp, ERIAR, cmd);
820
821	rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
822}
823
824static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
825			  u32 val)
826{
827	_rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC);
828}
829
830static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
831{
832	u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr;
833
834	r8168fp_adjust_ocp_cmd(tp, &cmd, type);
835	RTL_W32(tp, ERIAR, cmd);
836
837	return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
838		RTL_R32(tp, ERIDR) : ~0;
839}
840
841static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
842{
843	return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
844}
845
846static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m)
847{
848	u32 val = rtl_eri_read(tp, addr);
849
850	rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p);
851}
852
853static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p)
854{
855	rtl_w0w1_eri(tp, addr, p, 0);
856}
857
858static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m)
859{
860	rtl_w0w1_eri(tp, addr, 0, m);
861}
862
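/* OCP register offsets are 16-bit, even (word-aligned) values; anything else
 * is rejected with a warning.
 */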
863static bool rtl_ocp_reg_failure(u32 reg)
864{
865	return WARN_ONCE(reg & 0xffff0001, "Invalid ocp reg %x!\n", reg);
866}
867
868DECLARE_RTL_COND(rtl_ocp_gphy_cond)
869{
870	return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG;
871}
872
873static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
874{
875	if (rtl_ocp_reg_failure(reg))
876		return;
877
878	RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
879
880	rtl_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
881}
882
883static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
884{
885	if (rtl_ocp_reg_failure(reg))
886		return 0;
887
888	RTL_W32(tp, GPHY_OCP, reg << 15);
889
890	return rtl_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
891		(RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT;
892}
893
894static void __r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
895{
896	if (rtl_ocp_reg_failure(reg))
897		return;
898
899	RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data);
900}
901
902static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
903{
904	unsigned long flags;
905
906	raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
907	__r8168_mac_ocp_write(tp, reg, data);
908	raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
909}
910
911static u16 __r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
912{
913	if (rtl_ocp_reg_failure(reg))
914		return 0;
915
916	RTL_W32(tp, OCPDR, reg << 15);
917
918	return RTL_R32(tp, OCPDR);
919}
920
921static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
922{
923	unsigned long flags;
924	u16 val;
925
926	raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
927	val = __r8168_mac_ocp_read(tp, reg);
928	raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
929
930	return val;
931}
932
933static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
934				 u16 set)
935{
936	unsigned long flags;
937	u16 data;
938
939	raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
940	data = __r8168_mac_ocp_read(tp, reg);
941	__r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
942	raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
943}
944
/* Work around a hw issue with the RTL8168g PHY: the quirk disables
 * PHY MCU interrupts before PHY power-down.
 */
948static void rtl8168g_phy_suspend_quirk(struct rtl8169_private *tp, int value)
949{
950	switch (tp->mac_version) {
951	case RTL_GIGA_MAC_VER_40:
952		if (value & BMCR_RESET || !(value & BMCR_PDOWN))
953			rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
954		else
955			rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000);
956		break;
957	default:
958		break;
959	}
}
961
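/*
 * RTL8168g and later PHYs are reached through OCP: a write to register 0x1f
 * selects a page whose value shifted left by four becomes the OCP base.
 * Worked example (illustrative): after writing 0x0a43 to register 0x1f,
 * ocp_base is 0xa430, and a following access to register 0x10 targets OCP
 * address 0xa430 + (0x10 - 0x10) * 2 = 0xa430.
 */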
962static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
963{
964	if (reg == 0x1f) {
965		tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
966		return;
967	}
968
969	if (tp->ocp_base != OCP_STD_PHY_BASE)
970		reg -= 0x10;
971
972	if (tp->ocp_base == OCP_STD_PHY_BASE && reg == MII_BMCR)
973		rtl8168g_phy_suspend_quirk(tp, value);
974
975	r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
976}
977
978static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
979{
980	if (reg == 0x1f)
981		return tp->ocp_base == OCP_STD_PHY_BASE ? 0 : tp->ocp_base >> 4;
982
983	if (tp->ocp_base != OCP_STD_PHY_BASE)
984		reg -= 0x10;
985
986	return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
987}
988
989static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value)
990{
991	if (reg == 0x1f) {
992		tp->ocp_base = value << 4;
993		return;
994	}
995
996	r8168_mac_ocp_write(tp, tp->ocp_base + reg, value);
997}
998
999static int mac_mcu_read(struct rtl8169_private *tp, int reg)
1000{
1001	return r8168_mac_ocp_read(tp, tp->ocp_base + reg);
1002}
1003
1004DECLARE_RTL_COND(rtl_phyar_cond)
1005{
1006	return RTL_R32(tp, PHYAR) & 0x80000000;
1007}
1008
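/*
 * PHYAR layout as used below: bit 31 is the command/busy flag, bits 20:16
 * hold the 5-bit PHY register number and bits 15:0 the data. Example
 * (illustrative): writing 0x1200 to register 0 issues the value 0x80001200.
 */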
1009static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
1010{
1011	RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
1012
1013	rtl_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
	/*
	 * According to the hardware specs, a 20us delay is required after the
	 * write completion indication and before sending the next command.
	 */
1018	udelay(20);
1019}
1020
1021static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
1022{
1023	int value;
1024
1025	RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16);
1026
1027	value = rtl_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
1028		RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT;
1029
	/*
	 * According to the hardware specs, a 20us delay is required after the
	 * read completion indication and before sending the next command.
	 */
1034	udelay(20);
1035
1036	return value;
1037}
1038
1039DECLARE_RTL_COND(rtl_ocpar_cond)
1040{
1041	return RTL_R32(tp, OCPAR) & OCPAR_FLAG;
1042}
1043
1044#define R8168DP_1_MDIO_ACCESS_BIT	0x00020000
1045
1046static void r8168dp_2_mdio_start(struct rtl8169_private *tp)
1047{
1048	RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
1049}
1050
1051static void r8168dp_2_mdio_stop(struct rtl8169_private *tp)
1052{
1053	RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
1054}
1055
1056static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
1057{
1058	r8168dp_2_mdio_start(tp);
1059
1060	r8169_mdio_write(tp, reg, value);
1061
1062	r8168dp_2_mdio_stop(tp);
1063}
1064
1065static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1066{
1067	int value;
1068
1069	/* Work around issue with chip reporting wrong PHY ID */
1070	if (reg == MII_PHYSID2)
1071		return 0xc912;
1072
1073	r8168dp_2_mdio_start(tp);
1074
1075	value = r8169_mdio_read(tp, reg);
1076
1077	r8168dp_2_mdio_stop(tp);
1078
1079	return value;
1080}
1081
1082static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
1083{
1084	switch (tp->mac_version) {
1085	case RTL_GIGA_MAC_VER_28:
1086	case RTL_GIGA_MAC_VER_31:
1087		r8168dp_2_mdio_write(tp, location, val);
1088		break;
1089	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
1090		r8168g_mdio_write(tp, location, val);
1091		break;
1092	default:
1093		r8169_mdio_write(tp, location, val);
1094		break;
1095	}
1096}
1097
1098static int rtl_readphy(struct rtl8169_private *tp, int location)
1099{
1100	switch (tp->mac_version) {
1101	case RTL_GIGA_MAC_VER_28:
1102	case RTL_GIGA_MAC_VER_31:
1103		return r8168dp_2_mdio_read(tp, location);
1104	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
1105		return r8168g_mdio_read(tp, location);
1106	default:
1107		return r8169_mdio_read(tp, location);
1108	}
1109}
1110
1111DECLARE_RTL_COND(rtl_ephyar_cond)
1112{
1113	return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
1114}
1115
1116static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1117{
1118	RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1119		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1120
1121	rtl_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
1122
1123	udelay(10);
1124}
1125
1126static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1127{
1128	RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1129
1130	return rtl_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
1131		RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
1132}
1133
1134static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg)
1135{
1136	RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff));
1137	return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
1138		RTL_R32(tp, OCPDR) : ~0;
1139}
1140
1141static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u16 reg)
1142{
1143	return _rtl_eri_read(tp, reg, ERIAR_OOB);
1144}
1145
1146static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
1147			      u32 data)
1148{
1149	RTL_W32(tp, OCPDR, data);
1150	RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
1151	rtl_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
1152}
1153
1154static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
1155			      u32 data)
1156{
1157	_rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT,
1158		       data, ERIAR_OOB);
1159}
1160
1161static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd)
1162{
1163	rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd);
1164
1165	r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001);
1166}
1167
1168#define OOB_CMD_RESET		0x00
1169#define OOB_CMD_DRIVER_START	0x05
1170#define OOB_CMD_DRIVER_STOP	0x06
1171
1172static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
1173{
1174	return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
1175}
1176
1177DECLARE_RTL_COND(rtl_dp_ocp_read_cond)
1178{
1179	u16 reg;
1180
1181	reg = rtl8168_get_ocp_reg(tp);
1182
1183	return r8168dp_ocp_read(tp, reg) & 0x00000800;
1184}
1185
1186DECLARE_RTL_COND(rtl_ep_ocp_read_cond)
1187{
1188	return r8168ep_ocp_read(tp, 0x124) & 0x00000001;
1189}
1190
1191DECLARE_RTL_COND(rtl_ocp_tx_cond)
1192{
1193	return RTL_R8(tp, IBISR0) & 0x20;
1194}
1195
1196static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
1197{
1198	RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01);
1199	rtl_loop_wait_high(tp, &rtl_ocp_tx_cond, 50000, 2000);
1200	RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20);
1201	RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
1202}
1203
1204static void rtl8168dp_driver_start(struct rtl8169_private *tp)
1205{
1206	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
1207	rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
1208}
1209
1210static void rtl8168ep_driver_start(struct rtl8169_private *tp)
1211{
1212	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
1213	r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
1214	rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
1215}
1216
1217static void rtl8168_driver_start(struct rtl8169_private *tp)
1218{
1219	if (tp->dash_type == RTL_DASH_DP)
1220		rtl8168dp_driver_start(tp);
1221	else
1222		rtl8168ep_driver_start(tp);
1223}
1224
1225static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
1226{
1227	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
1228	rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
1229}
1230
1231static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
1232{
1233	rtl8168ep_stop_cmac(tp);
1234	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
1235	r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
1236	rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
1237}
1238
1239static void rtl8168_driver_stop(struct rtl8169_private *tp)
1240{
1241	if (tp->dash_type == RTL_DASH_DP)
1242		rtl8168dp_driver_stop(tp);
1243	else
1244		rtl8168ep_driver_stop(tp);
1245}
1246
1247static bool r8168dp_check_dash(struct rtl8169_private *tp)
1248{
1249	u16 reg = rtl8168_get_ocp_reg(tp);
1250
1251	return r8168dp_ocp_read(tp, reg) & BIT(15);
1252}
1253
1254static bool r8168ep_check_dash(struct rtl8169_private *tp)
1255{
1256	return r8168ep_ocp_read(tp, 0x128) & BIT(0);
1257}
1258
1259static bool rtl_dash_is_enabled(struct rtl8169_private *tp)
1260{
1261	switch (tp->dash_type) {
1262	case RTL_DASH_DP:
1263		return r8168dp_check_dash(tp);
1264	case RTL_DASH_EP:
1265		return r8168ep_check_dash(tp);
1266	default:
1267		return false;
1268	}
1269}
1270
1271static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
1272{
1273	switch (tp->mac_version) {
1274	case RTL_GIGA_MAC_VER_28:
1275	case RTL_GIGA_MAC_VER_31:
1276		return RTL_DASH_DP;
1277	case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
1278		return RTL_DASH_EP;
1279	default:
1280		return RTL_DASH_NONE;
1281	}
1282}
1283
1284static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
1285{
1286	switch (tp->mac_version) {
1287	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
1288	case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
1289	case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37:
1290	case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
1291		if (enable)
1292			RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN);
1293		else
1294			RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | D3_NO_PLL_DOWN);
1295		break;
1296	default:
1297		break;
1298	}
1299}
1300
1301static void rtl_reset_packet_filter(struct rtl8169_private *tp)
1302{
1303	rtl_eri_clear_bits(tp, 0xdc, BIT(0));
1304	rtl_eri_set_bits(tp, 0xdc, BIT(0));
1305}
1306
1307DECLARE_RTL_COND(rtl_efusear_cond)
1308{
1309	return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG;
1310}
1311
1312u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
1313{
1314	RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1315
1316	return rtl_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
1317		RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
1318}
1319
1320static u32 rtl_get_events(struct rtl8169_private *tp)
1321{
1322	if (rtl_is_8125(tp))
1323		return RTL_R32(tp, IntrStatus_8125);
1324	else
1325		return RTL_R16(tp, IntrStatus);
1326}
1327
1328static void rtl_ack_events(struct rtl8169_private *tp, u32 bits)
1329{
1330	if (rtl_is_8125(tp))
1331		RTL_W32(tp, IntrStatus_8125, bits);
1332	else
1333		RTL_W16(tp, IntrStatus, bits);
1334}
1335
1336static void rtl_irq_disable(struct rtl8169_private *tp)
1337{
1338	if (rtl_is_8125(tp))
1339		RTL_W32(tp, IntrMask_8125, 0);
1340	else
1341		RTL_W16(tp, IntrMask, 0);
1342}
1343
1344static void rtl_irq_enable(struct rtl8169_private *tp)
1345{
1346	if (rtl_is_8125(tp))
1347		RTL_W32(tp, IntrMask_8125, tp->irq_mask);
1348	else
1349		RTL_W16(tp, IntrMask, tp->irq_mask);
1350}
1351
1352static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1353{
1354	rtl_irq_disable(tp);
1355	rtl_ack_events(tp, 0xffffffff);
1356	rtl_pci_commit(tp);
1357}
1358
1359static void rtl_link_chg_patch(struct rtl8169_private *tp)
1360{
1361	struct phy_device *phydev = tp->phydev;
1362
1363	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1364	    tp->mac_version == RTL_GIGA_MAC_VER_38) {
1365		if (phydev->speed == SPEED_1000) {
1366			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011);
1367			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
1368		} else if (phydev->speed == SPEED_100) {
1369			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
1370			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
1371		} else {
1372			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
1373			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f);
1374		}
1375		rtl_reset_packet_filter(tp);
1376	} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1377		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
1378		if (phydev->speed == SPEED_1000) {
1379			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011);
1380			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
1381		} else {
1382			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
1383			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f);
1384		}
1385	} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1386		if (phydev->speed == SPEED_10) {
1387			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02);
1388			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060a);
1389		} else {
1390			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
1391		}
1392	}
1393}
1394
1395#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1396
1397static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1398{
1399	struct rtl8169_private *tp = netdev_priv(dev);
1400
1401	wol->supported = WAKE_ANY;
1402	wol->wolopts = tp->saved_wolopts;
1403}
1404
1405static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1406{
1407	static const struct {
1408		u32 opt;
1409		u16 reg;
1410		u8  mask;
1411	} cfg[] = {
1412		{ WAKE_PHY,   Config3, LinkUp },
1413		{ WAKE_UCAST, Config5, UWF },
1414		{ WAKE_BCAST, Config5, BWF },
1415		{ WAKE_MCAST, Config5, MWF },
1416		{ WAKE_ANY,   Config5, LanWake },
1417		{ WAKE_MAGIC, Config3, MagicPacket }
1418	};
1419	unsigned int i, tmp = ARRAY_SIZE(cfg);
1420	unsigned long flags;
1421	u8 options;
1422
1423	rtl_unlock_config_regs(tp);
1424
1425	if (rtl_is_8168evl_up(tp)) {
1426		tmp--;
1427		if (wolopts & WAKE_MAGIC)
1428			rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2);
1429		else
1430			rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2);
1431	} else if (rtl_is_8125(tp)) {
1432		tmp--;
1433		if (wolopts & WAKE_MAGIC)
1434			r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0));
1435		else
1436			r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0);
1437	}
1438
1439	raw_spin_lock_irqsave(&tp->config25_lock, flags);
1440	for (i = 0; i < tmp; i++) {
1441		options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask;
1442		if (wolopts & cfg[i].opt)
1443			options |= cfg[i].mask;
1444		RTL_W8(tp, cfg[i].reg, options);
1445	}
1446	raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
1447
1448	switch (tp->mac_version) {
1449	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
1450		options = RTL_R8(tp, Config1) & ~PMEnable;
1451		if (wolopts)
1452			options |= PMEnable;
1453		RTL_W8(tp, Config1, options);
1454		break;
1455	case RTL_GIGA_MAC_VER_34:
1456	case RTL_GIGA_MAC_VER_37:
1457	case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
1458		if (wolopts)
1459			rtl_mod_config2(tp, 0, PME_SIGNAL);
1460		else
1461			rtl_mod_config2(tp, PME_SIGNAL, 0);
1462		break;
1463	default:
1464		break;
1465	}
1466
1467	rtl_lock_config_regs(tp);
1468
1469	device_set_wakeup_enable(tp_to_dev(tp), wolopts);
1470
1471	if (!tp->dash_enabled) {
1472		rtl_set_d3_pll_down(tp, !wolopts);
1473		tp->dev->wol_enabled = wolopts ? 1 : 0;
1474	}
1475}
1476
1477static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1478{
1479	struct rtl8169_private *tp = netdev_priv(dev);
1480
1481	if (wol->wolopts & ~WAKE_ANY)
1482		return -EINVAL;
1483
1484	tp->saved_wolopts = wol->wolopts;
1485	__rtl8169_set_wol(tp, tp->saved_wolopts);
1486
1487	return 0;
1488}
1489
1490static void rtl8169_get_drvinfo(struct net_device *dev,
1491				struct ethtool_drvinfo *info)
1492{
1493	struct rtl8169_private *tp = netdev_priv(dev);
1494	struct rtl_fw *rtl_fw = tp->rtl_fw;
1495
1496	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1497	strscpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
1498	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
1499	if (rtl_fw)
1500		strscpy(info->fw_version, rtl_fw->version,
1501			sizeof(info->fw_version));
1502}
1503
1504static int rtl8169_get_regs_len(struct net_device *dev)
1505{
1506	return R8169_REGS_SIZE;
1507}
1508
1509static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1510	netdev_features_t features)
1511{
1512	struct rtl8169_private *tp = netdev_priv(dev);
1513
1514	if (dev->mtu > TD_MSS_MAX)
1515		features &= ~NETIF_F_ALL_TSO;
1516
1517	if (dev->mtu > ETH_DATA_LEN &&
1518	    tp->mac_version > RTL_GIGA_MAC_VER_06)
1519		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO);
1520
1521	return features;
1522}
1523
1524static void rtl_set_rx_config_features(struct rtl8169_private *tp,
1525				       netdev_features_t features)
1526{
1527	u32 rx_config = RTL_R32(tp, RxConfig);
1528
1529	if (features & NETIF_F_RXALL)
1530		rx_config |= RX_CONFIG_ACCEPT_ERR_MASK;
1531	else
1532		rx_config &= ~RX_CONFIG_ACCEPT_ERR_MASK;
1533
1534	if (rtl_is_8125(tp)) {
1535		if (features & NETIF_F_HW_VLAN_CTAG_RX)
1536			rx_config |= RX_VLAN_8125;
1537		else
1538			rx_config &= ~RX_VLAN_8125;
1539	}
1540
1541	RTL_W32(tp, RxConfig, rx_config);
1542}
1543
1544static int rtl8169_set_features(struct net_device *dev,
1545				netdev_features_t features)
1546{
1547	struct rtl8169_private *tp = netdev_priv(dev);
1548
1549	rtl_set_rx_config_features(tp, features);
1550
1551	if (features & NETIF_F_RXCSUM)
1552		tp->cp_cmd |= RxChkSum;
1553	else
1554		tp->cp_cmd &= ~RxChkSum;
1555
1556	if (!rtl_is_8125(tp)) {
1557		if (features & NETIF_F_HW_VLAN_CTAG_RX)
1558			tp->cp_cmd |= RxVlan;
1559		else
1560			tp->cp_cmd &= ~RxVlan;
1561	}
1562
1563	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
1564	rtl_pci_commit(tp);
1565
1566	return 0;
1567}
1568
1569static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1570{
1571	return (skb_vlan_tag_present(skb)) ?
1572		TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
1573}
1574
1575static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1576{
1577	u32 opts2 = le32_to_cpu(desc->opts2);
1578
1579	if (opts2 & RxVlanTag)
1580		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
1581}
1582
1583static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1584			     void *p)
1585{
1586	struct rtl8169_private *tp = netdev_priv(dev);
1587	u32 __iomem *data = tp->mmio_addr;
1588	u32 *dw = p;
1589	int i;
1590
1591	for (i = 0; i < R8169_REGS_SIZE; i += 4)
1592		memcpy_fromio(dw++, data++, 4);
1593}
1594
1595static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1596	"tx_packets",
1597	"rx_packets",
1598	"tx_errors",
1599	"rx_errors",
1600	"rx_missed",
1601	"align_errors",
1602	"tx_single_collisions",
1603	"tx_multi_collisions",
1604	"unicast",
1605	"broadcast",
1606	"multicast",
1607	"tx_aborted",
1608	"tx_underrun",
1609};
1610
1611static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1612{
1613	switch (sset) {
1614	case ETH_SS_STATS:
1615		return ARRAY_SIZE(rtl8169_gstrings);
1616	default:
1617		return -EOPNOTSUPP;
1618	}
1619}
1620
1621DECLARE_RTL_COND(rtl_counters_cond)
1622{
1623	return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump);
1624}
1625
1626static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
1627{
1628	u32 cmd = lower_32_bits(tp->counters_phys_addr);
1629
1630	RTL_W32(tp, CounterAddrHigh, upper_32_bits(tp->counters_phys_addr));
1631	rtl_pci_commit(tp);
1632	RTL_W32(tp, CounterAddrLow, cmd);
1633	RTL_W32(tp, CounterAddrLow, cmd | counter_cmd);
1634
1635	rtl_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
1636}
1637
1638static void rtl8169_update_counters(struct rtl8169_private *tp)
1639{
1640	u8 val = RTL_R8(tp, ChipCmd);
1641
	/*
	 * Some chips are unable to dump tally counters when the receiver
	 * is disabled. If the register reads 0xff, the chip may be in a
	 * PCI power-save state.
	 */
1646	if (val & CmdRxEnb && val != 0xff)
1647		rtl8169_do_counters(tp, CounterDump);
1648}
1649
1650static void rtl8169_init_counter_offsets(struct rtl8169_private *tp)
1651{
1652	struct rtl8169_counters *counters = tp->counters;
1653
1654	/*
1655	 * rtl8169_init_counter_offsets is called from rtl_open.  On chip
1656	 * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only
1657	 * reset by a power cycle, while the counter values collected by the
1658	 * driver are reset at every driver unload/load cycle.
1659	 *
1660	 * To make sure the HW values returned by @get_stats64 match the SW
1661	 * values, we collect the initial values at first open(*) and use them
1662	 * as offsets to normalize the values returned by @get_stats64.
1663	 *
1664	 * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one
1665	 * for the reason stated in rtl8169_update_counters; CmdRxEnb is only
1666	 * set at open time by rtl_hw_start.
1667	 */
1668
1669	if (tp->tc_offset.inited)
1670		return;
1671
1672	if (tp->mac_version >= RTL_GIGA_MAC_VER_19) {
1673		rtl8169_do_counters(tp, CounterReset);
1674	} else {
1675		rtl8169_update_counters(tp);
1676		tp->tc_offset.tx_errors = counters->tx_errors;
1677		tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
1678		tp->tc_offset.tx_aborted = counters->tx_aborted;
1679		tp->tc_offset.rx_missed = counters->rx_missed;
1680	}
1681
1682	tp->tc_offset.inited = true;
1683}
1684
1685static void rtl8169_get_ethtool_stats(struct net_device *dev,
1686				      struct ethtool_stats *stats, u64 *data)
1687{
1688	struct rtl8169_private *tp = netdev_priv(dev);
1689	struct rtl8169_counters *counters;
1690
1691	counters = tp->counters;
1692	rtl8169_update_counters(tp);
1693
1694	data[0] = le64_to_cpu(counters->tx_packets);
1695	data[1] = le64_to_cpu(counters->rx_packets);
1696	data[2] = le64_to_cpu(counters->tx_errors);
1697	data[3] = le32_to_cpu(counters->rx_errors);
1698	data[4] = le16_to_cpu(counters->rx_missed);
1699	data[5] = le16_to_cpu(counters->align_errors);
1700	data[6] = le32_to_cpu(counters->tx_one_collision);
1701	data[7] = le32_to_cpu(counters->tx_multi_collision);
1702	data[8] = le64_to_cpu(counters->rx_unicast);
1703	data[9] = le64_to_cpu(counters->rx_broadcast);
1704	data[10] = le32_to_cpu(counters->rx_multicast);
1705	data[11] = le16_to_cpu(counters->tx_aborted);
1706	data[12] = le16_to_cpu(counters->tx_underun);
1707}
1708
1709static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1710{
	switch (stringset) {
1712	case ETH_SS_STATS:
1713		memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings));
1714		break;
1715	}
1716}
1717
1718/*
1719 * Interrupt coalescing
1720 *
1721 * > 1 - the availability of the IntrMitigate (0xe2) register through the
1722 * >     8169, 8168 and 810x line of chipsets
1723 *
1724 * 8169, 8168, and 8136(810x) serial chipsets support it.
1725 *
1726 * > 2 - the Tx timer unit at gigabit speed
1727 *
1728 * The unit of the timer depends on both the speed and the setting of CPlusCmd
1729 * (0xe0) bit 1 and bit 0.
1730 *
1731 * For 8169
1732 * bit[1:0] \ speed        1000M           100M            10M
1733 * 0 0                     320ns           2.56us          40.96us
1734 * 0 1                     2.56us          20.48us         327.7us
1735 * 1 0                     5.12us          40.96us         655.4us
1736 * 1 1                     10.24us         81.92us         1.31ms
1737 *
1738 * For the other
1739 * bit[1:0] \ speed        1000M           100M            10M
1740 * 0 0                     5us             2.56us          40.96us
1741 * 0 1                     40us            20.48us         327.7us
1742 * 1 0                     80us            40.96us         655.4us
1743 * 1 1                     160us           81.92us         1.31ms
1744 */
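
/*
 * Worked example (illustrative): with CPlusCmd[1:0] = 00 at 1000 Mbps an
 * 8168 uses a 5 us timer tick, so an IntrMitigate Tx usecs field of 0x0f
 * corresponds to a maximum delay of 15 * 5 us = 75 us; the same field value
 * on an 8169 (320 ns tick) corresponds to 15 * 0.32 us = 4.8 us.
 */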
1745
1746/* rx/tx scale factors for all CPlusCmd[0:1] cases */
1747struct rtl_coalesce_info {
1748	u32 speed;
1749	u32 scale_nsecs[4];
1750};
1751
1752/* produce array with base delay *1, *8, *8*2, *8*2*2 */
1753#define COALESCE_DELAY(d) { (d), 8 * (d), 16 * (d), 32 * (d) }
1754
1755static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = {
1756	{ SPEED_1000,	COALESCE_DELAY(320) },
1757	{ SPEED_100,	COALESCE_DELAY(2560) },
1758	{ SPEED_10,	COALESCE_DELAY(40960) },
1759	{ 0 },
1760};
1761
1762static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = {
1763	{ SPEED_1000,	COALESCE_DELAY(5000) },
1764	{ SPEED_100,	COALESCE_DELAY(2560) },
1765	{ SPEED_10,	COALESCE_DELAY(40960) },
1766	{ 0 },
1767};
1768#undef COALESCE_DELAY
1769
1770/* get rx/tx scale vector corresponding to current speed */
1771static const struct rtl_coalesce_info *
1772rtl_coalesce_info(struct rtl8169_private *tp)
1773{
1774	const struct rtl_coalesce_info *ci;
1775
1776	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
1777		ci = rtl_coalesce_info_8169;
1778	else
1779		ci = rtl_coalesce_info_8168_8136;
1780
1781	/* if speed is unknown assume highest one */
1782	if (tp->phydev->speed == SPEED_UNKNOWN)
1783		return ci;
1784
1785	for (; ci->speed; ci++) {
1786		if (tp->phydev->speed == ci->speed)
1787			return ci;
1788	}
1789
1790	return ERR_PTR(-ELNRNG);
1791}
1792
1793static int rtl_get_coalesce(struct net_device *dev,
1794			    struct ethtool_coalesce *ec,
1795			    struct kernel_ethtool_coalesce *kernel_coal,
1796			    struct netlink_ext_ack *extack)
1797{
1798	struct rtl8169_private *tp = netdev_priv(dev);
1799	const struct rtl_coalesce_info *ci;
1800	u32 scale, c_us, c_fr;
1801	u16 intrmit;
1802
1803	if (rtl_is_8125(tp))
1804		return -EOPNOTSUPP;
1805
1806	memset(ec, 0, sizeof(*ec));
1807
1808	/* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */
1809	ci = rtl_coalesce_info(tp);
1810	if (IS_ERR(ci))
1811		return PTR_ERR(ci);
1812
1813	scale = ci->scale_nsecs[tp->cp_cmd & INTT_MASK];
1814
1815	intrmit = RTL_R16(tp, IntrMitigate);
1816
1817	c_us = FIELD_GET(RTL_COALESCE_TX_USECS, intrmit);
1818	ec->tx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000);
1819
1820	c_fr = FIELD_GET(RTL_COALESCE_TX_FRAMES, intrmit);
1821	/* ethtool_coalesce states usecs and max_frames must not both be 0 */
1822	ec->tx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1;
1823
1824	c_us = FIELD_GET(RTL_COALESCE_RX_USECS, intrmit);
1825	ec->rx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000);
1826
1827	c_fr = FIELD_GET(RTL_COALESCE_RX_FRAMES, intrmit);
1828	ec->rx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1;
1829
1830	return 0;
1831}
1832
1833/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, usec) */
1834static int rtl_coalesce_choose_scale(struct rtl8169_private *tp, u32 usec,
1835				     u16 *cp01)
1836{
1837	const struct rtl_coalesce_info *ci;
1838	u16 i;
1839
1840	ci = rtl_coalesce_info(tp);
1841	if (IS_ERR(ci))
1842		return PTR_ERR(ci);
1843
1844	for (i = 0; i < 4; i++) {
1845		if (usec <= ci->scale_nsecs[i] * RTL_COALESCE_T_MAX / 1000U) {
1846			*cp01 = i;
1847			return ci->scale_nsecs[i];
1848		}
1849	}
1850
1851	return -ERANGE;
1852}
1853
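/*
 * Worked example (illustrative): "ethtool -C eth0 rx-usecs 100" on an
 * 8168-class chip linked at 1000 Mbps. The 5 us unit covers at most
 * 15 * 5 = 75 us, so rtl_coalesce_choose_scale() selects the 40 us unit
 * (cp01 = 1, later written to the INTT bits of CPlusCmd), and
 * DIV_ROUND_UP(100 * 1000, 40000) = 3 is programmed into the RX usecs
 * nibble of IntrMitigate, i.e. an effective maximum delay of 120 us.
 */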
1854static int rtl_set_coalesce(struct net_device *dev,
1855			    struct ethtool_coalesce *ec,
1856			    struct kernel_ethtool_coalesce *kernel_coal,
1857			    struct netlink_ext_ack *extack)
1858{
1859	struct rtl8169_private *tp = netdev_priv(dev);
1860	u32 tx_fr = ec->tx_max_coalesced_frames;
1861	u32 rx_fr = ec->rx_max_coalesced_frames;
1862	u32 coal_usec_max, units;
1863	u16 w = 0, cp01 = 0;
1864	int scale;
1865
1866	if (rtl_is_8125(tp))
1867		return -EOPNOTSUPP;
1868
1869	if (rx_fr > RTL_COALESCE_FRAME_MAX || tx_fr > RTL_COALESCE_FRAME_MAX)
1870		return -ERANGE;
1871
1872	coal_usec_max = max(ec->rx_coalesce_usecs, ec->tx_coalesce_usecs);
1873	scale = rtl_coalesce_choose_scale(tp, coal_usec_max, &cp01);
1874	if (scale < 0)
1875		return scale;
1876
	/* Accept the max_frames=1 that we returned in rtl_get_coalesce. Accept
	 * it not only when usecs=0, because of e.g. the following scenario:
	 *
	 * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX)
	 * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1
	 * - then user does `ethtool -C eth0 rx-usecs 100`
	 *
	 * Since ethtool sends the whole ethtool_coalesce struct to the kernel,
	 * rx_frames has to be set to 0 explicitly if we want it to be ignored.
	 */
1887	if (rx_fr == 1)
1888		rx_fr = 0;
1889	if (tx_fr == 1)
1890		tx_fr = 0;
1891
1892	/* HW requires time limit to be set if frame limit is set */
1893	if ((tx_fr && !ec->tx_coalesce_usecs) ||
1894	    (rx_fr && !ec->rx_coalesce_usecs))
1895		return -EINVAL;
1896
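	/* the hardware frame limit fields have a granularity of 4 frames */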
1897	w |= FIELD_PREP(RTL_COALESCE_TX_FRAMES, DIV_ROUND_UP(tx_fr, 4));
1898	w |= FIELD_PREP(RTL_COALESCE_RX_FRAMES, DIV_ROUND_UP(rx_fr, 4));
1899
1900	units = DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000U, scale);
1901	w |= FIELD_PREP(RTL_COALESCE_TX_USECS, units);
1902	units = DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000U, scale);
1903	w |= FIELD_PREP(RTL_COALESCE_RX_USECS, units);
1904
1905	RTL_W16(tp, IntrMitigate, w);
1906
	/* The meaning of the PktCntrDisable bit changed starting with RTL8168e-vl */
1908	if (rtl_is_8168evl_up(tp)) {
1909		if (!rx_fr && !tx_fr)
1910			/* disable packet counter */
1911			tp->cp_cmd |= PktCntrDisable;
1912		else
1913			tp->cp_cmd &= ~PktCntrDisable;
1914	}
1915
1916	tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01;
1917	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
1918	rtl_pci_commit(tp);
1919
1920	return 0;
1921}
1922
1923static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data)
1924{
1925	struct rtl8169_private *tp = netdev_priv(dev);
1926
1927	if (!rtl_supports_eee(tp))
1928		return -EOPNOTSUPP;
1929
1930	return phy_ethtool_get_eee(tp->phydev, data);
1931}
1932
1933static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
1934{
1935	struct rtl8169_private *tp = netdev_priv(dev);
1936	int ret;
1937
1938	if (!rtl_supports_eee(tp))
1939		return -EOPNOTSUPP;
1940
1941	ret = phy_ethtool_set_eee(tp->phydev, data);
1942
1943	if (!ret)
1944		tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN,
1945					   MDIO_AN_EEE_ADV);
1946	return ret;
1947}
1948
1949static void rtl8169_get_ringparam(struct net_device *dev,
1950				  struct ethtool_ringparam *data,
1951				  struct kernel_ethtool_ringparam *kernel_data,
1952				  struct netlink_ext_ack *extack)
1953{
1954	data->rx_max_pending = NUM_RX_DESC;
1955	data->rx_pending = NUM_RX_DESC;
1956	data->tx_max_pending = NUM_TX_DESC;
1957	data->tx_pending = NUM_TX_DESC;
1958}
1959
1960static void rtl8169_get_pauseparam(struct net_device *dev,
1961				   struct ethtool_pauseparam *data)
1962{
1963	struct rtl8169_private *tp = netdev_priv(dev);
1964	bool tx_pause, rx_pause;
1965
1966	phy_get_pause(tp->phydev, &tx_pause, &rx_pause);
1967
1968	data->autoneg = tp->phydev->autoneg;
1969	data->tx_pause = tx_pause ? 1 : 0;
1970	data->rx_pause = rx_pause ? 1 : 0;
1971}
1972
1973static int rtl8169_set_pauseparam(struct net_device *dev,
1974				  struct ethtool_pauseparam *data)
1975{
1976	struct rtl8169_private *tp = netdev_priv(dev);
1977
1978	if (dev->mtu > ETH_DATA_LEN)
1979		return -EOPNOTSUPP;
1980
1981	phy_set_asym_pause(tp->phydev, data->rx_pause, data->tx_pause);
1982
1983	return 0;
1984}
1985
1986static const struct ethtool_ops rtl8169_ethtool_ops = {
1987	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1988				     ETHTOOL_COALESCE_MAX_FRAMES,
1989	.get_drvinfo		= rtl8169_get_drvinfo,
1990	.get_regs_len		= rtl8169_get_regs_len,
1991	.get_link		= ethtool_op_get_link,
1992	.get_coalesce		= rtl_get_coalesce,
1993	.set_coalesce		= rtl_set_coalesce,
1994	.get_regs		= rtl8169_get_regs,
1995	.get_wol		= rtl8169_get_wol,
1996	.set_wol		= rtl8169_set_wol,
1997	.get_strings		= rtl8169_get_strings,
1998	.get_sset_count		= rtl8169_get_sset_count,
1999	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
2000	.get_ts_info		= ethtool_op_get_ts_info,
2001	.nway_reset		= phy_ethtool_nway_reset,
2002	.get_eee		= rtl8169_get_eee,
2003	.set_eee		= rtl8169_set_eee,
2004	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
2005	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
2006	.get_ringparam		= rtl8169_get_ringparam,
2007	.get_pauseparam		= rtl8169_get_pauseparam,
2008	.set_pauseparam		= rtl8169_set_pauseparam,
2009};
2010
2011static void rtl_enable_eee(struct rtl8169_private *tp)
2012{
2013	struct phy_device *phydev = tp->phydev;
2014	int adv;
2015
2016	/* respect EEE advertisement the user may have set */
2017	if (tp->eee_adv >= 0)
2018		adv = tp->eee_adv;
2019	else
2020		adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
2021
2022	if (adv >= 0)
2023		phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
2024}
2025
2026static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
2027{
2028	/*
2029	 * The driver currently handles the 8168Bf and the 8168Be identically
2030	 * but they can be identified more specifically through the test below
2031	 * if needed:
2032	 *
2033	 * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
2034	 *
2035	 * Same thing for the 8101Eb and the 8101Ec:
2036	 *
2037	 * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
2038	 */
2039	static const struct rtl_mac_info {
2040		u16 mask;
2041		u16 val;
2042		enum mac_version ver;
2043	} mac_info[] = {
2044		/* 8125B family. */
2045		{ 0x7cf, 0x641,	RTL_GIGA_MAC_VER_63 },
2046
2047		/* 8125A family. */
2048		{ 0x7cf, 0x609,	RTL_GIGA_MAC_VER_61 },
2049		/* It seems only XID 609 made it to the mass market.
2050		 * { 0x7cf, 0x608,	RTL_GIGA_MAC_VER_60 },
2051		 * { 0x7c8, 0x608,	RTL_GIGA_MAC_VER_61 },
2052		 */
2053
2054		/* RTL8117 */
2055		{ 0x7cf, 0x54b,	RTL_GIGA_MAC_VER_53 },
2056		{ 0x7cf, 0x54a,	RTL_GIGA_MAC_VER_52 },
2057
2058		/* 8168EP family. */
2059		{ 0x7cf, 0x502,	RTL_GIGA_MAC_VER_51 },
2060		/* It seems this chip version never made it to
2061		 * the wild. Let's disable detection.
2062		 * { 0x7cf, 0x501,      RTL_GIGA_MAC_VER_50 },
2063		 * { 0x7cf, 0x500,      RTL_GIGA_MAC_VER_49 },
2064		 */
2065
2066		/* 8168H family. */
2067		{ 0x7cf, 0x541,	RTL_GIGA_MAC_VER_46 },
2068		/* It seems this chip version never made it to
2069		 * the wild. Let's disable detection.
2070		 * { 0x7cf, 0x540,	RTL_GIGA_MAC_VER_45 },
2071		 */
2072
2073		/* 8168G family. */
2074		{ 0x7cf, 0x5c8,	RTL_GIGA_MAC_VER_44 },
2075		{ 0x7cf, 0x509,	RTL_GIGA_MAC_VER_42 },
2076		/* It seems this chip version never made it to
2077		 * the wild. Let's disable detection.
2078		 * { 0x7cf, 0x4c1,	RTL_GIGA_MAC_VER_41 },
2079		 */
2080		{ 0x7cf, 0x4c0,	RTL_GIGA_MAC_VER_40 },
2081
2082		/* 8168F family. */
2083		{ 0x7c8, 0x488,	RTL_GIGA_MAC_VER_38 },
2084		{ 0x7cf, 0x481,	RTL_GIGA_MAC_VER_36 },
2085		{ 0x7cf, 0x480,	RTL_GIGA_MAC_VER_35 },
2086
2087		/* 8168E family. */
2088		{ 0x7c8, 0x2c8,	RTL_GIGA_MAC_VER_34 },
2089		{ 0x7cf, 0x2c1,	RTL_GIGA_MAC_VER_32 },
2090		{ 0x7c8, 0x2c0,	RTL_GIGA_MAC_VER_33 },
2091
2092		/* 8168D family. */
2093		{ 0x7cf, 0x281,	RTL_GIGA_MAC_VER_25 },
2094		{ 0x7c8, 0x280,	RTL_GIGA_MAC_VER_26 },
2095
2096		/* 8168DP family. */
2097		/* It seems this early RTL8168dp version never made it to
2098		 * the wild. Support has been removed.
2099		 * { 0x7cf, 0x288,      RTL_GIGA_MAC_VER_27 },
2100		 */
2101		{ 0x7cf, 0x28a,	RTL_GIGA_MAC_VER_28 },
2102		{ 0x7cf, 0x28b,	RTL_GIGA_MAC_VER_31 },
2103
2104		/* 8168C family. */
2105		{ 0x7cf, 0x3c9,	RTL_GIGA_MAC_VER_23 },
2106		{ 0x7cf, 0x3c8,	RTL_GIGA_MAC_VER_18 },
2107		{ 0x7c8, 0x3c8,	RTL_GIGA_MAC_VER_24 },
2108		{ 0x7cf, 0x3c0,	RTL_GIGA_MAC_VER_19 },
2109		{ 0x7cf, 0x3c2,	RTL_GIGA_MAC_VER_20 },
2110		{ 0x7cf, 0x3c3,	RTL_GIGA_MAC_VER_21 },
2111		{ 0x7c8, 0x3c0,	RTL_GIGA_MAC_VER_22 },
2112
2113		/* 8168B family. */
2114		{ 0x7c8, 0x380,	RTL_GIGA_MAC_VER_17 },
2115		{ 0x7c8, 0x300,	RTL_GIGA_MAC_VER_11 },
2116
2117		/* 8101 family. */
2118		{ 0x7c8, 0x448,	RTL_GIGA_MAC_VER_39 },
2119		{ 0x7c8, 0x440,	RTL_GIGA_MAC_VER_37 },
2120		{ 0x7cf, 0x409,	RTL_GIGA_MAC_VER_29 },
2121		{ 0x7c8, 0x408,	RTL_GIGA_MAC_VER_30 },
2122		{ 0x7cf, 0x349,	RTL_GIGA_MAC_VER_08 },
2123		{ 0x7cf, 0x249,	RTL_GIGA_MAC_VER_08 },
2124		{ 0x7cf, 0x348,	RTL_GIGA_MAC_VER_07 },
2125		{ 0x7cf, 0x248,	RTL_GIGA_MAC_VER_07 },
2126		{ 0x7cf, 0x240,	RTL_GIGA_MAC_VER_14 },
2127		{ 0x7c8, 0x348,	RTL_GIGA_MAC_VER_09 },
2128		{ 0x7c8, 0x248,	RTL_GIGA_MAC_VER_09 },
2129		{ 0x7c8, 0x340,	RTL_GIGA_MAC_VER_10 },
2130
2131		/* 8110 family. */
2132		{ 0xfc8, 0x980,	RTL_GIGA_MAC_VER_06 },
2133		{ 0xfc8, 0x180,	RTL_GIGA_MAC_VER_05 },
2134		{ 0xfc8, 0x100,	RTL_GIGA_MAC_VER_04 },
2135		{ 0xfc8, 0x040,	RTL_GIGA_MAC_VER_03 },
2136		{ 0xfc8, 0x008,	RTL_GIGA_MAC_VER_02 },
2137
2138		/* Catch-all */
2139		{ 0x000, 0x000,	RTL_GIGA_MAC_NONE   }
2140	};
2141	const struct rtl_mac_info *p = mac_info;
2142	enum mac_version ver;
2143
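	/* the catch-all entry (mask 0x000) guarantees the search terminates */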
2144	while ((xid & p->mask) != p->val)
2145		p++;
2146	ver = p->ver;
2147
2148	if (ver != RTL_GIGA_MAC_NONE && !gmii) {
2149		if (ver == RTL_GIGA_MAC_VER_42)
2150			ver = RTL_GIGA_MAC_VER_43;
2151		else if (ver == RTL_GIGA_MAC_VER_46)
2152			ver = RTL_GIGA_MAC_VER_48;
2153	}
2154
2155	return ver;
2156}
2157
2158static void rtl_release_firmware(struct rtl8169_private *tp)
2159{
2160	if (tp->rtl_fw) {
2161		rtl_fw_release_firmware(tp->rtl_fw);
2162		kfree(tp->rtl_fw);
2163		tp->rtl_fw = NULL;
2164	}
2165}
2166
2167void r8169_apply_firmware(struct rtl8169_private *tp)
2168{
2169	int val;
2170
2171	/* TODO: release firmware if rtl_fw_write_firmware signals failure. */
2172	if (tp->rtl_fw) {
2173		rtl_fw_write_firmware(tp, tp->rtl_fw);
2174		/* At least one firmware doesn't reset tp->ocp_base. */
2175		tp->ocp_base = OCP_STD_PHY_BASE;
2176
2177		/* PHY soft reset may still be in progress */
2178		phy_read_poll_timeout(tp->phydev, MII_BMCR, val,
2179				      !(val & BMCR_RESET),
2180				      50000, 600000, true);
2181	}
2182}
2183
2184static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
2185{
2186	/* Adjust EEE LED frequency */
2187	if (tp->mac_version != RTL_GIGA_MAC_VER_38)
2188		RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
2189
2190	rtl_eri_set_bits(tp, 0x1b0, 0x0003);
2191}
2192
2193static void rtl8125a_config_eee_mac(struct rtl8169_private *tp)
2194{
2195	r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
2196	r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
2197}
2198
2199static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp)
2200{
2201	RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20);
2202}
2203
2204static void rtl8125b_config_eee_mac(struct rtl8169_private *tp)
2205{
2206	rtl8125_set_eee_txidle_timer(tp);
2207	r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
2208}
2209
2210static void rtl_rar_exgmac_set(struct rtl8169_private *tp, const u8 *addr)
2211{
2212	rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, get_unaligned_le32(addr));
2213	rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, get_unaligned_le16(addr + 4));
2214	rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, get_unaligned_le16(addr) << 16);
2215	rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, get_unaligned_le32(addr + 2));
2216}
2217
2218u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
2219{
2220	u16 data1, data2, ioffset;
2221
2222	r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
2223	data1 = r8168_mac_ocp_read(tp, 0xdd02);
2224	data2 = r8168_mac_ocp_read(tp, 0xdd00);
2225
2226	ioffset = (data2 >> 1) & 0x7ff8;
2227	ioffset |= data2 & 0x0007;
2228	if (data1 & BIT(7))
2229		ioffset |= BIT(15);
2230
2231	return ioffset;
2232}
2233
2234static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
2235{
2236	set_bit(flag, tp->wk.flags);
2237	schedule_work(&tp->wk.work);
2238}
2239
2240static void rtl8169_init_phy(struct rtl8169_private *tp)
2241{
2242	r8169_hw_phy_config(tp, tp->phydev, tp->mac_version);
2243
2244	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
2245		pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
2246		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
		/* set undocumented MAC register at C+CR offset 0x82 */
2248		RTL_W8(tp, 0x82, 0x01);
2249	}
2250
2251	if (tp->mac_version == RTL_GIGA_MAC_VER_05 &&
2252	    tp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_GIGABYTE &&
2253	    tp->pci_dev->subsystem_device == 0xe000)
2254		phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b);
2255
2256	/* We may have called phy_speed_down before */
2257	phy_speed_up(tp->phydev);
2258
2259	if (rtl_supports_eee(tp))
2260		rtl_enable_eee(tp);
2261
2262	genphy_soft_reset(tp->phydev);
2263}
2264
2265static void rtl_rar_set(struct rtl8169_private *tp, const u8 *addr)
2266{
2267	rtl_unlock_config_regs(tp);
2268
2269	RTL_W32(tp, MAC4, get_unaligned_le16(addr + 4));
2270	rtl_pci_commit(tp);
2271
2272	RTL_W32(tp, MAC0, get_unaligned_le32(addr));
2273	rtl_pci_commit(tp);
2274
2275	if (tp->mac_version == RTL_GIGA_MAC_VER_34)
2276		rtl_rar_exgmac_set(tp, addr);
2277
2278	rtl_lock_config_regs(tp);
2279}
2280
2281static int rtl_set_mac_address(struct net_device *dev, void *p)
2282{
2283	struct rtl8169_private *tp = netdev_priv(dev);
2284	int ret;
2285
2286	ret = eth_mac_addr(dev, p);
2287	if (ret)
2288		return ret;
2289
2290	rtl_rar_set(tp, dev->dev_addr);
2291
2292	return 0;
2293}
2294
2295static void rtl_init_rxcfg(struct rtl8169_private *tp)
2296{
2297	switch (tp->mac_version) {
2298	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
2299	case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
2300		RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
2301		break;
2302	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
2303	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
2304	case RTL_GIGA_MAC_VER_38:
2305		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
2306		break;
2307	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
2308		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
2309		break;
2310	case RTL_GIGA_MAC_VER_61:
2311		RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
2312		break;
2313	case RTL_GIGA_MAC_VER_63:
2314		RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
2315			RX_PAUSE_SLOT_ON);
2316		break;
2317	default:
2318		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
2319		break;
2320	}
2321}
2322
2323static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
2324{
2325	tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
2326}
2327
2328static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
2329{
2330	RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
2331	RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1);
2332}
2333
2334static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
2335{
2336	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
2337	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1);
2338}
2339
2340static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
2341{
2342	RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
2343}
2344
2345static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
2346{
2347	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
2348}
2349
2350static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
2351{
2352	RTL_W8(tp, MaxTxPacketSize, 0x24);
2353	RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
2354	RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
2355}
2356
2357static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
2358{
2359	RTL_W8(tp, MaxTxPacketSize, 0x3f);
2360	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
2361	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
2362}
2363
2364static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
2365{
2366	RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0));
2367}
2368
2369static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
2370{
2371	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
2372}
2373
2374static void rtl_jumbo_config(struct rtl8169_private *tp)
2375{
2376	bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
2377	int readrq = 4096;
2378
2379	rtl_unlock_config_regs(tp);
2380	switch (tp->mac_version) {
2381	case RTL_GIGA_MAC_VER_17:
2382		if (jumbo) {
2383			readrq = 512;
2384			r8168b_1_hw_jumbo_enable(tp);
2385		} else {
2386			r8168b_1_hw_jumbo_disable(tp);
2387		}
2388		break;
2389	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
2390		if (jumbo) {
2391			readrq = 512;
2392			r8168c_hw_jumbo_enable(tp);
2393		} else {
2394			r8168c_hw_jumbo_disable(tp);
2395		}
2396		break;
2397	case RTL_GIGA_MAC_VER_28:
2398		if (jumbo)
2399			r8168dp_hw_jumbo_enable(tp);
2400		else
2401			r8168dp_hw_jumbo_disable(tp);
2402		break;
2403	case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
2404		if (jumbo)
2405			r8168e_hw_jumbo_enable(tp);
2406		else
2407			r8168e_hw_jumbo_disable(tp);
2408		break;
2409	default:
2410		break;
2411	}
2412	rtl_lock_config_regs(tp);
2413
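	/* some chip versions need a reduced PCIe max read request size in
	 * jumbo mode
	 */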
2414	if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
2415		pcie_set_readrq(tp->pci_dev, readrq);
2416
2417	/* Chip doesn't support pause in jumbo mode */
2418	if (jumbo) {
2419		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2420				   tp->phydev->advertising);
2421		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2422				   tp->phydev->advertising);
2423		phy_start_aneg(tp->phydev);
2424	}
2425}
2426
2427DECLARE_RTL_COND(rtl_chipcmd_cond)
2428{
2429	return RTL_R8(tp, ChipCmd) & CmdReset;
2430}
2431
2432static void rtl_hw_reset(struct rtl8169_private *tp)
2433{
2434	RTL_W8(tp, ChipCmd, CmdReset);
2435
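	/* CmdReset is self-clearing; wait for the chip to finish the reset */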
2436	rtl_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
2437}
2438
2439static void rtl_request_firmware(struct rtl8169_private *tp)
2440{
2441	struct rtl_fw *rtl_fw;
2442
2443	/* firmware loaded already or no firmware available */
2444	if (tp->rtl_fw || !tp->fw_name)
2445		return;
2446
2447	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
2448	if (!rtl_fw)
2449		return;
2450
2451	rtl_fw->phy_write = rtl_writephy;
2452	rtl_fw->phy_read = rtl_readphy;
2453	rtl_fw->mac_mcu_write = mac_mcu_write;
2454	rtl_fw->mac_mcu_read = mac_mcu_read;
2455	rtl_fw->fw_name = tp->fw_name;
2456	rtl_fw->dev = tp_to_dev(tp);
2457
2458	if (rtl_fw_request_firmware(rtl_fw))
2459		kfree(rtl_fw);
2460	else
2461		tp->rtl_fw = rtl_fw;
2462}
2463
2464static void rtl_rx_close(struct rtl8169_private *tp)
2465{
2466	RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
2467}
2468
2469DECLARE_RTL_COND(rtl_npq_cond)
2470{
2471	return RTL_R8(tp, TxPoll) & NPQ;
2472}
2473
2474DECLARE_RTL_COND(rtl_txcfg_empty_cond)
2475{
2476	return RTL_R32(tp, TxConfig) & TXCFG_EMPTY;
2477}
2478
2479DECLARE_RTL_COND(rtl_rxtx_empty_cond)
2480{
2481	return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
2482}
2483
2484DECLARE_RTL_COND(rtl_rxtx_empty_cond_2)
2485{
2486	/* IntrMitigate has new functionality on RTL8125 */
2487	return (RTL_R16(tp, IntrMitigate) & 0x0103) == 0x0103;
2488}
2489
2490static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
2491{
2492	switch (tp->mac_version) {
2493	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
2494		rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42);
2495		rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
2496		break;
	case RTL_GIGA_MAC_VER_61:
2498		rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
2499		break;
2500	case RTL_GIGA_MAC_VER_63:
2501		RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
2502		rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
2503		rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
2504		break;
2505	default:
2506		break;
2507	}
2508}
2509
2510static void rtl_disable_rxdvgate(struct rtl8169_private *tp)
2511{
2512	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
2513}
2514
2515static void rtl_enable_rxdvgate(struct rtl8169_private *tp)
2516{
2517	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
2518	fsleep(2000);
2519	rtl_wait_txrx_fifo_empty(tp);
2520}
2521
2522static void rtl_wol_enable_rx(struct rtl8169_private *tp)
2523{
2524	if (tp->mac_version >= RTL_GIGA_MAC_VER_25)
2525		RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
2526			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
2527
2528	if (tp->mac_version >= RTL_GIGA_MAC_VER_40)
2529		rtl_disable_rxdvgate(tp);
2530}
2531
2532static void rtl_prepare_power_down(struct rtl8169_private *tp)
2533{
2534	if (tp->dash_enabled)
2535		return;
2536
2537	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
2538	    tp->mac_version == RTL_GIGA_MAC_VER_33)
2539		rtl_ephy_write(tp, 0x19, 0xff64);
2540
2541	if (device_may_wakeup(tp_to_dev(tp))) {
2542		phy_speed_down(tp->phydev, false);
2543		rtl_wol_enable_rx(tp);
2544	}
2545}
2546
2547static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
2548{
2549	u32 val = TX_DMA_BURST << TxDMAShift |
2550		  InterFrameGap << TxInterFrameGapShift;
2551
2552	if (rtl_is_8168evl_up(tp))
2553		val |= TXCFG_AUTO_FIFO;
2554
2555	RTL_W32(tp, TxConfig, val);
2556}
2557
2558static void rtl_set_rx_max_size(struct rtl8169_private *tp)
2559{
	/* A low value hurts performance, so effectively disable RX size filtering. */
2561	RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1);
2562}
2563
2564static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
2565{
2566	/*
2567	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
2568	 * register to be written before TxDescAddrLow to work.
2569	 * Switching from MMIO to I/O access fixes the issue as well.
2570	 */
2571	RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
2572	RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
2573	RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
2574	RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
2575}
2576
2577static void rtl8169_set_magic_reg(struct rtl8169_private *tp)
2578{
2579	u32 val;
2580
2581	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
2582		val = 0x000fff00;
2583	else if (tp->mac_version == RTL_GIGA_MAC_VER_06)
2584		val = 0x00ffff00;
2585	else
2586		return;
2587
2588	if (RTL_R8(tp, Config2) & PCI_Clock_66MHz)
2589		val |= 0xff;
2590
2591	RTL_W32(tp, 0x7c, val);
2592}
2593
2594static void rtl_set_rx_mode(struct net_device *dev)
2595{
2596	u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast;
2597	/* Multicast hash filter */
2598	u32 mc_filter[2] = { 0xffffffff, 0xffffffff };
2599	struct rtl8169_private *tp = netdev_priv(dev);
2600	u32 tmp;
2601
2602	if (dev->flags & IFF_PROMISC) {
2603		rx_mode |= AcceptAllPhys;
2604	} else if (!(dev->flags & IFF_MULTICAST)) {
2605		rx_mode &= ~AcceptMulticast;
2606	} else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
2607		   dev->flags & IFF_ALLMULTI ||
2608		   tp->mac_version == RTL_GIGA_MAC_VER_35) {
2609		/* accept all multicasts */
2610	} else if (netdev_mc_empty(dev)) {
2611		rx_mode &= ~AcceptMulticast;
2612	} else {
2613		struct netdev_hw_addr *ha;
2614
2615		mc_filter[1] = mc_filter[0] = 0;
2616		netdev_for_each_mc_addr(ha, dev) {
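			/* the upper 6 bits of the CRC select one of 64 hash bits */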
2617			u32 bit_nr = eth_hw_addr_crc(ha) >> 26;
2618			mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31);
2619		}
2620
2621		if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
2622			tmp = mc_filter[0];
2623			mc_filter[0] = swab32(mc_filter[1]);
2624			mc_filter[1] = swab32(tmp);
2625		}
2626	}
2627
2628	RTL_W32(tp, MAR0 + 4, mc_filter[1]);
2629	RTL_W32(tp, MAR0 + 0, mc_filter[0]);
2630
2631	tmp = RTL_R32(tp, RxConfig);
2632	RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_OK_MASK) | rx_mode);
2633}
2634
2635DECLARE_RTL_COND(rtl_csiar_cond)
2636{
2637	return RTL_R32(tp, CSIAR) & CSIAR_FLAG;
2638}
2639
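/* CSIAR/CSIDR provide indirect access to the chip's PCIe config space */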
2640static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
2641{
2642	u32 func = PCI_FUNC(tp->pci_dev->devfn);
2643
2644	RTL_W32(tp, CSIDR, value);
2645	RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
2646		CSIAR_BYTE_ENABLE | func << 16);
2647
2648	rtl_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
2649}
2650
2651static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
2652{
2653	u32 func = PCI_FUNC(tp->pci_dev->devfn);
2654
2655	RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 |
2656		CSIAR_BYTE_ENABLE);
2657
2658	return rtl_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
2659		RTL_R32(tp, CSIDR) : ~0;
2660}
2661
2662static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
2663{
2664	struct pci_dev *pdev = tp->pci_dev;
2665	u32 csi;
2666
2667	/* According to Realtek the value at config space address 0x070f
2668	 * controls the L0s/L1 entrance latency. We try standard ECAM access
2669	 * first and if it fails fall back to CSI.
2670	 * bit 0..2: L0: 0 = 1us, 1 = 2us .. 6 = 7us, 7 = 7us (no typo)
2671	 * bit 3..5: L1: 0 = 1us, 1 = 2us .. 6 = 64us, 7 = 64us
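	 * Example: val 0x27 selects L0 = 7us (bits 0..2 = 7) and
	 * L1 = 16us (bits 3..5 = 4).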
2672	 */
2673	if (pdev->cfg_size > 0x070f &&
2674	    pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL)
2675		return;
2676
2677	netdev_notice_once(tp->dev,
2678		"No native access to PCI extended config space, falling back to CSI\n");
2679	csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
2680	rtl_csi_write(tp, 0x070c, csi | val << 24);
2681}
2682
2683static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
2684{
2685	/* L0 7us, L1 16us */
2686	rtl_set_aspm_entry_latency(tp, 0x27);
2687}
2688
2689struct ephy_info {
2690	unsigned int offset;
2691	u16 mask;
2692	u16 bits;
2693};
2694
2695static void __rtl_ephy_init(struct rtl8169_private *tp,
2696			    const struct ephy_info *e, int len)
2697{
2698	u16 w;
2699
2700	while (len-- > 0) {
2701		w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
2702		rtl_ephy_write(tp, e->offset, w);
2703		e++;
2704	}
2705}
2706
2707#define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a))
2708
2709static void rtl_disable_clock_request(struct rtl8169_private *tp)
2710{
2711	pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL,
2712				   PCI_EXP_LNKCTL_CLKREQ_EN);
2713}
2714
2715static void rtl_enable_clock_request(struct rtl8169_private *tp)
2716{
2717	pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL,
2718				 PCI_EXP_LNKCTL_CLKREQ_EN);
2719}
2720
2721static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp)
2722{
2723	/* work around an issue when PCI reset occurs during L2/L3 state */
2724	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23);
2725}
2726
2727static void rtl_enable_exit_l1(struct rtl8169_private *tp)
2728{
2729	/* Bits control which events trigger ASPM L1 exit:
2730	 * Bit 12: rxdv
2731	 * Bit 11: ltr_msg
2732	 * Bit 10: txdma_poll
2733	 * Bit  9: xadm
2734	 * Bit  8: pktavi
2735	 * Bit  7: txpla
2736	 */
2737	switch (tp->mac_version) {
2738	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
2739		rtl_eri_set_bits(tp, 0xd4, 0x1f00);
2740		break;
2741	case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
2742		rtl_eri_set_bits(tp, 0xd4, 0x0c00);
2743		break;
2744	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
2745		r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
2746		break;
2747	default:
2748		break;
2749	}
2750}
2751
2752static void rtl_disable_exit_l1(struct rtl8169_private *tp)
2753{
2754	switch (tp->mac_version) {
2755	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
2756		rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
2757		break;
2758	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
2759		r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
2760		break;
2761	default:
2762		break;
2763	}
2764}
2765
2766static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
2767{
2768	if (tp->mac_version < RTL_GIGA_MAC_VER_32)
2769		return;
2770
	/* Don't enable ASPM in the chip if the OS can't control ASPM */
2772	if (enable && tp->aspm_manageable) {
2773		/* On these chip versions ASPM can even harm
2774		 * bus communication of other PCI devices.
2775		 */
2776		if (tp->mac_version == RTL_GIGA_MAC_VER_42 ||
2777		    tp->mac_version == RTL_GIGA_MAC_VER_43)
2778			return;
2779
2780		rtl_mod_config5(tp, 0, ASPM_en);
2781		rtl_mod_config2(tp, 0, ClkReqEn);
2782
2783		switch (tp->mac_version) {
2784		case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
2785		case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
2786			/* reset ephy tx/rx disable timer */
2787			r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
2788			/* chip can trigger L1.2 */
2789			r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, BIT(2));
2790			break;
2791		default:
2792			break;
2793		}
2794	} else {
2795		switch (tp->mac_version) {
2796		case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
2797		case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
2798			r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
2799			break;
2800		default:
2801			break;
2802		}
2803
2804		rtl_mod_config2(tp, ClkReqEn, 0);
2805		rtl_mod_config5(tp, ASPM_en, 0);
2806	}
2807}
2808
2809static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat,
2810			      u16 tx_stat, u16 rx_dyn, u16 tx_dyn)
2811{
2812	/* Usage of dynamic vs. static FIFO is controlled by bit
2813	 * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known.
2814	 */
2815	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn);
2816	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn);
2817}
2818
2819static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp,
2820					  u8 low, u8 high)
2821{
2822	/* FIFO thresholds for pause flow control */
2823	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low);
2824	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high);
2825}
2826
2827static void rtl_hw_start_8168b(struct rtl8169_private *tp)
2828{
2829	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
2830}
2831
2832static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
2833{
2834	RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down);
2835
2836	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
2837
2838	rtl_disable_clock_request(tp);
2839}
2840
2841static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
2842{
2843	static const struct ephy_info e_info_8168cp[] = {
2844		{ 0x01, 0,	0x0001 },
2845		{ 0x02, 0x0800,	0x1000 },
2846		{ 0x03, 0,	0x0042 },
2847		{ 0x06, 0x0080,	0x0000 },
2848		{ 0x07, 0,	0x2000 }
2849	};
2850
2851	rtl_set_def_aspm_entry_latency(tp);
2852
2853	rtl_ephy_init(tp, e_info_8168cp);
2854
2855	__rtl_hw_start_8168cp(tp);
2856}
2857
2858static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
2859{
2860	rtl_set_def_aspm_entry_latency(tp);
2861
2862	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
2863}
2864
2865static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
2866{
2867	rtl_set_def_aspm_entry_latency(tp);
2868
2869	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
2870
2871	/* Magic. */
2872	RTL_W8(tp, DBG_REG, 0x20);
2873}
2874
2875static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
2876{
2877	static const struct ephy_info e_info_8168c_1[] = {
2878		{ 0x02, 0x0800,	0x1000 },
2879		{ 0x03, 0,	0x0002 },
2880		{ 0x06, 0x0080,	0x0000 }
2881	};
2882
2883	rtl_set_def_aspm_entry_latency(tp);
2884
2885	RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
2886
2887	rtl_ephy_init(tp, e_info_8168c_1);
2888
2889	__rtl_hw_start_8168cp(tp);
2890}
2891
2892static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
2893{
2894	static const struct ephy_info e_info_8168c_2[] = {
2895		{ 0x01, 0,	0x0001 },
2896		{ 0x03, 0x0400,	0x0020 }
2897	};
2898
2899	rtl_set_def_aspm_entry_latency(tp);
2900
2901	rtl_ephy_init(tp, e_info_8168c_2);
2902
2903	__rtl_hw_start_8168cp(tp);
2904}
2905
2906static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
2907{
2908	rtl_set_def_aspm_entry_latency(tp);
2909
2910	__rtl_hw_start_8168cp(tp);
2911}
2912
2913static void rtl_hw_start_8168d(struct rtl8169_private *tp)
2914{
2915	rtl_set_def_aspm_entry_latency(tp);
2916
2917	rtl_disable_clock_request(tp);
2918}
2919
2920static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
2921{
2922	static const struct ephy_info e_info_8168d_4[] = {
2923		{ 0x0b, 0x0000,	0x0048 },
2924		{ 0x19, 0x0020,	0x0050 },
2925		{ 0x0c, 0x0100,	0x0020 },
2926		{ 0x10, 0x0004,	0x0000 },
2927	};
2928
2929	rtl_set_def_aspm_entry_latency(tp);
2930
2931	rtl_ephy_init(tp, e_info_8168d_4);
2932
2933	rtl_enable_clock_request(tp);
2934}
2935
2936static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
2937{
2938	static const struct ephy_info e_info_8168e_1[] = {
2939		{ 0x00, 0x0200,	0x0100 },
2940		{ 0x00, 0x0000,	0x0004 },
2941		{ 0x06, 0x0002,	0x0001 },
2942		{ 0x06, 0x0000,	0x0030 },
2943		{ 0x07, 0x0000,	0x2000 },
2944		{ 0x00, 0x0000,	0x0020 },
2945		{ 0x03, 0x5800,	0x2000 },
2946		{ 0x03, 0x0000,	0x0001 },
2947		{ 0x01, 0x0800,	0x1000 },
2948		{ 0x07, 0x0000,	0x4000 },
2949		{ 0x1e, 0x0000,	0x2000 },
2950		{ 0x19, 0xffff,	0xfe6c },
2951		{ 0x0a, 0x0000,	0x0040 }
2952	};
2953
2954	rtl_set_def_aspm_entry_latency(tp);
2955
2956	rtl_ephy_init(tp, e_info_8168e_1);
2957
2958	rtl_disable_clock_request(tp);
2959
2960	/* Reset tx FIFO pointer */
2961	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST);
2962	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST);
2963
2964	rtl_mod_config5(tp, Spi_en, 0);
2965}
2966
2967static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
2968{
2969	static const struct ephy_info e_info_8168e_2[] = {
2970		{ 0x09, 0x0000,	0x0080 },
2971		{ 0x19, 0x0000,	0x0224 },
2972		{ 0x00, 0x0000,	0x0004 },
2973		{ 0x0c, 0x3df0,	0x0200 },
2974	};
2975
2976	rtl_set_def_aspm_entry_latency(tp);
2977
2978	rtl_ephy_init(tp, e_info_8168e_2);
2979
2980	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
2981	rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000);
2982	rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
2983	rtl_eri_set_bits(tp, 0x1d0, BIT(1));
2984	rtl_reset_packet_filter(tp);
2985	rtl_eri_set_bits(tp, 0x1b0, BIT(4));
2986	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
2987	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060);
2988
2989	rtl_disable_clock_request(tp);
2990
2991	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
2992
2993	rtl8168_config_eee_mac(tp);
2994
2995	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
2996	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
2997	rtl_mod_config5(tp, Spi_en, 0);
2998}
2999
3000static void rtl_hw_start_8168f(struct rtl8169_private *tp)
3001{
3002	rtl_set_def_aspm_entry_latency(tp);
3003
3004	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3005	rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000);
3006	rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
3007	rtl_reset_packet_filter(tp);
3008	rtl_eri_set_bits(tp, 0x1b0, BIT(4));
3009	rtl_eri_set_bits(tp, 0x1d0, BIT(4) | BIT(1));
3010	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
3011	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060);
3012
3013	rtl_disable_clock_request(tp);
3014
3015	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
3016	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
3017	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
3018	rtl_mod_config5(tp, Spi_en, 0);
3019
3020	rtl8168_config_eee_mac(tp);
3021}
3022
3023static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
3024{
3025	static const struct ephy_info e_info_8168f_1[] = {
3026		{ 0x06, 0x00c0,	0x0020 },
3027		{ 0x08, 0x0001,	0x0002 },
3028		{ 0x09, 0x0000,	0x0080 },
3029		{ 0x19, 0x0000,	0x0224 },
3030		{ 0x00, 0x0000,	0x0008 },
3031		{ 0x0c, 0x3df0,	0x0200 },
3032	};
3033
3034	rtl_hw_start_8168f(tp);
3035
3036	rtl_ephy_init(tp, e_info_8168f_1);
3037}
3038
3039static void rtl_hw_start_8411(struct rtl8169_private *tp)
3040{
3041	static const struct ephy_info e_info_8168f_1[] = {
3042		{ 0x06, 0x00c0,	0x0020 },
3043		{ 0x0f, 0xffff,	0x5200 },
3044		{ 0x19, 0x0000,	0x0224 },
3045		{ 0x00, 0x0000,	0x0008 },
3046		{ 0x0c, 0x3df0,	0x0200 },
3047	};
3048
3049	rtl_hw_start_8168f(tp);
3050	rtl_pcie_state_l2l3_disable(tp);
3051
3052	rtl_ephy_init(tp, e_info_8168f_1);
3053}
3054
3055static void rtl_hw_start_8168g(struct rtl8169_private *tp)
3056{
3057	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
3058	rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);
3059
3060	rtl_set_def_aspm_entry_latency(tp);
3061
3062	rtl_reset_packet_filter(tp);
3063	rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);
3064
3065	rtl_disable_rxdvgate(tp);
3066
3067	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3068	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3069
3070	rtl8168_config_eee_mac(tp);
3071
3072	rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
3073	rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
3074
3075	rtl_pcie_state_l2l3_disable(tp);
3076}
3077
3078static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
3079{
3080	static const struct ephy_info e_info_8168g_1[] = {
3081		{ 0x00, 0x0008,	0x0000 },
3082		{ 0x0c, 0x3ff0,	0x0820 },
3083		{ 0x1e, 0x0000,	0x0001 },
3084		{ 0x19, 0x8000,	0x0000 }
3085	};
3086
3087	rtl_hw_start_8168g(tp);
3088	rtl_ephy_init(tp, e_info_8168g_1);
3089}
3090
3091static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
3092{
3093	static const struct ephy_info e_info_8168g_2[] = {
3094		{ 0x00, 0x0008,	0x0000 },
3095		{ 0x0c, 0x3ff0,	0x0820 },
3096		{ 0x19, 0xffff,	0x7c00 },
3097		{ 0x1e, 0xffff,	0x20eb },
3098		{ 0x0d, 0xffff,	0x1666 },
3099		{ 0x00, 0xffff,	0x10a3 },
3100		{ 0x06, 0xffff,	0xf050 },
3101		{ 0x04, 0x0000,	0x0010 },
3102		{ 0x1d, 0x4000,	0x0000 },
3103	};
3104
3105	rtl_hw_start_8168g(tp);
3106	rtl_ephy_init(tp, e_info_8168g_2);
3107}
3108
3109static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
3110{
3111	static const struct ephy_info e_info_8411_2[] = {
3112		{ 0x00, 0x0008,	0x0000 },
3113		{ 0x0c, 0x37d0,	0x0820 },
3114		{ 0x1e, 0x0000,	0x0001 },
3115		{ 0x19, 0x8021,	0x0000 },
3116		{ 0x1e, 0x0000,	0x2000 },
3117		{ 0x0d, 0x0100,	0x0200 },
3118		{ 0x00, 0x0000,	0x0080 },
3119		{ 0x06, 0x0000,	0x0010 },
3120		{ 0x04, 0x0000,	0x0010 },
3121		{ 0x1d, 0x0000,	0x4000 },
3122	};
3123
3124	rtl_hw_start_8168g(tp);
3125
3126	rtl_ephy_init(tp, e_info_8411_2);
3127
	/* The following Realtek-provided magic fixes an issue with the RX unit
	 * getting confused after the PHY has been powered down.
	 */
3131	r8168_mac_ocp_write(tp, 0xFC28, 0x0000);
3132	r8168_mac_ocp_write(tp, 0xFC2A, 0x0000);
3133	r8168_mac_ocp_write(tp, 0xFC2C, 0x0000);
3134	r8168_mac_ocp_write(tp, 0xFC2E, 0x0000);
3135	r8168_mac_ocp_write(tp, 0xFC30, 0x0000);
3136	r8168_mac_ocp_write(tp, 0xFC32, 0x0000);
3137	r8168_mac_ocp_write(tp, 0xFC34, 0x0000);
3138	r8168_mac_ocp_write(tp, 0xFC36, 0x0000);
3139	mdelay(3);
3140	r8168_mac_ocp_write(tp, 0xFC26, 0x0000);
3141
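	/* What follows appears to be the MAC MCU patch code itself, written to
	 * OCP registers 0xf800 onwards, then enabled via 0xfc26 with its entry
	 * points set in 0xfc2a..0xfc36. The register semantics are undocumented,
	 * so this is Realtek-provided magic as well.
	 */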
3142	r8168_mac_ocp_write(tp, 0xF800, 0xE008);
3143	r8168_mac_ocp_write(tp, 0xF802, 0xE00A);
3144	r8168_mac_ocp_write(tp, 0xF804, 0xE00C);
3145	r8168_mac_ocp_write(tp, 0xF806, 0xE00E);
3146	r8168_mac_ocp_write(tp, 0xF808, 0xE027);
3147	r8168_mac_ocp_write(tp, 0xF80A, 0xE04F);
3148	r8168_mac_ocp_write(tp, 0xF80C, 0xE05E);
3149	r8168_mac_ocp_write(tp, 0xF80E, 0xE065);
3150	r8168_mac_ocp_write(tp, 0xF810, 0xC602);
3151	r8168_mac_ocp_write(tp, 0xF812, 0xBE00);
3152	r8168_mac_ocp_write(tp, 0xF814, 0x0000);
3153	r8168_mac_ocp_write(tp, 0xF816, 0xC502);
3154	r8168_mac_ocp_write(tp, 0xF818, 0xBD00);
3155	r8168_mac_ocp_write(tp, 0xF81A, 0x074C);
3156	r8168_mac_ocp_write(tp, 0xF81C, 0xC302);
3157	r8168_mac_ocp_write(tp, 0xF81E, 0xBB00);
3158	r8168_mac_ocp_write(tp, 0xF820, 0x080A);
3159	r8168_mac_ocp_write(tp, 0xF822, 0x6420);
3160	r8168_mac_ocp_write(tp, 0xF824, 0x48C2);
3161	r8168_mac_ocp_write(tp, 0xF826, 0x8C20);
3162	r8168_mac_ocp_write(tp, 0xF828, 0xC516);
3163	r8168_mac_ocp_write(tp, 0xF82A, 0x64A4);
3164	r8168_mac_ocp_write(tp, 0xF82C, 0x49C0);
3165	r8168_mac_ocp_write(tp, 0xF82E, 0xF009);
3166	r8168_mac_ocp_write(tp, 0xF830, 0x74A2);
3167	r8168_mac_ocp_write(tp, 0xF832, 0x8CA5);
3168	r8168_mac_ocp_write(tp, 0xF834, 0x74A0);
3169	r8168_mac_ocp_write(tp, 0xF836, 0xC50E);
3170	r8168_mac_ocp_write(tp, 0xF838, 0x9CA2);
3171	r8168_mac_ocp_write(tp, 0xF83A, 0x1C11);
3172	r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0);
3173	r8168_mac_ocp_write(tp, 0xF83E, 0xE006);
3174	r8168_mac_ocp_write(tp, 0xF840, 0x74F8);
3175	r8168_mac_ocp_write(tp, 0xF842, 0x48C4);
3176	r8168_mac_ocp_write(tp, 0xF844, 0x8CF8);
3177	r8168_mac_ocp_write(tp, 0xF846, 0xC404);
3178	r8168_mac_ocp_write(tp, 0xF848, 0xBC00);
3179	r8168_mac_ocp_write(tp, 0xF84A, 0xC403);
3180	r8168_mac_ocp_write(tp, 0xF84C, 0xBC00);
3181	r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2);
3182	r8168_mac_ocp_write(tp, 0xF850, 0x0C0A);
3183	r8168_mac_ocp_write(tp, 0xF852, 0xE434);
3184	r8168_mac_ocp_write(tp, 0xF854, 0xD3C0);
3185	r8168_mac_ocp_write(tp, 0xF856, 0x49D9);
3186	r8168_mac_ocp_write(tp, 0xF858, 0xF01F);
3187	r8168_mac_ocp_write(tp, 0xF85A, 0xC526);
3188	r8168_mac_ocp_write(tp, 0xF85C, 0x64A5);
3189	r8168_mac_ocp_write(tp, 0xF85E, 0x1400);
3190	r8168_mac_ocp_write(tp, 0xF860, 0xF007);
3191	r8168_mac_ocp_write(tp, 0xF862, 0x0C01);
3192	r8168_mac_ocp_write(tp, 0xF864, 0x8CA5);
3193	r8168_mac_ocp_write(tp, 0xF866, 0x1C15);
3194	r8168_mac_ocp_write(tp, 0xF868, 0xC51B);
3195	r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0);
3196	r8168_mac_ocp_write(tp, 0xF86C, 0xE013);
3197	r8168_mac_ocp_write(tp, 0xF86E, 0xC519);
3198	r8168_mac_ocp_write(tp, 0xF870, 0x74A0);
3199	r8168_mac_ocp_write(tp, 0xF872, 0x48C4);
3200	r8168_mac_ocp_write(tp, 0xF874, 0x8CA0);
3201	r8168_mac_ocp_write(tp, 0xF876, 0xC516);
3202	r8168_mac_ocp_write(tp, 0xF878, 0x74A4);
3203	r8168_mac_ocp_write(tp, 0xF87A, 0x48C8);
3204	r8168_mac_ocp_write(tp, 0xF87C, 0x48CA);
3205	r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4);
3206	r8168_mac_ocp_write(tp, 0xF880, 0xC512);
3207	r8168_mac_ocp_write(tp, 0xF882, 0x1B00);
3208	r8168_mac_ocp_write(tp, 0xF884, 0x9BA0);
3209	r8168_mac_ocp_write(tp, 0xF886, 0x1B1C);
3210	r8168_mac_ocp_write(tp, 0xF888, 0x483F);
3211	r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2);
3212	r8168_mac_ocp_write(tp, 0xF88C, 0x1B04);
3213	r8168_mac_ocp_write(tp, 0xF88E, 0xC508);
3214	r8168_mac_ocp_write(tp, 0xF890, 0x9BA0);
3215	r8168_mac_ocp_write(tp, 0xF892, 0xC505);
3216	r8168_mac_ocp_write(tp, 0xF894, 0xBD00);
3217	r8168_mac_ocp_write(tp, 0xF896, 0xC502);
3218	r8168_mac_ocp_write(tp, 0xF898, 0xBD00);
3219	r8168_mac_ocp_write(tp, 0xF89A, 0x0300);
3220	r8168_mac_ocp_write(tp, 0xF89C, 0x051E);
3221	r8168_mac_ocp_write(tp, 0xF89E, 0xE434);
3222	r8168_mac_ocp_write(tp, 0xF8A0, 0xE018);
3223	r8168_mac_ocp_write(tp, 0xF8A2, 0xE092);
3224	r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20);
3225	r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0);
3226	r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F);
3227	r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4);
3228	r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3);
3229	r8168_mac_ocp_write(tp, 0xF8AE, 0xF007);
3230	r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0);
3231	r8168_mac_ocp_write(tp, 0xF8B2, 0xF103);
3232	r8168_mac_ocp_write(tp, 0xF8B4, 0xC607);
3233	r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00);
3234	r8168_mac_ocp_write(tp, 0xF8B8, 0xC606);
3235	r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00);
3236	r8168_mac_ocp_write(tp, 0xF8BC, 0xC602);
3237	r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00);
3238	r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C);
3239	r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28);
3240	r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C);
3241	r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00);
3242	r8168_mac_ocp_write(tp, 0xF8C8, 0xC707);
3243	r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00);
3244	r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2);
3245	r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1);
3246	r8168_mac_ocp_write(tp, 0xF8D0, 0xC502);
3247	r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00);
3248	r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA);
3249	r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0);
3250	r8168_mac_ocp_write(tp, 0xF8D8, 0xC502);
3251	r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00);
3252	r8168_mac_ocp_write(tp, 0xF8DC, 0x0132);
3253
3254	r8168_mac_ocp_write(tp, 0xFC26, 0x8000);
3255
3256	r8168_mac_ocp_write(tp, 0xFC2A, 0x0743);
3257	r8168_mac_ocp_write(tp, 0xFC2C, 0x0801);
3258	r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9);
3259	r8168_mac_ocp_write(tp, 0xFC30, 0x02FD);
3260	r8168_mac_ocp_write(tp, 0xFC32, 0x0C25);
3261	r8168_mac_ocp_write(tp, 0xFC34, 0x00A9);
3262	r8168_mac_ocp_write(tp, 0xFC36, 0x012D);
3263}
3264
3265static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
3266{
3267	static const struct ephy_info e_info_8168h_1[] = {
3268		{ 0x1e, 0x0800,	0x0001 },
3269		{ 0x1d, 0x0000,	0x0800 },
3270		{ 0x05, 0xffff,	0x2089 },
3271		{ 0x06, 0xffff,	0x5881 },
3272		{ 0x04, 0xffff,	0x854a },
3273		{ 0x01, 0xffff,	0x068b }
3274	};
3275	int rg_saw_cnt;
3276
3277	rtl_ephy_init(tp, e_info_8168h_1);
3278
3279	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
3280	rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);
3281
3282	rtl_set_def_aspm_entry_latency(tp);
3283
3284	rtl_reset_packet_filter(tp);
3285
3286	rtl_eri_set_bits(tp, 0xdc, 0x001c);
3287
3288	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
3289
3290	rtl_disable_rxdvgate(tp);
3291
3292	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3293	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3294
3295	rtl8168_config_eee_mac(tp);
3296
3297	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3298	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
3299
3300	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
3301
3302	rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
3303
3304	rtl_pcie_state_l2l3_disable(tp);
3305
3306	rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff;
3307	if (rg_saw_cnt > 0) {
3308		u16 sw_cnt_1ms_ini;
3309
		sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff;
3312		r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
3313	}
3314
3315	r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
3316	r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008);
3317	r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f);
3318	r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
3319
3320	r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
3321	r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
3322	r8168_mac_ocp_write(tp, 0xc094, 0x0000);
3323	r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
3324}
3325
3326static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
3327{
3328	rtl8168ep_stop_cmac(tp);
3329
3330	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
3331	rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f);
3332
3333	rtl_set_def_aspm_entry_latency(tp);
3334
3335	rtl_reset_packet_filter(tp);
3336
3337	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
3338
3339	rtl_disable_rxdvgate(tp);
3340
3341	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3342	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3343
3344	rtl8168_config_eee_mac(tp);
3345
3346	rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
3347
3348	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
3349
3350	rtl_pcie_state_l2l3_disable(tp);
3351}
3352
3353static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
3354{
3355	static const struct ephy_info e_info_8168ep_3[] = {
3356		{ 0x00, 0x0000,	0x0080 },
3357		{ 0x0d, 0x0100,	0x0200 },
3358		{ 0x19, 0x8021,	0x0000 },
3359		{ 0x1e, 0x0000,	0x2000 },
3360	};
3361
3362	rtl_ephy_init(tp, e_info_8168ep_3);
3363
3364	rtl_hw_start_8168ep(tp);
3365
3366	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3367	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
3368
3369	r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271);
3370	r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000);
3371	r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080);
3372}
3373
3374static void rtl_hw_start_8117(struct rtl8169_private *tp)
3375{
3376	static const struct ephy_info e_info_8117[] = {
3377		{ 0x19, 0x0040,	0x1100 },
3378		{ 0x59, 0x0040,	0x1100 },
3379	};
3380	int rg_saw_cnt;
3381
3382	rtl8168ep_stop_cmac(tp);
3383	rtl_ephy_init(tp, e_info_8117);
3384
3385	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
3386	rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f);
3387
3388	rtl_set_def_aspm_entry_latency(tp);
3389
3390	rtl_reset_packet_filter(tp);
3391
3392	rtl_eri_set_bits(tp, 0xd4, 0x0010);
3393
3394	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
3395
3396	rtl_disable_rxdvgate(tp);
3397
3398	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3399	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3400
3401	rtl8168_config_eee_mac(tp);
3402
3403	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3404	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
3405
3406	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
3407
3408	rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
3409
3410	rtl_pcie_state_l2l3_disable(tp);
3411
3412	rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff;
3413	if (rg_saw_cnt > 0) {
3414		u16 sw_cnt_1ms_ini;
3415
3416		sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff;
3417		r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
3418	}
3419
3420	r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
3421	r8168_mac_ocp_write(tp, 0xea80, 0x0003);
3422	r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009);
3423	r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
3424
3425	r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
3426	r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
3427	r8168_mac_ocp_write(tp, 0xc094, 0x0000);
3428	r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
3429
3430	/* firmware is for MAC only */
3431	r8169_apply_firmware(tp);
3432}
3433
3434static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
3435{
3436	static const struct ephy_info e_info_8102e_1[] = {
3437		{ 0x01,	0, 0x6e65 },
3438		{ 0x02,	0, 0x091f },
3439		{ 0x03,	0, 0xc2f9 },
3440		{ 0x06,	0, 0xafb5 },
3441		{ 0x07,	0, 0x0e00 },
3442		{ 0x19,	0, 0xec80 },
3443		{ 0x01,	0, 0x2e65 },
3444		{ 0x01,	0, 0x6e65 }
3445	};
3446	u8 cfg1;
3447
3448	rtl_set_def_aspm_entry_latency(tp);
3449
3450	RTL_W8(tp, DBG_REG, FIX_NAK_1);
3451
3452	RTL_W8(tp, Config1,
3453	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
3454	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
3455
3456	cfg1 = RTL_R8(tp, Config1);
3457	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
3458		RTL_W8(tp, Config1, cfg1 & ~LEDS0);
3459
3460	rtl_ephy_init(tp, e_info_8102e_1);
3461}
3462
3463static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
3464{
3465	rtl_set_def_aspm_entry_latency(tp);
3466
3467	RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable);
3468	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
3469}
3470
3471static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
3472{
3473	rtl_hw_start_8102e_2(tp);
3474
3475	rtl_ephy_write(tp, 0x03, 0xc2f9);
3476}
3477
3478static void rtl_hw_start_8401(struct rtl8169_private *tp)
3479{
3480	static const struct ephy_info e_info_8401[] = {
3481		{ 0x01,	0xffff, 0x6fe5 },
3482		{ 0x03,	0xffff, 0x0599 },
3483		{ 0x06,	0xffff, 0xaf25 },
3484		{ 0x07,	0xffff, 0x8e68 },
3485	};
3486
3487	rtl_ephy_init(tp, e_info_8401);
3488	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
3489}
3490
3491static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
3492{
3493	static const struct ephy_info e_info_8105e_1[] = {
3494		{ 0x07,	0, 0x4000 },
3495		{ 0x19,	0, 0x0200 },
3496		{ 0x19,	0, 0x0020 },
3497		{ 0x1e,	0, 0x2000 },
3498		{ 0x03,	0, 0x0001 },
3499		{ 0x19,	0, 0x0100 },
3500		{ 0x19,	0, 0x0004 },
3501		{ 0x0a,	0, 0x0020 }
3502	};
3503
3504	/* Force LAN exit from ASPM if Rx/Tx are not idle */
3505	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
3506
3507	/* Disable Early Tally Counter */
3508	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000);
3509
3510	RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
3511	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
3512
3513	rtl_ephy_init(tp, e_info_8105e_1);
3514
3515	rtl_pcie_state_l2l3_disable(tp);
3516}
3517
3518static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
3519{
3520	rtl_hw_start_8105e_1(tp);
3521	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
3522}
3523
3524static void rtl_hw_start_8402(struct rtl8169_private *tp)
3525{
3526	static const struct ephy_info e_info_8402[] = {
3527		{ 0x19,	0xffff, 0xff64 },
3528		{ 0x1e,	0, 0x4000 }
3529	};
3530
3531	rtl_set_def_aspm_entry_latency(tp);
3532
3533	/* Force LAN exit from ASPM if Rx/Tx are not idle */
3534	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
3535
3536	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
3537
3538	rtl_ephy_init(tp, e_info_8402);
3539
3540	rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06);
3541	rtl_reset_packet_filter(tp);
3542	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3543	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3544	rtl_w0w1_eri(tp, 0x0d4, 0x0e00, 0xff00);
3545
3546	/* disable EEE */
3547	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
3548
3549	rtl_pcie_state_l2l3_disable(tp);
3550}
3551
3552static void rtl_hw_start_8106(struct rtl8169_private *tp)
3553{
3554	/* Force LAN exit from ASPM if Rx/Tx are not idle */
3555	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
3556
3557	RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
3558	RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
3559	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3560
3561	/* L0 7us, L1 32us - needed to avoid issues with link-up detection */
3562	rtl_set_aspm_entry_latency(tp, 0x2f);
3563
3564	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
3565
3566	/* disable EEE */
3567	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
3568
3569	rtl_pcie_state_l2l3_disable(tp);
3570}
3571
3572DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
3573{
3574	return r8168_mac_ocp_read(tp, 0xe00e) & BIT(13);
3575}
3576
3577static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
3578{
3579	rtl_pcie_state_l2l3_disable(tp);
3580
3581	RTL_W16(tp, 0x382, 0x221b);
3582	RTL_W8(tp, 0x4500, 0);
3583	RTL_W16(tp, 0x4800, 0);
3584
3585	/* disable UPS */
3586	r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000);
3587
3588	RTL_W8(tp, Config1, RTL_R8(tp, Config1) & ~0x10);
3589
3590	r8168_mac_ocp_write(tp, 0xc140, 0xffff);
3591	r8168_mac_ocp_write(tp, 0xc142, 0xffff);
3592
3593	r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x03a9);
3594	r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000);
3595	r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080);
3596
3597	/* disable new tx descriptor format */
3598	r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
3599
3600	if (tp->mac_version == RTL_GIGA_MAC_VER_63)
3601		r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
3602	else
3603		r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
3604
3605	if (tp->mac_version == RTL_GIGA_MAC_VER_63)
3606		r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000);
3607	else
3608		r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020);
3609
3610	r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c);
3611	r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033);
3612	r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040);
3613	r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
3614	r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
3615	r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
3616	r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403);
3617	r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068);
3618	r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f);
3619
3620	r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
3621	r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001);
3622	udelay(1);
3623	r8168_mac_ocp_modify(tp, 0xeb54, 0x0001, 0x0000);
3624	RTL_W16(tp, 0x1880, RTL_R16(tp, 0x1880) & ~0x0030);
3625
3626	r8168_mac_ocp_write(tp, 0xe098, 0xc302);
3627
3628	rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
3629
3630	if (tp->mac_version == RTL_GIGA_MAC_VER_63)
3631		rtl8125b_config_eee_mac(tp);
3632	else
3633		rtl8125a_config_eee_mac(tp);
3634
3635	rtl_disable_rxdvgate(tp);
3636}
3637
3638static void rtl_hw_start_8125a_2(struct rtl8169_private *tp)
3639{
3640	static const struct ephy_info e_info_8125a_2[] = {
3641		{ 0x04, 0xffff, 0xd000 },
3642		{ 0x0a, 0xffff, 0x8653 },
3643		{ 0x23, 0xffff, 0xab66 },
3644		{ 0x20, 0xffff, 0x9455 },
3645		{ 0x21, 0xffff, 0x99ff },
3646		{ 0x29, 0xffff, 0xfe04 },
3647
3648		{ 0x44, 0xffff, 0xd000 },
3649		{ 0x4a, 0xffff, 0x8653 },
3650		{ 0x63, 0xffff, 0xab66 },
3651		{ 0x60, 0xffff, 0x9455 },
3652		{ 0x61, 0xffff, 0x99ff },
3653		{ 0x69, 0xffff, 0xfe04 },
3654	};
3655
3656	rtl_set_def_aspm_entry_latency(tp);
3657	rtl_ephy_init(tp, e_info_8125a_2);
3658	rtl_hw_start_8125_common(tp);
3659}
3660
3661static void rtl_hw_start_8125b(struct rtl8169_private *tp)
3662{
3663	static const struct ephy_info e_info_8125b[] = {
3664		{ 0x0b, 0xffff, 0xa908 },
3665		{ 0x1e, 0xffff, 0x20eb },
3666		{ 0x4b, 0xffff, 0xa908 },
3667		{ 0x5e, 0xffff, 0x20eb },
3668		{ 0x22, 0x0030, 0x0020 },
3669		{ 0x62, 0x0030, 0x0020 },
3670	};
3671
3672	rtl_set_def_aspm_entry_latency(tp);
3673	rtl_ephy_init(tp, e_info_8125b);
3674	rtl_hw_start_8125_common(tp);
3675}
3676
3677static void rtl_hw_config(struct rtl8169_private *tp)
3678{
3679	static const rtl_generic_fct hw_configs[] = {
3680		[RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1,
3681		[RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
3682		[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
3683		[RTL_GIGA_MAC_VER_10] = NULL,
3684		[RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
3685		[RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401,
3686		[RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
3687		[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
3688		[RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1,
3689		[RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2,
3690		[RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_2,
3691		[RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4,
3692		[RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2,
3693		[RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3,
3694		[RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d,
3695		[RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d,
3696		[RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4,
3697		[RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1,
3698		[RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2,
3699		[RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168d,
3700		[RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1,
3701		[RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1,
3702		[RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2,
3703		[RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1,
3704		[RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1,
3705		[RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402,
3706		[RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411,
3707		[RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106,
3708		[RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1,
3709		[RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2,
3710		[RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2,
3711		[RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2,
3712		[RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1,
3713		[RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
3714		[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
3715		[RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
3716		[RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
3717		[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
3718		[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
3719	};
3720
3721	if (hw_configs[tp->mac_version])
3722		hw_configs[tp->mac_version](tp);
3723}
3724
3725static void rtl_hw_start_8125(struct rtl8169_private *tp)
3726{
3727	int i;
3728
3729	/* disable interrupt coalescing */
3730	for (i = 0xa00; i < 0xb00; i += 4)
3731		RTL_W32(tp, i, 0);
3732
3733	rtl_hw_config(tp);
3734}
3735
3736static void rtl_hw_start_8168(struct rtl8169_private *tp)
3737{
3738	if (rtl_is_8168evl_up(tp))
3739		RTL_W8(tp, MaxTxPacketSize, EarlySize);
3740	else
3741		RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
3742
3743	rtl_hw_config(tp);
3744
3745	/* disable interrupt coalescing */
3746	RTL_W16(tp, IntrMitigate, 0x0000);
3747}
3748
3749static void rtl_hw_start_8169(struct rtl8169_private *tp)
3750{
3751	RTL_W8(tp, EarlyTxThres, NoEarlyTx);
3752
3753	tp->cp_cmd |= PCIMulRW;
3754
3755	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
3756	    tp->mac_version == RTL_GIGA_MAC_VER_03)
3757		tp->cp_cmd |= EnAnaPLL;
3758
3759	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
3760
3761	rtl8169_set_magic_reg(tp);
3762
3763	/* disable interrupt coalescing */
3764	RTL_W16(tp, IntrMitigate, 0x0000);
3765}
3766
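/* Common chip bring-up: unlock the config registers, keep ASPM/ClkReq off
 * while the extended PHY registers are programmed, run the per-family setup,
 * then re-enable ASPM, program the descriptor ring addresses, re-lock the
 * config registers and bring up the datapath (Tx/Rx enable, Rx/Tx config,
 * Rx mode) before enabling interrupts.
 */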
3767static void rtl_hw_start(struct rtl8169_private *tp)
3768{
3769	rtl_unlock_config_regs(tp);
3770	/* disable aspm and clock request before ephy access */
3771	rtl_hw_aspm_clkreq_enable(tp, false);
3772	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
3773
3774	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
3775		rtl_hw_start_8169(tp);
3776	else if (rtl_is_8125(tp))
3777		rtl_hw_start_8125(tp);
3778	else
3779		rtl_hw_start_8168(tp);
3780
3781	rtl_enable_exit_l1(tp);
3782	rtl_hw_aspm_clkreq_enable(tp, true);
3783	rtl_set_rx_max_size(tp);
3784	rtl_set_rx_tx_desc_registers(tp);
3785	rtl_lock_config_regs(tp);
3786
3787	rtl_jumbo_config(tp);
3788
3789	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
3790	rtl_pci_commit(tp);
3791
3792	RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
3793	rtl_init_rxcfg(tp);
3794	rtl_set_tx_config_registers(tp);
3795	rtl_set_rx_config_features(tp, tp->dev->features);
3796	rtl_set_rx_mode(tp->dev);
3797	rtl_irq_enable(tp);
3798}
3799
3800static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
3801{
3802	struct rtl8169_private *tp = netdev_priv(dev);
3803
3804	dev->mtu = new_mtu;
3805	netdev_update_features(dev);
3806	rtl_jumbo_config(tp);
3807
3808	switch (tp->mac_version) {
3809	case RTL_GIGA_MAC_VER_61:
3810	case RTL_GIGA_MAC_VER_63:
3811		rtl8125_set_eee_txidle_timer(tp);
3812		break;
3813	default:
3814		break;
3815	}
3816
3817	return 0;
3818}
3819
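/* Hand an Rx descriptor back to the NIC: clear opts2, make sure all prior
 * memory writes are visible (dma_wmb), then set DescOwn together with the
 * buffer size while preserving the RingEnd marker of the last descriptor.
 */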
3820static void rtl8169_mark_to_asic(struct RxDesc *desc)
3821{
3822	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
3823
3824	desc->opts2 = 0;
3825	/* Force memory writes to complete before releasing descriptor */
3826	dma_wmb();
3827	WRITE_ONCE(desc->opts1, cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE));
3828}
3829
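/* Rx buffers are full pages of order get_order(R8169_RX_BUF_SIZE), allocated
 * on the device's NUMA node and kept DMA-mapped for the lifetime of the ring.
 */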
3830static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
3831					  struct RxDesc *desc)
3832{
3833	struct device *d = tp_to_dev(tp);
3834	int node = dev_to_node(d);
3835	dma_addr_t mapping;
3836	struct page *data;
3837
3838	data = alloc_pages_node(node, GFP_KERNEL, get_order(R8169_RX_BUF_SIZE));
3839	if (!data)
3840		return NULL;
3841
3842	mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
3843	if (unlikely(dma_mapping_error(d, mapping))) {
3844		netdev_err(tp->dev, "Failed to map RX DMA!\n");
3845		__free_pages(data, get_order(R8169_RX_BUF_SIZE));
3846		return NULL;
3847	}
3848
3849	desc->addr = cpu_to_le64(mapping);
3850	rtl8169_mark_to_asic(desc);
3851
3852	return data;
3853}
3854
3855static void rtl8169_rx_clear(struct rtl8169_private *tp)
3856{
3857	int i;
3858
3859	for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) {
3860		dma_unmap_page(tp_to_dev(tp),
3861			       le64_to_cpu(tp->RxDescArray[i].addr),
3862			       R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
3863		__free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE));
3864		tp->Rx_databuff[i] = NULL;
3865		tp->RxDescArray[i].addr = 0;
3866		tp->RxDescArray[i].opts1 = 0;
3867	}
3868}
3869
3870static int rtl8169_rx_fill(struct rtl8169_private *tp)
3871{
3872	int i;
3873
3874	for (i = 0; i < NUM_RX_DESC; i++) {
3875		struct page *data;
3876
3877		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
3878		if (!data) {
3879			rtl8169_rx_clear(tp);
3880			return -ENOMEM;
3881		}
3882		tp->Rx_databuff[i] = data;
3883	}
3884
3885	/* mark as last descriptor in the ring */
3886	tp->RxDescArray[NUM_RX_DESC - 1].opts1 |= cpu_to_le32(RingEnd);
3887
3888	return 0;
3889}
3890
3891static int rtl8169_init_ring(struct rtl8169_private *tp)
3892{
3893	rtl8169_init_ring_indexes(tp);
3894
3895	memset(tp->tx_skb, 0, sizeof(tp->tx_skb));
3896	memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff));
3897
3898	return rtl8169_rx_fill(tp);
3899}
3900
3901static void rtl8169_unmap_tx_skb(struct rtl8169_private *tp, unsigned int entry)
3902{
3903	struct ring_info *tx_skb = tp->tx_skb + entry;
3904	struct TxDesc *desc = tp->TxDescArray + entry;
3905
3906	dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), tx_skb->len,
3907			 DMA_TO_DEVICE);
3908	memset(desc, 0, sizeof(*desc));
3909	memset(tx_skb, 0, sizeof(*tx_skb));
3910}
3911
3912static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
3913				   unsigned int n)
3914{
3915	unsigned int i;
3916
3917	for (i = 0; i < n; i++) {
3918		unsigned int entry = (start + i) % NUM_TX_DESC;
3919		struct ring_info *tx_skb = tp->tx_skb + entry;
3920		unsigned int len = tx_skb->len;
3921
3922		if (len) {
3923			struct sk_buff *skb = tx_skb->skb;
3924
3925			rtl8169_unmap_tx_skb(tp, entry);
3926			if (skb)
3927				dev_consume_skb_any(skb);
3928		}
3929	}
3930}
3931
3932static void rtl8169_tx_clear(struct rtl8169_private *tp)
3933{
3934	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
3935	netdev_reset_queue(tp->dev);
3936}
3937
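/* Quiesce the chip before a reset: stop NAPI, mask and ack interrupts, close
 * the receiver, apply the per-family wait/stop quirk, then issue the hardware
 * reset and drop all pending Tx buffers.
 */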
3938static void rtl8169_cleanup(struct rtl8169_private *tp)
3939{
3940	napi_disable(&tp->napi);
3941
3942	/* Give a racing hard_start_xmit a few cycles to complete. */
3943	synchronize_net();
3944
3945	/* Disable interrupts */
3946	rtl8169_irq_mask_and_ack(tp);
3947
3948	rtl_rx_close(tp);
3949
3950	switch (tp->mac_version) {
3951	case RTL_GIGA_MAC_VER_28:
3952	case RTL_GIGA_MAC_VER_31:
3953		rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000);
3954		break;
3955	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
3956		RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
3957		rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
3958		break;
3959	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
3960		rtl_enable_rxdvgate(tp);
3961		fsleep(2000);
3962		break;
3963	default:
3964		RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
3965		fsleep(100);
3966		break;
3967	}
3968
3969	rtl_hw_reset(tp);
3970
3971	rtl8169_tx_clear(tp);
3972	rtl8169_init_ring_indexes(tp);
3973}
3974
3975static void rtl_reset_work(struct rtl8169_private *tp)
3976{
3977	int i;
3978
3979	netif_stop_queue(tp->dev);
3980
3981	rtl8169_cleanup(tp);
3982
3983	for (i = 0; i < NUM_RX_DESC; i++)
3984		rtl8169_mark_to_asic(tp->RxDescArray + i);
3985
3986	napi_enable(&tp->napi);
3987	rtl_hw_start(tp);
3988}
3989
3990static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue)
3991{
3992	struct rtl8169_private *tp = netdev_priv(dev);
3993
3994	rtl_schedule_task(tp, RTL_FLAG_TASK_TX_TIMEOUT);
3995}
3996
3997static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len,
3998			  void *addr, unsigned int entry, bool desc_own)
3999{
4000	struct TxDesc *txd = tp->TxDescArray + entry;
4001	struct device *d = tp_to_dev(tp);
4002	dma_addr_t mapping;
4003	u32 opts1;
4004	int ret;
4005
4006	mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
4007	ret = dma_mapping_error(d, mapping);
4008	if (unlikely(ret)) {
4009		if (net_ratelimit())
4010			netdev_err(tp->dev, "Failed to map TX data!\n");
4011		return ret;
4012	}
4013
4014	txd->addr = cpu_to_le64(mapping);
4015	txd->opts2 = cpu_to_le32(opts[1]);
4016
4017	opts1 = opts[0] | len;
4018	if (entry == NUM_TX_DESC - 1)
4019		opts1 |= RingEnd;
4020	if (desc_own)
4021		opts1 |= DescOwn;
4022	txd->opts1 = cpu_to_le32(opts1);
4023
4024	tp->tx_skb[entry].len = len;
4025
4026	return 0;
4027}
4028
4029static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
4030			      const u32 *opts, unsigned int entry)
4031{
4032	struct skb_shared_info *info = skb_shinfo(skb);
4033	unsigned int cur_frag;
4034
4035	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
4036		const skb_frag_t *frag = info->frags + cur_frag;
4037		void *addr = skb_frag_address(frag);
4038		u32 len = skb_frag_size(frag);
4039
4040		entry = (entry + 1) % NUM_TX_DESC;
4041
4042		if (unlikely(rtl8169_tx_map(tp, opts, len, addr, entry, true)))
4043			goto err_out;
4044	}
4045
4046	return 0;
4047
4048err_out:
4049	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
4050	return -EIO;
4051}
4052
4053static bool rtl_skb_is_udp(struct sk_buff *skb)
4054{
4055	int no = skb_network_offset(skb);
4056	struct ipv6hdr *i6h, _i6h;
4057	struct iphdr *ih, _ih;
4058
4059	switch (vlan_get_protocol(skb)) {
4060	case htons(ETH_P_IP):
4061		ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih);
4062		return ih && ih->protocol == IPPROTO_UDP;
4063	case htons(ETH_P_IPV6):
4064		i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h);
4065		return i6h && i6h->nexthdr == IPPROTO_UDP;
4066	default:
4067		return false;
4068	}
4069}
4070
4071#define RTL_MIN_PATCH_LEN	47
4072
4073/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */
4074static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp,
4075					    struct sk_buff *skb)
4076{
4077	unsigned int padto = 0, len = skb->len;
4078
4079	if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN &&
4080	    rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) {
4081		unsigned int trans_data_len = skb_tail_pointer(skb) -
4082					      skb_transport_header(skb);
4083
4084		if (trans_data_len >= offsetof(struct udphdr, len) &&
4085		    trans_data_len < RTL_MIN_PATCH_LEN) {
4086			u16 dest = ntohs(udp_hdr(skb)->dest);
4087
4088			/* dest is a standard PTP port */
4089			if (dest == 319 || dest == 320)
4090				padto = len + RTL_MIN_PATCH_LEN - trans_data_len;
4091		}
4092
4093		if (trans_data_len < sizeof(struct udphdr))
4094			padto = max_t(unsigned int, padto,
4095				      len + sizeof(struct udphdr) - trans_data_len);
4096	}
4097
4098	return padto;
4099}
4100
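/* Combine the RTL8125 UDP patch requirement with a minimum-length quirk: on
 * the listed chip versions short packets are padded to ETH_ZLEN in software,
 * presumably to avoid hardware issues with very short frames (see also the
 * ETH_ZLEN check in rtl8169_features_check()).
 */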
4101static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
4102					   struct sk_buff *skb)
4103{
4104	unsigned int padto;
4105
4106	padto = rtl8125_quirk_udp_padto(tp, skb);
4107
4108	switch (tp->mac_version) {
4109	case RTL_GIGA_MAC_VER_34:
4110	case RTL_GIGA_MAC_VER_61:
4111	case RTL_GIGA_MAC_VER_63:
4112		padto = max_t(unsigned int, padto, ETH_ZLEN);
4113		break;
4114	default:
4115		break;
4116	}
4117
4118	return padto;
4119}
4120
4121static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
4122{
4123	u32 mss = skb_shinfo(skb)->gso_size;
4124
4125	if (mss) {
4126		opts[0] |= TD_LSO;
4127		opts[0] |= mss << TD0_MSS_SHIFT;
4128	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
4129		const struct iphdr *ip = ip_hdr(skb);
4130
4131		if (ip->protocol == IPPROTO_TCP)
4132			opts[0] |= TD0_IP_CS | TD0_TCP_CS;
4133		else if (ip->protocol == IPPROTO_UDP)
4134			opts[0] |= TD0_IP_CS | TD0_UDP_CS;
4135		else
4136			WARN_ON_ONCE(1);
4137	}
4138}
4139
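/* Fill the type-2 Tx descriptor options: for TSO set GTSENV4/GTSENV6, the
 * transport header offset and the MSS; for plain checksum offload set the
 * IPv4/IPv6 and TCP/UDP checksum bits plus the transport offset. Packets
 * without offload may still need padding, see rtl_quirk_packet_padto().
 */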
4140static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
4141				struct sk_buff *skb, u32 *opts)
4142{
4143	struct skb_shared_info *shinfo = skb_shinfo(skb);
4144	u32 mss = shinfo->gso_size;
4145
4146	if (mss) {
4147		if (shinfo->gso_type & SKB_GSO_TCPV4) {
4148			opts[0] |= TD1_GTSENV4;
4149		} else if (shinfo->gso_type & SKB_GSO_TCPV6) {
4150			if (skb_cow_head(skb, 0))
4151				return false;
4152
4153			tcp_v6_gso_csum_prep(skb);
4154			opts[0] |= TD1_GTSENV6;
4155		} else {
4156			WARN_ON_ONCE(1);
4157		}
4158
4159		opts[0] |= skb_transport_offset(skb) << GTTCPHO_SHIFT;
4160		opts[1] |= mss << TD1_MSS_SHIFT;
4161	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
4162		u8 ip_protocol;
4163
4164		switch (vlan_get_protocol(skb)) {
4165		case htons(ETH_P_IP):
4166			opts[1] |= TD1_IPv4_CS;
4167			ip_protocol = ip_hdr(skb)->protocol;
4168			break;
4169
4170		case htons(ETH_P_IPV6):
4171			opts[1] |= TD1_IPv6_CS;
4172			ip_protocol = ipv6_hdr(skb)->nexthdr;
4173			break;
4174
4175		default:
4176			ip_protocol = IPPROTO_RAW;
4177			break;
4178		}
4179
4180		if (ip_protocol == IPPROTO_TCP)
4181			opts[1] |= TD1_TCP_CS;
4182		else if (ip_protocol == IPPROTO_UDP)
4183			opts[1] |= TD1_UDP_CS;
4184		else
4185			WARN_ON_ONCE(1);
4186
4187		opts[1] |= skb_transport_offset(skb) << TCPHO_SHIFT;
4188	} else {
4189		unsigned int padto = rtl_quirk_packet_padto(tp, skb);
4190
4191		/* skb_padto would free the skb on error */
4192		return !__skb_put_padto(skb, padto, false);
4193	}
4194
4195	return true;
4196}
4197
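/* cur_tx and dirty_tx are free-running counters, so the number of unused
 * descriptors is simply dirty_tx + NUM_TX_DESC - cur_tx.
 */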
4198static unsigned int rtl_tx_slots_avail(struct rtl8169_private *tp)
4199{
4200	return READ_ONCE(tp->dirty_tx) + NUM_TX_DESC - READ_ONCE(tp->cur_tx);
4201}
4202
4203/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */
4204static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp)
4205{
4206	switch (tp->mac_version) {
4207	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
4208	case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
4209		return false;
4210	default:
4211		return true;
4212	}
4213}
4214
4215static void rtl8169_doorbell(struct rtl8169_private *tp)
4216{
4217	if (rtl_is_8125(tp))
4218		RTL_W16(tp, TxPoll_8125, BIT(0));
4219	else
4220		RTL_W8(tp, TxPoll, NPQ);
4221}
4222
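/* Map the linear part first without DescOwn, then the fragments with DescOwn
 * set. Only after a dma_wmb() is DescOwn | FirstFrag written to the first
 * descriptor, so the NIC never sees a partially built chain. The doorbell is
 * only rung when the xmit_more batch ends or the queue is being stopped.
 */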
4223static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
4224				      struct net_device *dev)
4225{
4226	unsigned int frags = skb_shinfo(skb)->nr_frags;
4227	struct rtl8169_private *tp = netdev_priv(dev);
4228	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
4229	struct TxDesc *txd_first, *txd_last;
4230	bool stop_queue, door_bell;
4231	u32 opts[2];
4232
4233	if (unlikely(!rtl_tx_slots_avail(tp))) {
4234		if (net_ratelimit())
4235			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
4236		goto err_stop_0;
4237	}
4238
4239	opts[1] = rtl8169_tx_vlan_tag(skb);
4240	opts[0] = 0;
4241
4242	if (!rtl_chip_supports_csum_v2(tp))
4243		rtl8169_tso_csum_v1(skb, opts);
4244	else if (!rtl8169_tso_csum_v2(tp, skb, opts))
4245		goto err_dma_0;
4246
4247	if (unlikely(rtl8169_tx_map(tp, opts, skb_headlen(skb), skb->data,
4248				    entry, false)))
4249		goto err_dma_0;
4250
4251	txd_first = tp->TxDescArray + entry;
4252
4253	if (frags) {
4254		if (rtl8169_xmit_frags(tp, skb, opts, entry))
4255			goto err_dma_1;
4256		entry = (entry + frags) % NUM_TX_DESC;
4257	}
4258
4259	txd_last = tp->TxDescArray + entry;
4260	txd_last->opts1 |= cpu_to_le32(LastFrag);
4261	tp->tx_skb[entry].skb = skb;
4262
4263	skb_tx_timestamp(skb);
4264
4265	/* Force memory writes to complete before releasing descriptor */
4266	dma_wmb();
4267
4268	door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more());
4269
4270	txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag);
4271
4272	/* rtl_tx needs to see descriptor changes before updated tp->cur_tx */
4273	smp_wmb();
4274
4275	WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1);
4276
4277	stop_queue = !netif_subqueue_maybe_stop(dev, 0, rtl_tx_slots_avail(tp),
4278						R8169_TX_STOP_THRS,
4279						R8169_TX_START_THRS);
4280	if (door_bell || stop_queue)
4281		rtl8169_doorbell(tp);
4282
4283	return NETDEV_TX_OK;
4284
4285err_dma_1:
4286	rtl8169_unmap_tx_skb(tp, entry);
4287err_dma_0:
4288	dev_kfree_skb_any(skb);
4289	dev->stats.tx_dropped++;
4290	return NETDEV_TX_OK;
4291
4292err_stop_0:
4293	netif_stop_queue(dev);
4294	dev->stats.tx_dropped++;
4295	return NETDEV_TX_BUSY;
4296}
4297
4298static unsigned int rtl_last_frag_len(struct sk_buff *skb)
4299{
4300	struct skb_shared_info *info = skb_shinfo(skb);
4301	unsigned int nr_frags = info->nr_frags;
4302
4303	if (!nr_frags)
4304		return UINT_MAX;
4305
4306	return skb_frag_size(info->frags + nr_frags - 1);
4307}
4308
4309/* Workaround for hw issues with TSO on RTL8168evl */
4310static netdev_features_t rtl8168evl_fix_tso(struct sk_buff *skb,
4311					    netdev_features_t features)
4312{
4313	/* IPv4 header has options field */
4314	if (vlan_get_protocol(skb) == htons(ETH_P_IP) &&
4315	    ip_hdrlen(skb) > sizeof(struct iphdr))
4316		features &= ~NETIF_F_ALL_TSO;
4317
4318	/* IPv4 TCP header has options field */
4319	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 &&
4320		 tcp_hdrlen(skb) > sizeof(struct tcphdr))
4321		features &= ~NETIF_F_ALL_TSO;
4322
4323	else if (rtl_last_frag_len(skb) <= 6)
4324		features &= ~NETIF_F_ALL_TSO;
4325
4326	return features;
4327}
4328
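/* Drop offload features the hardware cannot handle for this particular skb:
 * TSO limitations on RTL8168evl, transport header offsets beyond what the
 * descriptor format can express, and checksum offload for packets that need
 * software padding.
 */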
4329static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
4330						struct net_device *dev,
4331						netdev_features_t features)
4332{
4333	struct rtl8169_private *tp = netdev_priv(dev);
4334
4335	if (skb_is_gso(skb)) {
4336		if (tp->mac_version == RTL_GIGA_MAC_VER_34)
4337			features = rtl8168evl_fix_tso(skb, features);
4338
4339		if (skb_transport_offset(skb) > GTTCPHO_MAX &&
4340		    rtl_chip_supports_csum_v2(tp))
4341			features &= ~NETIF_F_ALL_TSO;
4342	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
4343		/* work around hw bug on some chip versions */
4344		if (skb->len < ETH_ZLEN)
4345			features &= ~NETIF_F_CSUM_MASK;
4346
4347		if (rtl_quirk_packet_padto(tp, skb))
4348			features &= ~NETIF_F_CSUM_MASK;
4349
4350		if (skb_transport_offset(skb) > TCPHO_MAX &&
4351		    rtl_chip_supports_csum_v2(tp))
4352			features &= ~NETIF_F_CSUM_MASK;
4353	}
4354
4355	return vlan_features_check(skb, features);
4356}
4357
4358static void rtl8169_pcierr_interrupt(struct net_device *dev)
4359{
4360	struct rtl8169_private *tp = netdev_priv(dev);
4361	struct pci_dev *pdev = tp->pci_dev;
4362	int pci_status_errs;
4363	u16 pci_cmd;
4364
4365	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4366
4367	pci_status_errs = pci_status_get_and_clear_errors(pdev);
4368
4369	if (net_ratelimit())
4370		netdev_err(dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n",
4371			   pci_cmd, pci_status_errs);
4372
4373	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
4374}
4375
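/* Tx completion: walk the ring from dirty_tx towards cur_tx, reclaim
 * descriptors the NIC no longer owns, unmap the buffers, account completed
 * packets and wake the queue once enough descriptors are free again.
 */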
4376static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
4377		   int budget)
4378{
4379	unsigned int dirty_tx, bytes_compl = 0, pkts_compl = 0;
4380	struct sk_buff *skb;
4381
4382	dirty_tx = tp->dirty_tx;
4383
4384	while (READ_ONCE(tp->cur_tx) != dirty_tx) {
4385		unsigned int entry = dirty_tx % NUM_TX_DESC;
4386		u32 status;
4387
4388		status = le32_to_cpu(READ_ONCE(tp->TxDescArray[entry].opts1));
4389		if (status & DescOwn)
4390			break;
4391
4392		skb = tp->tx_skb[entry].skb;
4393		rtl8169_unmap_tx_skb(tp, entry);
4394
4395		if (skb) {
4396			pkts_compl++;
4397			bytes_compl += skb->len;
4398			napi_consume_skb(skb, budget);
4399		}
4400		dirty_tx++;
4401	}
4402
4403	if (tp->dirty_tx != dirty_tx) {
4404		dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl);
4405		WRITE_ONCE(tp->dirty_tx, dirty_tx);
4406
4407		netif_subqueue_completed_wake(dev, 0, pkts_compl, bytes_compl,
4408					      rtl_tx_slots_avail(tp),
4409					      R8169_TX_START_THRS);
4410		/*
4411		 * 8168 hack: TxPoll requests are lost when the Tx packets are
4412		 * too close. Let's kick an extra TxPoll request when a burst
4413		 * of start_xmit activity is detected (if it is not detected,
4414		 * it is slow enough). -- FR
4415		 * If skb is NULL then we come here again once a tx irq is
4416		 * triggered after the last fragment is marked transmitted.
4417		 */
4418		if (READ_ONCE(tp->cur_tx) != dirty_tx && skb)
4419			rtl8169_doorbell(tp);
4420	}
4421}
4422
4423static inline int rtl8169_fragmented_frame(u32 status)
4424{
4425	return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
4426}
4427
4428static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
4429{
4430	u32 status = opts1 & (RxProtoMask | RxCSFailMask);
4431
4432	if (status == RxProtoTCP || status == RxProtoUDP)
4433		skb->ip_summed = CHECKSUM_UNNECESSARY;
4434	else
4435		skb_checksum_none_assert(skb);
4436}
4437
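/* Rx is copy-based: the DMA pages stay mapped, each received frame is synced
 * for the CPU, copied into a freshly allocated skb, and the descriptor is
 * immediately handed back to the NIC.
 */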
4438static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget)
4439{
4440	struct device *d = tp_to_dev(tp);
4441	int count;
4442
4443	for (count = 0; count < budget; count++, tp->cur_rx++) {
4444		unsigned int pkt_size, entry = tp->cur_rx % NUM_RX_DESC;
4445		struct RxDesc *desc = tp->RxDescArray + entry;
4446		struct sk_buff *skb;
4447		const void *rx_buf;
4448		dma_addr_t addr;
4449		u32 status;
4450
4451		status = le32_to_cpu(READ_ONCE(desc->opts1));
4452		if (status & DescOwn)
4453			break;
4454
4455		/* This barrier is needed to keep us from reading
4456		 * any other fields out of the Rx descriptor until
4457		 * we know the status of DescOwn
4458		 */
4459		dma_rmb();
4460
4461		if (unlikely(status & RxRES)) {
4462			if (net_ratelimit())
4463				netdev_warn(dev, "Rx ERROR. status = %08x\n",
4464					    status);
4465			dev->stats.rx_errors++;
4466			if (status & (RxRWT | RxRUNT))
4467				dev->stats.rx_length_errors++;
4468			if (status & RxCRC)
4469				dev->stats.rx_crc_errors++;
4470
4471			if (!(dev->features & NETIF_F_RXALL))
4472				goto release_descriptor;
4473			else if (status & RxRWT || !(status & (RxRUNT | RxCRC)))
4474				goto release_descriptor;
4475		}
4476
4477		pkt_size = status & GENMASK(13, 0);
4478		if (likely(!(dev->features & NETIF_F_RXFCS)))
4479			pkt_size -= ETH_FCS_LEN;
4480
4481		/* The driver does not support incoming fragmented frames.
4482		 * They are seen as a symptom of over-mtu sized frames.
4483		 */
4484		if (unlikely(rtl8169_fragmented_frame(status))) {
4485			dev->stats.rx_dropped++;
4486			dev->stats.rx_length_errors++;
4487			goto release_descriptor;
4488		}
4489
4490		skb = napi_alloc_skb(&tp->napi, pkt_size);
4491		if (unlikely(!skb)) {
4492			dev->stats.rx_dropped++;
4493			goto release_descriptor;
4494		}
4495
4496		addr = le64_to_cpu(desc->addr);
4497		rx_buf = page_address(tp->Rx_databuff[entry]);
4498
4499		dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
4500		prefetch(rx_buf);
4501		skb_copy_to_linear_data(skb, rx_buf, pkt_size);
4502		skb->tail += pkt_size;
4503		skb->len = pkt_size;
4504		dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
4505
4506		rtl8169_rx_csum(skb, status);
4507		skb->protocol = eth_type_trans(skb, dev);
4508
4509		rtl8169_rx_vlan_tag(desc, skb);
4510
4511		if (skb->pkt_type == PACKET_MULTICAST)
4512			dev->stats.multicast++;
4513
4514		napi_gro_receive(&tp->napi, skb);
4515
4516		dev_sw_netstats_rx_add(dev, pkt_size);
4517release_descriptor:
4518		rtl8169_mark_to_asic(desc);
4519	}
4520
4521	return count;
4522}
4523
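/* Interrupt handler: a status of 0xffff means the chip is no longer
 * accessible; events outside irq_mask belong to another device on a shared
 * line. PCI errors and (on RTL8168b) Rx FIFO overflows schedule the reset
 * task, link changes are forwarded to phylib, and Rx/Tx processing is
 * deferred to NAPI with the chip interrupts disabled.
 */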
4524static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4525{
4526	struct rtl8169_private *tp = dev_instance;
4527	u32 status = rtl_get_events(tp);
4528
4529	if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask))
4530		return IRQ_NONE;
4531
4532	if (unlikely(status & SYSErr)) {
4533		rtl8169_pcierr_interrupt(tp->dev);
4534		goto out;
4535	}
4536
4537	if (status & LinkChg)
4538		phy_mac_interrupt(tp->phydev);
4539
4540	if (unlikely(status & RxFIFOOver &&
4541	    tp->mac_version == RTL_GIGA_MAC_VER_11)) {
4542		netif_stop_queue(tp->dev);
4543		rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
4544	}
4545
4546	if (napi_schedule_prep(&tp->napi)) {
4547		rtl_irq_disable(tp);
4548		__napi_schedule(&tp->napi);
4549	}
4550out:
4551	rtl_ack_events(tp, status);
4552
4553	return IRQ_HANDLED;
4554}
4555
4556static void rtl_task(struct work_struct *work)
4557{
4558	struct rtl8169_private *tp =
4559		container_of(work, struct rtl8169_private, wk.work);
4560	int ret;
4561
4562	rtnl_lock();
4563
4564	if (!netif_running(tp->dev) ||
4565	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
4566		goto out_unlock;
4567
4568	if (test_and_clear_bit(RTL_FLAG_TASK_TX_TIMEOUT, tp->wk.flags)) {
4569		/* if chip isn't accessible, reset bus to revive it */
4570		if (RTL_R32(tp, TxConfig) == ~0) {
4571			ret = pci_reset_bus(tp->pci_dev);
4572			if (ret < 0) {
4573				netdev_err(tp->dev, "Can't reset secondary PCI bus, detach NIC\n");
4574				netif_device_detach(tp->dev);
4575				goto out_unlock;
4576			}
4577		}
4578
4579		/* ASPM compatibility issues are a typical reason for tx timeouts */
4580		ret = pci_disable_link_state(tp->pci_dev, PCIE_LINK_STATE_L1 |
4581							  PCIE_LINK_STATE_L0S);
4582		if (!ret)
4583			netdev_warn_once(tp->dev, "ASPM disabled on Tx timeout\n");
4584		goto reset;
4585	}
4586
4587	if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) {
4588reset:
4589		rtl_reset_work(tp);
4590		netif_wake_queue(tp->dev);
4591	} else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
4592		rtl_reset_work(tp);
4593	}
4594out_unlock:
4595	rtnl_unlock();
4596}
4597
4598static int rtl8169_poll(struct napi_struct *napi, int budget)
4599{
4600	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
4601	struct net_device *dev = tp->dev;
4602	int work_done;
4603
4604	rtl_tx(dev, tp, budget);
4605
4606	work_done = rtl_rx(dev, tp, budget);
4607
4608	if (work_done < budget && napi_complete_done(napi, work_done))
4609		rtl_irq_enable(tp);
4610
4611	return work_done;
4612}
4613
4614static void r8169_phylink_handler(struct net_device *ndev)
4615{
4616	struct rtl8169_private *tp = netdev_priv(ndev);
4617	struct device *d = tp_to_dev(tp);
4618
4619	if (netif_carrier_ok(ndev)) {
4620		rtl_link_chg_patch(tp);
4621		pm_request_resume(d);
4622		netif_wake_queue(tp->dev);
4623	} else {
4624		/* In a few cases rx is otherwise broken after link-down */
4625		if (rtl_is_8125(tp))
4626			rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE);
4627		pm_runtime_idle(d);
4628	}
4629
4630	phy_print_status(tp->phydev);
4631}
4632
4633static int r8169_phy_connect(struct rtl8169_private *tp)
4634{
4635	struct phy_device *phydev = tp->phydev;
4636	phy_interface_t phy_mode;
4637	int ret;
4638
4639	phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII :
4640		   PHY_INTERFACE_MODE_MII;
4641
4642	ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler,
4643				 phy_mode);
4644	if (ret)
4645		return ret;
4646
4647	if (!tp->supports_gmii)
4648		phy_set_max_speed(phydev, SPEED_100);
4649
4650	phy_attached_info(phydev);
4651
4652	return 0;
4653}
4654
4655static void rtl8169_down(struct rtl8169_private *tp)
4656{
4657	/* Clear all task flags */
4658	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
4659
4660	phy_stop(tp->phydev);
4661
4662	rtl8169_update_counters(tp);
4663
4664	pci_clear_master(tp->pci_dev);
4665	rtl_pci_commit(tp);
4666
4667	rtl8169_cleanup(tp);
4668	rtl_disable_exit_l1(tp);
4669	rtl_prepare_power_down(tp);
4670
4671	if (tp->dash_type != RTL_DASH_NONE)
4672		rtl8168_driver_stop(tp);
4673}
4674
4675static void rtl8169_up(struct rtl8169_private *tp)
4676{
4677	if (tp->dash_type != RTL_DASH_NONE)
4678		rtl8168_driver_start(tp);
4679
4680	pci_set_master(tp->pci_dev);
4681	phy_init_hw(tp->phydev);
4682	phy_resume(tp->phydev);
4683	rtl8169_init_phy(tp);
4684	napi_enable(&tp->napi);
4685	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
4686	rtl_reset_work(tp);
4687
4688	phy_start(tp->phydev);
4689}
4690
4691static int rtl8169_close(struct net_device *dev)
4692{
4693	struct rtl8169_private *tp = netdev_priv(dev);
4694	struct pci_dev *pdev = tp->pci_dev;
4695
4696	pm_runtime_get_sync(&pdev->dev);
4697
4698	netif_stop_queue(dev);
4699	rtl8169_down(tp);
4700	rtl8169_rx_clear(tp);
4701
4702	cancel_work(&tp->wk.work);
4703
4704	free_irq(tp->irq, tp);
4705
4706	phy_disconnect(tp->phydev);
4707
4708	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
4709			  tp->RxPhyAddr);
4710	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
4711			  tp->TxPhyAddr);
4712	tp->TxDescArray = NULL;
4713	tp->RxDescArray = NULL;
4714
4715	pm_runtime_put_sync(&pdev->dev);
4716
4717	return 0;
4718}
4719
4720#ifdef CONFIG_NET_POLL_CONTROLLER
4721static void rtl8169_netpoll(struct net_device *dev)
4722{
4723	struct rtl8169_private *tp = netdev_priv(dev);
4724
4725	rtl8169_interrupt(tp->irq, tp);
4726}
4727#endif
4728
4729static int rtl_open(struct net_device *dev)
4730{
4731	struct rtl8169_private *tp = netdev_priv(dev);
4732	struct pci_dev *pdev = tp->pci_dev;
4733	unsigned long irqflags;
4734	int retval = -ENOMEM;
4735
4736	pm_runtime_get_sync(&pdev->dev);
4737
4738	/*
4739	 * Rx and Tx descriptors need 256-byte alignment.
4740	 * dma_alloc_coherent provides more.
4741	 */
4742	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
4743					     &tp->TxPhyAddr, GFP_KERNEL);
4744	if (!tp->TxDescArray)
4745		goto out;
4746
4747	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
4748					     &tp->RxPhyAddr, GFP_KERNEL);
4749	if (!tp->RxDescArray)
4750		goto err_free_tx_0;
4751
4752	retval = rtl8169_init_ring(tp);
4753	if (retval < 0)
4754		goto err_free_rx_1;
4755
4756	rtl_request_firmware(tp);
4757
4758	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_NO_THREAD : IRQF_SHARED;
4759	retval = request_irq(tp->irq, rtl8169_interrupt, irqflags, dev->name, tp);
4760	if (retval < 0)
4761		goto err_release_fw_2;
4762
4763	retval = r8169_phy_connect(tp);
4764	if (retval)
4765		goto err_free_irq;
4766
4767	rtl8169_up(tp);
4768	rtl8169_init_counter_offsets(tp);
4769	netif_start_queue(dev);
4770out:
4771	pm_runtime_put_sync(&pdev->dev);
4772
4773	return retval;
4774
4775err_free_irq:
4776	free_irq(tp->irq, tp);
4777err_release_fw_2:
4778	rtl_release_firmware(tp);
4779	rtl8169_rx_clear(tp);
4780err_free_rx_1:
4781	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
4782			  tp->RxPhyAddr);
4783	tp->RxDescArray = NULL;
4784err_free_tx_0:
4785	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
4786			  tp->TxPhyAddr);
4787	tp->TxDescArray = NULL;
4788	goto out;
4789}
4790
4791static void
4792rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
4793{
4794	struct rtl8169_private *tp = netdev_priv(dev);
4795	struct pci_dev *pdev = tp->pci_dev;
4796	struct rtl8169_counters *counters = tp->counters;
4797
4798	pm_runtime_get_noresume(&pdev->dev);
4799
4800	netdev_stats_to_stats64(stats, &dev->stats);
4801	dev_fetch_sw_netstats(stats, dev->tstats);
4802
4803	/*
4804	 * Fetch from the hardware tally counters the additional values that are
4805	 * missing from the stats collected by the driver.
4806	 */
4807	if (pm_runtime_active(&pdev->dev))
4808		rtl8169_update_counters(tp);
4809
4810	/*
4811	 * Subtract values fetched during initialization.
4812	 * See rtl8169_init_counter_offsets for a description why we do that.
4813	 */
4814	stats->tx_errors = le64_to_cpu(counters->tx_errors) -
4815		le64_to_cpu(tp->tc_offset.tx_errors);
4816	stats->collisions = le32_to_cpu(counters->tx_multi_collision) -
4817		le32_to_cpu(tp->tc_offset.tx_multi_collision);
4818	stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
4819		le16_to_cpu(tp->tc_offset.tx_aborted);
4820	stats->rx_missed_errors = le16_to_cpu(counters->rx_missed) -
4821		le16_to_cpu(tp->tc_offset.rx_missed);
4822
4823	pm_runtime_put_noidle(&pdev->dev);
4824}
4825
4826static void rtl8169_net_suspend(struct rtl8169_private *tp)
4827{
4828	netif_device_detach(tp->dev);
4829
4830	if (netif_running(tp->dev))
4831		rtl8169_down(tp);
4832}
4833
4834static int rtl8169_runtime_resume(struct device *dev)
4835{
4836	struct rtl8169_private *tp = dev_get_drvdata(dev);
4837
4838	rtl_rar_set(tp, tp->dev->dev_addr);
4839	__rtl8169_set_wol(tp, tp->saved_wolopts);
4840
4841	if (tp->TxDescArray)
4842		rtl8169_up(tp);
4843
4844	netif_device_attach(tp->dev);
4845
4846	return 0;
4847}
4848
4849static int rtl8169_suspend(struct device *device)
4850{
4851	struct rtl8169_private *tp = dev_get_drvdata(device);
4852
4853	rtnl_lock();
4854	rtl8169_net_suspend(tp);
4855	if (!device_may_wakeup(tp_to_dev(tp)))
4856		clk_disable_unprepare(tp->clk);
4857	rtnl_unlock();
4858
4859	return 0;
4860}
4861
4862static int rtl8169_resume(struct device *device)
4863{
4864	struct rtl8169_private *tp = dev_get_drvdata(device);
4865
4866	if (!device_may_wakeup(tp_to_dev(tp)))
4867		clk_prepare_enable(tp->clk);
4868
4869	/* Reportedly at least Asus X453MA truncates packets otherwise */
4870	if (tp->mac_version == RTL_GIGA_MAC_VER_37)
4871		rtl_init_rxcfg(tp);
4872
4873	return rtl8169_runtime_resume(device);
4874}
4875
4876static int rtl8169_runtime_suspend(struct device *device)
4877{
4878	struct rtl8169_private *tp = dev_get_drvdata(device);
4879
4880	if (!tp->TxDescArray) {
4881		netif_device_detach(tp->dev);
4882		return 0;
4883	}
4884
4885	rtnl_lock();
4886	__rtl8169_set_wol(tp, WAKE_PHY);
4887	rtl8169_net_suspend(tp);
4888	rtnl_unlock();
4889
4890	return 0;
4891}
4892
4893static int rtl8169_runtime_idle(struct device *device)
4894{
4895	struct rtl8169_private *tp = dev_get_drvdata(device);
4896
4897	if (tp->dash_enabled)
4898		return -EBUSY;
4899
4900	if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev))
4901		pm_schedule_suspend(device, 10000);
4902
4903	return -EBUSY;
4904}
4905
4906static const struct dev_pm_ops rtl8169_pm_ops = {
4907	SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume)
4908	RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume,
4909		       rtl8169_runtime_idle)
4910};
4911
4912static void rtl_shutdown(struct pci_dev *pdev)
4913{
4914	struct rtl8169_private *tp = pci_get_drvdata(pdev);
4915
4916	rtnl_lock();
4917	rtl8169_net_suspend(tp);
4918	rtnl_unlock();
4919
4920	/* Restore original MAC address */
4921	rtl_rar_set(tp, tp->dev->perm_addr);
4922
4923	if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) {
4924		pci_wake_from_d3(pdev, tp->saved_wolopts);
4925		pci_set_power_state(pdev, PCI_D3hot);
4926	}
4927}
4928
4929static void rtl_remove_one(struct pci_dev *pdev)
4930{
4931	struct rtl8169_private *tp = pci_get_drvdata(pdev);
4932
4933	if (pci_dev_run_wake(pdev))
4934		pm_runtime_get_noresume(&pdev->dev);
4935
4936	cancel_work_sync(&tp->wk.work);
4937
4938	unregister_netdev(tp->dev);
4939
4940	if (tp->dash_type != RTL_DASH_NONE)
4941		rtl8168_driver_stop(tp);
4942
4943	rtl_release_firmware(tp);
4944
4945	/* restore original MAC address */
4946	rtl_rar_set(tp, tp->dev->perm_addr);
4947}
4948
4949static const struct net_device_ops rtl_netdev_ops = {
4950	.ndo_open		= rtl_open,
4951	.ndo_stop		= rtl8169_close,
4952	.ndo_get_stats64	= rtl8169_get_stats64,
4953	.ndo_start_xmit		= rtl8169_start_xmit,
4954	.ndo_features_check	= rtl8169_features_check,
4955	.ndo_tx_timeout		= rtl8169_tx_timeout,
4956	.ndo_validate_addr	= eth_validate_addr,
4957	.ndo_change_mtu		= rtl8169_change_mtu,
4958	.ndo_fix_features	= rtl8169_fix_features,
4959	.ndo_set_features	= rtl8169_set_features,
4960	.ndo_set_mac_address	= rtl_set_mac_address,
4961	.ndo_eth_ioctl		= phy_do_ioctl_running,
4962	.ndo_set_rx_mode	= rtl_set_rx_mode,
4963#ifdef CONFIG_NET_POLL_CONTROLLER
4964	.ndo_poll_controller	= rtl8169_netpoll,
4965#endif
4967};
4968
4969static void rtl_set_irq_mask(struct rtl8169_private *tp)
4970{
4971	tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg;
4972
4973	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
4974		tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver;
4975	else if (tp->mac_version == RTL_GIGA_MAC_VER_11)
4976		/* special workaround needed */
4977		tp->irq_mask |= RxFIFOOver;
4978	else
4979		tp->irq_mask |= RxOverflow;
4980}
4981
4982static int rtl_alloc_irq(struct rtl8169_private *tp)
4983{
4984	unsigned int flags;
4985
4986	switch (tp->mac_version) {
4987	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
4988		rtl_unlock_config_regs(tp);
4989		RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
4990		rtl_lock_config_regs(tp);
4991		fallthrough;
4992	case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
4993		flags = PCI_IRQ_LEGACY;
4994		break;
4995	default:
4996		flags = PCI_IRQ_ALL_TYPES;
4997		break;
4998	}
4999
5000	return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
5001}
5002
5003static void rtl_read_mac_address(struct rtl8169_private *tp,
5004				 u8 mac_addr[ETH_ALEN])
5005{
5006	/* Get MAC address */
5007	if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) {
5008		u32 value;
5009
5010		value = rtl_eri_read(tp, 0xe0);
5011		put_unaligned_le32(value, mac_addr);
5012		value = rtl_eri_read(tp, 0xe4);
5013		put_unaligned_le16(value, mac_addr + 4);
5014	} else if (rtl_is_8125(tp)) {
5015		rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP);
5016	}
5017}
5018
5019DECLARE_RTL_COND(rtl_link_list_ready_cond)
5020{
5021	return RTL_R8(tp, MCU) & LINK_LIST_RDY;
5022}
5023
5024static void r8168g_wait_ll_share_fifo_ready(struct rtl8169_private *tp)
5025{
5026	rtl_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
5027}
5028
5029static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg)
5030{
5031	struct rtl8169_private *tp = mii_bus->priv;
5032
5033	if (phyaddr > 0)
5034		return -ENODEV;
5035
5036	return rtl_readphy(tp, phyreg);
5037}
5038
5039static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
5040				int phyreg, u16 val)
5041{
5042	struct rtl8169_private *tp = mii_bus->priv;
5043
5044	if (phyaddr > 0)
5045		return -ENODEV;
5046
5047	rtl_writephy(tp, phyreg, val);
5048
5049	return 0;
5050}
5051
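/* Register a single-PHY MDIO bus with the PHY at address 0. The PHY interrupt
 * is routed through the MAC (PHY_MAC_INTERRUPT), and the dedicated Realtek
 * PHY driver is required since the generic PHY driver fails for most chip
 * versions.
 */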
5052static int r8169_mdio_register(struct rtl8169_private *tp)
5053{
5054	struct pci_dev *pdev = tp->pci_dev;
5055	struct mii_bus *new_bus;
5056	int ret;
5057
5058	new_bus = devm_mdiobus_alloc(&pdev->dev);
5059	if (!new_bus)
5060		return -ENOMEM;
5061
5062	new_bus->name = "r8169";
5063	new_bus->priv = tp;
5064	new_bus->parent = &pdev->dev;
5065	new_bus->irq[0] = PHY_MAC_INTERRUPT;
5066	snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x",
5067		 pci_domain_nr(pdev->bus), pci_dev_id(pdev));
5068
5069	new_bus->read = r8169_mdio_read_reg;
5070	new_bus->write = r8169_mdio_write_reg;
5071
5072	ret = devm_mdiobus_register(&pdev->dev, new_bus);
5073	if (ret)
5074		return ret;
5075
5076	tp->phydev = mdiobus_get_phy(new_bus, 0);
5077	if (!tp->phydev) {
5078		return -ENODEV;
5079	} else if (!tp->phydev->drv) {
5080		/* Most chip versions fail with the genphy driver.
5081		 * Therefore ensure that the dedicated PHY driver is loaded.
5082		 */
5083		dev_err(&pdev->dev, "no dedicated PHY driver found for PHY ID 0x%08x, maybe realtek.ko needs to be added to initramfs?\n",
5084			tp->phydev->phy_id);
5085		return -EUNATCH;
5086	}
5087
5088	tp->phydev->mac_managed_pm = true;
5089
5090	phy_support_asym_pause(tp->phydev);
5091
5092	/* PHY will be woken up in rtl_open() */
5093	phy_suspend(tp->phydev);
5094
5095	return 0;
5096}
5097
5098static void rtl_hw_init_8168g(struct rtl8169_private *tp)
5099{
5100	rtl_enable_rxdvgate(tp);
5101
5102	RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
5103	msleep(1);
5104	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
5105
5106	r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
5107	r8168g_wait_ll_share_fifo_ready(tp);
5108
5109	r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15));
5110	r8168g_wait_ll_share_fifo_ready(tp);
5111}
5112
5113static void rtl_hw_init_8125(struct rtl8169_private *tp)
5114{
5115	rtl_enable_rxdvgate(tp);
5116
5117	RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
5118	msleep(1);
5119	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
5120
5121	r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
5122	r8168g_wait_ll_share_fifo_ready(tp);
5123
5124	r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0);
5125	r8168_mac_ocp_write(tp, 0xc0a6, 0x0150);
5126	r8168_mac_ocp_write(tp, 0xc01e, 0x5555);
5127	r8168g_wait_ll_share_fifo_ready(tp);
5128}
5129
5130static void rtl_hw_initialize(struct rtl8169_private *tp)
5131{
5132	switch (tp->mac_version) {
5133	case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
5134		rtl8168ep_stop_cmac(tp);
5135		fallthrough;
5136	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
5137		rtl_hw_init_8168g(tp);
5138		break;
5139	case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
5140		rtl_hw_init_8125(tp);
5141		break;
5142	default:
5143		break;
5144	}
5145}
5146
5147static int rtl_jumbo_max(struct rtl8169_private *tp)
5148{
5149	/* Non-GBit versions don't support jumbo frames */
5150	if (!tp->supports_gmii)
5151		return 0;
5152
5153	switch (tp->mac_version) {
5154	/* RTL8169 */
5155	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
5156		return JUMBO_7K;
5157	/* RTL8168b */
5158	case RTL_GIGA_MAC_VER_11:
5159	case RTL_GIGA_MAC_VER_17:
5160		return JUMBO_4K;
5161	/* RTL8168c */
5162	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
5163		return JUMBO_6K;
5164	default:
5165		return JUMBO_9K;
5166	}
5167}
5168
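/* MAC address selection order: platform/firmware-provided address, then the
 * chip-specific backup registers, then the regular MAC0 registers, and as a
 * last resort a random address.
 */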
5169static void rtl_init_mac_address(struct rtl8169_private *tp)
5170{
5171	u8 mac_addr[ETH_ALEN] __aligned(2) = {};
5172	struct net_device *dev = tp->dev;
5173	int rc;
5174
5175	rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr);
5176	if (!rc)
5177		goto done;
5178
5179	rtl_read_mac_address(tp, mac_addr);
5180	if (is_valid_ether_addr(mac_addr))
5181		goto done;
5182
5183	rtl_read_mac_from_reg(tp, mac_addr, MAC0);
5184	if (is_valid_ether_addr(mac_addr))
5185		goto done;
5186
5187	eth_random_addr(mac_addr);
5188	dev->addr_assign_type = NET_ADDR_RANDOM;
5189	dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n");
5190done:
5191	eth_hw_addr_set(dev, mac_addr);
5192	rtl_rar_set(tp, mac_addr);
5193}
5194
5195/* register is set if system vendor successfully tested ASPM 1.2 */
5196static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
5197{
5198	if (tp->mac_version >= RTL_GIGA_MAC_VER_61 &&
5199	    r8168_mac_ocp_read(tp, 0xc0b2) & 0xf)
5200		return true;
5201
5202	return false;
5203}
5204
5205static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5206{
5207	struct rtl8169_private *tp;
5208	int jumbo_max, region, rc;
5209	enum mac_version chipset;
5210	struct net_device *dev;
5211	u32 txconfig;
5212	u16 xid;
5213
5214	dev = devm_alloc_etherdev(&pdev->dev, sizeof(*tp));
5215	if (!dev)
5216		return -ENOMEM;
5217
5218	SET_NETDEV_DEV(dev, &pdev->dev);
5219	dev->netdev_ops = &rtl_netdev_ops;
5220	tp = netdev_priv(dev);
5221	tp->dev = dev;
5222	tp->pci_dev = pdev;
5223	tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
5224	tp->eee_adv = -1;
5225	tp->ocp_base = OCP_STD_PHY_BASE;
5226
5227	raw_spin_lock_init(&tp->cfg9346_usage_lock);
5228	raw_spin_lock_init(&tp->config25_lock);
5229	raw_spin_lock_init(&tp->mac_ocp_lock);
5230
5231	dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
5232						   struct pcpu_sw_netstats);
5233	if (!dev->tstats)
5234		return -ENOMEM;
5235
5236	/* Get the *optional* external "ether_clk" used on some boards */
5237	tp->clk = devm_clk_get_optional_enabled(&pdev->dev, "ether_clk");
5238	if (IS_ERR(tp->clk))
5239		return dev_err_probe(&pdev->dev, PTR_ERR(tp->clk), "failed to get ether_clk\n");
5240
5241	/* enable device (incl. PCI PM wakeup and hotplug setup) */
5242	rc = pcim_enable_device(pdev);
5243	if (rc < 0)
5244		return dev_err_probe(&pdev->dev, rc, "enable failure\n");
5245
5246	if (pcim_set_mwi(pdev) < 0)
5247		dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n");
5248
5249	/* use first MMIO region */
5250	region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1;
5251	if (region < 0)
5252		return dev_err_probe(&pdev->dev, -ENODEV, "no MMIO resource found\n");
5253
5254	rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME);
5255	if (rc < 0)
5256		return dev_err_probe(&pdev->dev, rc, "cannot remap MMIO, aborting\n");
5257
5258	tp->mmio_addr = pcim_iomap_table(pdev)[region];
5259
5260	txconfig = RTL_R32(tp, TxConfig);
5261	if (txconfig == ~0U)
5262		return dev_err_probe(&pdev->dev, -EIO, "PCI read failed\n");
5263
5264	xid = (txconfig >> 20) & 0xfcf;
5265
5266	/* Identify chip attached to board */
5267	chipset = rtl8169_get_mac_version(xid, tp->supports_gmii);
5268	if (chipset == RTL_GIGA_MAC_NONE)
5269		return dev_err_probe(&pdev->dev, -ENODEV,
5270				     "unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n",
5271				     xid);
5272	tp->mac_version = chipset;
5273
5274	/* Disable ASPM L1 as it causes random device-stop-working
5275	 * problems as well as full system hangs for some PCIe device users.
5276	 */
5277	if (rtl_aspm_is_safe(tp))
5278		rc = 0;
5279	else
5280		rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
5281	tp->aspm_manageable = !rc;
5282
5283	tp->dash_type = rtl_get_dash_type(tp);
5284	tp->dash_enabled = rtl_dash_is_enabled(tp);
5285
5286	tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
5287
5288	if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
5289	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
5290		dev->features |= NETIF_F_HIGHDMA;
5291
5292	rtl_init_rxcfg(tp);
5293
5294	rtl8169_irq_mask_and_ack(tp);
5295
5296	rtl_hw_initialize(tp);
5297
5298	rtl_hw_reset(tp);
5299
5300	rc = rtl_alloc_irq(tp);
5301	if (rc < 0)
5302		return dev_err_probe(&pdev->dev, rc, "Can't allocate interrupt\n");
5303
5304	tp->irq = pci_irq_vector(pdev, 0);
5305
5306	INIT_WORK(&tp->wk.work, rtl_task);
5307
5308	rtl_init_mac_address(tp);
5309
5310	dev->ethtool_ops = &rtl8169_ethtool_ops;
5311
5312	netif_napi_add(dev, &tp->napi, rtl8169_poll);
5313
5314	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
5315			   NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
5316	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
5317	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5318
5319	/*
5320	 * Pretend we are using VLANs; this bypasses a nasty bug where
5321	 * interrupts stop flowing under high load on 8110SCd controllers.
5322	 */
5323	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
5324		/* Disallow toggling */
5325		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
5326
5327	if (rtl_chip_supports_csum_v2(tp))
5328		dev->hw_features |= NETIF_F_IPV6_CSUM;
5329
5330	dev->features |= dev->hw_features;
5331
5332	/* There have been a number of reports that using SG/TSO results in
5333	 * tx timeouts. However, for a lot of people SG/TSO works fine.
5334	 * Therefore disable both features by default, but allow users to
5335	 * enable them. Use at your own risk!
5336	 */
5337	if (rtl_chip_supports_csum_v2(tp)) {
5338		dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
5339		netif_set_tso_max_size(dev, RTL_GSO_MAX_SIZE_V2);
5340		netif_set_tso_max_segs(dev, RTL_GSO_MAX_SEGS_V2);
5341	} else {
5342		dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;
5343		netif_set_tso_max_size(dev, RTL_GSO_MAX_SIZE_V1);
5344		netif_set_tso_max_segs(dev, RTL_GSO_MAX_SEGS_V1);
5345	}
5346
5347	dev->hw_features |= NETIF_F_RXALL;
5348	dev->hw_features |= NETIF_F_RXFCS;
5349
5350	netdev_sw_irq_coalesce_default_on(dev);
5351
5352	/* configure chip for default features */
5353	rtl8169_set_features(dev, dev->features);
5354
5355	if (!tp->dash_enabled) {
5356		rtl_set_d3_pll_down(tp, true);
5357	} else {
5358		rtl_set_d3_pll_down(tp, false);
5359		dev->wol_enabled = 1;
5360	}
5361
5362	jumbo_max = rtl_jumbo_max(tp);
5363	if (jumbo_max)
5364		dev->max_mtu = jumbo_max;
5365
5366	rtl_set_irq_mask(tp);
5367
5368	tp->fw_name = rtl_chip_infos[chipset].fw_name;
5369
5370	tp->counters = dmam_alloc_coherent(&pdev->dev, sizeof(*tp->counters),
5371					    &tp->counters_phys_addr,
5372					    GFP_KERNEL);
5373	if (!tp->counters)
5374		return -ENOMEM;
5375
5376	pci_set_drvdata(pdev, tp);
5377
5378	rc = r8169_mdio_register(tp);
5379	if (rc)
5380		return rc;
5381
5382	rc = register_netdev(dev);
5383	if (rc)
5384		return rc;
5385
5386	netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
5387		    rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq);
5388
5389	if (jumbo_max)
5390		netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
5391			    jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
5392			    "ok" : "ko");
5393
5394	if (tp->dash_type != RTL_DASH_NONE) {
5395		netdev_info(dev, "DASH %s\n",
5396			    tp->dash_enabled ? "enabled" : "disabled");
5397		rtl8168_driver_start(tp);
5398	}
5399
5400	if (pci_dev_run_wake(pdev))
5401		pm_runtime_put_sync(&pdev->dev);
5402
5403	return 0;
5404}
5405
5406static struct pci_driver rtl8169_pci_driver = {
5407	.name		= KBUILD_MODNAME,
5408	.id_table	= rtl8169_pci_tbl,
5409	.probe		= rtl_init_one,
5410	.remove		= rtl_remove_one,
5411	.shutdown	= rtl_shutdown,
5412	.driver.pm	= pm_ptr(&rtl8169_pm_ops),
5413};
5414
5415module_pci_driver(rtl8169_pci_driver);
5416