1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (C) 2015 Microchip Technology
4 */
5#include <linux/module.h>
6#include <linux/netdevice.h>
7#include <linux/etherdevice.h>
8#include <linux/ethtool.h>
9#include <linux/usb.h>
10#include <linux/crc32.h>
11#include <linux/signal.h>
12#include <linux/slab.h>
13#include <linux/if_vlan.h>
14#include <linux/uaccess.h>
15#include <linux/linkmode.h>
16#include <linux/list.h>
17#include <linux/ip.h>
18#include <linux/ipv6.h>
19#include <linux/mdio.h>
20#include <linux/phy.h>
21#include <net/ip6_checksum.h>
22#include <net/vxlan.h>
23#include <linux/interrupt.h>
24#include <linux/irqdomain.h>
25#include <linux/irq.h>
26#include <linux/irqchip/chained_irq.h>
27#include <linux/microchipphy.h>
28#include <linux/phy_fixed.h>
29#include <linux/of_mdio.h>
30#include <linux/of_net.h>
31#include "lan78xx.h"
32
33#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
34#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
35#define DRIVER_NAME	"lan78xx"
36
37#define TX_TIMEOUT_JIFFIES		(5 * HZ)
38#define THROTTLE_JIFFIES		(HZ / 8)
39#define UNLINK_TIMEOUT_MS		3
40
41#define RX_MAX_QUEUE_MEMORY		(60 * 1518)
42
43#define SS_USB_PKT_SIZE			(1024)
44#define HS_USB_PKT_SIZE			(512)
45#define FS_USB_PKT_SIZE			(64)
46
47#define MAX_RX_FIFO_SIZE		(12 * 1024)
48#define MAX_TX_FIFO_SIZE		(12 * 1024)
49
50#define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
51#define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
52					 (FLOW_THRESHOLD(off) << 8))
53
54/* Flow control turned on when Rx FIFO level rises above this level (bytes) */
55#define FLOW_ON_SS			9216
56#define FLOW_ON_HS			8704
57
58/* Flow control turned off when Rx FIFO level falls below this level (bytes) */
59#define FLOW_OFF_SS			4096
60#define FLOW_OFF_HS			1024
61
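/* Worked example (illustrative, not from the datasheet): FCT_FLOW stores
 * both thresholds in 512-byte units, rounding up to the next unit, so for
 * SuperSpeed FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS) evaluates to
 *
 *	FLOW_THRESHOLD(9216) = (9216 + 511) / 512 = 18 = 0x12
 *	FLOW_THRESHOLD(4096) = (4096 + 511) / 512 =  8 = 0x08
 *	value = 0x12 | (0x08 << 8) = 0x0812
 */
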
62#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
63#define DEFAULT_BULK_IN_DELAY		(0x0800)
64#define MAX_SINGLE_PACKET_SIZE		(9000)
65#define DEFAULT_TX_CSUM_ENABLE		(true)
66#define DEFAULT_RX_CSUM_ENABLE		(true)
67#define DEFAULT_TSO_CSUM_ENABLE		(true)
68#define DEFAULT_VLAN_FILTER_ENABLE	(true)
69#define DEFAULT_VLAN_RX_OFFLOAD		(true)
70#define TX_ALIGNMENT			(4)
71#define RXW_PADDING			2
72
73#define LAN78XX_USB_VENDOR_ID		(0x0424)
74#define LAN7800_USB_PRODUCT_ID		(0x7800)
75#define LAN7850_USB_PRODUCT_ID		(0x7850)
76#define LAN7801_USB_PRODUCT_ID		(0x7801)
77#define LAN78XX_EEPROM_MAGIC		(0x78A5)
78#define LAN78XX_OTP_MAGIC		(0x78F3)
79#define AT29M2AF_USB_VENDOR_ID		(0x07C9)
80#define AT29M2AF_USB_PRODUCT_ID	(0x0012)
81
82#define	MII_READ			1
83#define	MII_WRITE			0
84
85#define EEPROM_INDICATOR		(0xA5)
86#define EEPROM_MAC_OFFSET		(0x01)
87#define MAX_EEPROM_SIZE			512
88#define OTP_INDICATOR_1			(0xF3)
89#define OTP_INDICATOR_2			(0xF7)
90
91#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
92					 WAKE_MCAST | WAKE_BCAST | \
93					 WAKE_ARP | WAKE_MAGIC)
94
95#define TX_URB_NUM			10
96#define TX_SS_URB_NUM			TX_URB_NUM
97#define TX_HS_URB_NUM			TX_URB_NUM
98#define TX_FS_URB_NUM			TX_URB_NUM
99
100/* A single URB buffer must be large enough to hold a complete jumbo packet
101 */
102#define TX_SS_URB_SIZE			(32 * 1024)
103#define TX_HS_URB_SIZE			(16 * 1024)
104#define TX_FS_URB_SIZE			(10 * 1024)
105
106#define RX_SS_URB_NUM			30
107#define RX_HS_URB_NUM			10
108#define RX_FS_URB_NUM			10
109#define RX_SS_URB_SIZE			TX_SS_URB_SIZE
110#define RX_HS_URB_SIZE			TX_HS_URB_SIZE
111#define RX_FS_URB_SIZE			TX_FS_URB_SIZE
112
113#define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
114#define SS_BULK_IN_DELAY		0x2000
115#define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
116#define HS_BULK_IN_DELAY		0x2000
117#define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
118#define FS_BULK_IN_DELAY		0x2000
119
120#define TX_CMD_LEN			8
121#define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
122#define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)
123
124#define RX_CMD_LEN			10
125#define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
126#define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)
127
128/* USB related defines */
129#define BULK_IN_PIPE			1
130#define BULK_OUT_PIPE			2
131
132/* default autosuspend delay (mSec)*/
133#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
134
135/* statistic update interval (mSec) */
136#define STAT_UPDATE_TIMER		(1 * 1000)
137
138/* time to wait for MAC or FCT to stop (jiffies) */
139#define HW_DISABLE_TIMEOUT		(HZ / 10)
140
141/* time to wait between polling MAC or FCT state (ms) */
142#define HW_DISABLE_DELAY_MS		1
143
144/* defines interrupts from interrupt EP */
145#define MAX_INT_EP			(32)
146#define INT_EP_INTEP			(31)
147#define INT_EP_OTP_WR_DONE		(28)
148#define INT_EP_EEE_TX_LPI_START		(26)
149#define INT_EP_EEE_TX_LPI_STOP		(25)
150#define INT_EP_EEE_RX_LPI		(24)
151#define INT_EP_MAC_RESET_TIMEOUT	(23)
152#define INT_EP_RDFO			(22)
153#define INT_EP_TXE			(21)
154#define INT_EP_USB_STATUS		(20)
155#define INT_EP_TX_DIS			(19)
156#define INT_EP_RX_DIS			(18)
157#define INT_EP_PHY			(17)
158#define INT_EP_DP			(16)
159#define INT_EP_MAC_ERR			(15)
160#define INT_EP_TDFU			(14)
161#define INT_EP_TDFO			(13)
162#define INT_EP_UTX			(12)
163#define INT_EP_GPIO_11			(11)
164#define INT_EP_GPIO_10			(10)
165#define INT_EP_GPIO_9			(9)
166#define INT_EP_GPIO_8			(8)
167#define INT_EP_GPIO_7			(7)
168#define INT_EP_GPIO_6			(6)
169#define INT_EP_GPIO_5			(5)
170#define INT_EP_GPIO_4			(4)
171#define INT_EP_GPIO_3			(3)
172#define INT_EP_GPIO_2			(2)
173#define INT_EP_GPIO_1			(1)
174#define INT_EP_GPIO_0			(0)
175
176static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
177	"RX FCS Errors",
178	"RX Alignment Errors",
179	"RX Fragment Errors",
180	"RX Jabber Errors",
181	"RX Undersize Frame Errors",
182	"RX Oversize Frame Errors",
183	"RX Dropped Frames",
184	"RX Unicast Byte Count",
185	"RX Broadcast Byte Count",
186	"RX Multicast Byte Count",
187	"RX Unicast Frames",
188	"RX Broadcast Frames",
189	"RX Multicast Frames",
190	"RX Pause Frames",
191	"RX 64 Byte Frames",
192	"RX 65 - 127 Byte Frames",
193	"RX 128 - 255 Byte Frames",
194	"RX 256 - 511 Bytes Frames",
195	"RX 512 - 1023 Byte Frames",
196	"RX 1024 - 1518 Byte Frames",
197	"RX Greater 1518 Byte Frames",
198	"EEE RX LPI Transitions",
199	"EEE RX LPI Time",
200	"TX FCS Errors",
201	"TX Excess Deferral Errors",
202	"TX Carrier Errors",
203	"TX Bad Byte Count",
204	"TX Single Collisions",
205	"TX Multiple Collisions",
206	"TX Excessive Collision",
207	"TX Late Collisions",
208	"TX Unicast Byte Count",
209	"TX Broadcast Byte Count",
210	"TX Multicast Byte Count",
211	"TX Unicast Frames",
212	"TX Broadcast Frames",
213	"TX Multicast Frames",
214	"TX Pause Frames",
215	"TX 64 Byte Frames",
216	"TX 65 - 127 Byte Frames",
217	"TX 128 - 255 Byte Frames",
218	"TX 256 - 511 Bytes Frames",
219	"TX 512 - 1023 Byte Frames",
220	"TX 1024 - 1518 Byte Frames",
221	"TX Greater 1518 Byte Frames",
222	"EEE TX LPI Transitions",
223	"EEE TX LPI Time",
224};
225
226struct lan78xx_statstage {
227	u32 rx_fcs_errors;
228	u32 rx_alignment_errors;
229	u32 rx_fragment_errors;
230	u32 rx_jabber_errors;
231	u32 rx_undersize_frame_errors;
232	u32 rx_oversize_frame_errors;
233	u32 rx_dropped_frames;
234	u32 rx_unicast_byte_count;
235	u32 rx_broadcast_byte_count;
236	u32 rx_multicast_byte_count;
237	u32 rx_unicast_frames;
238	u32 rx_broadcast_frames;
239	u32 rx_multicast_frames;
240	u32 rx_pause_frames;
241	u32 rx_64_byte_frames;
242	u32 rx_65_127_byte_frames;
243	u32 rx_128_255_byte_frames;
244	u32 rx_256_511_bytes_frames;
245	u32 rx_512_1023_byte_frames;
246	u32 rx_1024_1518_byte_frames;
247	u32 rx_greater_1518_byte_frames;
248	u32 eee_rx_lpi_transitions;
249	u32 eee_rx_lpi_time;
250	u32 tx_fcs_errors;
251	u32 tx_excess_deferral_errors;
252	u32 tx_carrier_errors;
253	u32 tx_bad_byte_count;
254	u32 tx_single_collisions;
255	u32 tx_multiple_collisions;
256	u32 tx_excessive_collision;
257	u32 tx_late_collisions;
258	u32 tx_unicast_byte_count;
259	u32 tx_broadcast_byte_count;
260	u32 tx_multicast_byte_count;
261	u32 tx_unicast_frames;
262	u32 tx_broadcast_frames;
263	u32 tx_multicast_frames;
264	u32 tx_pause_frames;
265	u32 tx_64_byte_frames;
266	u32 tx_65_127_byte_frames;
267	u32 tx_128_255_byte_frames;
268	u32 tx_256_511_bytes_frames;
269	u32 tx_512_1023_byte_frames;
270	u32 tx_1024_1518_byte_frames;
271	u32 tx_greater_1518_byte_frames;
272	u32 eee_tx_lpi_transitions;
273	u32 eee_tx_lpi_time;
274};
275
276struct lan78xx_statstage64 {
277	u64 rx_fcs_errors;
278	u64 rx_alignment_errors;
279	u64 rx_fragment_errors;
280	u64 rx_jabber_errors;
281	u64 rx_undersize_frame_errors;
282	u64 rx_oversize_frame_errors;
283	u64 rx_dropped_frames;
284	u64 rx_unicast_byte_count;
285	u64 rx_broadcast_byte_count;
286	u64 rx_multicast_byte_count;
287	u64 rx_unicast_frames;
288	u64 rx_broadcast_frames;
289	u64 rx_multicast_frames;
290	u64 rx_pause_frames;
291	u64 rx_64_byte_frames;
292	u64 rx_65_127_byte_frames;
293	u64 rx_128_255_byte_frames;
294	u64 rx_256_511_bytes_frames;
295	u64 rx_512_1023_byte_frames;
296	u64 rx_1024_1518_byte_frames;
297	u64 rx_greater_1518_byte_frames;
298	u64 eee_rx_lpi_transitions;
299	u64 eee_rx_lpi_time;
300	u64 tx_fcs_errors;
301	u64 tx_excess_deferral_errors;
302	u64 tx_carrier_errors;
303	u64 tx_bad_byte_count;
304	u64 tx_single_collisions;
305	u64 tx_multiple_collisions;
306	u64 tx_excessive_collision;
307	u64 tx_late_collisions;
308	u64 tx_unicast_byte_count;
309	u64 tx_broadcast_byte_count;
310	u64 tx_multicast_byte_count;
311	u64 tx_unicast_frames;
312	u64 tx_broadcast_frames;
313	u64 tx_multicast_frames;
314	u64 tx_pause_frames;
315	u64 tx_64_byte_frames;
316	u64 tx_65_127_byte_frames;
317	u64 tx_128_255_byte_frames;
318	u64 tx_256_511_bytes_frames;
319	u64 tx_512_1023_byte_frames;
320	u64 tx_1024_1518_byte_frames;
321	u64 tx_greater_1518_byte_frames;
322	u64 eee_tx_lpi_transitions;
323	u64 eee_tx_lpi_time;
324};
325
326static u32 lan78xx_regs[] = {
327	ID_REV,
328	INT_STS,
329	HW_CFG,
330	PMT_CTL,
331	E2P_CMD,
332	E2P_DATA,
333	USB_STATUS,
334	VLAN_TYPE,
335	MAC_CR,
336	MAC_RX,
337	MAC_TX,
338	FLOW,
339	ERR_STS,
340	MII_ACC,
341	MII_DATA,
342	EEE_TX_LPI_REQ_DLY,
343	EEE_TW_TX_SYS,
344	EEE_TX_LPI_REM_DLY,
345	WUCSR
346};
347
348#define PHY_REG_SIZE (32 * sizeof(u32))
349
350struct lan78xx_net;
351
352struct lan78xx_priv {
353	struct lan78xx_net *dev;
354	u32 rfe_ctl;
355	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
356	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
357	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
358	struct mutex dataport_mutex; /* for dataport access */
359	spinlock_t rfe_ctl_lock; /* for rfe register access */
360	struct work_struct set_multicast;
361	struct work_struct set_vlan;
362	u32 wol;
363};
364
365enum skb_state {
366	illegal = 0,
367	tx_start,
368	tx_done,
369	rx_start,
370	rx_done,
371	rx_cleanup,
372	unlink_start
373};
374
375struct skb_data {		/* skb->cb is one of these */
376	struct urb *urb;
377	struct lan78xx_net *dev;
378	enum skb_state state;
379	size_t length;
380	int num_of_packet;
381};
382
383struct usb_context {
384	struct usb_ctrlrequest req;
385	struct lan78xx_net *dev;
386};
387
388#define EVENT_TX_HALT			0
389#define EVENT_RX_HALT			1
390#define EVENT_RX_MEMORY			2
391#define EVENT_STS_SPLIT			3
392#define EVENT_LINK_RESET		4
393#define EVENT_RX_PAUSED			5
394#define EVENT_DEV_WAKING		6
395#define EVENT_DEV_ASLEEP		7
396#define EVENT_DEV_OPEN			8
397#define EVENT_STAT_UPDATE		9
398#define EVENT_DEV_DISCONNECT		10
399
400struct statstage {
401	struct mutex			access_lock;	/* for stats access */
402	struct lan78xx_statstage	saved;
403	struct lan78xx_statstage	rollover_count;
404	struct lan78xx_statstage	rollover_max;
405	struct lan78xx_statstage64	curr_stat;
406};
407
408struct irq_domain_data {
409	struct irq_domain	*irqdomain;
410	unsigned int		phyirq;
411	struct irq_chip		*irqchip;
412	irq_flow_handler_t	irq_handler;
413	u32			irqenable;
414	struct mutex		irq_lock;		/* for irq bus access */
415};
416
417struct lan78xx_net {
418	struct net_device	*net;
419	struct usb_device	*udev;
420	struct usb_interface	*intf;
421	void			*driver_priv;
422
423	unsigned int		tx_pend_data_len;
424	size_t			n_tx_urbs;
425	size_t			n_rx_urbs;
426	size_t			tx_urb_size;
427	size_t			rx_urb_size;
428
429	struct sk_buff_head	rxq_free;
430	struct sk_buff_head	rxq;
431	struct sk_buff_head	rxq_done;
432	struct sk_buff_head	rxq_overflow;
433	struct sk_buff_head	txq_free;
434	struct sk_buff_head	txq;
435	struct sk_buff_head	txq_pend;
436
437	struct napi_struct	napi;
438
439	struct delayed_work	wq;
440
441	int			msg_enable;
442
443	struct urb		*urb_intr;
444	struct usb_anchor	deferred;
445
446	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
447	struct mutex		phy_mutex; /* for phy access */
448	unsigned int		pipe_in, pipe_out, pipe_intr;
449
450	unsigned int		bulk_in_delay;
451	unsigned int		burst_cap;
452
453	unsigned long		flags;
454
455	wait_queue_head_t	*wait;
456	unsigned char		suspend_count;
457
458	unsigned int		maxpacket;
459	struct timer_list	stat_monitor;
460
461	unsigned long		data[5];
462
463	int			link_on;
464	u8			mdix_ctrl;
465
466	u32			chipid;
467	u32			chiprev;
468	struct mii_bus		*mdiobus;
469	phy_interface_t		interface;
470
471	int			fc_autoneg;
472	u8			fc_request_control;
473
474	int			delta;
475	struct statstage	stats;
476
477	struct irq_domain_data	domain_data;
478};
479
480/* define external phy id */
481#define	PHY_LAN8835			(0x0007C130)
482#define	PHY_KSZ9031RNX			(0x00221620)
483
484/* use ethtool to change the level for any given device */
485static int msg_level = -1;
486module_param(msg_level, int, 0);
487MODULE_PARM_DESC(msg_level, "Override default message level");
488
489static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
490{
491	if (skb_queue_empty(buf_pool))
492		return NULL;
493
494	return skb_dequeue(buf_pool);
495}
496
497static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
498				struct sk_buff *buf)
499{
500	buf->data = buf->head;
501	skb_reset_tail_pointer(buf);
502
503	buf->len = 0;
504	buf->data_len = 0;
505
506	skb_queue_tail(buf_pool, buf);
507}
508
509static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
510{
511	struct skb_data *entry;
512	struct sk_buff *buf;
513
514	while (!skb_queue_empty(buf_pool)) {
515		buf = skb_dequeue(buf_pool);
516		if (buf) {
517			entry = (struct skb_data *)buf->cb;
518			usb_free_urb(entry->urb);
519			dev_kfree_skb_any(buf);
520		}
521	}
522}
523
524static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
525				  size_t n_urbs, size_t urb_size,
526				  struct lan78xx_net *dev)
527{
528	struct skb_data *entry;
529	struct sk_buff *buf;
530	struct urb *urb;
531	int i;
532
533	skb_queue_head_init(buf_pool);
534
535	for (i = 0; i < n_urbs; i++) {
536		buf = alloc_skb(urb_size, GFP_ATOMIC);
537		if (!buf)
538			goto error;
539
540		if (skb_linearize(buf) != 0) {
541			dev_kfree_skb_any(buf);
542			goto error;
543		}
544
545		urb = usb_alloc_urb(0, GFP_ATOMIC);
546		if (!urb) {
547			dev_kfree_skb_any(buf);
548			goto error;
549		}
550
551		entry = (struct skb_data *)buf->cb;
552		entry->urb = urb;
553		entry->dev = dev;
554		entry->length = 0;
555		entry->num_of_packet = 0;
556
557		skb_queue_tail(buf_pool, buf);
558	}
559
560	return 0;
561
562error:
563	lan78xx_free_buf_pool(buf_pool);
564
565	return -ENOMEM;
566}
567
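/* Usage sketch (illustrative only): the rx/tx wrappers below pair these
 * helpers, roughly
 *
 *	if (lan78xx_alloc_buf_pool(&dev->rxq_free, dev->n_rx_urbs,
 *				   dev->rx_urb_size, dev) < 0)
 *		return -ENOMEM;
 *	buf = lan78xx_get_buf(&dev->rxq_free);	  (NULL when exhausted)
 *	... submit buf via the URB stored in buf->cb ...
 *	lan78xx_release_buf(&dev->rxq_free, buf); (reset and recycle)
 *	lan78xx_free_buf_pool(&dev->rxq_free);	  (on teardown)
 */
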
568static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
569{
570	return lan78xx_get_buf(&dev->rxq_free);
571}
572
573static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
574				   struct sk_buff *rx_buf)
575{
576	lan78xx_release_buf(&dev->rxq_free, rx_buf);
577}
578
579static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
580{
581	lan78xx_free_buf_pool(&dev->rxq_free);
582}
583
584static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
585{
586	return lan78xx_alloc_buf_pool(&dev->rxq_free,
587				      dev->n_rx_urbs, dev->rx_urb_size, dev);
588}
589
590static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
591{
592	return lan78xx_get_buf(&dev->txq_free);
593}
594
595static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
596				   struct sk_buff *tx_buf)
597{
598	lan78xx_release_buf(&dev->txq_free, tx_buf);
599}
600
601static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
602{
603	lan78xx_free_buf_pool(&dev->txq_free);
604}
605
606static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
607{
608	return lan78xx_alloc_buf_pool(&dev->txq_free,
609				      dev->n_tx_urbs, dev->tx_urb_size, dev);
610}
611
612static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
613{
614	u32 *buf;
615	int ret;
616
617	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
618		return -ENODEV;
619
620	buf = kmalloc(sizeof(u32), GFP_KERNEL);
621	if (!buf)
622		return -ENOMEM;
623
624	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
625			      USB_VENDOR_REQUEST_READ_REGISTER,
626			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
627			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
628	if (likely(ret >= 0)) {
629		le32_to_cpus(buf);
630		*data = *buf;
631	} else if (net_ratelimit()) {
632		netdev_warn(dev->net,
633			    "Failed to read register index 0x%08x. ret = %d",
634			    index, ret);
635	}
636
637	kfree(buf);
638
639	return ret;
640}
641
642static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
643{
644	u32 *buf;
645	int ret;
646
647	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
648		return -ENODEV;
649
650	buf = kmalloc(sizeof(u32), GFP_KERNEL);
651	if (!buf)
652		return -ENOMEM;
653
654	*buf = data;
655	cpu_to_le32s(buf);
656
657	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
658			      USB_VENDOR_REQUEST_WRITE_REGISTER,
659			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
660			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
661	if (unlikely(ret < 0) &&
662	    net_ratelimit()) {
663		netdev_warn(dev->net,
664			    "Failed to write register index 0x%08x. ret = %d",
665			    index, ret);
666	}
667
668	kfree(buf);
669
670	return ret;
671}
672
673static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
674			      u32 data)
675{
676	int ret;
677	u32 buf;
678
679	ret = lan78xx_read_reg(dev, reg, &buf);
680	if (ret < 0)
681		return ret;
682
683	buf &= ~mask;
684	buf |= (mask & data);
685
686	ret = lan78xx_write_reg(dev, reg, buf);
687	if (ret < 0)
688		return ret;
689
690	return 0;
691}
692
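/* Example (illustrative): lan78xx_update_reg() is a read-modify-write
 * helper; only the bits in @mask are touched. For instance, enabling RX
 * flow control without disturbing the rest of the FLOW register:
 *
 *	ret = lan78xx_update_reg(dev, FLOW, FLOW_CR_RX_FCEN_,
 *				 FLOW_CR_RX_FCEN_);
 */
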
693static int lan78xx_read_stats(struct lan78xx_net *dev,
694			      struct lan78xx_statstage *data)
695{
696	int ret = 0;
697	int i;
698	struct lan78xx_statstage *stats;
699	u32 *src;
700	u32 *dst;
701
702	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
703	if (!stats)
704		return -ENOMEM;
705
706	ret = usb_control_msg(dev->udev,
707			      usb_rcvctrlpipe(dev->udev, 0),
708			      USB_VENDOR_REQUEST_GET_STATS,
709			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
710			      0,
711			      0,
712			      (void *)stats,
713			      sizeof(*stats),
714			      USB_CTRL_SET_TIMEOUT);
715	if (likely(ret >= 0)) {
716		src = (u32 *)stats;
717		dst = (u32 *)data;
718		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
719			le32_to_cpus(&src[i]);
720			dst[i] = src[i];
721		}
722	} else {
723		netdev_warn(dev->net,
724			    "Failed to read stat ret = %d", ret);
725	}
726
727	kfree(stats);
728
729	return ret;
730}
731
732#define check_counter_rollover(struct1, dev_stats, member)		\
733	do {								\
734		if ((struct1)->member < (dev_stats).saved.member)	\
735			(dev_stats).rollover_count.member++;		\
736	} while (0)
737
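/* For example, check_counter_rollover(stats, dev->stats, rx_fcs_errors)
 * expands to
 *
 *	if (stats->rx_fcs_errors < dev->stats.saved.rx_fcs_errors)
 *		dev->stats.rollover_count.rx_fcs_errors++;
 *
 * i.e. a hardware counter that went backwards must have wrapped.
 * lan78xx_update_stats() later rebuilds the 64-bit value as
 * hw_count + rollover_count * (rollover_max + 1).
 */
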
738static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
739					struct lan78xx_statstage *stats)
740{
741	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
742	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
743	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
744	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
745	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
746	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
747	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
748	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
749	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
750	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
751	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
752	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
753	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
754	check_counter_rollover(stats, dev->stats, rx_pause_frames);
755	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
756	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
757	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
758	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
759	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
760	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
761	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
762	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
763	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
764	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
765	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
766	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
767	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
768	check_counter_rollover(stats, dev->stats, tx_single_collisions);
769	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
770	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
771	check_counter_rollover(stats, dev->stats, tx_late_collisions);
772	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
773	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
774	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
775	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
776	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
777	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
778	check_counter_rollover(stats, dev->stats, tx_pause_frames);
779	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
780	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
781	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
782	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
783	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
784	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
785	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
786	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
787	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
788
789	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
790}
791
792static void lan78xx_update_stats(struct lan78xx_net *dev)
793{
794	u32 *p, *count, *max;
795	u64 *data;
796	int i;
797	struct lan78xx_statstage lan78xx_stats;
798
799	if (usb_autopm_get_interface(dev->intf) < 0)
800		return;
801
802	p = (u32 *)&lan78xx_stats;
803	count = (u32 *)&dev->stats.rollover_count;
804	max = (u32 *)&dev->stats.rollover_max;
805	data = (u64 *)&dev->stats.curr_stat;
806
807	mutex_lock(&dev->stats.access_lock);
808
809	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
810		lan78xx_check_stat_rollover(dev, &lan78xx_stats);
811
812	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
813		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
814
815	mutex_unlock(&dev->stats.access_lock);
816
817	usb_autopm_put_interface(dev->intf);
818}
819
820/* Loop until the read completes or times out; called with phy_mutex held */
821static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
822{
823	unsigned long start_time = jiffies;
824	u32 val;
825	int ret;
826
827	do {
828		ret = lan78xx_read_reg(dev, MII_ACC, &val);
829		if (unlikely(ret < 0))
830			return -EIO;
831
832		if (!(val & MII_ACC_MII_BUSY_))
833			return 0;
834	} while (!time_after(jiffies, start_time + HZ));
835
836	return -EIO;
837}
838
839static inline u32 mii_access(int id, int index, int read)
840{
841	u32 ret;
842
843	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
844	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
845	if (read)
846		ret |= MII_ACC_MII_READ_;
847	else
848		ret |= MII_ACC_MII_WRITE_;
849	ret |= MII_ACC_MII_BUSY_;
850
851	return ret;
852}
853
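/* Worked example (illustrative): mii_access(1, MII_BMSR, MII_READ)
 * composes a MII_ACC command word with PHY address 1, register index
 * MII_BMSR (0x01), the READ direction bit and MII_ACC_MII_BUSY_ set;
 * hardware clears the busy bit when the transaction completes (polled
 * by lan78xx_phy_wait_not_busy()).
 */
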
854static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
855{
856	unsigned long start_time = jiffies;
857	u32 val;
858	int ret;
859
860	do {
861		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
862		if (unlikely(ret < 0))
863			return -EIO;
864
865		if (!(val & E2P_CMD_EPC_BUSY_) ||
866		    (val & E2P_CMD_EPC_TIMEOUT_))
867			break;
868		usleep_range(40, 100);
869	} while (!time_after(jiffies, start_time + HZ));
870
871	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
872		netdev_warn(dev->net, "EEPROM operation timeout");
873		return -EIO;
874	}
875
876	return 0;
877}
878
879static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
880{
881	unsigned long start_time = jiffies;
882	u32 val;
883	int ret;
884
885	do {
886		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
887		if (unlikely(ret < 0))
888			return -EIO;
889
890		if (!(val & E2P_CMD_EPC_BUSY_))
891			return 0;
892
893		usleep_range(40, 100);
894	} while (!time_after(jiffies, start_time + HZ));
895
896	netdev_warn(dev->net, "EEPROM is busy");
897	return -EIO;
898}
899
900static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
901				   u32 length, u8 *data)
902{
903	u32 val;
904	u32 saved;
905	int i, ret;
906	int retval;
907
908	/* Depending on the chip, some EEPROM pins are muxed with the LED
909	 * function; disable and later restore it to access the EEPROM.
910	 */
911	ret = lan78xx_read_reg(dev, HW_CFG, &val);
912	saved = val;
913	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
914		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
915		ret = lan78xx_write_reg(dev, HW_CFG, val);
916	}
917
918	retval = lan78xx_eeprom_confirm_not_busy(dev);
919	if (retval)
920		return retval;
921
922	for (i = 0; i < length; i++) {
923		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
924		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
925		ret = lan78xx_write_reg(dev, E2P_CMD, val);
926		if (unlikely(ret < 0)) {
927			retval = -EIO;
928			goto exit;
929		}
930
931		retval = lan78xx_wait_eeprom(dev);
932		if (retval < 0)
933			goto exit;
934
935		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
936		if (unlikely(ret < 0)) {
937			retval = -EIO;
938			goto exit;
939		}
940
941		data[i] = val & 0xFF;
942		offset++;
943	}
944
945	retval = 0;
946exit:
947	if (dev->chipid == ID_REV_CHIP_ID_7800_)
948		ret = lan78xx_write_reg(dev, HW_CFG, saved);
949
950	return retval;
951}
952
953static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
954			       u32 length, u8 *data)
955{
956	u8 sig;
957	int ret;
958
959	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
960	if ((ret == 0) && (sig == EEPROM_INDICATOR))
961		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
962	else
963		ret = -EINVAL;
964
965	return ret;
966}
967
968static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
969				    u32 length, u8 *data)
970{
971	u32 val;
972	u32 saved;
973	int i, ret;
974	int retval;
975
976	/* Depending on the chip, some EEPROM pins are muxed with the LED
977	 * function; disable and later restore it to access the EEPROM.
978	 */
979	ret = lan78xx_read_reg(dev, HW_CFG, &val);
980	saved = val;
981	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
982		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
983		ret = lan78xx_write_reg(dev, HW_CFG, val);
984	}
985
986	retval = lan78xx_eeprom_confirm_not_busy(dev);
987	if (retval)
988		goto exit;
989
990	/* Issue write/erase enable command */
991	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
992	ret = lan78xx_write_reg(dev, E2P_CMD, val);
993	if (unlikely(ret < 0)) {
994		retval = -EIO;
995		goto exit;
996	}
997
998	retval = lan78xx_wait_eeprom(dev);
999	if (retval < 0)
1000		goto exit;
1001
1002	for (i = 0; i < length; i++) {
1003		/* Fill data register */
1004		val = data[i];
1005		ret = lan78xx_write_reg(dev, E2P_DATA, val);
1006		if (ret < 0) {
1007			retval = -EIO;
1008			goto exit;
1009		}
1010
1011		/* Send "write" command */
1012		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
1013		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
1014		ret = lan78xx_write_reg(dev, E2P_CMD, val);
1015		if (ret < 0) {
1016			retval = -EIO;
1017			goto exit;
1018		}
1019
1020		retval = lan78xx_wait_eeprom(dev);
1021		if (retval < 0)
1022			goto exit;
1023
1024		offset++;
1025	}
1026
1027	retval = 0;
1028exit:
1029	if (dev->chipid == ID_REV_CHIP_ID_7800_)
1030		ret = lan78xx_write_reg(dev, HW_CFG, saved);
1031
1032	return retval;
1033}
1034
1035static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
1036				u32 length, u8 *data)
1037{
1038	int i;
1039	u32 buf;
1040	unsigned long timeout;
1041
1042	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1043
1044	if (buf & OTP_PWR_DN_PWRDN_N_) {
1045		/* clear it and wait for it to clear */
1046		lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1047
1048		timeout = jiffies + HZ;
1049		do {
1050			usleep_range(1, 10);
1051			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1052			if (time_after(jiffies, timeout)) {
1053				netdev_warn(dev->net,
1054					    "timeout on OTP_PWR_DN");
1055				return -EIO;
1056			}
1057		} while (buf & OTP_PWR_DN_PWRDN_N_);
1058	}
1059
1060	for (i = 0; i < length; i++) {
1061		lan78xx_write_reg(dev, OTP_ADDR1,
1062				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
1063		lan78xx_write_reg(dev, OTP_ADDR2,
1064				  ((offset + i) & OTP_ADDR2_10_3));
1065
1066		lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
1067		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1068
1069		timeout = jiffies + HZ;
1070		do {
1071			udelay(1);
1072			lan78xx_read_reg(dev, OTP_STATUS, &buf);
1073			if (time_after(jiffies, timeout)) {
1074				netdev_warn(dev->net,
1075					    "timeout on OTP_STATUS");
1076				return -EIO;
1077			}
1078		} while (buf & OTP_STATUS_BUSY_);
1079
1080		lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
1081
1082		data[i] = (u8)(buf & 0xFF);
1083	}
1084
1085	return 0;
1086}
1087
1088static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
1089				 u32 length, u8 *data)
1090{
1091	int i;
1092	u32 buf;
1093	unsigned long timeout;
1094
1095	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1096
1097	if (buf & OTP_PWR_DN_PWRDN_N_) {
1098		/* clear it and wait for it to clear */
1099		lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1100
1101		timeout = jiffies + HZ;
1102		do {
1103			udelay(1);
1104			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1105			if (time_after(jiffies, timeout)) {
1106				netdev_warn(dev->net,
1107					    "timeout on OTP_PWR_DN completion");
1108				return -EIO;
1109			}
1110		} while (buf & OTP_PWR_DN_PWRDN_N_);
1111	}
1112
1113	/* set to BYTE program mode */
1114	lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
1115
1116	for (i = 0; i < length; i++) {
1117		lan78xx_write_reg(dev, OTP_ADDR1,
1118				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
1119		lan78xx_write_reg(dev, OTP_ADDR2,
1120				  ((offset + i) & OTP_ADDR2_10_3));
1121		lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
1122		lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
1123		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1124
1125		timeout = jiffies + HZ;
1126		do {
1127			udelay(1);
1128			lan78xx_read_reg(dev, OTP_STATUS, &buf);
1129			if (time_after(jiffies, timeout)) {
1130				netdev_warn(dev->net,
1131					    "Timeout on OTP_STATUS completion");
1132				return -EIO;
1133			}
1134		} while (buf & OTP_STATUS_BUSY_);
1135	}
1136
1137	return 0;
1138}
1139
1140static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
1141			    u32 length, u8 *data)
1142{
1143	u8 sig;
1144	int ret;
1145
1146	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
1147
1148	if (ret == 0) {
1149		if (sig == OTP_INDICATOR_2)
1150			offset += 0x100;
1151		else if (sig != OTP_INDICATOR_1)
1152			ret = -EINVAL;
1153		if (!ret)
1154			ret = lan78xx_read_raw_otp(dev, offset, length, data);
1155	}
1156
1157	return ret;
1158}
1159
1160static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1161{
1162	int i, ret;
1163
1164	for (i = 0; i < 100; i++) {
1165		u32 dp_sel;
1166
1167		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1168		if (unlikely(ret < 0))
1169			return -EIO;
1170
1171		if (dp_sel & DP_SEL_DPRDY_)
1172			return 0;
1173
1174		usleep_range(40, 100);
1175	}
1176
1177	netdev_warn(dev->net, "%s timed out", __func__);
1178
1179	return -EIO;
1180}
1181
1182static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
1183				  u32 addr, u32 length, u32 *buf)
1184{
1185	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1186	u32 dp_sel;
1187	int i, ret;
1188
1189	if (usb_autopm_get_interface(dev->intf) < 0)
1190		return 0;
1191
1192	mutex_lock(&pdata->dataport_mutex);
1193
1194	ret = lan78xx_dataport_wait_not_busy(dev);
1195	if (ret < 0)
1196		goto done;
1197
1198	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1199
1200	dp_sel &= ~DP_SEL_RSEL_MASK_;
1201	dp_sel |= ram_select;
1202	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
1203
1204	for (i = 0; i < length; i++) {
1205		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1206
1207		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1208
1209		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1210
1211		ret = lan78xx_dataport_wait_not_busy(dev);
1212		if (ret < 0)
1213			goto done;
1214	}
1215
1216done:
1217	mutex_unlock(&pdata->dataport_mutex);
1218	usb_autopm_put_interface(dev->intf);
1219
1220	return ret;
1221}
1222
1223static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1224				    int index, u8 addr[ETH_ALEN])
1225{
1226	u32 temp;
1227
1228	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1229		temp = addr[3];
1230		temp = addr[2] | (temp << 8);
1231		temp = addr[1] | (temp << 8);
1232		temp = addr[0] | (temp << 8);
1233		pdata->pfilter_table[index][1] = temp;
1234		temp = addr[5];
1235		temp = addr[4] | (temp << 8);
1236		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1237		pdata->pfilter_table[index][0] = temp;
1238	}
1239}
1240
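/* Example (illustrative): for addr = 00:11:22:33:44:55 the helper above
 * packs
 *
 *	pfilter_table[index][1] = 0x33221100			(MAF_LO)
 *	pfilter_table[index][0] = MAF_HI_VALID_ | MAF_HI_TYPE_DST_ | 0x5544
 *
 * so the address is stored little-endian across the MAF_LO/MAF_HI pair.
 */
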
1241/* returns hash bit number for given MAC address */
1242static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1243{
1244	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1245}
1246
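/* Example (illustrative): a hash value of 75 (0x4b) selects word
 * 75 / 32 = 2 and bit 75 % 32 = 11, so lan78xx_set_multicast() sets
 * mchash_table[2] |= (1 << 11) for that address.
 */
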
1247static void lan78xx_deferred_multicast_write(struct work_struct *param)
1248{
1249	struct lan78xx_priv *pdata =
1250			container_of(param, struct lan78xx_priv, set_multicast);
1251	struct lan78xx_net *dev = pdata->dev;
1252	int i;
1253
1254	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1255		  pdata->rfe_ctl);
1256
1257	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1258			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1259
1260	for (i = 1; i < NUM_OF_MAF; i++) {
1261		lan78xx_write_reg(dev, MAF_HI(i), 0);
1262		lan78xx_write_reg(dev, MAF_LO(i),
1263				  pdata->pfilter_table[i][1]);
1264		lan78xx_write_reg(dev, MAF_HI(i),
1265				  pdata->pfilter_table[i][0]);
1266	}
1267
1268	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1269}
1270
1271static void lan78xx_set_multicast(struct net_device *netdev)
1272{
1273	struct lan78xx_net *dev = netdev_priv(netdev);
1274	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1275	unsigned long flags;
1276	int i;
1277
1278	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1279
1280	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1281			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1282
1283	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1284		pdata->mchash_table[i] = 0;
1285
1286	/* pfilter_table[0] holds the device's own HW address */
1287	for (i = 1; i < NUM_OF_MAF; i++) {
1288		pdata->pfilter_table[i][0] = 0;
1289		pdata->pfilter_table[i][1] = 0;
1290	}
1291
1292	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1293
1294	if (dev->net->flags & IFF_PROMISC) {
1295		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1296		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1297	} else {
1298		if (dev->net->flags & IFF_ALLMULTI) {
1299			netif_dbg(dev, drv, dev->net,
1300				  "receive all multicast enabled");
1301			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1302		}
1303	}
1304
1305	if (netdev_mc_count(dev->net)) {
1306		struct netdev_hw_addr *ha;
1307		int i;
1308
1309		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1310
1311		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1312
1313		i = 1;
1314		netdev_for_each_mc_addr(ha, netdev) {
1315			/* set first 32 into Perfect Filter */
1316			if (i < 33) {
1317				lan78xx_set_addr_filter(pdata, i, ha->addr);
1318			} else {
1319				u32 bitnum = lan78xx_hash(ha->addr);
1320
1321				pdata->mchash_table[bitnum / 32] |=
1322							(1 << (bitnum % 32));
1323				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1324			}
1325			i++;
1326		}
1327	}
1328
1329	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1330
1331	/* defer register writes to a sleepable context */
1332	schedule_work(&pdata->set_multicast);
1333}
1334
1335static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1336				      u16 lcladv, u16 rmtadv)
1337{
1338	u32 flow = 0, fct_flow = 0;
1339	u8 cap;
1340
1341	if (dev->fc_autoneg)
1342		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1343	else
1344		cap = dev->fc_request_control;
1345
1346	if (cap & FLOW_CTRL_TX)
1347		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1348
1349	if (cap & FLOW_CTRL_RX)
1350		flow |= FLOW_CR_RX_FCEN_;
1351
1352	if (dev->udev->speed == USB_SPEED_SUPER)
1353		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
1354	else if (dev->udev->speed == USB_SPEED_HIGH)
1355		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
1356
1357	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1358		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1359		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1360
1361	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1362
1363	/* threshold value should be set before enabling flow */
1364	lan78xx_write_reg(dev, FLOW, flow);
1365
1366	return 0;
1367}
1368
1369static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
1370
1371static int lan78xx_mac_reset(struct lan78xx_net *dev)
1372{
1373	unsigned long start_time = jiffies;
1374	u32 val;
1375	int ret;
1376
1377	mutex_lock(&dev->phy_mutex);
1378
1379	/* Resetting the device while there is activity on the MDIO
1380	 * bus can result in the MAC interface locking up and not
1381	 * completing register access transactions.
1382	 */
1383	ret = lan78xx_phy_wait_not_busy(dev);
1384	if (ret < 0)
1385		goto done;
1386
1387	ret = lan78xx_read_reg(dev, MAC_CR, &val);
1388	if (ret < 0)
1389		goto done;
1390
1391	val |= MAC_CR_RST_;
1392	ret = lan78xx_write_reg(dev, MAC_CR, val);
1393	if (ret < 0)
1394		goto done;
1395
1396	/* Wait for the reset to complete before allowing any further
1397	 * MAC register accesses; otherwise the MAC may lock up.
1398	 */
1399	do {
1400		ret = lan78xx_read_reg(dev, MAC_CR, &val);
1401		if (ret < 0)
1402			goto done;
1403
1404		if (!(val & MAC_CR_RST_)) {
1405			ret = 0;
1406			goto done;
1407		}
1408	} while (!time_after(jiffies, start_time + HZ));
1409
1410	ret = -ETIMEDOUT;
1411done:
1412	mutex_unlock(&dev->phy_mutex);
1413
1414	return ret;
1415}
1416
1417static int lan78xx_link_reset(struct lan78xx_net *dev)
1418{
1419	struct phy_device *phydev = dev->net->phydev;
1420	struct ethtool_link_ksettings ecmd;
1421	int ladv, radv, ret, link;
1422	u32 buf;
1423
1424	/* clear LAN78xx interrupt status */
1425	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1426	if (unlikely(ret < 0))
1427		return ret;
1428
1429	mutex_lock(&phydev->lock);
1430	phy_read_status(phydev);
1431	link = phydev->link;
1432	mutex_unlock(&phydev->lock);
1433
1434	if (!link && dev->link_on) {
1435		dev->link_on = false;
1436
1437		/* reset MAC */
1438		ret = lan78xx_mac_reset(dev);
1439		if (ret < 0)
1440			return ret;
1441
1442		del_timer(&dev->stat_monitor);
1443	} else if (link && !dev->link_on) {
1444		dev->link_on = true;
1445
1446		phy_ethtool_ksettings_get(phydev, &ecmd);
1447
1448		if (dev->udev->speed == USB_SPEED_SUPER) {
1449			if (ecmd.base.speed == 1000) {
1450				/* disable U2 */
1451				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1452				if (ret < 0)
1453					return ret;
1454				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1455				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1456				if (ret < 0)
1457					return ret;
1458				/* enable U1 */
1459				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1460				if (ret < 0)
1461					return ret;
1462				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1463				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1464				if (ret < 0)
1465					return ret;
1466			} else {
1467				/* enable U1 & U2 */
1468				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1469				if (ret < 0)
1470					return ret;
1471				buf |= USB_CFG1_DEV_U2_INIT_EN_;
1472				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1473				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1474				if (ret < 0)
1475					return ret;
1476			}
1477		}
1478
1479		ladv = phy_read(phydev, MII_ADVERTISE);
1480		if (ladv < 0)
1481			return ladv;
1482
1483		radv = phy_read(phydev, MII_LPA);
1484		if (radv < 0)
1485			return radv;
1486
1487		netif_dbg(dev, link, dev->net,
1488			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1489			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1490
1491		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1492						 radv);
1493		if (ret < 0)
1494			return ret;
1495
1496		if (!timer_pending(&dev->stat_monitor)) {
1497			dev->delta = 1;
1498			mod_timer(&dev->stat_monitor,
1499				  jiffies + STAT_UPDATE_TIMER);
1500		}
1501
1502		lan78xx_rx_urb_submit_all(dev);
1503
1504		local_bh_disable();
1505		napi_schedule(&dev->napi);
1506		local_bh_enable();
1507	}
1508
1509	return 0;
1510}
1511
1512/* Some work can't be done in tasklets, so we use keventd.
1513 *
1514 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1515 * but tasklet_schedule() doesn't. Hope the failure is rare.
1516 */
1517static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1518{
1519	set_bit(work, &dev->flags);
1520	if (!schedule_delayed_work(&dev->wq, 0))
1521		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1522}
1523
1524static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1525{
1526	u32 intdata;
1527
1528	if (urb->actual_length != 4) {
1529		netdev_warn(dev->net,
1530			    "unexpected urb length %d", urb->actual_length);
1531		return;
1532	}
1533
1534	intdata = get_unaligned_le32(urb->transfer_buffer);
1535
1536	if (intdata & INT_ENP_PHY_INT) {
1537		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1538		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1539
1540		if (dev->domain_data.phyirq > 0)
1541			generic_handle_irq_safe(dev->domain_data.phyirq);
1542	} else {
1543		netdev_warn(dev->net,
1544			    "unexpected interrupt: 0x%08x\n", intdata);
1545	}
1546}
1547
1548static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1549{
1550	return MAX_EEPROM_SIZE;
1551}
1552
1553static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1554				      struct ethtool_eeprom *ee, u8 *data)
1555{
1556	struct lan78xx_net *dev = netdev_priv(netdev);
1557	int ret;
1558
1559	ret = usb_autopm_get_interface(dev->intf);
1560	if (ret)
1561		return ret;
1562
1563	ee->magic = LAN78XX_EEPROM_MAGIC;
1564
1565	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1566
1567	usb_autopm_put_interface(dev->intf);
1568
1569	return ret;
1570}
1571
1572static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1573				      struct ethtool_eeprom *ee, u8 *data)
1574{
1575	struct lan78xx_net *dev = netdev_priv(netdev);
1576	int ret;
1577
1578	ret = usb_autopm_get_interface(dev->intf);
1579	if (ret)
1580		return ret;
1581
1582	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1583	 * to load data from EEPROM
1584	 */
1585	if (ee->magic == LAN78XX_EEPROM_MAGIC)
1586		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1587	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1588		 (ee->offset == 0) &&
1589		 (ee->len == 512) &&
1590		 (data[0] == OTP_INDICATOR_1))
1591		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1592
1593	usb_autopm_put_interface(dev->intf);
1594
1595	return ret;
1596}
1597
1598static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1599				u8 *data)
1600{
1601	if (stringset == ETH_SS_STATS)
1602		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1603}
1604
1605static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1606{
1607	if (sset == ETH_SS_STATS)
1608		return ARRAY_SIZE(lan78xx_gstrings);
1609	else
1610		return -EOPNOTSUPP;
1611}
1612
1613static void lan78xx_get_stats(struct net_device *netdev,
1614			      struct ethtool_stats *stats, u64 *data)
1615{
1616	struct lan78xx_net *dev = netdev_priv(netdev);
1617
1618	lan78xx_update_stats(dev);
1619
1620	mutex_lock(&dev->stats.access_lock);
1621	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1622	mutex_unlock(&dev->stats.access_lock);
1623}
1624
1625static void lan78xx_get_wol(struct net_device *netdev,
1626			    struct ethtool_wolinfo *wol)
1627{
1628	struct lan78xx_net *dev = netdev_priv(netdev);
1629	int ret;
1630	u32 buf;
1631	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1632
1633	if (usb_autopm_get_interface(dev->intf) < 0)
1634		return;
1635
1636	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1637	if (unlikely(ret < 0)) {
1638		wol->supported = 0;
1639		wol->wolopts = 0;
1640	} else {
1641		if (buf & USB_CFG_RMT_WKP_) {
1642			wol->supported = WAKE_ALL;
1643			wol->wolopts = pdata->wol;
1644		} else {
1645			wol->supported = 0;
1646			wol->wolopts = 0;
1647		}
1648	}
1649
1650	usb_autopm_put_interface(dev->intf);
1651}
1652
1653static int lan78xx_set_wol(struct net_device *netdev,
1654			   struct ethtool_wolinfo *wol)
1655{
1656	struct lan78xx_net *dev = netdev_priv(netdev);
1657	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1658	int ret;
1659
1660	if (wol->wolopts & ~WAKE_ALL)
1661		return -EINVAL;
1662
1663	ret = usb_autopm_get_interface(dev->intf);
1664	if (ret < 0)
1665		return ret;
1666
1667	pdata->wol = wol->wolopts;
1668
1669	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1670
1671	phy_ethtool_set_wol(netdev->phydev, wol);
1672
1673	usb_autopm_put_interface(dev->intf);
1674
1675	return ret;
1676}
1677
1678static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1679{
1680	struct lan78xx_net *dev = netdev_priv(net);
1681	struct phy_device *phydev = net->phydev;
1682	int ret;
1683	u32 buf;
1684
1685	ret = usb_autopm_get_interface(dev->intf);
1686	if (ret < 0)
1687		return ret;
1688
1689	ret = phy_ethtool_get_eee(phydev, edata);
1690	if (ret < 0)
1691		goto exit;
1692
1693	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1694	if (buf & MAC_CR_EEE_EN_) {
1695		edata->eee_enabled = true;
1696		edata->eee_active = !!(edata->advertised &
1697				       edata->lp_advertised);
1698		edata->tx_lpi_enabled = true;
1699		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer use the same usec unit */
1700		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1701		edata->tx_lpi_timer = buf;
1702	} else {
1703		edata->eee_enabled = false;
1704		edata->eee_active = false;
1705		edata->tx_lpi_enabled = false;
1706		edata->tx_lpi_timer = 0;
1707	}
1708
1709	ret = 0;
1710exit:
1711	usb_autopm_put_interface(dev->intf);
1712
1713	return ret;
1714}
1715
1716static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1717{
1718	struct lan78xx_net *dev = netdev_priv(net);
1719	int ret;
1720	u32 buf;
1721
1722	ret = usb_autopm_get_interface(dev->intf);
1723	if (ret < 0)
1724		return ret;
1725
1726	if (edata->eee_enabled) {
1727		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1728		buf |= MAC_CR_EEE_EN_;
1729		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1730
1731		phy_ethtool_set_eee(net->phydev, edata);
1732
1733		buf = (u32)edata->tx_lpi_timer;
1734		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1735	} else {
1736		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1737		buf &= ~MAC_CR_EEE_EN_;
1738		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1739	}
1740
1741	usb_autopm_put_interface(dev->intf);
1742
1743	return 0;
1744}
1745
1746static u32 lan78xx_get_link(struct net_device *net)
1747{
1748	u32 link;
1749
1750	mutex_lock(&net->phydev->lock);
1751	phy_read_status(net->phydev);
1752	link = net->phydev->link;
1753	mutex_unlock(&net->phydev->lock);
1754
1755	return link;
1756}
1757
1758static void lan78xx_get_drvinfo(struct net_device *net,
1759				struct ethtool_drvinfo *info)
1760{
1761	struct lan78xx_net *dev = netdev_priv(net);
1762
1763	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1764	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1765}
1766
1767static u32 lan78xx_get_msglevel(struct net_device *net)
1768{
1769	struct lan78xx_net *dev = netdev_priv(net);
1770
1771	return dev->msg_enable;
1772}
1773
1774static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1775{
1776	struct lan78xx_net *dev = netdev_priv(net);
1777
1778	dev->msg_enable = level;
1779}
1780
1781static int lan78xx_get_link_ksettings(struct net_device *net,
1782				      struct ethtool_link_ksettings *cmd)
1783{
1784	struct lan78xx_net *dev = netdev_priv(net);
1785	struct phy_device *phydev = net->phydev;
1786	int ret;
1787
1788	ret = usb_autopm_get_interface(dev->intf);
1789	if (ret < 0)
1790		return ret;
1791
1792	phy_ethtool_ksettings_get(phydev, cmd);
1793
1794	usb_autopm_put_interface(dev->intf);
1795
1796	return ret;
1797}
1798
1799static int lan78xx_set_link_ksettings(struct net_device *net,
1800				      const struct ethtool_link_ksettings *cmd)
1801{
1802	struct lan78xx_net *dev = netdev_priv(net);
1803	struct phy_device *phydev = net->phydev;
1804	int ret = 0;
1805	int temp;
1806
1807	ret = usb_autopm_get_interface(dev->intf);
1808	if (ret < 0)
1809		return ret;
1810
1811	/* change speed & duplex */
1812	ret = phy_ethtool_ksettings_set(phydev, cmd);
1813
1814	if (!cmd->base.autoneg) {
1815		/* force link down */
1816		temp = phy_read(phydev, MII_BMCR);
1817		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1818		mdelay(1);
1819		phy_write(phydev, MII_BMCR, temp);
1820	}
1821
1822	usb_autopm_put_interface(dev->intf);
1823
1824	return ret;
1825}
1826
1827static void lan78xx_get_pause(struct net_device *net,
1828			      struct ethtool_pauseparam *pause)
1829{
1830	struct lan78xx_net *dev = netdev_priv(net);
1831	struct phy_device *phydev = net->phydev;
1832	struct ethtool_link_ksettings ecmd;
1833
1834	phy_ethtool_ksettings_get(phydev, &ecmd);
1835
1836	pause->autoneg = dev->fc_autoneg;
1837
1838	if (dev->fc_request_control & FLOW_CTRL_TX)
1839		pause->tx_pause = 1;
1840
1841	if (dev->fc_request_control & FLOW_CTRL_RX)
1842		pause->rx_pause = 1;
1843}
1844
1845static int lan78xx_set_pause(struct net_device *net,
1846			     struct ethtool_pauseparam *pause)
1847{
1848	struct lan78xx_net *dev = netdev_priv(net);
1849	struct phy_device *phydev = net->phydev;
1850	struct ethtool_link_ksettings ecmd;
1851	int ret;
1852
1853	phy_ethtool_ksettings_get(phydev, &ecmd);
1854
1855	if (pause->autoneg && !ecmd.base.autoneg) {
1856		ret = -EINVAL;
1857		goto exit;
1858	}
1859
1860	dev->fc_request_control = 0;
1861	if (pause->rx_pause)
1862		dev->fc_request_control |= FLOW_CTRL_RX;
1863
1864	if (pause->tx_pause)
1865		dev->fc_request_control |= FLOW_CTRL_TX;
1866
1867	if (ecmd.base.autoneg) {
1868		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
1869		u32 mii_adv;
1870
1871		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1872				   ecmd.link_modes.advertising);
1873		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1874				   ecmd.link_modes.advertising);
1875		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1876		mii_adv_to_linkmode_adv_t(fc, mii_adv);
1877		linkmode_or(ecmd.link_modes.advertising, fc,
1878			    ecmd.link_modes.advertising);
1879
1880		phy_ethtool_ksettings_set(phydev, &ecmd);
1881	}
1882
1883	dev->fc_autoneg = pause->autoneg;
1884
1885	ret = 0;
1886exit:
1887	return ret;
1888}
1889
1890static int lan78xx_get_regs_len(struct net_device *netdev)
1891{
1892	if (!netdev->phydev)
1893		return (sizeof(lan78xx_regs));
1894	else
1895		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1896}
1897
1898static void
1899lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1900		 void *buf)
1901{
1902	u32 *data = buf;
1903	int i, j;
1904	struct lan78xx_net *dev = netdev_priv(netdev);
1905
1906	/* Read Device/MAC registers */
1907	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1908		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1909
1910	if (!netdev->phydev)
1911		return;
1912
1913	/* Read PHY registers */
1914	for (j = 0; j < 32; i++, j++)
1915		data[i] = phy_read(netdev->phydev, j);
1916}
1917
1918static const struct ethtool_ops lan78xx_ethtool_ops = {
1919	.get_link	= lan78xx_get_link,
1920	.nway_reset	= phy_ethtool_nway_reset,
1921	.get_drvinfo	= lan78xx_get_drvinfo,
1922	.get_msglevel	= lan78xx_get_msglevel,
1923	.set_msglevel	= lan78xx_set_msglevel,
1924	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1925	.get_eeprom	= lan78xx_ethtool_get_eeprom,
1926	.set_eeprom	= lan78xx_ethtool_set_eeprom,
1927	.get_ethtool_stats = lan78xx_get_stats,
1928	.get_sset_count = lan78xx_get_sset_count,
1929	.get_strings	= lan78xx_get_strings,
1930	.get_wol	= lan78xx_get_wol,
1931	.set_wol	= lan78xx_set_wol,
1932	.get_ts_info	= ethtool_op_get_ts_info,
1933	.get_eee	= lan78xx_get_eee,
1934	.set_eee	= lan78xx_set_eee,
1935	.get_pauseparam	= lan78xx_get_pause,
1936	.set_pauseparam	= lan78xx_set_pause,
1937	.get_link_ksettings = lan78xx_get_link_ksettings,
1938	.set_link_ksettings = lan78xx_set_link_ksettings,
1939	.get_regs_len	= lan78xx_get_regs_len,
1940	.get_regs	= lan78xx_get_regs,
1941};
1942
1943static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1944{
1945	u32 addr_lo, addr_hi;
1946	u8 addr[6];
1947
1948	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1949	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1950
1951	addr[0] = addr_lo & 0xFF;
1952	addr[1] = (addr_lo >> 8) & 0xFF;
1953	addr[2] = (addr_lo >> 16) & 0xFF;
1954	addr[3] = (addr_lo >> 24) & 0xFF;
1955	addr[4] = addr_hi & 0xFF;
1956	addr[5] = (addr_hi >> 8) & 0xFF;
1957
1958	if (!is_valid_ether_addr(addr)) {
1959		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1960			/* valid address present in Device Tree */
1961			netif_dbg(dev, ifup, dev->net,
1962				  "MAC address read from Device Tree");
1963		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1964						 ETH_ALEN, addr) == 0) ||
1965			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1966					      ETH_ALEN, addr) == 0)) &&
1967			   is_valid_ether_addr(addr)) {
1968			/* eeprom values are valid so use them */
1969			netif_dbg(dev, ifup, dev->net,
1970				  "MAC address read from EEPROM");
1971		} else {
1972			/* generate random MAC */
1973			eth_random_addr(addr);
1974			netif_dbg(dev, ifup, dev->net,
1975				  "MAC address set to random addr");
1976		}
1977
1978		addr_lo = addr[0] | (addr[1] << 8) |
1979			  (addr[2] << 16) | (addr[3] << 24);
1980		addr_hi = addr[4] | (addr[5] << 8);
1981
1982		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1983		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1984	}
1985
1986	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1987	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1988
1989	eth_hw_addr_set(dev->net, addr);
1990}
1991
1992/* MDIO read and write wrappers for phylib */
1993static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1994{
1995	struct lan78xx_net *dev = bus->priv;
1996	u32 val, addr;
1997	int ret;
1998
1999	ret = usb_autopm_get_interface(dev->intf);
2000	if (ret < 0)
2001		return ret;
2002
2003	mutex_lock(&dev->phy_mutex);
2004
2005	/* confirm MII not busy */
2006	ret = lan78xx_phy_wait_not_busy(dev);
2007	if (ret < 0)
2008		goto done;
2009
2010	/* set the address, index & direction (read from PHY) */
2011	addr = mii_access(phy_id, idx, MII_READ);
2012	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2013
2014	ret = lan78xx_phy_wait_not_busy(dev);
2015	if (ret < 0)
2016		goto done;
2017
2018	ret = lan78xx_read_reg(dev, MII_DATA, &val);
2019
2020	ret = (int)(val & 0xFFFF);
2021
2022done:
2023	mutex_unlock(&dev->phy_mutex);
2024	usb_autopm_put_interface(dev->intf);
2025
2026	return ret;
2027}
2028
2029static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2030				 u16 regval)
2031{
2032	struct lan78xx_net *dev = bus->priv;
2033	u32 val, addr;
2034	int ret;
2035
2036	ret = usb_autopm_get_interface(dev->intf);
2037	if (ret < 0)
2038		return ret;
2039
2040	mutex_lock(&dev->phy_mutex);
2041
2042	/* confirm MII not busy */
2043	ret = lan78xx_phy_wait_not_busy(dev);
2044	if (ret < 0)
2045		goto done;
2046
2047	val = (u32)regval;
2048	ret = lan78xx_write_reg(dev, MII_DATA, val);
2049
2050	/* set the address, index & direction (write to PHY) */
2051	addr = mii_access(phy_id, idx, MII_WRITE);
2052	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2053
2054	ret = lan78xx_phy_wait_not_busy(dev);
2055	if (ret < 0)
2056		goto done;
2057
2058done:
2059	mutex_unlock(&dev->phy_mutex);
2060	usb_autopm_put_interface(dev->intf);
2061	return 0;
2062	return ret;
2063
2064static int lan78xx_mdio_init(struct lan78xx_net *dev)
2065{
2066	struct device_node *node;
2067	int ret;
2068
2069	dev->mdiobus = mdiobus_alloc();
2070	if (!dev->mdiobus) {
2071		netdev_err(dev->net, "can't allocate MDIO bus\n");
2072		return -ENOMEM;
2073	}
2074
2075	dev->mdiobus->priv = (void *)dev;
2076	dev->mdiobus->read = lan78xx_mdiobus_read;
2077	dev->mdiobus->write = lan78xx_mdiobus_write;
2078	dev->mdiobus->name = "lan78xx-mdiobus";
2079	dev->mdiobus->parent = &dev->udev->dev;
2080
2081	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
2082		 dev->udev->bus->busnum, dev->udev->devnum);
2083
2084	switch (dev->chipid) {
2085	case ID_REV_CHIP_ID_7800_:
2086	case ID_REV_CHIP_ID_7850_:
2087		/* set to internal PHY id */
2088		dev->mdiobus->phy_mask = ~(1 << 1);
2089		break;
2090	case ID_REV_CHIP_ID_7801_:
2091		/* scan thru PHYAD[2..0] */
2092		dev->mdiobus->phy_mask = ~(0xFF);
2093		break;
2094	}
2095
2096	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
2097	ret = of_mdiobus_register(dev->mdiobus, node);
2098	of_node_put(node);
2099	if (ret) {
2100		netdev_err(dev->net, "can't register MDIO bus\n");
2101		goto exit1;
2102	}
2103
2104	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
2105	return 0;
2106exit1:
2107	mdiobus_free(dev->mdiobus);
2108	return ret;
2109}
2110
2111static void lan78xx_remove_mdio(struct lan78xx_net *dev)
2112{
2113	mdiobus_unregister(dev->mdiobus);
2114	mdiobus_free(dev->mdiobus);
2115}
2116
2117static void lan78xx_link_status_change(struct net_device *net)
2118{
2119	struct phy_device *phydev = net->phydev;
2120
2121	phy_print_status(phydev);
2122}
2123
2124static int irq_map(struct irq_domain *d, unsigned int irq,
2125		   irq_hw_number_t hwirq)
2126{
2127	struct irq_domain_data *data = d->host_data;
2128
2129	irq_set_chip_data(irq, data);
2130	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
2131	irq_set_noprobe(irq);
2132
2133	return 0;
2134}
2135
2136static void irq_unmap(struct irq_domain *d, unsigned int irq)
2137{
2138	irq_set_chip_and_handler(irq, NULL, NULL);
2139	irq_set_chip_data(irq, NULL);
2140}
2141
2142static const struct irq_domain_ops chip_domain_ops = {
2143	.map	= irq_map,
2144	.unmap	= irq_unmap,
2145};
2146
2147static void lan78xx_irq_mask(struct irq_data *irqd)
2148{
2149	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2150
2151	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2152}
2153
2154static void lan78xx_irq_unmask(struct irq_data *irqd)
2155{
2156	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2157
2158	data->irqenable |= BIT(irqd_to_hwirq(irqd));
2159}
2160
2161static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2162{
2163	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2164
2165	mutex_lock(&data->irq_lock);
2166}
2167
2168static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
2169{
2170	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2171	struct lan78xx_net *dev =
2172			container_of(data, struct lan78xx_net, domain_data);
2173	u32 buf;
2174
2175	/* do register access here because irq_bus_lock & irq_bus_sync_unlock
2176	 * are the only two callbacks executed in a non-atomic context.
2177	 */
2178	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2179	if (buf != data->irqenable)
2180		lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
2181
2182	mutex_unlock(&data->irq_lock);
2183}
2184
2185static struct irq_chip lan78xx_irqchip = {
2186	.name			= "lan78xx-irqs",
2187	.irq_mask		= lan78xx_irq_mask,
2188	.irq_unmask		= lan78xx_irq_unmask,
2189	.irq_bus_lock		= lan78xx_irq_bus_lock,
2190	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
2191};
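
/* Editor's note: this is the usual "slow bus" irqchip pattern. irq_mask()
 * and irq_unmask() may run in atomic context, so they only edit the cached
 * irqenable word; the USB register write that makes the change take effect
 * is deferred to irq_bus_sync_unlock(), the only point in the sequence that
 * runs in a sleepable context where lan78xx_read_reg()/lan78xx_write_reg()
 * may legally block.
 */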
2192
2193static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2194{
2195	struct device_node *of_node;
2196	struct irq_domain *irqdomain;
2197	unsigned int irqmap = 0;
2198	u32 buf;
2199	int ret = 0;
2200
2201	of_node = dev->udev->dev.parent->of_node;
2202
2203	mutex_init(&dev->domain_data.irq_lock);
2204
2205	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2206	dev->domain_data.irqenable = buf;
2207
2208	dev->domain_data.irqchip = &lan78xx_irqchip;
2209	dev->domain_data.irq_handler = handle_simple_irq;
2210
2211	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
2212					  &chip_domain_ops, &dev->domain_data);
2213	if (irqdomain) {
2214		/* create mapping for PHY interrupt */
2215		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2216		if (!irqmap) {
2217			irq_domain_remove(irqdomain);
2218
2219			irqdomain = NULL;
2220			ret = -EINVAL;
2221		}
2222	} else {
2223		ret = -EINVAL;
2224	}
2225
2226	dev->domain_data.irqdomain = irqdomain;
2227	dev->domain_data.phyirq = irqmap;
2228
2229	return ret;
2230}
2231
2232static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2233{
2234	if (dev->domain_data.phyirq > 0) {
2235		irq_dispose_mapping(dev->domain_data.phyirq);
2236
2237		if (dev->domain_data.irqdomain)
2238			irq_domain_remove(dev->domain_data.irqdomain);
2239	}
2240	dev->domain_data.phyirq = 0;
2241	dev->domain_data.irqdomain = NULL;
2242}
2243
2244static int lan8835_fixup(struct phy_device *phydev)
2245{
2246	int buf;
2247	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2248
2249	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2250	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2251	buf &= ~0x1800;
2252	buf |= 0x0800;
2253	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2254
2255	/* RGMII MAC TXC Delay Enable */
2256	lan78xx_write_reg(dev, MAC_RGMII_ID,
2257			  MAC_RGMII_ID_TXC_DELAY_EN_);
2258
2259	/* RGMII TX DLL Tune Adjust */
2260	lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2261
2262	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2263
2264	return 1;
2265}
2266
2267static int ksz9031rnx_fixup(struct phy_device *phydev)
2268{
2269	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2270
2271	/* Micrel KSZ9031RNX PHY configuration */
2272	/* RGMII Control Signal Pad Skew */
2273	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2274	/* RGMII RX Data Pad Skew */
2275	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2276	/* RGMII RX Clock Pad Skew */
2277	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2278
2279	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2280
2281	return 1;
2282}
2283
2284static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2285{
2286	u32 buf;
2287	int ret;
2288	struct fixed_phy_status fphy_status = {
2289		.link = 1,
2290		.speed = SPEED_1000,
2291		.duplex = DUPLEX_FULL,
2292	};
2293	struct phy_device *phydev;
2294
2295	phydev = phy_find_first(dev->mdiobus);
2296	if (!phydev) {
2297		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2298		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2299		if (IS_ERR(phydev)) {
2300			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2301			return NULL;
2302		}
2303		netdev_dbg(dev->net, "Registered FIXED PHY\n");
2304		dev->interface = PHY_INTERFACE_MODE_RGMII;
2305		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2306					MAC_RGMII_ID_TXC_DELAY_EN_);
2307		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2308		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2309		buf |= HW_CFG_CLK125_EN_;
2310		buf |= HW_CFG_REFCLK25_EN_;
2311		ret = lan78xx_write_reg(dev, HW_CFG, buf);
2312	} else {
2313		if (!phydev->drv) {
2314			netdev_err(dev->net, "no PHY driver found\n");
2315			return NULL;
2316		}
2317		dev->interface = PHY_INTERFACE_MODE_RGMII;
2318		/* external PHY fixup for KSZ9031RNX */
2319		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2320						 ksz9031rnx_fixup);
2321		if (ret < 0) {
2322			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2323			return NULL;
2324		}
2325		/* external PHY fixup for LAN8835 */
2326		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2327						 lan8835_fixup);
2328		if (ret < 0) {
2329			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2330			return NULL;
2331		}
2332		/* add more external PHY fixup here if needed */
2333
2334		phydev->is_internal = false;
2335	}
2336	return phydev;
2337}
2338
2339static int lan78xx_phy_init(struct lan78xx_net *dev)
2340{
2341	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2342	int ret;
2343	u32 mii_adv;
2344	struct phy_device *phydev;
2345
2346	switch (dev->chipid) {
2347	case ID_REV_CHIP_ID_7801_:
2348		phydev = lan7801_phy_init(dev);
2349		if (!phydev) {
2350			netdev_err(dev->net, "lan7801: PHY Init Failed");
2351			return -EIO;
2352		}
2353		break;
2354
2355	case ID_REV_CHIP_ID_7800_:
2356	case ID_REV_CHIP_ID_7850_:
2357		phydev = phy_find_first(dev->mdiobus);
2358		if (!phydev) {
2359			netdev_err(dev->net, "no PHY found\n");
2360			return -EIO;
2361		}
2362		phydev->is_internal = true;
2363		dev->interface = PHY_INTERFACE_MODE_GMII;
2364		break;
2365
2366	default:
2367		netdev_err(dev->net, "Unknown CHIP ID found\n");
2368		return -EIO;
2369	}
2370
2371	/* if phyirq is not set, use polling mode in phylib */
2372	if (dev->domain_data.phyirq > 0)
2373		phydev->irq = dev->domain_data.phyirq;
2374	else
2375		phydev->irq = PHY_POLL;
2376	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2377
2378	/* set to AUTOMDIX */
2379	phydev->mdix = ETH_TP_MDI_AUTO;
2380
2381	ret = phy_connect_direct(dev->net, phydev,
2382				 lan78xx_link_status_change,
2383				 dev->interface);
2384	if (ret) {
2385		netdev_err(dev->net, "can't attach PHY to %s\n",
2386			   dev->mdiobus->id);
2387		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2388			if (phy_is_pseudo_fixed_link(phydev)) {
2389				fixed_phy_unregister(phydev);
2390			} else {
2391				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2392							     0xfffffff0);
2393				phy_unregister_fixup_for_uid(PHY_LAN8835,
2394							     0xfffffff0);
2395			}
2396		}
2397		return -EIO;
2398	}
2399
2400	/* MAC doesn't support 1000T Half */
2401	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2402
2403	/* support both flow controls */
2404	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2405	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2406			   phydev->advertising);
2407	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2408			   phydev->advertising);
2409	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2410	mii_adv_to_linkmode_adv_t(fc, mii_adv);
2411	linkmode_or(phydev->advertising, fc, phydev->advertising);
2412
2413	if (phydev->mdio.dev.of_node) {
2414		u32 reg;
2415		int len;
2416
2417		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2418						      "microchip,led-modes",
2419						      sizeof(u32));
2420		if (len >= 0) {
2421			/* Ensure the appropriate LEDs are enabled */
2422			lan78xx_read_reg(dev, HW_CFG, &reg);
2423			reg &= ~(HW_CFG_LED0_EN_ |
2424				 HW_CFG_LED1_EN_ |
2425				 HW_CFG_LED2_EN_ |
2426				 HW_CFG_LED3_EN_);
2427			reg |= (len > 0) * HW_CFG_LED0_EN_ |
2428				(len > 1) * HW_CFG_LED1_EN_ |
2429				(len > 2) * HW_CFG_LED2_EN_ |
2430				(len > 3) * HW_CFG_LED3_EN_;
2431			lan78xx_write_reg(dev, HW_CFG, reg);
2432		}
2433	}
2434
2435	genphy_config_aneg(phydev);
2436
2437	dev->fc_autoneg = phydev->autoneg;
2438
2439	return 0;
2440}
2441
2442static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2443{
2444	u32 buf;
2445	bool rxenabled;
2446
2447	lan78xx_read_reg(dev, MAC_RX, &buf);
2448
2449	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2450
2451	if (rxenabled) {
2452		buf &= ~MAC_RX_RXEN_;
2453		lan78xx_write_reg(dev, MAC_RX, buf);
2454	}
2455
2456	/* add 4 to size for FCS */
2457	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2458	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2459
2460	lan78xx_write_reg(dev, MAC_RX, buf);
2461
2462	if (rxenabled) {
2463		buf |= MAC_RX_RXEN_;
2464		lan78xx_write_reg(dev, MAC_RX, buf);
2465	}
2466
2467	return 0;
2468}
2469
2470static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2471{
2472	struct sk_buff *skb;
2473	unsigned long flags;
2474	int count = 0;
2475
2476	spin_lock_irqsave(&q->lock, flags);
2477	while (!skb_queue_empty(q)) {
2478		struct skb_data	*entry;
2479		struct urb *urb;
2480		int ret;
2481
2482		skb_queue_walk(q, skb) {
2483			entry = (struct skb_data *)skb->cb;
2484			if (entry->state != unlink_start)
2485				goto found;
2486		}
2487		break;
2488found:
2489		entry->state = unlink_start;
2490		urb = entry->urb;
2491
2492		/* Take a reference on the URB so it cannot be freed
2493		 * while usb_unlink_urb() runs; usb_unlink_urb() always
2494		 * races with the .complete handler (including defer_bh),
2495		 * which could otherwise trigger a use-after-free inside
2496		 * usb_unlink_urb().
2497		 */
2498		usb_get_urb(urb);
2499		spin_unlock_irqrestore(&q->lock, flags);
2500		/* during some PM-driven resume scenarios,
2501		 * these (async) unlinks complete immediately
2502		 */
2503		ret = usb_unlink_urb(urb);
2504		if (ret != -EINPROGRESS && ret != 0)
2505			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2506		else
2507			count++;
2508		usb_put_urb(urb);
2509		spin_lock_irqsave(&q->lock, flags);
2510	}
2511	spin_unlock_irqrestore(&q->lock, flags);
2512	return count;
2513}
2514
2515static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2516{
2517	struct lan78xx_net *dev = netdev_priv(netdev);
2518	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2519	int ret;
2520
2521	/* no second zero-length packet read wanted after mtu-sized packets */
2522	if ((max_frame_len % dev->maxpacket) == 0)
2523		return -EDOM;
2524
2525	ret = usb_autopm_get_interface(dev->intf);
2526	if (ret < 0)
2527		return ret;
2528
2529	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2530	if (!ret)
2531		netdev->mtu = new_mtu;
2532
2533	usb_autopm_put_interface(dev->intf);
2534
2535	return ret;
2536}
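
/* Worked example (editor's note): USB bulk transfers are terminated by a
 * short packet, so a max frame length that is an exact multiple of the
 * endpoint's maxpacket would need a trailing zero-length packet. With a
 * hypothetical maxpacket of 512:
 *
 *	1536 % 512 == 0		-> rejected with -EDOM by the check above
 *	1522 % 512 == 498	-> accepted; the transfer ends on a short packet
 */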
2537
2538static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2539{
2540	struct lan78xx_net *dev = netdev_priv(netdev);
2541	struct sockaddr *addr = p;
2542	u32 addr_lo, addr_hi;
2543
2544	if (netif_running(netdev))
2545		return -EBUSY;
2546
2547	if (!is_valid_ether_addr(addr->sa_data))
2548		return -EADDRNOTAVAIL;
2549
2550	eth_hw_addr_set(netdev, addr->sa_data);
2551
2552	addr_lo = netdev->dev_addr[0] |
2553		  netdev->dev_addr[1] << 8 |
2554		  netdev->dev_addr[2] << 16 |
2555		  netdev->dev_addr[3] << 24;
2556	addr_hi = netdev->dev_addr[4] |
2557		  netdev->dev_addr[5] << 8;
2558
2559	lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2560	lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2561
2562	/* Added to support MAC address changes */
2563	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2564	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2565
2566	return 0;
2567}
2568
2569/* Enable or disable Rx checksum offload engine */
2570static int lan78xx_set_features(struct net_device *netdev,
2571				netdev_features_t features)
2572{
2573	struct lan78xx_net *dev = netdev_priv(netdev);
2574	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2575	unsigned long flags;
2576
2577	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2578
2579	if (features & NETIF_F_RXCSUM) {
2580		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2581		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2582	} else {
2583		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2584		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2585	}
2586
2587	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2588		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2589	else
2590		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2591
2592	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2593		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2594	else
2595		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2596
2597	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2598
2599	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2600
2601	return 0;
2602}
2603
2604static void lan78xx_deferred_vlan_write(struct work_struct *param)
2605{
2606	struct lan78xx_priv *pdata =
2607			container_of(param, struct lan78xx_priv, set_vlan);
2608	struct lan78xx_net *dev = pdata->dev;
2609
2610	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2611			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2612}
2613
2614static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2615				   __be16 proto, u16 vid)
2616{
2617	struct lan78xx_net *dev = netdev_priv(netdev);
2618	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2619	u16 vid_bit_index;
2620	u16 vid_dword_index;
2621
2622	vid_dword_index = (vid >> 5) & 0x7F;
2623	vid_bit_index = vid & 0x1F;
2624
2625	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2626
2627	/* defer register writes to a sleepable context */
2628	schedule_work(&pdata->set_vlan);
2629
2630	return 0;
2631}
2632
2633static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2634				    __be16 proto, u16 vid)
2635{
2636	struct lan78xx_net *dev = netdev_priv(netdev);
2637	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2638	u16 vid_bit_index;
2639	u16 vid_dword_index;
2640
2641	vid_dword_index = (vid >> 5) & 0x7F;
2642	vid_bit_index = vid & 0x1F;
2643
2644	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2645
2646	/* defer register writes to a sleepable context */
2647	schedule_work(&pdata->set_vlan);
2648
2649	return 0;
2650}
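
/* Worked example (editor's note): the VLAN filter is a bitmap of
 * DP_SEL_VHF_VLAN_LEN 32-bit words (presumably 128, covering all 4096
 * VIDs). For a hypothetical VID of 100:
 *
 *	vid_dword_index = (100 >> 5) & 0x7F;	// word 3
 *	vid_bit_index   = 100 & 0x1F;		// bit 4
 *
 * Adding the VID sets bit 4 of word 3; the deferred work item then writes
 * the whole table to the device through the VLAN dataport.
 */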
2651
2652static void lan78xx_init_ltm(struct lan78xx_net *dev)
2653{
2654	int ret;
2655	u32 buf;
2656	u32 regs[6] = { 0 };
2657
2658	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2659	if (buf & USB_CFG1_LTM_ENABLE_) {
2660		u8 temp[2];
2661		/* Get values from EEPROM first */
2662		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2663			if (temp[0] == 24) {
2664				ret = lan78xx_read_raw_eeprom(dev,
2665							      temp[1] * 2,
2666							      24,
2667							      (u8 *)regs);
2668				if (ret < 0)
2669					return;
2670			}
2671		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2672			if (temp[0] == 24) {
2673				ret = lan78xx_read_raw_otp(dev,
2674							   temp[1] * 2,
2675							   24,
2676							   (u8 *)regs);
2677				if (ret < 0)
2678					return;
2679			}
2680		}
2681	}
2682
2683	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2684	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2685	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2686	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2687	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2688	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2689}
2690
2691static int lan78xx_urb_config_init(struct lan78xx_net *dev)
2692{
2693	int result = 0;
2694
2695	switch (dev->udev->speed) {
2696	case USB_SPEED_SUPER:
2697		dev->rx_urb_size = RX_SS_URB_SIZE;
2698		dev->tx_urb_size = TX_SS_URB_SIZE;
2699		dev->n_rx_urbs = RX_SS_URB_NUM;
2700		dev->n_tx_urbs = TX_SS_URB_NUM;
2701		dev->bulk_in_delay = SS_BULK_IN_DELAY;
2702		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2703		break;
2704	case USB_SPEED_HIGH:
2705		dev->rx_urb_size = RX_HS_URB_SIZE;
2706		dev->tx_urb_size = TX_HS_URB_SIZE;
2707		dev->n_rx_urbs = RX_HS_URB_NUM;
2708		dev->n_tx_urbs = TX_HS_URB_NUM;
2709		dev->bulk_in_delay = HS_BULK_IN_DELAY;
2710		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2711		break;
2712	case USB_SPEED_FULL:
2713		dev->rx_urb_size = RX_FS_URB_SIZE;
2714		dev->tx_urb_size = TX_FS_URB_SIZE;
2715		dev->n_rx_urbs = RX_FS_URB_NUM;
2716		dev->n_tx_urbs = TX_FS_URB_NUM;
2717		dev->bulk_in_delay = FS_BULK_IN_DELAY;
2718		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2719		break;
2720	default:
2721		netdev_warn(dev->net, "USB bus speed not supported\n");
2722		result = -EIO;
2723		break;
2724	}
2725
2726	return result;
2727}
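
/* Worked example (editor's note): at SuperSpeed the defines above give
 * 30 Rx URBs of 32 KiB each (960 KiB of in-flight Rx buffering) and
 * 10 Tx URBs of 32 KiB, each URB large enough to hold at least one
 * MAX_SINGLE_PACKET_SIZE (9000-byte) jumbo frame plus its headers.
 */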
2728
2729static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
2730{
2731	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
2732}
2733
2734static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
2735			   u32 hw_disabled)
2736{
2737	unsigned long timeout;
2738	bool stopped = true;
2739	int ret;
2740	u32 buf;
2741
2742	/* Stop the h/w block (if not already stopped) */
2743
2744	ret = lan78xx_read_reg(dev, reg, &buf);
2745	if (ret < 0)
2746		return ret;
2747
2748	if (buf & hw_enabled) {
2749		buf &= ~hw_enabled;
2750
2751		ret = lan78xx_write_reg(dev, reg, buf);
2752		if (ret < 0)
2753			return ret;
2754
2755		stopped = false;
2756		timeout = jiffies + HW_DISABLE_TIMEOUT;
2757		do  {
2758			ret = lan78xx_read_reg(dev, reg, &buf);
2759			if (ret < 0)
2760				return ret;
2761
2762			if (buf & hw_disabled)
2763				stopped = true;
2764			else
2765				msleep(HW_DISABLE_DELAY_MS);
2766		} while (!stopped && !time_after(jiffies, timeout));
2767	}
2768
2769	ret = stopped ? 0 : -ETIME;
2770
2771	return ret;
2772}
2773
2774static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
2775{
2776	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
2777}
2778
2779static int lan78xx_start_tx_path(struct lan78xx_net *dev)
2780{
2781	int ret;
2782
2783	netif_dbg(dev, drv, dev->net, "start tx path");
2784
2785	/* Start the MAC transmitter */
2786
2787	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
2788	if (ret < 0)
2789		return ret;
2790
2791	/* Start the Tx FIFO */
2792
2793	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
2794	if (ret < 0)
2795		return ret;
2796
2797	return 0;
2798}
2799
2800static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
2801{
2802	int ret;
2803
2804	netif_dbg(dev, drv, dev->net, "stop tx path");
2805
2806	/* Stop the Tx FIFO */
2807
2808	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
2809	if (ret < 0)
2810		return ret;
2811
2812	/* Stop the MAC transmitter */
2813
2814	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
2815	if (ret < 0)
2816		return ret;
2817
2818	return 0;
2819}
2820
2821/* The caller must ensure the Tx path is stopped before calling
2822 * lan78xx_flush_tx_fifo().
2823 */
2824static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
2825{
2826	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
2827}
2828
2829static int lan78xx_start_rx_path(struct lan78xx_net *dev)
2830{
2831	int ret;
2832
2833	netif_dbg(dev, drv, dev->net, "start rx path");
2834
2835	/* Start the Rx FIFO */
2836
2837	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
2838	if (ret < 0)
2839		return ret;
2840
2841	/* Start the MAC receiver */
2842
2843	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
2844	if (ret < 0)
2845		return ret;
2846
2847	return 0;
2848}
2849
2850static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
2851{
2852	int ret;
2853
2854	netif_dbg(dev, drv, dev->net, "stop rx path");
2855
2856	/* Stop the MAC receiver */
2857
2858	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
2859	if (ret < 0)
2860		return ret;
2861
2862	/* Stop the Rx FIFO */
2863
2864	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
2865	if (ret < 0)
2866		return ret;
2867
2868	return 0;
2869}
2870
2871/* The caller must ensure the Rx path is stopped before calling
2872 * lan78xx_flush_rx_fifo().
2873 */
2874static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
2875{
2876	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
2877}
2878
2879static int lan78xx_reset(struct lan78xx_net *dev)
2880{
2881	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2882	unsigned long timeout;
2883	int ret;
2884	u32 buf;
2885	u8 sig;
2886
2887	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2888	if (ret < 0)
2889		return ret;
2890
2891	buf |= HW_CFG_LRST_;
2892
2893	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2894	if (ret < 0)
2895		return ret;
2896
2897	timeout = jiffies + HZ;
2898	do {
2899		mdelay(1);
2900		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2901		if (ret < 0)
2902			return ret;
2903
2904		if (time_after(jiffies, timeout)) {
2905			netdev_warn(dev->net,
2906				    "timeout on completion of LiteReset");
2907			ret = -ETIMEDOUT;
2908			return ret;
2909		}
2910	} while (buf & HW_CFG_LRST_);
2911
2912	lan78xx_init_mac_address(dev);
2913
2914	/* save DEVID for later usage */
2915	ret = lan78xx_read_reg(dev, ID_REV, &buf);
2916	if (ret < 0)
2917		return ret;
2918
2919	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2920	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2921
2922	/* Respond to the IN token with a NAK */
2923	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2924	if (ret < 0)
2925		return ret;
2926
2927	buf |= USB_CFG_BIR_;
2928
2929	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2930	if (ret < 0)
2931		return ret;
2932
2933	/* Init LTM */
2934	lan78xx_init_ltm(dev);
2935
2936	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
2937	if (ret < 0)
2938		return ret;
2939
2940	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
2941	if (ret < 0)
2942		return ret;
2943
2944	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2945	if (ret < 0)
2946		return ret;
2947
2948	buf |= HW_CFG_MEF_;
2949
2950	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2951	if (ret < 0)
2952		return ret;
2953
2954	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2955	if (ret < 0)
2956		return ret;
2957
2958	buf |= USB_CFG_BCE_;
2959
2960	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2961	if (ret < 0)
2962		return ret;
2963
2964	/* set FIFO sizes */
2965	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2966
2967	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2968	if (ret < 0)
2969		return ret;
2970
2971	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2972
2973	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2974	if (ret < 0)
2975		return ret;
2976
2977	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2978	if (ret < 0)
2979		return ret;
2980
2981	ret = lan78xx_write_reg(dev, FLOW, 0);
2982	if (ret < 0)
2983		return ret;
2984
2985	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2986	if (ret < 0)
2987		return ret;
2988
2989	/* Don't need rfe_ctl_lock during initialisation */
2990	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2991	if (ret < 0)
2992		return ret;
2993
2994	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2995
2996	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2997	if (ret < 0)
2998		return ret;
2999
3000	/* Enable or disable checksum offload engines */
3001	ret = lan78xx_set_features(dev->net, dev->net->features);
3002	if (ret < 0)
3003		return ret;
3004
3005	lan78xx_set_multicast(dev->net);
3006
3007	/* reset PHY */
3008	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3009	if (ret < 0)
3010		return ret;
3011
3012	buf |= PMT_CTL_PHY_RST_;
3013
3014	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3015	if (ret < 0)
3016		return ret;
3017
3018	timeout = jiffies + HZ;
3019	do {
3020		mdelay(1);
3021		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3022		if (ret < 0)
3023			return ret;
3024
3025		if (time_after(jiffies, timeout)) {
3026			netdev_warn(dev->net, "timeout waiting for PHY Reset");
3027			ret = -ETIMEDOUT;
3028			return ret;
3029		}
3030	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
3031
3032	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
3033	if (ret < 0)
3034		return ret;
3035
3036	/* LAN7801 only has RGMII mode */
3037	if (dev->chipid == ID_REV_CHIP_ID_7801_)
3038		buf &= ~MAC_CR_GMII_EN_;
3039
3040	if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
3041	    dev->chipid == ID_REV_CHIP_ID_7850_) {
3042		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
3043		if (!ret && sig != EEPROM_INDICATOR) {
3044			/* Implies there is no external eeprom. Set mac speed */
3045			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
3046			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
3047		}
3048	}
3049	ret = lan78xx_write_reg(dev, MAC_CR, buf);
3050	if (ret < 0)
3051		return ret;
3052
3053	ret = lan78xx_set_rx_max_frame_length(dev,
3054					      RX_MAX_FRAME_LEN(dev->net->mtu));
3055
3056	return ret;
3057}
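
/* Worked example (editor's note): the FIFO end registers appear to be
 * expressed in 512-byte units, excluding the first block, so with the
 * 12 KiB FIFOs configured above:
 *
 *	(MAX_RX_FIFO_SIZE - 512) / 512 = (12288 - 512) / 512 = 23
 *
 * i.e. both FCT_RX_FIFO_END and FCT_TX_FIFO_END are programmed to 23.
 */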
3058
3059static void lan78xx_init_stats(struct lan78xx_net *dev)
3060{
3061	u32 *p;
3062	int i;
3063
3064	/* initialize for stats update:
3065	 * some counters are 20 bits wide and some are 32 bits
3066	 */
3067	p = (u32 *)&dev->stats.rollover_max;
3068	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3069		p[i] = 0xFFFFF;
3070
3071	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3072	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3073	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3074	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3075	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3076	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3077	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3078	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3079	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3080	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3081
3082	set_bit(EVENT_STAT_UPDATE, &dev->flags);
3083}
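
/* Editor's note: rollover_max records each counter's wrap point so the
 * stats updater can compute deltas modulo the counter width. A sketch of
 * the usual rollover-safe delta, assuming cur/prev are raw hardware
 * readings and max the matching rollover_max field (always 2^n - 1 here):
 *
 *	u32 delta = (cur - prev) & max;
 *
 * 20-bit packet counters use 0xFFFFF; byte and LPI-time counters wrap at
 * the full 32 bits.
 */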
3084
3085static int lan78xx_open(struct net_device *net)
3086{
3087	struct lan78xx_net *dev = netdev_priv(net);
3088	int ret;
3089
3090	netif_dbg(dev, ifup, dev->net, "open device");
3091
3092	ret = usb_autopm_get_interface(dev->intf);
3093	if (ret < 0)
3094		return ret;
3095
3096	mutex_lock(&dev->dev_mutex);
3097
3098	phy_start(net->phydev);
3099
3100	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
3101
3102	/* for Link Check */
3103	if (dev->urb_intr) {
3104		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
3105		if (ret < 0) {
3106			netif_err(dev, ifup, dev->net,
3107				  "intr submit %d\n", ret);
3108			goto done;
3109		}
3110	}
3111
3112	ret = lan78xx_flush_rx_fifo(dev);
3113	if (ret < 0)
3114		goto done;
3115	ret = lan78xx_flush_tx_fifo(dev);
3116	if (ret < 0)
3117		goto done;
3118
3119	ret = lan78xx_start_tx_path(dev);
3120	if (ret < 0)
3121		goto done;
3122	ret = lan78xx_start_rx_path(dev);
3123	if (ret < 0)
3124		goto done;
3125
3126	lan78xx_init_stats(dev);
3127
3128	set_bit(EVENT_DEV_OPEN, &dev->flags);
3129
3130	netif_start_queue(net);
3131
3132	dev->link_on = false;
3133
3134	napi_enable(&dev->napi);
3135
3136	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
3137done:
3138	mutex_unlock(&dev->dev_mutex);
3139
3140	if (ret < 0)
3141		usb_autopm_put_interface(dev->intf);
3142
3143	return ret;
3144}
3145
3146static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
3147{
3148	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
3149	DECLARE_WAITQUEUE(wait, current);
3150	int temp;
3151
3152	/* ensure there are no more active urbs */
3153	add_wait_queue(&unlink_wakeup, &wait);
3154	set_current_state(TASK_UNINTERRUPTIBLE);
3155	dev->wait = &unlink_wakeup;
3156	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
3157
3158	/* maybe wait for deletions to finish. */
3159	while (!skb_queue_empty(&dev->rxq) ||
3160	       !skb_queue_empty(&dev->txq)) {
3161		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
3162		set_current_state(TASK_UNINTERRUPTIBLE);
3163		netif_dbg(dev, ifdown, dev->net,
3164			  "waited for %d urb completions", temp);
3165	}
3166	set_current_state(TASK_RUNNING);
3167	dev->wait = NULL;
3168	remove_wait_queue(&unlink_wakeup, &wait);
3169
3170	/* empty Rx done, Rx overflow and Tx pend queues
3171	 */
3172	while (!skb_queue_empty(&dev->rxq_done)) {
3173		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);
3174
3175		lan78xx_release_rx_buf(dev, skb);
3176	}
3177
3178	skb_queue_purge(&dev->rxq_overflow);
3179	skb_queue_purge(&dev->txq_pend);
3180}
3181
3182static int lan78xx_stop(struct net_device *net)
3183{
3184	struct lan78xx_net *dev = netdev_priv(net);
3185
3186	netif_dbg(dev, ifup, dev->net, "stop device");
3187
3188	mutex_lock(&dev->dev_mutex);
3189
3190	if (timer_pending(&dev->stat_monitor))
3191		del_timer_sync(&dev->stat_monitor);
3192
3193	clear_bit(EVENT_DEV_OPEN, &dev->flags);
3194	netif_stop_queue(net);
3195	napi_disable(&dev->napi);
3196
3197	lan78xx_terminate_urbs(dev);
3198
3199	netif_info(dev, ifdown, dev->net,
3200		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
3201		   net->stats.rx_packets, net->stats.tx_packets,
3202		   net->stats.rx_errors, net->stats.tx_errors);
3203
3204	/* ignore errors that occur stopping the Tx and Rx data paths */
3205	lan78xx_stop_tx_path(dev);
3206	lan78xx_stop_rx_path(dev);
3207
3208	if (net->phydev)
3209		phy_stop(net->phydev);
3210
3211	usb_kill_urb(dev->urb_intr);
3212
3213	/* Deferred work (task, timer, softirq) must also stop. We can't
3214	 * flush_scheduled_work() until we drop rtnl (later), else the
3215	 * workers could deadlock; so make the workers a NOP.
3216	 */
3217	clear_bit(EVENT_TX_HALT, &dev->flags);
3218	clear_bit(EVENT_RX_HALT, &dev->flags);
3219	clear_bit(EVENT_LINK_RESET, &dev->flags);
3220	clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3221
3222	cancel_delayed_work_sync(&dev->wq);
3223
3224	usb_autopm_put_interface(dev->intf);
3225
3226	mutex_unlock(&dev->dev_mutex);
3227
3228	return 0;
3229}
3230
3231static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
3232			       struct sk_buff_head *list, enum skb_state state)
3233{
3234	unsigned long flags;
3235	enum skb_state old_state;
3236	struct skb_data *entry = (struct skb_data *)skb->cb;
3237
3238	spin_lock_irqsave(&list->lock, flags);
3239	old_state = entry->state;
3240	entry->state = state;
3241
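	/* Lock handoff: interrupts stay disabled while list->lock is dropped
	 * and rxq_done.lock is taken; the irqsave above pairs with the
	 * irqrestore below even though they act on different spinlocks.
	 */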
3242	__skb_unlink(skb, list);
3243	spin_unlock(&list->lock);
3244	spin_lock(&dev->rxq_done.lock);
3245
3246	__skb_queue_tail(&dev->rxq_done, skb);
3247	if (skb_queue_len(&dev->rxq_done) == 1)
3248		napi_schedule(&dev->napi);
3249
3250	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
3251
3252	return old_state;
3253}
3254
3255static void tx_complete(struct urb *urb)
3256{
3257	struct sk_buff *skb = (struct sk_buff *)urb->context;
3258	struct skb_data *entry = (struct skb_data *)skb->cb;
3259	struct lan78xx_net *dev = entry->dev;
3260
3261	if (urb->status == 0) {
3262		dev->net->stats.tx_packets += entry->num_of_packet;
3263		dev->net->stats.tx_bytes += entry->length;
3264	} else {
3265		dev->net->stats.tx_errors += entry->num_of_packet;
3266
3267		switch (urb->status) {
3268		case -EPIPE:
3269			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3270			break;
3271
3272		/* software-driven interface shutdown */
3273		case -ECONNRESET:
3274		case -ESHUTDOWN:
3275			netif_dbg(dev, tx_err, dev->net,
3276				  "tx err interface gone %d\n",
3277				  entry->urb->status);
3278			break;
3279
3280		case -EPROTO:
3281		case -ETIME:
3282		case -EILSEQ:
3283			netif_stop_queue(dev->net);
3284			netif_dbg(dev, tx_err, dev->net,
3285				  "tx err queue stopped %d\n",
3286				  entry->urb->status);
3287			break;
3288		default:
3289			netif_dbg(dev, tx_err, dev->net,
3290				  "unknown tx err %d\n",
3291				  entry->urb->status);
3292			break;
3293		}
3294	}
3295
3296	usb_autopm_put_interface_async(dev->intf);
3297
3298	skb_unlink(skb, &dev->txq);
3299
3300	lan78xx_release_tx_buf(dev, skb);
3301
3302	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
3303	 */
3304	if (skb_queue_empty(&dev->txq) &&
3305	    !skb_queue_empty(&dev->txq_pend))
3306		napi_schedule(&dev->napi);
3307}
3308
3309static void lan78xx_queue_skb(struct sk_buff_head *list,
3310			      struct sk_buff *newsk, enum skb_state state)
3311{
3312	struct skb_data *entry = (struct skb_data *)newsk->cb;
3313
3314	__skb_queue_tail(list, newsk);
3315	entry->state = state;
3316}
3317
3318static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3319{
3320	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3321}
3322
3323static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3324{
3325	return dev->tx_pend_data_len;
3326}
3327
3328static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3329				    struct sk_buff *skb,
3330				    unsigned int *tx_pend_data_len)
3331{
3332	unsigned long flags;
3333
3334	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3335
3336	__skb_queue_tail(&dev->txq_pend, skb);
3337
3338	dev->tx_pend_data_len += skb->len;
3339	*tx_pend_data_len = dev->tx_pend_data_len;
3340
3341	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3342}
3343
3344static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3345					 struct sk_buff *skb,
3346					 unsigned int *tx_pend_data_len)
3347{
3348	unsigned long flags;
3349
3350	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3351
3352	__skb_queue_head(&dev->txq_pend, skb);
3353
3354	dev->tx_pend_data_len += skb->len;
3355	*tx_pend_data_len = dev->tx_pend_data_len;
3356
3357	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3358}
3359
3360static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3361				    struct sk_buff **skb,
3362				    unsigned int *tx_pend_data_len)
3363{
3364	unsigned long flags;
3365
3366	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3367
3368	*skb = __skb_dequeue(&dev->txq_pend);
3369	if (*skb)
3370		dev->tx_pend_data_len -= (*skb)->len;
3371	*tx_pend_data_len = dev->tx_pend_data_len;
3372
3373	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3374}
3375
3376static netdev_tx_t
3377lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3378{
3379	struct lan78xx_net *dev = netdev_priv(net);
3380	unsigned int tx_pend_data_len;
3381
3382	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3383		schedule_delayed_work(&dev->wq, 0);
3384
3385	skb_tx_timestamp(skb);
3386
3387	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);
3388
3389	/* Set up a Tx URB if none is in progress */
3390
3391	if (skb_queue_empty(&dev->txq))
3392		napi_schedule(&dev->napi);
3393
3394	/* Stop stack Tx queue if we have enough data to fill
3395	 * all the free Tx URBs.
3396	 */
3397	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
3398		netif_stop_queue(net);
3399
3400		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
3401			  tx_pend_data_len, lan78xx_tx_urb_space(dev));
3402
3403		/* Kick off transmission of pending data */
3404
3405		if (!skb_queue_empty(&dev->txq_free))
3406			napi_schedule(&dev->napi);
3407	}
3408
3409	return NETDEV_TX_OK;
3410}
3411
3412static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3413{
3414	struct lan78xx_priv *pdata = NULL;
3415	int ret;
3416	int i;
3417
3418	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3419
3420	pdata = (struct lan78xx_priv *)(dev->data[0]);
3421	if (!pdata) {
3422		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3423		return -ENOMEM;
3424	}
3425
3426	pdata->dev = dev;
3427
3428	spin_lock_init(&pdata->rfe_ctl_lock);
3429	mutex_init(&pdata->dataport_mutex);
3430
3431	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3432
3433	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3434		pdata->vlan_table[i] = 0;
3435
3436	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3437
3438	dev->net->features = 0;
3439
3440	if (DEFAULT_TX_CSUM_ENABLE)
3441		dev->net->features |= NETIF_F_HW_CSUM;
3442
3443	if (DEFAULT_RX_CSUM_ENABLE)
3444		dev->net->features |= NETIF_F_RXCSUM;
3445
3446	if (DEFAULT_TSO_CSUM_ENABLE)
3447		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3448
3449	if (DEFAULT_VLAN_RX_OFFLOAD)
3450		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3451
3452	if (DEFAULT_VLAN_FILTER_ENABLE)
3453		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3454
3455	dev->net->hw_features = dev->net->features;
3456
3457	ret = lan78xx_setup_irq_domain(dev);
3458	if (ret < 0) {
3459		netdev_warn(dev->net,
3460			    "lan78xx_setup_irq_domain() failed : %d", ret);
3461		goto out1;
3462	}
3463
3464	/* Init all registers */
3465	ret = lan78xx_reset(dev);
3466	if (ret) {
3467		netdev_warn(dev->net, "Registers INIT FAILED....");
3468		goto out2;
3469	}
3470
3471	ret = lan78xx_mdio_init(dev);
3472	if (ret) {
3473		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3474		goto out2;
3475	}
3476
3477	dev->net->flags |= IFF_MULTICAST;
3478
3479	pdata->wol = WAKE_MAGIC;
3480
3481	return ret;
3482
3483out2:
3484	lan78xx_remove_irq_domain(dev);
3485
3486out1:
3487	netdev_warn(dev->net, "Bind routine FAILED");
3488	cancel_work_sync(&pdata->set_multicast);
3489	cancel_work_sync(&pdata->set_vlan);
3490	kfree(pdata);
3491	return ret;
3492}
3493
3494static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3495{
3496	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3497
3498	lan78xx_remove_irq_domain(dev);
3499
3500	lan78xx_remove_mdio(dev);
3501
3502	if (pdata) {
3503		cancel_work_sync(&pdata->set_multicast);
3504		cancel_work_sync(&pdata->set_vlan);
3505		netif_dbg(dev, ifdown, dev->net, "free pdata");
3506		kfree(pdata);
3507		pdata = NULL;
3508		dev->data[0] = 0;
3509	}
3510}
3511
3512static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3513				    struct sk_buff *skb,
3514				    u32 rx_cmd_a, u32 rx_cmd_b)
3515{
3516	/* HW Checksum offload appears to be flawed if used when not stripping
3517	 * VLAN headers. Drop back to S/W checksums under these conditions.
3518	 */
3519	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3520	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3521	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3522	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3523		skb->ip_summed = CHECKSUM_NONE;
3524	} else {
3525		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3526		skb->ip_summed = CHECKSUM_COMPLETE;
3527	}
3528}
3529
3530static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3531				    struct sk_buff *skb,
3532				    u32 rx_cmd_a, u32 rx_cmd_b)
3533{
3534	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3535	    (rx_cmd_a & RX_CMD_A_FVTG_))
3536		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3537				       (rx_cmd_b & 0xffff));
3538}
3539
3540static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3541{
3542	dev->net->stats.rx_packets++;
3543	dev->net->stats.rx_bytes += skb->len;
3544
3545	skb->protocol = eth_type_trans(skb, dev->net);
3546
3547	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3548		  skb->len + sizeof(struct ethhdr), skb->protocol);
3549	memset(skb->cb, 0, sizeof(struct skb_data));
3550
3551	if (skb_defer_rx_timestamp(skb))
3552		return;
3553
3554	napi_gro_receive(&dev->napi, skb);
3555}
3556
3557static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
3558		      int budget, int *work_done)
3559{
3560	if (skb->len < RX_SKB_MIN_LEN)
3561		return 0;
3562
3563	/* Extract frames from the URB buffer and pass each one to
3564	 * the stack in a new NAPI SKB.
3565	 */
3566	while (skb->len > 0) {
3567		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3568		u16 rx_cmd_c;
3569		unsigned char *packet;
3570
3571		rx_cmd_a = get_unaligned_le32(skb->data);
3572		skb_pull(skb, sizeof(rx_cmd_a));
3573
3574		rx_cmd_b = get_unaligned_le32(skb->data);
3575		skb_pull(skb, sizeof(rx_cmd_b));
3576
3577		rx_cmd_c = get_unaligned_le16(skb->data);
3578		skb_pull(skb, sizeof(rx_cmd_c));
3579
3580		packet = skb->data;
3581
3582		/* get the packet length */
3583		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3584		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3585
3586		if (unlikely(size > skb->len)) {
3587			netif_dbg(dev, rx_err, dev->net,
3588				  "size err rx_cmd_a=0x%08x\n",
3589				  rx_cmd_a);
3590			return 0;
3591		}
3592
3593		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3594			netif_dbg(dev, rx_err, dev->net,
3595				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3596		} else {
3597			u32 frame_len;
3598			struct sk_buff *skb2;
3599
3600			if (unlikely(size < ETH_FCS_LEN)) {
3601				netif_dbg(dev, rx_err, dev->net,
3602					  "size err rx_cmd_a=0x%08x\n",
3603					  rx_cmd_a);
3604				return 0;
3605			}
3606
3607			frame_len = size - ETH_FCS_LEN;
3608
3609			skb2 = napi_alloc_skb(&dev->napi, frame_len);
3610			if (!skb2)
3611				return 0;
3612
3613			memcpy(skb2->data, packet, frame_len);
3614
3615			skb_put(skb2, frame_len);
3616
3617			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3618			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3619
3620			/* Processing of the URB buffer must complete once
3621			 * it has started. If the NAPI work budget is exhausted
3622			 * while frames remain they are added to the overflow
3623			 * queue for delivery in the next NAPI polling cycle.
3624			 */
3625			if (*work_done < budget) {
3626				lan78xx_skb_return(dev, skb2);
3627				++(*work_done);
3628			} else {
3629				skb_queue_tail(&dev->rxq_overflow, skb2);
3630			}
3631		}
3632
3633		skb_pull(skb, size);
3634
3635		/* skip padding bytes before the next frame starts */
3636		if (skb->len)
3637			skb_pull(skb, align_count);
3638	}
3639
3640	return 1;
3641}
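
/* Worked example (editor's note): each frame in a bulk-in URB buffer is
 * preceded by a 10-byte header (rx_cmd_a, rx_cmd_b, rx_cmd_c) and padded so
 * the next header starts 4-byte aligned relative to RXW_PADDING. For a
 * 60-byte frame (size includes the 4-byte FCS):
 *
 *	align_count = (4 - ((60 + RXW_PADDING) % 4)) % 4;	// (4 - 2) % 4 = 2
 *
 * so the loop consumes 10 + 60 + 2 bytes before the next rx_cmd_a.
 */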
3642
3643static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3644			      int budget, int *work_done)
3645{
3646	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3647		netif_dbg(dev, rx_err, dev->net, "drop\n");
3648		dev->net->stats.rx_errors++;
3649	}
3650}
3651
3652static void rx_complete(struct urb *urb)
3653{
3654	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3655	struct skb_data	*entry = (struct skb_data *)skb->cb;
3656	struct lan78xx_net *dev = entry->dev;
3657	int urb_status = urb->status;
3658	enum skb_state state;
3659
3660	netif_dbg(dev, rx_status, dev->net,
3661		  "rx done: status %d", urb->status);
3662
3663	skb_put(skb, urb->actual_length);
3664	state = rx_done;
3665
3666	if (urb != entry->urb)
3667		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");
3668
3669	switch (urb_status) {
3670	case 0:
3671		if (skb->len < RX_SKB_MIN_LEN) {
3672			state = rx_cleanup;
3673			dev->net->stats.rx_errors++;
3674			dev->net->stats.rx_length_errors++;
3675			netif_dbg(dev, rx_err, dev->net,
3676				  "rx length %d\n", skb->len);
3677		}
3678		usb_mark_last_busy(dev->udev);
3679		break;
3680	case -EPIPE:
3681		dev->net->stats.rx_errors++;
3682		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3683		fallthrough;
3684	case -ECONNRESET:				/* async unlink */
3685	case -ESHUTDOWN:				/* hardware gone */
3686		netif_dbg(dev, ifdown, dev->net,
3687			  "rx shutdown, code %d\n", urb_status);
3688		state = rx_cleanup;
3689		break;
3690	case -EPROTO:
3691	case -ETIME:
3692	case -EILSEQ:
3693		dev->net->stats.rx_errors++;
3694		state = rx_cleanup;
3695		break;
3696
3697	/* data overrun ... flush fifo? */
3698	case -EOVERFLOW:
3699		dev->net->stats.rx_over_errors++;
3700		fallthrough;
3701
3702	default:
3703		state = rx_cleanup;
3704		dev->net->stats.rx_errors++;
3705		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3706		break;
3707	}
3708
3709	state = defer_bh(dev, skb, &dev->rxq, state);
3710}
3711
3712static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
3713{
3714	struct skb_data	*entry = (struct skb_data *)skb->cb;
3715	size_t size = dev->rx_urb_size;
3716	struct urb *urb = entry->urb;
3717	unsigned long lockflags;
3718	int ret = 0;
3719
3720	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3721			  skb->data, size, rx_complete, skb);
3722
3723	spin_lock_irqsave(&dev->rxq.lock, lockflags);
3724
3725	if (netif_device_present(dev->net) &&
3726	    netif_running(dev->net) &&
3727	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3728	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3729		ret = usb_submit_urb(urb, flags);
3730		switch (ret) {
3731		case 0:
3732			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3733			break;
3734		case -EPIPE:
3735			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3736			break;
3737		case -ENODEV:
3738		case -ENOENT:
3739			netif_dbg(dev, ifdown, dev->net, "device gone\n");
3740			netif_device_detach(dev->net);
3741			break;
3742		case -EHOSTUNREACH:
3743			ret = -ENOLINK;
3744			napi_schedule(&dev->napi);
3745			break;
3746		default:
3747			netif_dbg(dev, rx_err, dev->net,
3748				  "rx submit, %d\n", ret);
3749			napi_schedule(&dev->napi);
3750			break;
3751		}
3752	} else {
3753		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3754		ret = -ENOLINK;
3755	}
3756	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3757
3758	if (ret)
3759		lan78xx_release_rx_buf(dev, skb);
3760
3761	return ret;
3762}
3763
3764static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
3765{
3766	struct sk_buff *rx_buf;
3767
3768	/* Ensure the maximum number of Rx URBs is submitted
3769	 */
3770	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
3771		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
3772			break;
3773	}
3774}
3775
3776static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
3777				    struct sk_buff *rx_buf)
3778{
3779	/* reset SKB data pointers */
3780
3781	rx_buf->data = rx_buf->head;
3782	skb_reset_tail_pointer(rx_buf);
3783	rx_buf->len = 0;
3784	rx_buf->data_len = 0;
3785
3786	rx_submit(dev, rx_buf, GFP_ATOMIC);
3787}
3788
3789static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
3790{
3791	u32 tx_cmd_a;
3792	u32 tx_cmd_b;
3793
3794	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
3795
3796	if (skb->ip_summed == CHECKSUM_PARTIAL)
3797		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
3798
3799	tx_cmd_b = 0;
3800	if (skb_is_gso(skb)) {
3801		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
3802
3803		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
3804
3805		tx_cmd_a |= TX_CMD_A_LSO_;
3806	}
3807
3808	if (skb_vlan_tag_present(skb)) {
3809		tx_cmd_a |= TX_CMD_A_IVTG_;
3810		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
3811	}
3812
3813	put_unaligned_le32(tx_cmd_a, buffer);
3814	put_unaligned_le32(tx_cmd_b, buffer + 4);
3815}
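
/* Worked example (editor's note, exact field layouts per the TX_CMD_*
 * definitions in lan78xx.h): a plain 1514-byte frame with CHECKSUM_PARTIAL
 * set and no VLAN tag or GSO would yield
 *
 *	tx_cmd_a = 1514 | TX_CMD_A_FCS_ | TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
 *	tx_cmd_b = 0;
 *
 * stored little-endian as the two 32-bit words ahead of the frame data.
 */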
3816
3817static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
3818					    struct sk_buff *tx_buf)
3819{
3820	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
3821	int remain = dev->tx_urb_size;
3822	u8 *tx_data = tx_buf->data;
3823	u32 urb_len = 0;
3824
3825	entry->num_of_packet = 0;
3826	entry->length = 0;
3827
3828	/* Work through the pending SKBs and copy the data of each SKB into
3829	 * the URB buffer if there is room for all the SKB data.
3830	 *
3831	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled).
3832	 */
3833	while (remain >= TX_SKB_MIN_LEN) {
3834		unsigned int pending_bytes;
3835		unsigned int align_bytes;
3836		struct sk_buff *skb;
3837		unsigned int len;
3838
3839		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);
3840
3841		if (!skb)
3842			break;
3843
3844		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
3845			      TX_ALIGNMENT;
3846		len = align_bytes + TX_CMD_LEN + skb->len;
3847		if (len > remain) {
3848			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
3849			break;
3850		}
3851
3852		tx_data += align_bytes;
3853
3854		lan78xx_fill_tx_cmd_words(skb, tx_data);
3855		tx_data += TX_CMD_LEN;
3856
3857		len = skb->len;
3858		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
3859			struct net_device_stats *stats = &dev->net->stats;
3860
3861			stats->tx_dropped++;
3862			dev_kfree_skb_any(skb);
3863			tx_data -= TX_CMD_LEN;
3864			continue;
3865		}
3866
3867		tx_data += len;
3868		entry->length += len;
3869		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;
3870
3871		dev_kfree_skb_any(skb);
3872
3873		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);
3874
3875		remain = dev->tx_urb_size - urb_len;
3876	}
3877
3878	skb_put(tx_buf, urb_len);
3879
3880	return entry;
3881}
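
/* Worked example (editor's note): frames are placed at TX_ALIGNMENT (4-byte)
 * boundaries inside the URB buffer. If urb_len is 1526 after one frame, the
 * next frame needs
 *
 *	align_bytes = (4 - (1526 % 4)) % 4;	// (4 - 2) % 4 = 2
 *
 * pad bytes before its TX command words (TX_CMD_LEN bytes) are written.
 */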
3882
3883static void lan78xx_tx_bh(struct lan78xx_net *dev)
3884{
3885	int ret;
3886
3887	/* Start the stack Tx queue if it was stopped
3888	 */
3889	netif_tx_lock(dev->net);
3890	if (netif_queue_stopped(dev->net)) {
3891		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
3892			netif_wake_queue(dev->net);
3893	}
3894	netif_tx_unlock(dev->net);
3895
3896	/* Go through the Tx pending queue and set up URBs to transfer
3897	 * the data to the device. Stop if no more pending data or URBs,
3898	 * or if an error occurs when a URB is submitted.
3899	 */
3900	do {
3901		struct skb_data *entry;
3902		struct sk_buff *tx_buf;
3903		unsigned long flags;
3904
3905		if (skb_queue_empty(&dev->txq_pend))
3906			break;
3907
3908		tx_buf = lan78xx_get_tx_buf(dev);
3909		if (!tx_buf)
3910			break;
3911
3912		entry = lan78xx_tx_buf_fill(dev, tx_buf);
3913
3914		spin_lock_irqsave(&dev->txq.lock, flags);
3915		ret = usb_autopm_get_interface_async(dev->intf);
3916		if (ret < 0) {
3917			spin_unlock_irqrestore(&dev->txq.lock, flags);
3918			goto out;
3919		}
3920
3921		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
3922				  tx_buf->data, tx_buf->len, tx_complete,
3923				  tx_buf);
3924
3925		if (tx_buf->len % dev->maxpacket == 0) {
3926			/* send USB_ZERO_PACKET */
3927			entry->urb->transfer_flags |= URB_ZERO_PACKET;
3928		}
3929
3930#ifdef CONFIG_PM
3931		/* if device is asleep stop outgoing packet processing */
3932		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3933			usb_anchor_urb(entry->urb, &dev->deferred);
3934			netif_stop_queue(dev->net);
3935			spin_unlock_irqrestore(&dev->txq.lock, flags);
3936			netdev_dbg(dev->net,
3937				   "Delaying transmission for resumption\n");
3938			return;
3939		}
3940#endif
3941		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
3942		switch (ret) {
3943		case 0:
3944			netif_trans_update(dev->net);
3945			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
3946			break;
3947		case -EPIPE:
3948			netif_stop_queue(dev->net);
3949			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3950			usb_autopm_put_interface_async(dev->intf);
3951			break;
3952		case -ENODEV:
3953		case -ENOENT:
3954			netif_dbg(dev, tx_err, dev->net,
3955				  "tx submit urb err %d (disconnected?)", ret);
3956			netif_device_detach(dev->net);
3957			break;
3958		default:
3959			usb_autopm_put_interface_async(dev->intf);
3960			netif_dbg(dev, tx_err, dev->net,
3961				  "tx submit urb err %d\n", ret);
3962			break;
3963		}
3964
3965		spin_unlock_irqrestore(&dev->txq.lock, flags);
3966
3967		if (ret) {
3968			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
3969out:
3970			dev->net->stats.tx_dropped += entry->num_of_packet;
3971			lan78xx_release_tx_buf(dev, tx_buf);
3972		}
3973	} while (ret == 0);
3974}
3975
3976static int lan78xx_bh(struct lan78xx_net *dev, int budget)
3977{
3978	struct sk_buff_head done;
3979	struct sk_buff *rx_buf;
3980	struct skb_data *entry;
3981	unsigned long flags;
3982	int work_done = 0;
3983
3984	/* Pass frames received in the last NAPI cycle before
3985	 * working on newly completed URBs.
3986	 */
3987	while (!skb_queue_empty(&dev->rxq_overflow)) {
3988		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
3989		++work_done;
3990	}
3991
3992	/* Take a snapshot of the done queue and move items to a
3993	 * temporary queue. Rx URB completions will continue to add
3994	 * to the done queue.
3995	 */
3996	__skb_queue_head_init(&done);
3997
3998	spin_lock_irqsave(&dev->rxq_done.lock, flags);
3999	skb_queue_splice_init(&dev->rxq_done, &done);
4000	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4001
4002	/* Extract receive frames from completed URBs and
4003	 * pass them to the stack. Re-submit each completed URB.
4004	 */
4005	while ((work_done < budget) &&
4006	       (rx_buf = __skb_dequeue(&done))) {
4007		entry = (struct skb_data *)(rx_buf->cb);
4008		switch (entry->state) {
4009		case rx_done:
4010			rx_process(dev, rx_buf, budget, &work_done);
4011			break;
4012		case rx_cleanup:
4013			break;
4014		default:
4015			netdev_dbg(dev->net, "rx buf state %d\n",
4016				   entry->state);
4017			break;
4018		}
4019
4020		lan78xx_rx_urb_resubmit(dev, rx_buf);
4021	}
4022
4023	/* If budget was consumed before processing all the URBs put them
4024	 * back on the front of the done queue. They will be first to be
4025	 * processed in the next NAPI cycle.
4026	 */
4027	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4028	skb_queue_splice(&done, &dev->rxq_done);
4029	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4030
4031	if (netif_device_present(dev->net) && netif_running(dev->net)) {
4032		/* reset update timer delta */
4033		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
4034			dev->delta = 1;
4035			mod_timer(&dev->stat_monitor,
4036				  jiffies + STAT_UPDATE_TIMER);
4037		}
4038
4039		/* Submit all free Rx URBs */
4040
4041		if (!test_bit(EVENT_RX_HALT, &dev->flags))
4042			lan78xx_rx_urb_submit_all(dev);
4043
4044		/* Submit new Tx URBs */
4045
4046		lan78xx_tx_bh(dev);
4047	}
4048
4049	return work_done;
4050}
4051
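/* NAPI poll callback. No work is done while the device is asleep.
 * If the budget is not exhausted, polling is completed but immediately
 * rescheduled when more completions are already queued or when stopped
 * Tx traffic can now make progress.
 */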
4052static int lan78xx_poll(struct napi_struct *napi, int budget)
4053{
4054	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
4055	int result = budget;
4056	int work_done;
4057
4058	/* Don't do any work if the device is suspended */
4059
4060	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4061		napi_complete_done(napi, 0);
4062		return 0;
4063	}
4064
4065	/* Process completed URBs and submit new URBs */
4066
4067	work_done = lan78xx_bh(dev, budget);
4068
4069	if (work_done < budget) {
4070		napi_complete_done(napi, work_done);
4071
4072		/* Start a new polling cycle if data was received or
4073		 * data is waiting to be transmitted.
4074		 */
4075		if (!skb_queue_empty(&dev->rxq_done)) {
4076			napi_schedule(napi);
4077		} else if (netif_carrier_ok(dev->net)) {
4078			if (skb_queue_empty(&dev->txq) &&
4079			    !skb_queue_empty(&dev->txq_pend)) {
4080				napi_schedule(napi);
4081			} else {
4082				netif_tx_lock(dev->net);
4083				if (netif_queue_stopped(dev->net)) {
4084					netif_wake_queue(dev->net);
4085					napi_schedule(napi);
4086				}
4087				netif_tx_unlock(dev->net);
4088			}
4089		}
4090		result = work_done;
4091	}
4092
4093	return result;
4094}
4095
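/* Delayed work handler for events flagged from atomic context. Halted
 * bulk endpoints are cleared here because usb_clear_halt() may sleep;
 * deferred link resets and statistics updates are also serviced, with
 * the stat timer interval backed off up to 50 * STAT_UPDATE_TIMER.
 */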
4096static void lan78xx_delayedwork(struct work_struct *work)
4097{
4098	int status;
4099	struct lan78xx_net *dev;
4100
4101	dev = container_of(work, struct lan78xx_net, wq.work);
4102
4103	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4104		return;
4105
4106	if (usb_autopm_get_interface(dev->intf) < 0)
4107		return;
4108
4109	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4110		unlink_urbs(dev, &dev->txq);
4111
4112		status = usb_clear_halt(dev->udev, dev->pipe_out);
4113		if (status < 0 &&
4114		    status != -EPIPE &&
4115		    status != -ESHUTDOWN) {
4116			if (netif_msg_tx_err(dev))
4117				netdev_err(dev->net,
4118					   "can't clear tx halt, status %d\n",
4119					   status);
4120		} else {
4121			clear_bit(EVENT_TX_HALT, &dev->flags);
4122			if (status != -ESHUTDOWN)
4123				netif_wake_queue(dev->net);
4124		}
4125	}
4126
4127	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4128		unlink_urbs(dev, &dev->rxq);
4129		status = usb_clear_halt(dev->udev, dev->pipe_in);
4130		if (status < 0 &&
4131		    status != -EPIPE &&
4132		    status != -ESHUTDOWN) {
4133			if (netif_msg_rx_err(dev))
4134				netdev_err(dev->net,
4135					   "can't clear rx halt, status %d\n",
4136					   status);
4137		} else {
4138			clear_bit(EVENT_RX_HALT, &dev->flags);
4139			napi_schedule(&dev->napi);
4140		}
4141	}
4142
4143	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
4144		int ret;
4145
4146		clear_bit(EVENT_LINK_RESET, &dev->flags);
4147		ret = lan78xx_link_reset(dev);
4148		if (ret < 0)
4149			netdev_info(dev->net, "link reset failed (%d)\n",
4150				    ret);
4151	}
4152
4153	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4154		lan78xx_update_stats(dev);
4155
4156		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4157
4158		mod_timer(&dev->stat_monitor,
4159			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4160
4161		dev->delta = min((dev->delta * 2), 50);
4162	}
4163
4164	usb_autopm_put_interface(dev->intf);
4165}
4166
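/* Completion handler for the interrupt (status) endpoint URB. A
 * successful status payload is handed to lan78xx_status(); the URB is
 * then cleared and resubmitted unless the interface is shutting down
 * or the device has disappeared.
 */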
4167static void intr_complete(struct urb *urb)
4168{
4169	struct lan78xx_net *dev = urb->context;
4170	int status = urb->status;
4171
4172	switch (status) {
4173	/* success */
4174	case 0:
4175		lan78xx_status(dev, urb);
4176		break;
4177
4178	/* software-driven interface shutdown */
4179	case -ENOENT:			/* urb killed */
4180	case -ENODEV:			/* hardware gone */
4181	case -ESHUTDOWN:		/* hardware gone */
4182		netif_dbg(dev, ifdown, dev->net,
4183			  "intr shutdown, code %d\n", status);
4184		return;
4185
4186	/* NOTE:  not throttling like RX/TX, since this endpoint
4187	 * already polls infrequently
4188	 */
4189	default:
4190		netdev_dbg(dev->net, "intr status %d\n", status);
4191		break;
4192	}
4193
4194	if (!netif_device_present(dev->net) ||
4195	    !netif_running(dev->net)) {
4196		netdev_warn(dev->net, "not submitting new status URB\n");
4197		return;
4198	}
4199
4200	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
4201	status = usb_submit_urb(urb, GFP_ATOMIC);
4202
4203	switch (status) {
4204	case  0:
4205		break;
4206	case -ENODEV:
4207	case -ENOENT:
4208		netif_dbg(dev, timer, dev->net,
4209			  "intr resubmit %d (disconnect?)\n", status);
4210		netif_device_detach(dev->net);
4211		break;
4212	default:
4213		netif_err(dev, timer, dev->net,
4214			  "intr resubmit --> %d\n", status);
4215		break;
4216	}
4217}
4218
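/* USB disconnect handler: unwinds probe in reverse - unregister the
 * netdev, stop the stat timer and deferred work, detach the PHY (and
 * unregister a fixed-link pseudo-PHY if one was created), drop any Tx
 * URBs anchored while suspended, then free the URB pools and the
 * interrupt URB.
 */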
4219static void lan78xx_disconnect(struct usb_interface *intf)
4220{
4221	struct lan78xx_net *dev;
4222	struct usb_device *udev;
4223	struct net_device *net;
4224	struct phy_device *phydev;
4225
4226	dev = usb_get_intfdata(intf);
4227	usb_set_intfdata(intf, NULL);
4228	if (!dev)
4229		return;
4230
4231	netif_napi_del(&dev->napi);
4232
4233	udev = interface_to_usbdev(intf);
4234	net = dev->net;
4235
4236	unregister_netdev(net);
4237
4238	timer_shutdown_sync(&dev->stat_monitor);
4239	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
4240	cancel_delayed_work_sync(&dev->wq);
4241
4242	phydev = net->phydev;
4243
4244	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
4245	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
4246
4247	phy_disconnect(phydev);
4248
4249	if (phy_is_pseudo_fixed_link(phydev))
4250		fixed_phy_unregister(phydev);
4251
4252	usb_scuttle_anchored_urbs(&dev->deferred);
4253
4254	lan78xx_unbind(dev, intf);
4255
4256	lan78xx_free_tx_resources(dev);
4257	lan78xx_free_rx_resources(dev);
4258
4259	usb_kill_urb(dev->urb_intr);
4260	usb_free_urb(dev->urb_intr);
4261
4262	free_netdev(net);
4263	usb_put_dev(udev);
4264}
4265
4266static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
4267{
4268	struct lan78xx_net *dev = netdev_priv(net);
4269
4270	unlink_urbs(dev, &dev->txq);
4271	napi_schedule(&dev->napi);
4272}
4273
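/* Disable GSO for frames that exceed the device's TSO limit, then
 * apply the common VLAN and VXLAN offload feature checks.
 */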
4274static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4275						struct net_device *netdev,
4276						netdev_features_t features)
4277{
4278	struct lan78xx_net *dev = netdev_priv(netdev);
4279
4280	if (skb->len > LAN78XX_TSO_SIZE(dev))
4281		features &= ~NETIF_F_GSO_MASK;
4282
4283	features = vlan_features_check(skb, features);
4284	features = vxlan_features_check(skb, features);
4285
4286	return features;
4287}
4288
4289static const struct net_device_ops lan78xx_netdev_ops = {
4290	.ndo_open		= lan78xx_open,
4291	.ndo_stop		= lan78xx_stop,
4292	.ndo_start_xmit		= lan78xx_start_xmit,
4293	.ndo_tx_timeout		= lan78xx_tx_timeout,
4294	.ndo_change_mtu		= lan78xx_change_mtu,
4295	.ndo_set_mac_address	= lan78xx_set_mac_addr,
4296	.ndo_validate_addr	= eth_validate_addr,
4297	.ndo_eth_ioctl		= phy_do_ioctl_running,
4298	.ndo_set_rx_mode	= lan78xx_set_multicast,
4299	.ndo_set_features	= lan78xx_set_features,
4300	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
4301	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
4302	.ndo_features_check	= lan78xx_features_check,
4303};
4304
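/* Statistics timer callback; runs in atomic context, so it only defers
 * EVENT_STAT_UPDATE to the workqueue.
 */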
4305static void lan78xx_stat_monitor(struct timer_list *t)
4306{
4307	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
4308
4309	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
4310}
4311
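/* Probe: allocate the netdev and Tx/Rx URB pools, validate that the
 * interface exposes the expected bulk-in, bulk-out and interrupt-in
 * endpoints, bind the MAC, prepare the interrupt URB, initialize the
 * PHY and register the netdev. The out1..out8 labels unwind these
 * steps in reverse on failure.
 */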
4312static int lan78xx_probe(struct usb_interface *intf,
4313			 const struct usb_device_id *id)
4314{
4315	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
4316	struct lan78xx_net *dev;
4317	struct net_device *netdev;
4318	struct usb_device *udev;
4319	int ret;
4320	unsigned int maxp;
4321	unsigned int period;
4322	u8 *buf = NULL;
4323
4324	udev = interface_to_usbdev(intf);
4325	udev = usb_get_dev(udev);
4326
4327	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
4328	if (!netdev) {
4329		dev_err(&intf->dev, "Error: OOM\n");
4330		ret = -ENOMEM;
4331		goto out1;
4332	}
4333
4334	/* netdev_printk() needs this */
4335	SET_NETDEV_DEV(netdev, &intf->dev);
4336
4337	dev = netdev_priv(netdev);
4338	dev->udev = udev;
4339	dev->intf = intf;
4340	dev->net = netdev;
4341	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
4342					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
4343
4344	skb_queue_head_init(&dev->rxq);
4345	skb_queue_head_init(&dev->txq);
4346	skb_queue_head_init(&dev->rxq_done);
4347	skb_queue_head_init(&dev->txq_pend);
4348	skb_queue_head_init(&dev->rxq_overflow);
4349	mutex_init(&dev->phy_mutex);
4350	mutex_init(&dev->dev_mutex);
4351
4352	ret = lan78xx_urb_config_init(dev);
4353	if (ret < 0)
4354		goto out2;
4355
4356	ret = lan78xx_alloc_tx_resources(dev);
4357	if (ret < 0)
4358		goto out2;
4359
4360	ret = lan78xx_alloc_rx_resources(dev);
4361	if (ret < 0)
4362		goto out3;
4363
4364	/* MTU range: 68 - 9000 */
4365	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
4366
4367	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
4368
4369	netif_napi_add(netdev, &dev->napi, lan78xx_poll);
4370
4371	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
4372	init_usb_anchor(&dev->deferred);
4373
4374	netdev->netdev_ops = &lan78xx_netdev_ops;
4375	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
4376	netdev->ethtool_ops = &lan78xx_ethtool_ops;
4377
4378	dev->delta = 1;
4379	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
4380
4381	mutex_init(&dev->stats.access_lock);
4382
4383	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
4384		ret = -ENODEV;
4385		goto out4;
4386	}
4387
4388	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
4389	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
4390	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
4391		ret = -ENODEV;
4392		goto out4;
4393	}
4394
4395	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
4396	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
4397	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
4398		ret = -ENODEV;
4399		goto out4;
4400	}
4401
4402	ep_intr = &intf->cur_altsetting->endpoint[2];
4403	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
4404		ret = -ENODEV;
4405		goto out4;
4406	}
4407
4408	dev->pipe_intr = usb_rcvintpipe(dev->udev,
4409					usb_endpoint_num(&ep_intr->desc));
4410
4411	ret = lan78xx_bind(dev, intf);
4412	if (ret < 0)
4413		goto out4;
4414
4415	period = ep_intr->desc.bInterval;
4416	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
4417	buf = kmalloc(maxp, GFP_KERNEL);
4418	if (!buf) {
4419		ret = -ENOMEM;
4420		goto out5;
4421	}
4422
4423	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
4424	if (!dev->urb_intr) {
4425		ret = -ENOMEM;
4426		goto out6;
4427	}
4428
4429	usb_fill_int_urb(dev->urb_intr, dev->udev,
4430			 dev->pipe_intr, buf, maxp,
4431			 intr_complete, dev, period);
4432	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4433
4434	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
4435
4436	/* Reject broken descriptors. */
4437	if (dev->maxpacket == 0) {
4438		ret = -ENODEV;
4439		goto out6;
4440	}
4441
4442	/* driver requires remote-wakeup capability during autosuspend. */
4443	intf->needs_remote_wakeup = 1;
4444
4445	ret = lan78xx_phy_init(dev);
4446	if (ret < 0)
4447		goto out7;
4448
4449	ret = register_netdev(netdev);
4450	if (ret != 0) {
4451		netif_err(dev, probe, netdev, "couldn't register the device\n");
4452		goto out8;
4453	}
4454
4455	usb_set_intfdata(intf, dev);
4456
4457	ret = device_set_wakeup_enable(&udev->dev, true);
4458
4459	/* The default autosuspend delay of 2 seconds has more overhead
4460	 * than benefit; raise it to 10 seconds.
4461	 */
4462	pm_runtime_set_autosuspend_delay(&udev->dev,
4463					 DEFAULT_AUTOSUSPEND_DELAY);
4464
4465	return 0;
4466
4467out8:
4468	phy_disconnect(netdev->phydev);
4469out7:
4470	usb_free_urb(dev->urb_intr);
4471out6:
4472	kfree(buf);
4473out5:
4474	lan78xx_unbind(dev, intf);
4475out4:
4476	netif_napi_del(&dev->napi);
4477	lan78xx_free_rx_resources(dev);
4478out3:
4479	lan78xx_free_tx_resources(dev);
4480out2:
4481	free_netdev(netdev);
4482out1:
4483	usb_put_dev(udev);
4484
4485	return ret;
4486}
4487
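/* Compute the CRC-16 that the wakeup-frame filters match against:
 * polynomial x^16 + x^15 + x^2 + 1 (0x8005), initial value 0xFFFF,
 * data bits consumed LSB first. The result is programmed into the
 * CRC16 field of a WUF_CFGx register.
 */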
4488static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4489{
4490	const u16 crc16poly = 0x8005;
4491	int i;
4492	u16 bit, crc, msb;
4493	u8 data;
4494
4495	crc = 0xFFFF;
4496	for (i = 0; i < len; i++) {
4497		data = *buf++;
4498		for (bit = 0; bit < 8; bit++) {
4499			msb = crc >> 15;
4500			crc <<= 1;
4501
4502			if (msb ^ (u16)(data & 1)) {
4503				crc ^= crc16poly;
4504				crc |= (u16)0x0001U;
4505			}
4506			data >>= 1;
4507		}
4508	}
4509
4510	return crc;
4511}
4512
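/* Arm the device for USB autosuspend (selective suspend): stop both
 * data paths, clear the wakeup control/status registers, enable wake
 * on receive-filter and PHY events, select suspend mode 3, clear any
 * latched WUPS status (write one to clear) and restart the Rx path so
 * incoming traffic can trigger remote wakeup.
 */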
4513static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
4514{
4515	u32 buf;
4516	int ret;
4517
4518	ret = lan78xx_stop_tx_path(dev);
4519	if (ret < 0)
4520		return ret;
4521
4522	ret = lan78xx_stop_rx_path(dev);
4523	if (ret < 0)
4524		return ret;
4525
4526	/* auto suspend (selective suspend) */
4527
4528	ret = lan78xx_write_reg(dev, WUCSR, 0);
4529	if (ret < 0)
4530		return ret;
4531	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4532	if (ret < 0)
4533		return ret;
4534	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4535	if (ret < 0)
4536		return ret;
4537
4538	/* set goodframe wakeup */
4539
4540	ret = lan78xx_read_reg(dev, WUCSR, &buf);
4541	if (ret < 0)
4542		return ret;
4543
4544	buf |= WUCSR_RFE_WAKE_EN_;
4545	buf |= WUCSR_STORE_WAKE_;
4546
4547	ret = lan78xx_write_reg(dev, WUCSR, buf);
4548	if (ret < 0)
4549		return ret;
4550
4551	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4552	if (ret < 0)
4553		return ret;
4554
4555	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4556	buf |= PMT_CTL_RES_CLR_WKP_STS_;
4557	buf |= PMT_CTL_PHY_WAKE_EN_;
4558	buf |= PMT_CTL_WOL_EN_;
4559	buf &= ~PMT_CTL_SUS_MODE_MASK_;
4560	buf |= PMT_CTL_SUS_MODE_3_;
4561
4562	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4563	if (ret < 0)
4564		return ret;
4565
4566	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4567	if (ret < 0)
4568		return ret;
4569
4570	buf |= PMT_CTL_WUPS_MASK_;
4571
4572	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4573	if (ret < 0)
4574		return ret;
4575
4576	ret = lan78xx_start_rx_path(dev);
4577
4578	return ret;
4579}
4580
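/* Arm wake-on-LAN for system suspend. Each requested source maps to
 * WUCSR enable bits; multicast and ARP wakeups additionally program
 * WUF_CFGx/WUF_MASKx pattern filters keyed by a CRC-16 of the matched
 * header bytes. A magic-packet-only configuration uses suspend mode 3;
 * any other source or combination ends up in suspend mode 0.
 */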
4581static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
4582{
4583	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
4584	const u8 ipv6_multicast[2] = { 0x33, 0x33 };
4585	const u8 arp_type[2] = { 0x08, 0x06 };
4586	u32 temp_pmt_ctl;
4587	int mask_index;
4588	u32 temp_wucsr;
4589	u32 buf;
4590	u16 crc;
4591	int ret;
4592
4593	ret = lan78xx_stop_tx_path(dev);
4594	if (ret < 0)
4595		return ret;
4596	ret = lan78xx_stop_rx_path(dev);
4597	if (ret < 0)
4598		return ret;
4599
4600	ret = lan78xx_write_reg(dev, WUCSR, 0);
4601	if (ret < 0)
4602		return ret;
4603	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4604	if (ret < 0)
4605		return ret;
4606	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4607	if (ret < 0)
4608		return ret;
4609
4610	temp_wucsr = 0;
4611
4612	temp_pmt_ctl = 0;
4613
4614	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
4615	if (ret < 0)
4616		return ret;
4617
4618	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
4619	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
4620
4621	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
4622		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
4623		if (ret < 0)
4624			return ret;
4625	}
4626
4627	mask_index = 0;
4628	if (wol & WAKE_PHY) {
4629		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
4630
4631		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4632		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4633		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4634	}
4635	if (wol & WAKE_MAGIC) {
4636		temp_wucsr |= WUCSR_MPEN_;
4637
4638		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4639		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4640		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
4641	}
4642	if (wol & WAKE_BCAST) {
4643		temp_wucsr |= WUCSR_BCST_EN_;
4644
4645		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4646		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4647		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4648	}
4649	if (wol & WAKE_MCAST) {
4650		temp_wucsr |= WUCSR_WAKE_EN_;
4651
4652		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
4653		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
4654		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4655					WUF_CFGX_EN_ |
4656					WUF_CFGX_TYPE_MCAST_ |
4657					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4658					(crc & WUF_CFGX_CRC16_MASK_));
4659		if (ret < 0)
4660			return ret;
4661
4662		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
4663		if (ret < 0)
4664			return ret;
4665		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4666		if (ret < 0)
4667			return ret;
4668		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4669		if (ret < 0)
4670			return ret;
4671		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4672		if (ret < 0)
4673			return ret;
4674
4675		mask_index++;
4676
4677		/* for IPv6 Multicast */
4678		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
4679		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4680					WUF_CFGX_EN_ |
4681					WUF_CFGX_TYPE_MCAST_ |
4682					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4683					(crc & WUF_CFGX_CRC16_MASK_));
4684		if (ret < 0)
4685			return ret;
4686
4687		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
4688		if (ret < 0)
4689			return ret;
4690		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4691		if (ret < 0)
4692			return ret;
4693		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4694		if (ret < 0)
4695			return ret;
4696		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4697		if (ret < 0)
4698			return ret;
4699
4700		mask_index++;
4701
4702		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4703		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4704		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4705	}
4706	if (wol & WAKE_UCAST) {
4707		temp_wucsr |= WUCSR_PFDA_EN_;
4708
4709		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4710		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4711		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4712	}
4713	if (wol & WAKE_ARP) {
4714		temp_wucsr |= WUCSR_WAKE_EN_;
4715
4716		/* set WUF_CFG & WUF_MASK
4717		 * for packettype (offset 12,13) = ARP (0x0806)
4718		 */
4719		crc = lan78xx_wakeframe_crc16(arp_type, 2);
4720		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4721					WUF_CFGX_EN_ |
4722					WUF_CFGX_TYPE_ALL_ |
4723					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4724					(crc & WUF_CFGX_CRC16_MASK_));
4725		if (ret < 0)
4726			return ret;
4727
4728		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
4729		if (ret < 0)
4730			return ret;
4731		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4732		if (ret < 0)
4733			return ret;
4734		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4735		if (ret < 0)
4736			return ret;
4737		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4738		if (ret < 0)
4739			return ret;
4740
4741		mask_index++;
4742
4743		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4744		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4745		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4746	}
4747
4748	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
4749	if (ret < 0)
4750		return ret;
4751
4752	/* when multiple WOL bits are set */
4753	if (hweight_long((unsigned long)wol) > 1) {
4754		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4755		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4756		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4757	}
4758	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
4759	if (ret < 0)
4760		return ret;
4761
4762	/* clear WUPS */
4763	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4764	if (ret < 0)
4765		return ret;
4766
4767	buf |= PMT_CTL_WUPS_MASK_;
4768
4769	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4770	if (ret < 0)
4771		return ret;
4772
4773	ret = lan78xx_start_rx_path(dev);
4774
4775	return ret;
4776}
4777
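/* USB suspend callback (autosuspend and system sleep). Autosuspend is
 * refused with -EBUSY while Tx work is still queued. Otherwise the
 * data paths are stopped, in-flight URBs are terminated, and the
 * device is armed either for selective suspend or for the configured
 * WoL sources (pdata->wol). With the interface down, all wake sources
 * are disabled before entering suspend mode 3.
 */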
4778static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
4779{
4780	struct lan78xx_net *dev = usb_get_intfdata(intf);
4781	bool dev_open;
4782	int ret;
4783
4784	mutex_lock(&dev->dev_mutex);
4785
4786	netif_dbg(dev, ifdown, dev->net,
4787		  "suspending: pm event %#x\n", message.event);
4788
4789	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4790
4791	if (dev_open) {
4792		spin_lock_irq(&dev->txq.lock);
4793		/* don't autosuspend while transmitting */
4794		if ((skb_queue_len(&dev->txq) ||
4795		     skb_queue_len(&dev->txq_pend)) &&
4796		    PMSG_IS_AUTO(message)) {
4797			spin_unlock_irq(&dev->txq.lock);
4798			ret = -EBUSY;
4799			goto out;
4800		} else {
4801			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4802			spin_unlock_irq(&dev->txq.lock);
4803		}
4804
4805		/* stop RX */
4806		ret = lan78xx_stop_rx_path(dev);
4807		if (ret < 0)
4808			goto out;
4809
4810		ret = lan78xx_flush_rx_fifo(dev);
4811		if (ret < 0)
4812			goto out;
4813
4814		/* stop Tx */
4815		ret = lan78xx_stop_tx_path(dev);
4816		if (ret < 0)
4817			goto out;
4818
4819		/* empty out the Rx and Tx queues */
4820		netif_device_detach(dev->net);
4821		lan78xx_terminate_urbs(dev);
4822		usb_kill_urb(dev->urb_intr);
4823
4824		/* reattach */
4825		netif_device_attach(dev->net);
4826
4827		del_timer(&dev->stat_monitor);
4828
4829		if (PMSG_IS_AUTO(message)) {
4830			ret = lan78xx_set_auto_suspend(dev);
4831			if (ret < 0)
4832				goto out;
4833		} else {
4834			struct lan78xx_priv *pdata;
4835
4836			pdata = (struct lan78xx_priv *)(dev->data[0]);
4837			netif_carrier_off(dev->net);
4838			ret = lan78xx_set_suspend(dev, pdata->wol);
4839			if (ret < 0)
4840				goto out;
4841		}
4842	} else {
4843		/* Interface is down; don't allow WOL and PHY
4844		 * events to wake up the host
4845		 */
4846		u32 buf;
4847
4848		set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4849
4850		ret = lan78xx_write_reg(dev, WUCSR, 0);
4851		if (ret < 0)
4852			goto out;
4853		ret = lan78xx_write_reg(dev, WUCSR2, 0);
4854		if (ret < 0)
4855			goto out;
4856
4857		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4858		if (ret < 0)
4859			goto out;
4860
4861		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4862		buf |= PMT_CTL_RES_CLR_WKP_STS_;
4863		buf &= ~PMT_CTL_SUS_MODE_MASK_;
4864		buf |= PMT_CTL_SUS_MODE_3_;
4865
4866		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4867		if (ret < 0)
4868			goto out;
4869
4870		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4871		if (ret < 0)
4872			goto out;
4873
4874		buf |= PMT_CTL_WUPS_MASK_;
4875
4876		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4877		if (ret < 0)
4878			goto out;
4879	}
4880
4881	ret = 0;
4882out:
4883	mutex_unlock(&dev->dev_mutex);
4884
4885	return ret;
4886}
4887
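/* Resubmit Tx URBs that were anchored to dev->deferred while the
 * device was asleep. Called with txq.lock held. URBs that can no
 * longer be submitted are released back to the free buffer pool.
 * Returns true if the bulk-out pipe reported a stall (-EPIPE), so the
 * caller can defer EVENT_TX_HALT handling.
 */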
4888static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4889{
4890	bool pipe_halted = false;
4891	struct urb *urb;
4892
4893	while ((urb = usb_get_from_anchor(&dev->deferred))) {
4894		struct sk_buff *skb = urb->context;
4895		int ret;
4896
4897		if (!netif_device_present(dev->net) ||
4898		    !netif_carrier_ok(dev->net) ||
4899		    pipe_halted) {
4900			lan78xx_release_tx_buf(dev, skb);
4901			continue;
4902		}
4903
4904		ret = usb_submit_urb(urb, GFP_ATOMIC);
4905
4906		if (ret == 0) {
4907			netif_trans_update(dev->net);
4908			lan78xx_queue_skb(&dev->txq, skb, tx_start);
4909		} else {
4910			if (ret == -EPIPE) {
4911				netif_stop_queue(dev->net);
4912				pipe_halted = true;
4913			} else if (ret == -ENODEV) {
4914				netif_device_detach(dev->net);
4915			}
4916
4917			lan78xx_release_tx_buf(dev, skb);
4918		}
4919	}
4920
4921	return pipe_halted;
4922}
4923
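/* USB resume callback: flush the Tx FIFO, restart the interrupt URB,
 * resubmit any Tx URBs deferred during suspend, restart the Tx path
 * and NAPI, then clear the latched wake-source status bits in WK_SRC,
 * WUCSR and WUCSR2.
 */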
4924static int lan78xx_resume(struct usb_interface *intf)
4925{
4926	struct lan78xx_net *dev = usb_get_intfdata(intf);
4927	bool dev_open;
4928	int ret;
4929
4930	mutex_lock(&dev->dev_mutex);
4931
4932	netif_dbg(dev, ifup, dev->net, "resuming device\n");
4933
4934	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4935
4936	if (dev_open) {
4937		bool pipe_halted = false;
4938
4939		ret = lan78xx_flush_tx_fifo(dev);
4940		if (ret < 0)
4941			goto out;
4942
4943		if (dev->urb_intr) {
4944			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
4945
4946			if (ret < 0) {
4947				if (ret == -ENODEV)
4948					netif_device_detach(dev->net);
4949				netdev_warn(dev->net, "Failed to submit intr URB\n");
4950			}
4951		}
4952
4953		spin_lock_irq(&dev->txq.lock);
4954
4955		if (netif_device_present(dev->net)) {
4956			pipe_halted = lan78xx_submit_deferred_urbs(dev);
4957
4958			if (pipe_halted)
4959				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
4960		}
4961
4962		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4963
4964		spin_unlock_irq(&dev->txq.lock);
4965
4966		if (!pipe_halted &&
4967		    netif_device_present(dev->net) &&
4968		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
4969			netif_start_queue(dev->net);
4970
4971		ret = lan78xx_start_tx_path(dev);
4972		if (ret < 0)
4973			goto out;
4974
4975		napi_schedule(&dev->napi);
4976
4977		if (!timer_pending(&dev->stat_monitor)) {
4978			dev->delta = 1;
4979			mod_timer(&dev->stat_monitor,
4980				  jiffies + STAT_UPDATE_TIMER);
4981		}
4982
4983	} else {
4984		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4985	}
4986
4987	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4988	if (ret < 0)
4989		goto out;
4990	ret = lan78xx_write_reg(dev, WUCSR, 0);
4991	if (ret < 0)
4992		goto out;
4993	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4994	if (ret < 0)
4995		goto out;
4996
4997	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4998					     WUCSR2_ARP_RCD_ |
4999					     WUCSR2_IPV6_TCPSYN_RCD_ |
5000					     WUCSR2_IPV4_TCPSYN_RCD_);
5001	if (ret < 0)
5002		goto out;
5003
5004	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
5005					    WUCSR_EEE_RX_WAKE_ |
5006					    WUCSR_PFDA_FR_ |
5007					    WUCSR_RFE_WAKE_FR_ |
5008					    WUCSR_WUFR_ |
5009					    WUCSR_MPR_ |
5010					    WUCSR_BCST_FR_);
5011	if (ret < 0)
5012		goto out;
5013
5014	ret = 0;
5015out:
5016	mutex_unlock(&dev->dev_mutex);
5017
5018	return ret;
5019}
5020
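/* Resume after a bus reset: hardware state was lost, so run a full
 * chip reset and restart the PHY before the regular resume path.
 */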
5021static int lan78xx_reset_resume(struct usb_interface *intf)
5022{
5023	struct lan78xx_net *dev = usb_get_intfdata(intf);
5024	int ret;
5025
5026	netif_dbg(dev, ifup, dev->net, "(reset) resuming device\n");
5027
5028	ret = lan78xx_reset(dev);
5029	if (ret < 0)
5030		return ret;
5031
5032	phy_start(dev->net->phydev);
5033
5034	ret = lan78xx_resume(intf);
5035
5036	return ret;
5037}
5038
5039static const struct usb_device_id products[] = {
5040	{
5041	/* LAN7800 USB Gigabit Ethernet Device */
5042	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
5043	},
5044	{
5045	/* LAN7850 USB Gigabit Ethernet Device */
5046	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
5047	},
5048	{
5049	/* LAN7801 USB Gigabit Ethernet Device */
5050	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
5051	},
5052	{
5053	/* ATM2-AF USB Gigabit Ethernet Device */
5054	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
5055	},
5056	{},
5057};
5058MODULE_DEVICE_TABLE(usb, products);
5059
5060static struct usb_driver lan78xx_driver = {
5061	.name			= DRIVER_NAME,
5062	.id_table		= products,
5063	.probe			= lan78xx_probe,
5064	.disconnect		= lan78xx_disconnect,
5065	.suspend		= lan78xx_suspend,
5066	.resume			= lan78xx_resume,
5067	.reset_resume		= lan78xx_reset_resume,
5068	.supports_autosuspend	= 1,
5069	.disable_hub_initiated_lpm = 1,
5070};
5071
5072module_usb_driver(lan78xx_driver);
5073
5074MODULE_AUTHOR(DRIVER_AUTHOR);
5075MODULE_DESCRIPTION(DRIVER_DESC);
5076MODULE_LICENSE("GPL");
5077