1/* SPDX-License-Identifier: GPL-2.0+ */
2/* Copyright (C) 2018 Microchip Technology Inc. */
3
4#include <linux/module.h>
5#include <linux/pci.h>
6#include <linux/netdevice.h>
7#include <linux/etherdevice.h>
8#include <linux/crc32.h>
9#include <linux/microchipphy.h>
10#include <linux/net_tstamp.h>
11#include <linux/of_mdio.h>
12#include <linux/of_net.h>
13#include <linux/phy.h>
14#include <linux/phy_fixed.h>
15#include <linux/rtnetlink.h>
16#include <linux/iopoll.h>
17#include <linux/crc16.h>
18#include "lan743x_main.h"
19#include "lan743x_ethtool.h"
20
21static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
22{
23	pci_release_selected_regions(adapter->pdev,
24				     pci_select_bars(adapter->pdev,
25						     IORESOURCE_MEM));
26	pci_disable_device(adapter->pdev);
27}
28
29static int lan743x_pci_init(struct lan743x_adapter *adapter,
30			    struct pci_dev *pdev)
31{
32	unsigned long bars = 0;
33	int ret;
34
35	adapter->pdev = pdev;
36	ret = pci_enable_device_mem(pdev);
37	if (ret)
38		goto return_error;
39
40	netif_info(adapter, probe, adapter->netdev,
41		   "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n",
42		   pdev->vendor, pdev->device);
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (!test_bit(0, &bars)) {
		/* BAR 0 holds the CSR space and is required */
		ret = -ENODEV;
		goto disable_device;
	}
46
47	ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME);
48	if (ret)
49		goto disable_device;
50
51	pci_set_master(pdev);
52	return 0;
53
54disable_device:
55	pci_disable_device(adapter->pdev);
56
57return_error:
58	return ret;
59}
60
61u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
62{
63	return ioread32(&adapter->csr.csr_address[offset]);
64}
65
66void lan743x_csr_write(struct lan743x_adapter *adapter, int offset,
67		       u32 data)
68{
69	iowrite32(data, &adapter->csr.csr_address[offset]);
70}
71
72#define LAN743X_CSR_READ_OP(offset)	lan743x_csr_read(adapter, offset)
73
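/* Issue a "light reset" by setting HW_CFG_LRST_ and polling until the
 * hardware clears the bit again (or the poll times out).
 */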
74static int lan743x_csr_light_reset(struct lan743x_adapter *adapter)
75{
76	u32 data;
77
78	data = lan743x_csr_read(adapter, HW_CFG);
79	data |= HW_CFG_LRST_;
80	lan743x_csr_write(adapter, HW_CFG, data);
81
82	return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data,
83				  !(data & HW_CFG_LRST_), 100000, 10000000);
84}
85
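/* Poll a CSR until the masked bit matches target_value; the atomic
 * variant busy-waits and is safe to call while holding a spinlock.
 */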
86static int lan743x_csr_wait_for_bit_atomic(struct lan743x_adapter *adapter,
87					   int offset, u32 bit_mask,
88					   int target_value, int udelay_min,
89					   int udelay_max, int count)
90{
91	u32 data;
92
93	return readx_poll_timeout_atomic(LAN743X_CSR_READ_OP, offset, data,
94					 target_value == !!(data & bit_mask),
95					 udelay_max, udelay_min * count);
96}
97
98static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
99				    int offset, u32 bit_mask,
100				    int target_value, int usleep_min,
101				    int usleep_max, int count)
102{
103	u32 data;
104
105	return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data,
106				  target_value == ((data & bit_mask) ? 1 : 0),
107				  usleep_max, usleep_min * count);
108}
109
110static int lan743x_csr_init(struct lan743x_adapter *adapter)
111{
112	struct lan743x_csr *csr = &adapter->csr;
113	resource_size_t bar_start, bar_length;
114	int result;
115
116	bar_start = pci_resource_start(adapter->pdev, 0);
117	bar_length = pci_resource_len(adapter->pdev, 0);
118	csr->csr_address = devm_ioremap(&adapter->pdev->dev,
119					bar_start, bar_length);
120	if (!csr->csr_address) {
121		result = -ENOMEM;
122		goto clean_up;
123	}
124
125	csr->id_rev = lan743x_csr_read(adapter, ID_REV);
126	csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV);
127	netif_info(adapter, probe, adapter->netdev,
128		   "ID_REV = 0x%08X, FPGA_REV = %d.%d\n",
129		   csr->id_rev,	FPGA_REV_GET_MAJOR_(csr->fpga_rev),
130		   FPGA_REV_GET_MINOR_(csr->fpga_rev));
131	if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev)) {
132		result = -ENODEV;
133		goto clean_up;
134	}
135
136	csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
137	switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) {
138	case ID_REV_CHIP_REV_A0_:
139		csr->flags |= LAN743X_CSR_FLAG_IS_A0;
140		csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
141		break;
142	case ID_REV_CHIP_REV_B0_:
143		csr->flags |= LAN743X_CSR_FLAG_IS_B0;
144		break;
145	}
146
147	result = lan743x_csr_light_reset(adapter);
148	if (result)
149		goto clean_up;
150	return 0;
151clean_up:
152	return result;
153}
154
155static void lan743x_intr_software_isr(void *context)
156{
157	struct lan743x_adapter *adapter = context;
158	struct lan743x_intr *intr = &adapter->intr;
159	u32 int_sts;
160
161	int_sts = lan743x_csr_read(adapter, INT_STS);
162	if (int_sts & INT_BIT_SW_GP_) {
163		/* disable the interrupt to prevent repeated re-triggering */
164		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
165		intr->software_isr_flag = 1;
166	}
167}
168
169static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
170{
171	struct lan743x_tx *tx = context;
172	struct lan743x_adapter *adapter = tx->adapter;
173	bool enable_flag = true;
174
175	lan743x_csr_read(adapter, INT_EN_SET);
176	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
177		lan743x_csr_write(adapter, INT_EN_CLR,
178				  INT_BIT_DMA_TX_(tx->channel_number));
179	}
180
181	if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) {
182		u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
183		u32 dmac_int_sts;
184		u32 dmac_int_en;
185
186		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
187			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
188		else
189			dmac_int_sts = ioc_bit;
190		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
191			dmac_int_en = lan743x_csr_read(adapter,
192						       DMAC_INT_EN_SET);
193		else
194			dmac_int_en = ioc_bit;
195
196		dmac_int_en &= ioc_bit;
197		dmac_int_sts &= dmac_int_en;
198		if (dmac_int_sts & ioc_bit) {
199			napi_schedule(&tx->napi);
200			enable_flag = false;/* poll func will enable later */
201		}
202	}
203
204	if (enable_flag)
205		/* enable isr */
206		lan743x_csr_write(adapter, INT_EN_SET,
207				  INT_BIT_DMA_TX_(tx->channel_number));
208}
209
210static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags)
211{
212	struct lan743x_rx *rx = context;
213	struct lan743x_adapter *adapter = rx->adapter;
214	bool enable_flag = true;
215
216	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
217		lan743x_csr_write(adapter, INT_EN_CLR,
218				  INT_BIT_DMA_RX_(rx->channel_number));
219	}
220
221	if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) {
222		u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number);
223		u32 dmac_int_sts;
224		u32 dmac_int_en;
225
226		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
227			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
228		else
229			dmac_int_sts = rx_frame_bit;
230		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
231			dmac_int_en = lan743x_csr_read(adapter,
232						       DMAC_INT_EN_SET);
233		else
234			dmac_int_en = rx_frame_bit;
235
236		dmac_int_en &= rx_frame_bit;
237		dmac_int_sts &= dmac_int_en;
238		if (dmac_int_sts & rx_frame_bit) {
239			napi_schedule(&rx->napi);
			enable_flag = false;/* poll func will enable later */
241		}
242	}
243
244	if (enable_flag) {
245		/* enable isr */
246		lan743x_csr_write(adapter, INT_EN_SET,
247				  INT_BIT_DMA_RX_(rx->channel_number));
248	}
249}
250
251static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
252{
253	struct lan743x_adapter *adapter = context;
254	unsigned int channel;
255
256	if (int_sts & INT_BIT_ALL_RX_) {
257		for (channel = 0; channel < LAN743X_USED_RX_CHANNELS;
258			channel++) {
259			u32 int_bit = INT_BIT_DMA_RX_(channel);
260
261			if (int_sts & int_bit) {
262				lan743x_rx_isr(&adapter->rx[channel],
263					       int_bit, flags);
264				int_sts &= ~int_bit;
265			}
266		}
267	}
268	if (int_sts & INT_BIT_ALL_TX_) {
269		for (channel = 0; channel < LAN743X_USED_TX_CHANNELS;
270			channel++) {
271			u32 int_bit = INT_BIT_DMA_TX_(channel);
272
273			if (int_sts & int_bit) {
274				lan743x_tx_isr(&adapter->tx[channel],
275					       int_bit, flags);
276				int_sts &= ~int_bit;
277			}
278		}
279	}
280	if (int_sts & INT_BIT_ALL_OTHER_) {
281		if (int_sts & INT_BIT_SW_GP_) {
282			lan743x_intr_software_isr(adapter);
283			int_sts &= ~INT_BIT_SW_GP_;
284		}
285		if (int_sts & INT_BIT_1588_) {
286			lan743x_ptp_isr(adapter);
287			int_sts &= ~INT_BIT_1588_;
288		}
289	}
290	if (int_sts)
291		lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
292}
293
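/* Top-level interrupt handler: read or infer the interrupt status,
 * optionally mask the vector/master enables, and dispatch the enabled
 * sources to the handler registered for this vector.
 */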
294static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr)
295{
296	struct lan743x_vector *vector = ptr;
297	struct lan743x_adapter *adapter = vector->adapter;
298	irqreturn_t result = IRQ_NONE;
299	u32 int_enables;
300	u32 int_sts;
301
302	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) {
303		int_sts = lan743x_csr_read(adapter, INT_STS);
304	} else if (vector->flags &
305		   (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C |
306		   LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) {
307		int_sts = lan743x_csr_read(adapter, INT_STS_R2C);
308	} else {
309		/* use mask as implied status */
310		int_sts = vector->int_mask | INT_BIT_MAS_;
311	}
312
313	if (!(int_sts & INT_BIT_MAS_))
314		goto irq_done;
315
316	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR)
317		/* disable vector interrupt */
318		lan743x_csr_write(adapter,
319				  INT_VEC_EN_CLR,
320				  INT_VEC_EN_(vector->vector_index));
321
322	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR)
323		/* disable master interrupt */
324		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
325
326	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) {
327		int_enables = lan743x_csr_read(adapter, INT_EN_SET);
328	} else {
		/* use vector mask as implied enable mask */
330		int_enables = vector->int_mask;
331	}
332
333	int_sts &= int_enables;
334	int_sts &= vector->int_mask;
335	if (int_sts) {
336		if (vector->handler) {
337			vector->handler(vector->context,
338					int_sts, vector->flags);
339		} else {
340			/* disable interrupts on this vector */
341			lan743x_csr_write(adapter, INT_EN_CLR,
342					  vector->int_mask);
343		}
344		result = IRQ_HANDLED;
345	}
346
347	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET)
348		/* enable master interrupt */
349		lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
350
351	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET)
352		/* enable vector interrupt */
353		lan743x_csr_write(adapter,
354				  INT_VEC_EN_SET,
355				  INT_VEC_EN_(vector->vector_index));
356irq_done:
357	return result;
358}
359
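/* Verify interrupt delivery by firing the software general-purpose
 * interrupt and waiting for the ISR to observe it.
 */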
360static int lan743x_intr_test_isr(struct lan743x_adapter *adapter)
361{
362	struct lan743x_intr *intr = &adapter->intr;
363	int result = -ENODEV;
364	int timeout = 10;
365
366	intr->software_isr_flag = 0;
367
368	/* enable interrupt */
369	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_);
370
371	/* activate interrupt here */
372	lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_);
373	while ((timeout > 0) && (!(intr->software_isr_flag))) {
374		usleep_range(1000, 20000);
375		timeout--;
376	}
377
378	if (intr->software_isr_flag)
379		result = 0;
380
381	/* disable interrupts */
382	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
383	return result;
384}
385
386static int lan743x_intr_register_isr(struct lan743x_adapter *adapter,
387				     int vector_index, u32 flags,
388				     u32 int_mask,
389				     lan743x_vector_handler handler,
390				     void *context)
391{
392	struct lan743x_vector *vector = &adapter->intr.vector_list
393					[vector_index];
394	int ret;
395
396	vector->adapter = adapter;
397	vector->flags = flags;
398	vector->vector_index = vector_index;
399	vector->int_mask = int_mask;
400	vector->handler = handler;
401	vector->context = context;
402
403	ret = request_irq(vector->irq,
404			  lan743x_intr_entry_isr,
405			  (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ?
406			  IRQF_SHARED : 0, DRIVER_NAME, vector);
407	if (ret) {
408		vector->handler = NULL;
409		vector->context = NULL;
410		vector->int_mask = 0;
411		vector->flags = 0;
412	}
413	return ret;
414}
415
416static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter,
417					int vector_index)
418{
419	struct lan743x_vector *vector = &adapter->intr.vector_list
420					[vector_index];
421
422	free_irq(vector->irq, vector);
423	vector->handler = NULL;
424	vector->context = NULL;
425	vector->int_mask = 0;
426	vector->flags = 0;
427}
428
429static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
430					 u32 int_mask)
431{
432	int index;
433
434	for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
435		if (adapter->intr.vector_list[index].int_mask & int_mask)
436			return adapter->intr.vector_list[index].flags;
437	}
438	return 0;
439}
440
441static void lan743x_intr_close(struct lan743x_adapter *adapter)
442{
443	struct lan743x_intr *intr = &adapter->intr;
444	int index = 0;
445
446	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
447	lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
448
449	for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
450		if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
451			lan743x_intr_unregister_isr(adapter, index);
452			intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
453		}
454	}
455
456	if (intr->flags & INTR_FLAG_MSI_ENABLED) {
457		pci_disable_msi(adapter->pdev);
458		intr->flags &= ~INTR_FLAG_MSI_ENABLED;
459	}
460
461	if (intr->flags & INTR_FLAG_MSIX_ENABLED) {
462		pci_disable_msix(adapter->pdev);
463		intr->flags &= ~INTR_FLAG_MSIX_ENABLED;
464	}
465}
466
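/* Set up interrupt vectors, preferring MSI-X, then MSI, then the legacy
 * INTx line; register the shared ISR on vector 0, then optionally spread
 * the TX/RX channels across the remaining vectors.
 */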
467static int lan743x_intr_open(struct lan743x_adapter *adapter)
468{
469	struct msix_entry msix_entries[LAN743X_MAX_VECTOR_COUNT];
470	struct lan743x_intr *intr = &adapter->intr;
471	u32 int_vec_en_auto_clr = 0;
472	u32 int_vec_map0 = 0;
473	u32 int_vec_map1 = 0;
474	int ret = -ENODEV;
475	int index = 0;
476	u32 flags = 0;
477
478	intr->number_of_vectors = 0;
479
	/* Try to set up MSI-X interrupts */
481	memset(&msix_entries[0], 0,
482	       sizeof(struct msix_entry) * LAN743X_MAX_VECTOR_COUNT);
483	for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++)
484		msix_entries[index].entry = index;
485	ret = pci_enable_msix_range(adapter->pdev,
486				    msix_entries, 1,
487				    1 + LAN743X_USED_TX_CHANNELS +
488				    LAN743X_USED_RX_CHANNELS);
489
490	if (ret > 0) {
491		intr->flags |= INTR_FLAG_MSIX_ENABLED;
492		intr->number_of_vectors = ret;
493		intr->using_vectors = true;
494		for (index = 0; index < intr->number_of_vectors; index++)
495			intr->vector_list[index].irq = msix_entries
496						       [index].vector;
497		netif_info(adapter, ifup, adapter->netdev,
498			   "using MSIX interrupts, number of vectors = %d\n",
499			   intr->number_of_vectors);
500	}
501
	/* If MSI-X failed, try to set up MSI interrupts */
503	if (!intr->number_of_vectors) {
504		if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
505			if (!pci_enable_msi(adapter->pdev)) {
506				intr->flags |= INTR_FLAG_MSI_ENABLED;
507				intr->number_of_vectors = 1;
508				intr->using_vectors = true;
509				intr->vector_list[0].irq =
510					adapter->pdev->irq;
511				netif_info(adapter, ifup, adapter->netdev,
512					   "using MSI interrupts, number of vectors = %d\n",
513					   intr->number_of_vectors);
514			}
515		}
516	}
517
	/* If both MSI-X and MSI failed, set up a legacy interrupt */
519	if (!intr->number_of_vectors) {
520		intr->number_of_vectors = 1;
521		intr->using_vectors = false;
522		intr->vector_list[0].irq = intr->irq;
523		netif_info(adapter, ifup, adapter->netdev,
524			   "using legacy interrupts\n");
525	}
526
527	/* At this point we must have at least one irq */
528	lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF);
529
530	/* map all interrupts to vector 0 */
531	lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000);
532	lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000);
533	lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000);
534	flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
535		LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
536		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
537		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
538
539	if (intr->using_vectors) {
540		flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
541			 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
542	} else {
543		flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR |
544			 LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET |
545			 LAN743X_VECTOR_FLAG_IRQ_SHARED;
546	}
547
548	if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
549		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ;
550		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C;
551		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
552		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK;
553		flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C;
554		flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C;
555	}
556
557	ret = lan743x_intr_register_isr(adapter, 0, flags,
558					INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ |
559					INT_BIT_ALL_OTHER_,
560					lan743x_intr_shared_isr, adapter);
561	if (ret)
562		goto clean_up;
563	intr->flags |= INTR_FLAG_IRQ_REQUESTED(0);
564
565	if (intr->using_vectors)
566		lan743x_csr_write(adapter, INT_VEC_EN_SET,
567				  INT_VEC_EN_(0));
568
569	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
570		lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD);
571		lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD);
572		lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD);
573		lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD);
574		lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD);
575		lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
576		lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
577		lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
578		lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
579		lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
580		lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
581	}
582
583	/* enable interrupts */
584	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
585	ret = lan743x_intr_test_isr(adapter);
586	if (ret)
587		goto clean_up;
588
589	if (intr->number_of_vectors > 1) {
590		int number_of_tx_vectors = intr->number_of_vectors - 1;
591
592		if (number_of_tx_vectors > LAN743X_USED_TX_CHANNELS)
593			number_of_tx_vectors = LAN743X_USED_TX_CHANNELS;
594		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
595			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
596			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
597			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
598			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
599			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
600
601		if (adapter->csr.flags &
602		   LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
603			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
604				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
605				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
606				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
607		}
608
609		for (index = 0; index < number_of_tx_vectors; index++) {
610			u32 int_bit = INT_BIT_DMA_TX_(index);
611			int vector = index + 1;
612
613			/* map TX interrupt to vector */
614			int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
615			lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
616
617			/* Remove TX interrupt from shared mask */
618			intr->vector_list[0].int_mask &= ~int_bit;
619			ret = lan743x_intr_register_isr(adapter, vector, flags,
620							int_bit, lan743x_tx_isr,
621							&adapter->tx[index]);
622			if (ret)
623				goto clean_up;
624			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
625			if (!(flags &
626			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET))
627				lan743x_csr_write(adapter, INT_VEC_EN_SET,
628						  INT_VEC_EN_(vector));
629		}
630	}
631	if ((intr->number_of_vectors - LAN743X_USED_TX_CHANNELS) > 1) {
632		int number_of_rx_vectors = intr->number_of_vectors -
633					   LAN743X_USED_TX_CHANNELS - 1;
634
635		if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
636			number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;
637
638		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
639			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
640			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
641			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
642			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
643			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
644
645		if (adapter->csr.flags &
646		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
647			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
648				LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
649				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
650				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
651				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
652		}
653		for (index = 0; index < number_of_rx_vectors; index++) {
654			int vector = index + 1 + LAN743X_USED_TX_CHANNELS;
655			u32 int_bit = INT_BIT_DMA_RX_(index);
656
657			/* map RX interrupt to vector */
658			int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector);
659			lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0);
660			if (flags &
661			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
662				int_vec_en_auto_clr |= INT_VEC_EN_(vector);
663				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
664						  int_vec_en_auto_clr);
665			}
666
667			/* Remove RX interrupt from shared mask */
668			intr->vector_list[0].int_mask &= ~int_bit;
669			ret = lan743x_intr_register_isr(adapter, vector, flags,
670							int_bit, lan743x_rx_isr,
671							&adapter->rx[index]);
672			if (ret)
673				goto clean_up;
674			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
675
676			lan743x_csr_write(adapter, INT_VEC_EN_SET,
677					  INT_VEC_EN_(vector));
678		}
679	}
680	return 0;
681
682clean_up:
683	lan743x_intr_close(adapter);
684	return ret;
685}
686
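/* Write a buffer into the selected internal data-port RAM (e.g. the RFE
 * hash table), one 32-bit word at a time, waiting for DP_SEL_DPRDY_
 * between accesses.
 */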
687static int lan743x_dp_write(struct lan743x_adapter *adapter,
688			    u32 select, u32 addr, u32 length, u32 *buf)
689{
690	u32 dp_sel;
691	int i;
692
693	if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL, DP_SEL_DPRDY_,
694					    1, 40, 100, 100))
695		return -EIO;
696	dp_sel = lan743x_csr_read(adapter, DP_SEL);
697	dp_sel &= ~DP_SEL_MASK_;
698	dp_sel |= select;
699	lan743x_csr_write(adapter, DP_SEL, dp_sel);
700
701	for (i = 0; i < length; i++) {
702		lan743x_csr_write(adapter, DP_ADDR, addr + i);
703		lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
704		lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
705		if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL,
706						    DP_SEL_DPRDY_,
707						    1, 40, 100, 100))
708			return -EIO;
709	}
710
711	return 0;
712}
713
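/* Build the MAC_MII_ACC command word for an MII read or write to the
 * given PHY address and register index.
 */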
714static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
715{
716	u32 ret;
717
718	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
719		MAC_MII_ACC_PHY_ADDR_MASK_;
720	ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) &
721		MAC_MII_ACC_MIIRINDA_MASK_;
722
723	if (read)
724		ret |= MAC_MII_ACC_MII_READ_;
725	else
726		ret |= MAC_MII_ACC_MII_WRITE_;
727	ret |= MAC_MII_ACC_MII_BUSY_;
728
729	return ret;
730}
731
732static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
733{
734	u32 data;
735
736	return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data,
737				  !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
738}
739
740static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index)
741{
742	struct lan743x_adapter *adapter = bus->priv;
743	u32 val, mii_access;
744	int ret;
745
	/* confirm MII not busy */
747	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
748	if (ret < 0)
749		return ret;
750
751	/* set the address, index & direction (read from PHY) */
752	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ);
753	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
754	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
755	if (ret < 0)
756		return ret;
757
758	val = lan743x_csr_read(adapter, MAC_MII_DATA);
759	return (int)(val & 0xFFFF);
760}
761
762static int lan743x_mdiobus_write(struct mii_bus *bus,
763				 int phy_id, int index, u16 regval)
764{
765	struct lan743x_adapter *adapter = bus->priv;
766	u32 val, mii_access;
767	int ret;
768
769	/* confirm MII not busy */
770	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
771	if (ret < 0)
772		return ret;
773	val = (u32)regval;
774	lan743x_csr_write(adapter, MAC_MII_DATA, val);
775
776	/* set the address, index & direction (write to PHY) */
777	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE);
778	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
779	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
780	return ret;
781}
782
783static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
784				    u8 *addr)
785{
786	u32 addr_lo, addr_hi;
787
788	addr_lo = addr[0] |
789		addr[1] << 8 |
790		addr[2] << 16 |
791		addr[3] << 24;
792	addr_hi = addr[4] |
793		addr[5] << 8;
794	lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo);
795	lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi);
796
797	ether_addr_copy(adapter->mac_address, addr);
798	netif_info(adapter, drv, adapter->netdev,
799		   "MAC address set to %pM\n", addr);
800}
801
802static int lan743x_mac_init(struct lan743x_adapter *adapter)
803{
804	bool mac_address_valid = true;
805	struct net_device *netdev;
806	u32 mac_addr_hi = 0;
807	u32 mac_addr_lo = 0;
808	u32 data;
809
810	netdev = adapter->netdev;
811
	/* disable automatic duplex and speed detection; phylib handles that */
813	data = lan743x_csr_read(adapter, MAC_CR);
814	data &= ~(MAC_CR_ADD_ | MAC_CR_ASD_);
815	data |= MAC_CR_CNTR_RST_;
816	lan743x_csr_write(adapter, MAC_CR, data);
817
818	if (!is_valid_ether_addr(adapter->mac_address)) {
819		mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH);
820		mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL);
821		adapter->mac_address[0] = mac_addr_lo & 0xFF;
822		adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF;
823		adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF;
824		adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF;
825		adapter->mac_address[4] = mac_addr_hi & 0xFF;
826		adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF;
827
828		if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) &&
829		    mac_addr_lo == 0xFFFFFFFF) {
830			mac_address_valid = false;
831		} else if (!is_valid_ether_addr(adapter->mac_address)) {
832			mac_address_valid = false;
833		}
834
835		if (!mac_address_valid)
836			eth_random_addr(adapter->mac_address);
837	}
838	lan743x_mac_set_address(adapter, adapter->mac_address);
839	ether_addr_copy(netdev->dev_addr, adapter->mac_address);
840
841	return 0;
842}
843
844static int lan743x_mac_open(struct lan743x_adapter *adapter)
845{
846	int ret = 0;
847	u32 temp;
848
849	temp = lan743x_csr_read(adapter, MAC_RX);
850	lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_);
851	temp = lan743x_csr_read(adapter, MAC_TX);
852	lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_);
853	return ret;
854}
855
856static void lan743x_mac_close(struct lan743x_adapter *adapter)
857{
858	u32 temp;
859
860	temp = lan743x_csr_read(adapter, MAC_TX);
861	temp &= ~MAC_TX_TXEN_;
862	lan743x_csr_write(adapter, MAC_TX, temp);
863	lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_,
864				 1, 1000, 20000, 100);
865
866	temp = lan743x_csr_read(adapter, MAC_RX);
867	temp &= ~MAC_RX_RXEN_;
868	lan743x_csr_write(adapter, MAC_RX, temp);
869	lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
870				 1, 1000, 20000, 100);
871}
872
873static void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
874					      bool tx_enable, bool rx_enable)
875{
876	u32 flow_setting = 0;
877
	/* Set the maximum pause time; when FIFO space frees up,
	 * a zero-value pause frame is sent to release the pause.
	 */
881	flow_setting = MAC_FLOW_CR_FCPT_MASK_;
882	if (tx_enable)
883		flow_setting |= MAC_FLOW_CR_TX_FCEN_;
884	if (rx_enable)
885		flow_setting |= MAC_FLOW_CR_RX_FCEN_;
886	lan743x_csr_write(adapter, MAC_FLOW, flow_setting);
887}
888
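/* Update the MAC's maximum RX frame size for the new MTU, temporarily
 * disabling the receiver if it is currently enabled.
 */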
889static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
890{
891	int enabled = 0;
892	u32 mac_rx = 0;
893
894	mac_rx = lan743x_csr_read(adapter, MAC_RX);
895	if (mac_rx & MAC_RX_RXEN_) {
896		enabled = 1;
897		if (mac_rx & MAC_RX_RXD_) {
898			lan743x_csr_write(adapter, MAC_RX, mac_rx);
899			mac_rx &= ~MAC_RX_RXD_;
900		}
901		mac_rx &= ~MAC_RX_RXEN_;
902		lan743x_csr_write(adapter, MAC_RX, mac_rx);
903		lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
904					 1, 1000, 20000, 100);
905		lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_);
906	}
907
908	mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
909	mac_rx |= (((new_mtu + ETH_HLEN + 4) << MAC_RX_MAX_SIZE_SHIFT_) &
910		  MAC_RX_MAX_SIZE_MASK_);
911	lan743x_csr_write(adapter, MAC_RX, mac_rx);
912
913	if (enabled) {
914		mac_rx |= MAC_RX_RXEN_;
915		lan743x_csr_write(adapter, MAC_RX, mac_rx);
916	}
917	return 0;
918}
919
920/* PHY */
921static int lan743x_phy_reset(struct lan743x_adapter *adapter)
922{
923	u32 data;
924
	/* Only called within probe, before mdiobus_register */
926
927	data = lan743x_csr_read(adapter, PMT_CTL);
928	data |= PMT_CTL_ETH_PHY_RST_;
929	lan743x_csr_write(adapter, PMT_CTL, data);
930
931	return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data,
932				  (!(data & PMT_CTL_ETH_PHY_RST_) &&
933				  (data & PMT_CTL_READY_)),
934				  50000, 1000000);
935}
936
937static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
938					   u16 local_adv, u16 remote_adv)
939{
940	struct lan743x_phy *phy = &adapter->phy;
941	u8 cap;
942
943	if (phy->fc_autoneg)
944		cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
945	else
946		cap = phy->fc_request_control;
947
948	lan743x_mac_flow_ctrl_set_enables(adapter,
949					  cap & FLOW_CTRL_TX,
950					  cap & FLOW_CTRL_RX);
951}
952
953static int lan743x_phy_init(struct lan743x_adapter *adapter)
954{
955	return lan743x_phy_reset(adapter);
956}
957
958static void lan743x_phy_link_status_change(struct net_device *netdev)
959{
960	struct lan743x_adapter *adapter = netdev_priv(netdev);
961	struct phy_device *phydev = netdev->phydev;
962	u32 data;
963
964	phy_print_status(phydev);
965	if (phydev->state == PHY_RUNNING) {
966		int remote_advertisement = 0;
967		int local_advertisement = 0;
968
969		data = lan743x_csr_read(adapter, MAC_CR);
970
971		/* set interface mode */
972		if (phy_interface_mode_is_rgmii(adapter->phy_mode))
973			/* RGMII */
974			data &= ~MAC_CR_MII_EN_;
975		else
976			/* GMII */
977			data |= MAC_CR_MII_EN_;
978
979		/* set duplex mode */
980		if (phydev->duplex)
981			data |= MAC_CR_DPX_;
982		else
983			data &= ~MAC_CR_DPX_;
984
985		/* set bus speed */
		switch (phydev->speed) {
		case SPEED_10:
			data &= ~MAC_CR_CFG_H_;
			data &= ~MAC_CR_CFG_L_;
			break;
		case SPEED_100:
			data &= ~MAC_CR_CFG_H_;
			data |= MAC_CR_CFG_L_;
			break;
		case SPEED_1000:
			data |= MAC_CR_CFG_H_;
			data &= ~MAC_CR_CFG_L_;
			break;
		}
1000		lan743x_csr_write(adapter, MAC_CR, data);
1001
1002		local_advertisement =
1003			linkmode_adv_to_mii_adv_t(phydev->advertising);
1004		remote_advertisement =
1005			linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
1006
1007		lan743x_phy_update_flowcontrol(adapter, local_advertisement,
1008					       remote_advertisement);
1009		lan743x_ptp_update_latency(adapter, phydev->speed);
1010	}
1011}
1012
1013static void lan743x_phy_close(struct lan743x_adapter *adapter)
1014{
1015	struct net_device *netdev = adapter->netdev;
1016
1017	phy_stop(netdev->phydev);
1018	phy_disconnect(netdev->phydev);
1019	netdev->phydev = NULL;
1020}
1021
1022static int lan743x_phy_open(struct lan743x_adapter *adapter)
1023{
1024	struct lan743x_phy *phy = &adapter->phy;
1025	struct phy_device *phydev = NULL;
1026	struct device_node *phynode;
1027	struct net_device *netdev;
1028	int ret = -EIO;
1029
1030	netdev = adapter->netdev;
1031	phynode = of_node_get(adapter->pdev->dev.of_node);
1032
1033	if (phynode) {
1034		/* try devicetree phy, or fixed link */
1035		of_get_phy_mode(phynode, &adapter->phy_mode);
1036
1037		if (of_phy_is_fixed_link(phynode)) {
1038			ret = of_phy_register_fixed_link(phynode);
1039			if (ret) {
1040				netdev_err(netdev,
1041					   "cannot register fixed PHY\n");
1042				of_node_put(phynode);
1043				goto return_error;
1044			}
1045		}
1046		phydev = of_phy_connect(netdev, phynode,
1047					lan743x_phy_link_status_change, 0,
1048					adapter->phy_mode);
1049		of_node_put(phynode);
1050	}
1051
1052	if (!phydev) {
1053		/* try internal phy */
1054		phydev = phy_find_first(adapter->mdiobus);
1055		if (!phydev)
1056			goto return_error;
1057
1058		adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
1059		ret = phy_connect_direct(netdev, phydev,
1060					 lan743x_phy_link_status_change,
1061					 adapter->phy_mode);
1062		if (ret)
1063			goto return_error;
1064	}
1065
1066	/* MAC doesn't support 1000T Half */
1067	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1068
1069	/* support both flow controls */
1070	phy_support_asym_pause(phydev);
1071	phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
1072	phy->fc_autoneg = phydev->autoneg;
1073
1074	phy_start(phydev);
1075	phy_start_aneg(phydev);
1076	return 0;
1077
1078return_error:
1079	return ret;
1080}
1081
1082static void lan743x_rfe_open(struct lan743x_adapter *adapter)
1083{
1084	lan743x_csr_write(adapter, RFE_RSS_CFG,
1085		RFE_RSS_CFG_UDP_IPV6_EX_ |
1086		RFE_RSS_CFG_TCP_IPV6_EX_ |
1087		RFE_RSS_CFG_IPV6_EX_ |
1088		RFE_RSS_CFG_UDP_IPV6_ |
1089		RFE_RSS_CFG_TCP_IPV6_ |
1090		RFE_RSS_CFG_IPV6_ |
1091		RFE_RSS_CFG_UDP_IPV4_ |
1092		RFE_RSS_CFG_TCP_IPV4_ |
1093		RFE_RSS_CFG_IPV4_ |
1094		RFE_RSS_CFG_VALID_HASH_BITS_ |
1095		RFE_RSS_CFG_RSS_QUEUE_ENABLE_ |
1096		RFE_RSS_CFG_RSS_HASH_STORE_ |
1097		RFE_RSS_CFG_RSS_ENABLE_);
1098}
1099
1100static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter)
1101{
1102	u8 *mac_addr;
1103	u32 mac_addr_hi = 0;
1104	u32 mac_addr_lo = 0;
1105
	/* Add MAC address to the perfect filter */
1107	mac_addr = adapter->mac_address;
1108	mac_addr_lo = ((((u32)(mac_addr[0])) << 0) |
1109		      (((u32)(mac_addr[1])) << 8) |
1110		      (((u32)(mac_addr[2])) << 16) |
1111		      (((u32)(mac_addr[3])) << 24));
1112	mac_addr_hi = ((((u32)(mac_addr[4])) << 0) |
1113		      (((u32)(mac_addr[5])) << 8));
1114
1115	lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo);
1116	lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0),
1117			  mac_addr_hi | RFE_ADDR_FILT_HI_VALID_);
1118}
1119
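/* Program receive filtering: promiscuous/all-multicast flags, the first
 * 32 multicast addresses as perfect filters, and the remainder via the
 * multicast hash table written through the data port.
 */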
1120static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter)
1121{
1122	struct net_device *netdev = adapter->netdev;
1123	u32 hash_table[DP_SEL_VHF_HASH_LEN];
1124	u32 rfctl;
1125	u32 data;
1126
1127	rfctl = lan743x_csr_read(adapter, RFE_CTL);
1128	rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ |
1129		 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1130	rfctl |= RFE_CTL_AB_;
1131	if (netdev->flags & IFF_PROMISC) {
1132		rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_;
1133	} else {
1134		if (netdev->flags & IFF_ALLMULTI)
1135			rfctl |= RFE_CTL_AM_;
1136	}
1137
1138	memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32));
1139	if (netdev_mc_count(netdev)) {
1140		struct netdev_hw_addr *ha;
1141		int i;
1142
1143		rfctl |= RFE_CTL_DA_PERFECT_;
1144		i = 1;
1145		netdev_for_each_mc_addr(ha, netdev) {
			/* put the first 32 addresses into the perfect filter */
1147			if (i < 33) {
1148				lan743x_csr_write(adapter,
1149						  RFE_ADDR_FILT_HI(i), 0);
1150				data = ha->addr[3];
1151				data = ha->addr[2] | (data << 8);
1152				data = ha->addr[1] | (data << 8);
1153				data = ha->addr[0] | (data << 8);
1154				lan743x_csr_write(adapter,
1155						  RFE_ADDR_FILT_LO(i), data);
1156				data = ha->addr[5];
1157				data = ha->addr[4] | (data << 8);
1158				data |= RFE_ADDR_FILT_HI_VALID_;
1159				lan743x_csr_write(adapter,
1160						  RFE_ADDR_FILT_HI(i), data);
1161			} else {
1162				u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >>
1163					     23) & 0x1FF;
1164				hash_table[bitnum / 32] |= (1 << (bitnum % 32));
1165				rfctl |= RFE_CTL_MCAST_HASH_;
1166			}
1167			i++;
1168		}
1169	}
1170
1171	lan743x_dp_write(adapter, DP_SEL_RFE_RAM,
1172			 DP_SEL_VHF_VLAN_LEN,
1173			 DP_SEL_VHF_HASH_LEN, hash_table);
1174	lan743x_csr_write(adapter, RFE_CTL, rfctl);
1175}
1176
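/* Soft-reset the DMA controller and program descriptor spacing,
 * coalescing, and OBFF thresholds.
 */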
1177static int lan743x_dmac_init(struct lan743x_adapter *adapter)
1178{
1179	u32 data = 0;
1180
1181	lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_);
1182	lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_,
1183				 0, 1000, 20000, 100);
1184	switch (DEFAULT_DMA_DESCRIPTOR_SPACING) {
1185	case DMA_DESCRIPTOR_SPACING_16:
1186		data = DMAC_CFG_MAX_DSPACE_16_;
1187		break;
1188	case DMA_DESCRIPTOR_SPACING_32:
1189		data = DMAC_CFG_MAX_DSPACE_32_;
1190		break;
1191	case DMA_DESCRIPTOR_SPACING_64:
1192		data = DMAC_CFG_MAX_DSPACE_64_;
1193		break;
1194	case DMA_DESCRIPTOR_SPACING_128:
1195		data = DMAC_CFG_MAX_DSPACE_128_;
1196		break;
1197	default:
1198		return -EPERM;
1199	}
1200	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
1201		data |= DMAC_CFG_COAL_EN_;
1202	data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_;
1203	data |= DMAC_CFG_MAX_READ_REQ_SET_(6);
1204	lan743x_csr_write(adapter, DMAC_CFG, data);
1205	data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1);
1206	data |= DMAC_COAL_CFG_TIMER_TX_START_;
1207	data |= DMAC_COAL_CFG_FLUSH_INTS_;
1208	data |= DMAC_COAL_CFG_INT_EXIT_COAL_;
1209	data |= DMAC_COAL_CFG_CSR_EXIT_COAL_;
1210	data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A);
1211	data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C);
1212	lan743x_csr_write(adapter, DMAC_COAL_CFG, data);
1213	data = DMAC_OBFF_TX_THRES_SET_(0x08);
1214	data |= DMAC_OBFF_RX_THRES_SET_(0x0A);
1215	lan743x_csr_write(adapter, DMAC_OBFF_CFG, data);
1216	return 0;
1217}
1218
1219static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter,
1220				     int tx_channel)
1221{
1222	u32 dmac_cmd = 0;
1223
1224	dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
1225	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
1226				      DMAC_CMD_START_T_(tx_channel)),
1227				      (dmac_cmd &
1228				      DMAC_CMD_STOP_T_(tx_channel)));
1229}
1230
1231static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter,
1232					     int tx_channel)
1233{
1234	int timeout = 100;
1235	int result = 0;
1236
1237	while (timeout &&
1238	       ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) ==
1239	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
1240		usleep_range(1000, 20000);
1241		timeout--;
1242	}
1243	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
1244		result = -ENODEV;
1245	return result;
1246}
1247
1248static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter,
1249				     int rx_channel)
1250{
1251	u32 dmac_cmd = 0;
1252
1253	dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
1254	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
1255				      DMAC_CMD_START_R_(rx_channel)),
1256				      (dmac_cmd &
1257				      DMAC_CMD_STOP_R_(rx_channel)));
1258}
1259
1260static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter,
1261					     int rx_channel)
1262{
1263	int timeout = 100;
1264	int result = 0;
1265
1266	while (timeout &&
1267	       ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) ==
1268	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
1269		usleep_range(1000, 20000);
1270		timeout--;
1271	}
1272	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
1273		result = -ENODEV;
1274	return result;
1275}
1276
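/* Release one TX descriptor: unmap its DMA buffer, complete or drop any
 * associated skb (handing timestamped skbs to the PTP code), and clear
 * the descriptor and buffer_info entries.
 */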
1277static void lan743x_tx_release_desc(struct lan743x_tx *tx,
1278				    int descriptor_index, bool cleanup)
1279{
1280	struct lan743x_tx_buffer_info *buffer_info = NULL;
1281	struct lan743x_tx_descriptor *descriptor = NULL;
1282	u32 descriptor_type = 0;
1283	bool ignore_sync;
1284
1285	descriptor = &tx->ring_cpu_ptr[descriptor_index];
1286	buffer_info = &tx->buffer_info[descriptor_index];
1287	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
1288		goto done;
1289
1290	descriptor_type = le32_to_cpu(descriptor->data0) &
1291			  TX_DESC_DATA0_DTYPE_MASK_;
1292	if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
1293		goto clean_up_data_descriptor;
1294	else
1295		goto clear_active;
1296
1297clean_up_data_descriptor:
1298	if (buffer_info->dma_ptr) {
1299		if (buffer_info->flags &
1300		    TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) {
1301			dma_unmap_page(&tx->adapter->pdev->dev,
1302				       buffer_info->dma_ptr,
1303				       buffer_info->buffer_length,
1304				       DMA_TO_DEVICE);
1305		} else {
1306			dma_unmap_single(&tx->adapter->pdev->dev,
1307					 buffer_info->dma_ptr,
1308					 buffer_info->buffer_length,
1309					 DMA_TO_DEVICE);
1310		}
1311		buffer_info->dma_ptr = 0;
1312		buffer_info->buffer_length = 0;
1313	}
1314	if (!buffer_info->skb)
1315		goto clear_active;
1316
1317	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
1318		dev_kfree_skb_any(buffer_info->skb);
1319		goto clear_skb;
1320	}
1321
1322	if (cleanup) {
1323		lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
1324		dev_kfree_skb_any(buffer_info->skb);
1325	} else {
1326		ignore_sync = (buffer_info->flags &
1327			       TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
1328		lan743x_ptp_tx_timestamp_skb(tx->adapter,
1329					     buffer_info->skb, ignore_sync);
1330	}
1331
1332clear_skb:
1333	buffer_info->skb = NULL;
1334
1335clear_active:
1336	buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE;
1337
1338done:
1339	memset(buffer_info, 0, sizeof(*buffer_info));
1340	memset(descriptor, 0, sizeof(*descriptor));
1341}
1342
1343static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
1344{
1345	return ((++index) % tx->ring_size);
1346}
1347
1348static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
1349{
1350	while (le32_to_cpu(*tx->head_cpu_ptr) != (tx->last_head)) {
1351		lan743x_tx_release_desc(tx, tx->last_head, false);
1352		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
1353	}
1354}
1355
1356static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx)
1357{
1358	u32 original_head = 0;
1359
1360	original_head = tx->last_head;
1361	do {
1362		lan743x_tx_release_desc(tx, tx->last_head, true);
1363		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
1364	} while (tx->last_head != original_head);
1365	memset(tx->ring_cpu_ptr, 0,
1366	       sizeof(*tx->ring_cpu_ptr) * (tx->ring_size));
1367	memset(tx->buffer_info, 0,
1368	       sizeof(*tx->buffer_info) * (tx->ring_size));
1369}
1370
1371static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx,
1372				   struct sk_buff *skb)
1373{
1374	int result = 1; /* 1 for the main skb buffer */
1375	int nr_frags = 0;
1376
1377	if (skb_is_gso(skb))
1378		result++; /* requires an extension descriptor */
1379	nr_frags = skb_shinfo(skb)->nr_frags;
1380	result += nr_frags; /* 1 for each fragment buffer */
1381	return result;
1382}
1383
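/* Number of free descriptors in the TX ring; one slot is always kept
 * empty so that head == tail unambiguously means "ring empty".
 */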
1384static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
1385{
1386	int last_head = tx->last_head;
1387	int last_tail = tx->last_tail;
1388
1389	if (last_tail >= last_head)
1390		return tx->ring_size - last_tail + last_head - 1;
1391	else
1392		return last_head - last_tail - 1;
1393}
1394
1395void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
1396				      bool enable_timestamping,
1397				      bool enable_onestep_sync)
1398{
1399	if (enable_timestamping)
1400		tx->ts_flags |= TX_TS_FLAG_TIMESTAMPING_ENABLED;
1401	else
1402		tx->ts_flags &= ~TX_TS_FLAG_TIMESTAMPING_ENABLED;
1403	if (enable_onestep_sync)
1404		tx->ts_flags |= TX_TS_FLAG_ONE_STEP_SYNC;
1405	else
1406		tx->ts_flags &= ~TX_TS_FLAG_ONE_STEP_SYNC;
1407}
1408
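/* Begin assembling a TX frame: map the first (linear) buffer and stage
 * the data0 flags in tx->frame_data0; data0 itself is written to the
 * descriptor by a later frame assembler step.
 */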
1409static int lan743x_tx_frame_start(struct lan743x_tx *tx,
1410				  unsigned char *first_buffer,
1411				  unsigned int first_buffer_length,
1412				  unsigned int frame_length,
1413				  bool time_stamp,
1414				  bool check_sum)
1415{
1416	/* called only from within lan743x_tx_xmit_frame.
1417	 * assuming tx->ring_lock has already been acquired.
1418	 */
1419	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1420	struct lan743x_tx_buffer_info *buffer_info = NULL;
1421	struct lan743x_adapter *adapter = tx->adapter;
1422	struct device *dev = &adapter->pdev->dev;
1423	dma_addr_t dma_ptr;
1424
1425	tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS;
1426	tx->frame_first = tx->last_tail;
1427	tx->frame_tail = tx->frame_first;
1428
1429	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1430	buffer_info = &tx->buffer_info[tx->frame_tail];
1431	dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length,
1432				 DMA_TO_DEVICE);
1433	if (dma_mapping_error(dev, dma_ptr))
1434		return -ENOMEM;
1435
1436	tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
1437	tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
1438	tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
1439		TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
1440
1441	buffer_info->skb = NULL;
1442	buffer_info->dma_ptr = dma_ptr;
1443	buffer_info->buffer_length = first_buffer_length;
1444	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1445
1446	tx->frame_data0 = (first_buffer_length &
1447		TX_DESC_DATA0_BUF_LENGTH_MASK_) |
1448		TX_DESC_DATA0_DTYPE_DATA_ |
1449		TX_DESC_DATA0_FS_ |
1450		TX_DESC_DATA0_FCS_;
1451	if (time_stamp)
1452		tx->frame_data0 |= TX_DESC_DATA0_TSE_;
1453
1454	if (check_sum)
1455		tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
1456				   TX_DESC_DATA0_IPE_ |
1457				   TX_DESC_DATA0_TPE_;
1458
	/* data0 will be programmed in one of the other frame assembler functions */
1460	return 0;
1461}
1462
1463static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
1464				     unsigned int frame_length,
1465				     int nr_frags)
1466{
1467	/* called only from within lan743x_tx_xmit_frame.
1468	 * assuming tx->ring_lock has already been acquired.
1469	 */
1470	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1471	struct lan743x_tx_buffer_info *buffer_info = NULL;
1472
1473	/* wrap up previous descriptor */
1474	tx->frame_data0 |= TX_DESC_DATA0_EXT_;
1475	if (nr_frags <= 0) {
1476		tx->frame_data0 |= TX_DESC_DATA0_LS_;
1477		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
1478	}
1479	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1480	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
1481
1482	/* move to next descriptor */
1483	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
1484	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1485	buffer_info = &tx->buffer_info[tx->frame_tail];
1486
1487	/* add extension descriptor */
1488	tx_descriptor->data1 = 0;
1489	tx_descriptor->data2 = 0;
1490	tx_descriptor->data3 = 0;
1491
1492	buffer_info->skb = NULL;
1493	buffer_info->dma_ptr = 0;
1494	buffer_info->buffer_length = 0;
1495	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1496
1497	tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) |
1498			  TX_DESC_DATA0_DTYPE_EXT_ |
1499			  TX_DESC_DATA0_EXT_LSO_;
1500
	/* data0 will be programmed in one of the other frame assembler functions */
1502}
1503
1504static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
1505					 const skb_frag_t *fragment,
1506					 unsigned int frame_length)
1507{
1508	/* called only from within lan743x_tx_xmit_frame
1509	 * assuming tx->ring_lock has already been acquired
1510	 */
1511	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1512	struct lan743x_tx_buffer_info *buffer_info = NULL;
1513	struct lan743x_adapter *adapter = tx->adapter;
1514	struct device *dev = &adapter->pdev->dev;
1515	unsigned int fragment_length = 0;
1516	dma_addr_t dma_ptr;
1517
1518	fragment_length = skb_frag_size(fragment);
1519	if (!fragment_length)
1520		return 0;
1521
1522	/* wrap up previous descriptor */
1523	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1524	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
1525
1526	/* move to next descriptor */
1527	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
1528	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1529	buffer_info = &tx->buffer_info[tx->frame_tail];
1530	dma_ptr = skb_frag_dma_map(dev, fragment,
1531				   0, fragment_length,
1532				   DMA_TO_DEVICE);
1533	if (dma_mapping_error(dev, dma_ptr)) {
1534		int desc_index;
1535
		/* clean up all previously set-up descriptors */
1537		desc_index = tx->frame_first;
1538		while (desc_index != tx->frame_tail) {
1539			lan743x_tx_release_desc(tx, desc_index, true);
1540			desc_index = lan743x_tx_next_index(tx, desc_index);
1541		}
1542		dma_wmb();
1543		tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
1544		tx->frame_first = 0;
1545		tx->frame_data0 = 0;
1546		tx->frame_tail = 0;
1547		return -ENOMEM;
1548	}
1549
1550	tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
1551	tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
1552	tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
1553			       TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
1554
1555	buffer_info->skb = NULL;
1556	buffer_info->dma_ptr = dma_ptr;
1557	buffer_info->buffer_length = fragment_length;
1558	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1559	buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT;
1560
1561	tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) |
1562			  TX_DESC_DATA0_DTYPE_DATA_ |
1563			  TX_DESC_DATA0_FCS_;
1564
	/* data0 will be programmed in one of the other frame assembler functions */
1566	return 0;
1567}
1568
1569static void lan743x_tx_frame_end(struct lan743x_tx *tx,
1570				 struct sk_buff *skb,
1571				 bool time_stamp,
1572				 bool ignore_sync)
1573{
1574	/* called only from within lan743x_tx_xmit_frame
1575	 * assuming tx->ring_lock has already been acquired
1576	 */
1577	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1578	struct lan743x_tx_buffer_info *buffer_info = NULL;
1579	struct lan743x_adapter *adapter = tx->adapter;
1580	u32 tx_tail_flags = 0;
1581
1582	/* wrap up previous descriptor */
1583	if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
1584	    TX_DESC_DATA0_DTYPE_DATA_) {
1585		tx->frame_data0 |= TX_DESC_DATA0_LS_;
1586		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
1587	}
1588
1589	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1590	buffer_info = &tx->buffer_info[tx->frame_tail];
1591	buffer_info->skb = skb;
1592	if (time_stamp)
1593		buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
1594	if (ignore_sync)
1595		buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
1596
1597	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
1598	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
1599	tx->last_tail = tx->frame_tail;
1600
1601	dma_wmb();
1602
1603	if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
1604		tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_;
1605	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)
1606		tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ |
1607		TX_TAIL_SET_TOP_INT_EN_;
1608
1609	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
1610			  tx_tail_flags | tx->frame_tail);
1611	tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
1612}
1613
1614static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
1615					 struct sk_buff *skb)
1616{
1617	int required_number_of_descriptors = 0;
1618	unsigned int start_frame_length = 0;
1619	unsigned int frame_length = 0;
1620	unsigned int head_length = 0;
1621	unsigned long irq_flags = 0;
1622	bool do_timestamp = false;
1623	bool ignore_sync = false;
1624	int nr_frags = 0;
1625	bool gso = false;
1626	int j;
1627
1628	required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb);
1629
1630	spin_lock_irqsave(&tx->ring_lock, irq_flags);
1631	if (required_number_of_descriptors >
1632		lan743x_tx_get_avail_desc(tx)) {
1633		if (required_number_of_descriptors > (tx->ring_size - 1)) {
1634			dev_kfree_skb_irq(skb);
1635		} else {
1636			/* save to overflow buffer */
1637			tx->overflow_skb = skb;
1638			netif_stop_queue(tx->adapter->netdev);
1639		}
1640		goto unlock;
1641	}
1642
	/* space available, transmit skb */
1644	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1645	    (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) &&
1646	    (lan743x_ptp_request_tx_timestamp(tx->adapter))) {
1647		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1648		do_timestamp = true;
1649		if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
1650			ignore_sync = true;
1651	}
1652	head_length = skb_headlen(skb);
1653	frame_length = skb_pagelen(skb);
1654	nr_frags = skb_shinfo(skb)->nr_frags;
1655	start_frame_length = frame_length;
1656	gso = skb_is_gso(skb);
1657	if (gso) {
1658		start_frame_length = max(skb_shinfo(skb)->gso_size,
1659					 (unsigned short)8);
1660	}
1661
1662	if (lan743x_tx_frame_start(tx,
1663				   skb->data, head_length,
1664				   start_frame_length,
1665				   do_timestamp,
1666				   skb->ip_summed == CHECKSUM_PARTIAL)) {
1667		dev_kfree_skb_irq(skb);
1668		goto unlock;
1669	}
1670
1671	if (gso)
1672		lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
1673
1674	if (nr_frags <= 0)
1675		goto finish;
1676
1677	for (j = 0; j < nr_frags; j++) {
1678		const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]);
1679
1680		if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
			/* on error there is no need to call
			 * lan743x_tx_frame_end; frame assembler cleanup
			 * was already performed inside
			 * lan743x_tx_frame_add_fragment
			 */
1686			dev_kfree_skb_irq(skb);
1687			goto unlock;
1688		}
1689	}
1690
1691finish:
1692	lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync);
1693
1694unlock:
1695	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
1696	return NETDEV_TX_OK;
1697}
1698
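/* TX NAPI poll: acknowledge the IOC interrupt, release completed
 * descriptors, restart the queue (retransmitting any overflow skb), and
 * re-enable the TX channel interrupt.
 */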
1699static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
1700{
1701	struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi);
1702	struct lan743x_adapter *adapter = tx->adapter;
1703	bool start_transmitter = false;
1704	unsigned long irq_flags = 0;
1705	u32 ioc_bit = 0;
1706
1707	ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
1708	lan743x_csr_read(adapter, DMAC_INT_STS);
1709	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C)
1710		lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit);
1711	spin_lock_irqsave(&tx->ring_lock, irq_flags);
1712
1713	/* clean up tx ring */
1714	lan743x_tx_release_completed_descriptors(tx);
1715	if (netif_queue_stopped(adapter->netdev)) {
1716		if (tx->overflow_skb) {
1717			if (lan743x_tx_get_desc_cnt(tx, tx->overflow_skb) <=
1718				lan743x_tx_get_avail_desc(tx))
1719				start_transmitter = true;
1720		} else {
1721			netif_wake_queue(adapter->netdev);
1722		}
1723	}
1724	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
1725
1726	if (start_transmitter) {
1727		/* space is now available, transmit overflow skb */
1728		lan743x_tx_xmit_frame(tx, tx->overflow_skb);
1729		tx->overflow_skb = NULL;
1730		netif_wake_queue(adapter->netdev);
1731	}
1732
1733	if (!napi_complete(napi))
1734		goto done;
1735
1736	/* enable isr */
1737	lan743x_csr_write(adapter, INT_EN_SET,
1738			  INT_BIT_DMA_TX_(tx->channel_number));
1739	lan743x_csr_read(adapter, INT_STS);
1740
1741done:
1742	return 0;
1743}
1744
1745static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
1746{
1747	if (tx->head_cpu_ptr) {
1748		dma_free_coherent(&tx->adapter->pdev->dev,
1749				  sizeof(*tx->head_cpu_ptr), tx->head_cpu_ptr,
1750				  tx->head_dma_ptr);
1751		tx->head_cpu_ptr = NULL;
1752		tx->head_dma_ptr = 0;
1753	}
1754	kfree(tx->buffer_info);
1755	tx->buffer_info = NULL;
1756
1757	if (tx->ring_cpu_ptr) {
1758		dma_free_coherent(&tx->adapter->pdev->dev,
1759				  tx->ring_allocation_size, tx->ring_cpu_ptr,
1760				  tx->ring_dma_ptr);
1761		tx->ring_allocation_size = 0;
1762		tx->ring_cpu_ptr = NULL;
1763		tx->ring_dma_ptr = 0;
1764	}
1765	tx->ring_size = 0;
1766}
1767
1768static int lan743x_tx_ring_init(struct lan743x_tx *tx)
1769{
1770	size_t ring_allocation_size = 0;
1771	void *cpu_ptr = NULL;
1772	dma_addr_t dma_ptr;
1773	int ret = -ENOMEM;
1774
1775	tx->ring_size = LAN743X_TX_RING_SIZE;
1776	if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) {
1777		ret = -EINVAL;
1778		goto cleanup;
1779	}
1780	if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
1781				      DMA_BIT_MASK(64))) {
1782		if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
1783					      DMA_BIT_MASK(32))) {
1784			dev_warn(&tx->adapter->pdev->dev,
1785				 "lan743x_: No suitable DMA available\n");
1786			ret = -ENOMEM;
1787			goto cleanup;
1788		}
1789	}
1790	ring_allocation_size = ALIGN(tx->ring_size *
1791				     sizeof(struct lan743x_tx_descriptor),
1792				     PAGE_SIZE);
1793	dma_ptr = 0;
1794	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
1795				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
1796	if (!cpu_ptr) {
1797		ret = -ENOMEM;
1798		goto cleanup;
1799	}
1800
1801	tx->ring_allocation_size = ring_allocation_size;
1802	tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr;
1803	tx->ring_dma_ptr = dma_ptr;
1804
1805	cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL);
1806	if (!cpu_ptr) {
1807		ret = -ENOMEM;
1808		goto cleanup;
1809	}
1810	tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr;
1811	dma_ptr = 0;
1812	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
1813				     sizeof(*tx->head_cpu_ptr), &dma_ptr,
1814				     GFP_KERNEL);
1815	if (!cpu_ptr) {
1816		ret = -ENOMEM;
1817		goto cleanup;
1818	}
1819
1820	tx->head_cpu_ptr = cpu_ptr;
1821	tx->head_dma_ptr = dma_ptr;
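	/* the head writeback address must be at least DWORD aligned */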
1822	if (tx->head_dma_ptr & 0x3) {
1823		ret = -ENOMEM;
1824		goto cleanup;
1825	}
1826
1827	return 0;
1828
1829cleanup:
1830	lan743x_tx_ring_cleanup(tx);
1831	return ret;
1832}
1833
1834static void lan743x_tx_close(struct lan743x_tx *tx)
1835{
1836	struct lan743x_adapter *adapter = tx->adapter;
1837
1838	lan743x_csr_write(adapter,
1839			  DMAC_CMD,
1840			  DMAC_CMD_STOP_T_(tx->channel_number));
1841	lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number);
1842
1843	lan743x_csr_write(adapter,
1844			  DMAC_INT_EN_CLR,
1845			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
1846	lan743x_csr_write(adapter, INT_EN_CLR,
1847			  INT_BIT_DMA_TX_(tx->channel_number));
1848	napi_disable(&tx->napi);
1849	netif_napi_del(&tx->napi);
1850
1851	lan743x_csr_write(adapter, FCT_TX_CTL,
1852			  FCT_TX_CTL_DIS_(tx->channel_number));
1853	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
1854				 FCT_TX_CTL_EN_(tx->channel_number),
1855				 0, 1000, 20000, 100);
1856
1857	lan743x_tx_release_all_descriptors(tx);
1858
1859	if (tx->overflow_skb) {
1860		dev_kfree_skb(tx->overflow_skb);
1861		tx->overflow_skb = NULL;
1862	}
1863
1864	lan743x_tx_ring_cleanup(tx);
1865}
1866
1867static int lan743x_tx_open(struct lan743x_tx *tx)
1868{
1869	struct lan743x_adapter *adapter = NULL;
1870	u32 data = 0;
1871	int ret;
1872
1873	adapter = tx->adapter;
1874	ret = lan743x_tx_ring_init(tx);
1875	if (ret)
1876		return ret;
1877
1878	/* initialize fifo */
1879	lan743x_csr_write(adapter, FCT_TX_CTL,
1880			  FCT_TX_CTL_RESET_(tx->channel_number));
1881	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
1882				 FCT_TX_CTL_RESET_(tx->channel_number),
1883				 0, 1000, 20000, 100);
1884
1885	/* enable fifo */
1886	lan743x_csr_write(adapter, FCT_TX_CTL,
1887			  FCT_TX_CTL_EN_(tx->channel_number));
1888
1889	/* reset tx channel */
1890	lan743x_csr_write(adapter, DMAC_CMD,
1891			  DMAC_CMD_TX_SWR_(tx->channel_number));
1892	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
1893				 DMAC_CMD_TX_SWR_(tx->channel_number),
1894				 0, 1000, 20000, 100);
1895
1896	/* Write TX_BASE_ADDR */
1897	lan743x_csr_write(adapter,
1898			  TX_BASE_ADDRH(tx->channel_number),
1899			  DMA_ADDR_HIGH32(tx->ring_dma_ptr));
1900	lan743x_csr_write(adapter,
1901			  TX_BASE_ADDRL(tx->channel_number),
1902			  DMA_ADDR_LOW32(tx->ring_dma_ptr));
1903
1904	/* Write TX_CFG_B */
1905	data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number));
1906	data &= ~TX_CFG_B_TX_RING_LEN_MASK_;
1907	data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_);
1908	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
1909		data |= TX_CFG_B_TDMABL_512_;
1910	lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data);
1911
1912	/* Write TX_CFG_A */
1913	data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_;
1914	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
1915		data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_;
1916		data |= TX_CFG_A_TX_PF_THRES_SET_(0x10);
1917		data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04);
1918		data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07);
1919	}
1920	lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data);
1921
1922	/* Write TX_HEAD_WRITEBACK_ADDR */
1923	lan743x_csr_write(adapter,
1924			  TX_HEAD_WRITEBACK_ADDRH(tx->channel_number),
1925			  DMA_ADDR_HIGH32(tx->head_dma_ptr));
1926	lan743x_csr_write(adapter,
1927			  TX_HEAD_WRITEBACK_ADDRL(tx->channel_number),
1928			  DMA_ADDR_LOW32(tx->head_dma_ptr));
1929
1930	/* set last head */
1931	tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number));
1932
1933	/* write TX_TAIL */
1934	tx->last_tail = 0;
1935	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
1936			  (u32)(tx->last_tail));
1937	tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
1938							 INT_BIT_DMA_TX_
1939							 (tx->channel_number));
1940	netif_tx_napi_add(adapter->netdev,
1941			  &tx->napi, lan743x_tx_napi_poll,
1942			  tx->ring_size - 1);
1943	napi_enable(&tx->napi);
1944
1945	data = 0;
1946	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
1947		data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_;
1948	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
1949		data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_;
1950	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
1951		data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_;
1952	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
1953		data |= TX_CFG_C_TX_INT_EN_R2C_;
1954	lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data);
1955
1956	if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET))
1957		lan743x_csr_write(adapter, INT_EN_SET,
1958				  INT_BIT_DMA_TX_(tx->channel_number));
1959	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
1960			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
1961
1962	/* start dmac channel */
1963	lan743x_csr_write(adapter, DMAC_CMD,
1964			  DMAC_CMD_START_T_(tx->channel_number));
1965	return 0;
1966}
1967
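/* advance a ring index by one, wrapping at the ring size */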
1968static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
1969{
1970	return ((++index) % rx->ring_size);
1971}
1972
1973static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx, gfp_t gfp)
1974{
1975	int length = 0;
1976
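	/* buffer must hold a maximum-size frame plus the Ethernet header,
	 * 4 bytes of trailing FCS and the head padding reserved at the start
	 */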
1977	length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
1978	return __netdev_alloc_skb(rx->adapter->netdev,
1979				  length, gfp);
1980}
1981
1982static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
1983{
1984	/* update the tail once per 8 descriptors */
1985	if ((index & 7) == 7)
1986		lan743x_csr_write(rx->adapter, RX_TAIL(rx->channel_number),
1987				  index);
1988}
1989
1990static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
1991					struct sk_buff *skb)
1992{
1993	struct lan743x_rx_buffer_info *buffer_info;
1994	struct lan743x_rx_descriptor *descriptor;
1995	int length = 0;
1996
1997	length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
1998	descriptor = &rx->ring_cpu_ptr[index];
1999	buffer_info = &rx->buffer_info[index];
2000	buffer_info->skb = skb;
2001	if (!(buffer_info->skb))
2002		return -ENOMEM;
2003	buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
2004					      buffer_info->skb->data,
2005					      length,
2006					      DMA_FROM_DEVICE);
2007	if (dma_mapping_error(&rx->adapter->pdev->dev,
2008			      buffer_info->dma_ptr)) {
2009		buffer_info->dma_ptr = 0;
2010		return -ENOMEM;
2011	}
2012
2013	buffer_info->buffer_length = length;
2014	descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
2015	descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
2016	descriptor->data3 = 0;
2017	descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
2018			    (length & RX_DESC_DATA0_BUF_LENGTH_MASK_)));
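	/* skip the head padding the hardware prepends to the received frame */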
2019	skb_reserve(buffer_info->skb, RX_HEAD_PADDING);
2020	lan743x_rx_update_tail(rx, index);
2021
2022	return 0;
2023}
2024
2025static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
2026{
2027	struct lan743x_rx_buffer_info *buffer_info;
2028	struct lan743x_rx_descriptor *descriptor;
2029
2030	descriptor = &rx->ring_cpu_ptr[index];
2031	buffer_info = &rx->buffer_info[index];
2032
2033	descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
2034	descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
2035	descriptor->data3 = 0;
2036	descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
2037			    ((buffer_info->buffer_length) &
2038			    RX_DESC_DATA0_BUF_LENGTH_MASK_)));
2039	lan743x_rx_update_tail(rx, index);
2040}
2041
2042static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
2043{
2044	struct lan743x_rx_buffer_info *buffer_info;
2045	struct lan743x_rx_descriptor *descriptor;
2046
2047	descriptor = &rx->ring_cpu_ptr[index];
2048	buffer_info = &rx->buffer_info[index];
2049
2050	memset(descriptor, 0, sizeof(*descriptor));
2051
2052	if (buffer_info->dma_ptr) {
2053		dma_unmap_single(&rx->adapter->pdev->dev,
2054				 buffer_info->dma_ptr,
2055				 buffer_info->buffer_length,
2056				 DMA_FROM_DEVICE);
2057		buffer_info->dma_ptr = 0;
2058	}
2059
2060	if (buffer_info->skb) {
2061		dev_kfree_skb(buffer_info->skb);
2062		buffer_info->skb = NULL;
2063	}
2064
2065	memset(buffer_info, 0, sizeof(*buffer_info));
2066}
2067
2068static int lan743x_rx_process_packet(struct lan743x_rx *rx)
2069{
2070	struct skb_shared_hwtstamps *hwtstamps = NULL;
2071	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
2072	int current_head_index = le32_to_cpu(*rx->head_cpu_ptr);
2073	struct lan743x_rx_buffer_info *buffer_info;
2074	struct lan743x_rx_descriptor *descriptor;
2075	int extension_index = -1;
2076	int first_index = -1;
2077	int last_index = -1;
2078
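	/* validate both the hardware head index and the cached head index
	 * before walking the ring
	 */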
2079	if (current_head_index < 0 || current_head_index >= rx->ring_size)
2080		goto done;
2081
2082	if (rx->last_head < 0 || rx->last_head >= rx->ring_size)
2083		goto done;
2084
2085	if (rx->last_head != current_head_index) {
2086		descriptor = &rx->ring_cpu_ptr[rx->last_head];
2087		if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_)
2088			goto done;
2089
2090		if (!(le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_FS_))
2091			goto done;
2092
2093		first_index = rx->last_head;
2094		if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_) {
2095			last_index = rx->last_head;
2096		} else {
2097			int index;
2098
2099			index = lan743x_rx_next_index(rx, first_index);
2100			while (index != current_head_index) {
2101				descriptor = &rx->ring_cpu_ptr[index];
2102				if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_)
2103					goto done;
2104
2105				if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_) {
2106					last_index = index;
2107					break;
2108				}
2109				index = lan743x_rx_next_index(rx, index);
2110			}
2111		}
2112		if (last_index >= 0) {
2113			descriptor = &rx->ring_cpu_ptr[last_index];
2114			if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_EXT_) {
2115				/* extension is expected to follow */
2116				int index = lan743x_rx_next_index(rx,
2117								  last_index);
2118				if (index != current_head_index) {
2119					descriptor = &rx->ring_cpu_ptr[index];
2120					if (le32_to_cpu(descriptor->data0) &
2121					    RX_DESC_DATA0_OWN_) {
2122						goto done;
2123					}
2124					if (le32_to_cpu(descriptor->data0) &
2125					    RX_DESC_DATA0_EXT_) {
2126						extension_index = index;
2127					} else {
2128						goto done;
2129					}
2130				} else {
2131					/* extension is not yet available */
2132					/* prevent processing of this packet */
2133					first_index = -1;
2134					last_index = -1;
2135				}
2136			}
2137		}
2138	}
2139	if (first_index >= 0 && last_index >= 0) {
2140		int real_last_index = last_index;
2141		struct sk_buff *skb = NULL;
2142		u32 ts_sec = 0;
2143		u32 ts_nsec = 0;
2144
2145		/* packet is available */
2146		if (first_index == last_index) {
2147			/* single buffer packet */
2148			struct sk_buff *new_skb = NULL;
2149			int packet_length;
2150
2151			new_skb = lan743x_rx_allocate_skb(rx,
2152							  GFP_ATOMIC | GFP_DMA);
2153			if (!new_skb) {
2154				/* failed to allocate next skb.
2155				 * Memory is very low.
2156				 * Drop this packet and reuse buffer.
2157				 */
2158				lan743x_rx_reuse_ring_element(rx, first_index);
2159				goto process_extension;
2160			}
2161
2162			buffer_info = &rx->buffer_info[first_index];
2163			skb = buffer_info->skb;
2164			descriptor = &rx->ring_cpu_ptr[first_index];
2165
2166			/* unmap from dma */
2167			if (buffer_info->dma_ptr) {
2168				dma_unmap_single(&rx->adapter->pdev->dev,
2169						 buffer_info->dma_ptr,
2170						 buffer_info->buffer_length,
2171						 DMA_FROM_DEVICE);
2172				buffer_info->dma_ptr = 0;
2173				buffer_info->buffer_length = 0;
2174			}
2175			buffer_info->skb = NULL;
2176			packet_length =	RX_DESC_DATA0_FRAME_LENGTH_GET_
2177					(le32_to_cpu(descriptor->data0));
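			/* strip the 4-byte FCS included in the reported frame length */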
2178			skb_put(skb, packet_length - 4);
2179			skb->protocol = eth_type_trans(skb,
2180						       rx->adapter->netdev);
2181			lan743x_rx_init_ring_element(rx, first_index, new_skb);
2182		} else {
2183			int index = first_index;
2184
2185			/* multi buffer packet not supported;
2186			 * this should not happen since buffers are
2187			 * allocated to be at least jumbo size
2188			 */
2189
2190			/* clean up buffers */
2191			if (first_index <= last_index) {
2192				while ((index >= first_index) &&
2193				       (index <= last_index)) {
2194					lan743x_rx_reuse_ring_element(rx,
2195								      index);
2196					index = lan743x_rx_next_index(rx,
2197								      index);
2198				}
2199			} else {
2200				while ((index >= first_index) ||
2201				       (index <= last_index)) {
2202					lan743x_rx_reuse_ring_element(rx,
2203								      index);
2204					index = lan743x_rx_next_index(rx,
2205								      index);
2206				}
2207			}
2208		}
2209
2210process_extension:
2211		if (extension_index >= 0) {
2212			descriptor = &rx->ring_cpu_ptr[extension_index];
2213			buffer_info = &rx->buffer_info[extension_index];
2214
2215			ts_sec = le32_to_cpu(descriptor->data1);
2216			ts_nsec = (le32_to_cpu(descriptor->data2) &
2217				  RX_DESC_DATA2_TS_NS_MASK_);
2218			lan743x_rx_reuse_ring_element(rx, extension_index);
2219			real_last_index = extension_index;
2220		}
2221
2222		if (!skb) {
2223			result = RX_PROCESS_RESULT_PACKET_DROPPED;
2224			goto move_forward;
2225		}
2226
2227		if (extension_index < 0)
2228			goto pass_packet_to_os;
2229		hwtstamps = skb_hwtstamps(skb);
2230		if (hwtstamps)
2231			hwtstamps->hwtstamp = ktime_set(ts_sec, ts_nsec);
2232
2233pass_packet_to_os:
2234		/* pass packet to OS */
2235		napi_gro_receive(&rx->napi, skb);
2236		result = RX_PROCESS_RESULT_PACKET_RECEIVED;
2237
2238move_forward:
2239		/* push tail and head forward */
2240		rx->last_tail = real_last_index;
2241		rx->last_head = lan743x_rx_next_index(rx, real_last_index);
2242	}
2243done:
2244	return result;
2245}
2246
2247static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
2248{
2249	struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi);
2250	struct lan743x_adapter *adapter = rx->adapter;
2251	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
2252	u32 rx_tail_flags = 0;
2253	int count;
2254
2255	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) {
2256		/* clear int status bit before reading packet */
2257		lan743x_csr_write(adapter, DMAC_INT_STS,
2258				  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2259	}
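	/* process received frames until the NAPI budget is used up or the
	 * ring has nothing more to deliver
	 */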
2260	for (count = 0; count < weight; count++) {
2261		result = lan743x_rx_process_packet(rx);
2262		if (result == RX_PROCESS_RESULT_NOTHING_TO_DO)
2263			break;
2264	}
2265	rx->frame_count += count;
2266	if (count == weight || result == RX_PROCESS_RESULT_PACKET_RECEIVED)
2267		return weight;
2268
2269	if (!napi_complete_done(napi, count))
2270		return count;
2271
2272	/* re-arm interrupts, must write to rx tail on some chip variants */
2273	if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
2274		rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_;
2275	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) {
2276		rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_;
2277	} else {
2278		lan743x_csr_write(adapter, INT_EN_SET,
2279				  INT_BIT_DMA_RX_(rx->channel_number));
2280	}
2281
2282	if (rx_tail_flags)
2283		lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
2284				  rx_tail_flags | rx->last_tail);
2285
2286	return count;
2287}
2288
2289static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx)
2290{
2291	if (rx->buffer_info && rx->ring_cpu_ptr) {
2292		int index;
2293
2294		for (index = 0; index < rx->ring_size; index++)
2295			lan743x_rx_release_ring_element(rx, index);
2296	}
2297
2298	if (rx->head_cpu_ptr) {
2299		dma_free_coherent(&rx->adapter->pdev->dev,
2300				  sizeof(*rx->head_cpu_ptr), rx->head_cpu_ptr,
2301				  rx->head_dma_ptr);
2302		rx->head_cpu_ptr = NULL;
2303		rx->head_dma_ptr = 0;
2304	}
2305
2306	kfree(rx->buffer_info);
2307	rx->buffer_info = NULL;
2308
2309	if (rx->ring_cpu_ptr) {
2310		dma_free_coherent(&rx->adapter->pdev->dev,
2311				  rx->ring_allocation_size, rx->ring_cpu_ptr,
2312				  rx->ring_dma_ptr);
2313		rx->ring_allocation_size = 0;
2314		rx->ring_cpu_ptr = NULL;
2315		rx->ring_dma_ptr = 0;
2316	}
2317
2318	rx->ring_size = 0;
2319	rx->last_head = 0;
2320}
2321
2322static int lan743x_rx_ring_init(struct lan743x_rx *rx)
2323{
2324	size_t ring_allocation_size = 0;
2325	dma_addr_t dma_ptr = 0;
2326	void *cpu_ptr = NULL;
2327	int ret = -ENOMEM;
2328	int index = 0;
2329
2330	rx->ring_size = LAN743X_RX_RING_SIZE;
2331	if (rx->ring_size <= 1) {
2332		ret = -EINVAL;
2333		goto cleanup;
2334	}
2335	if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) {
2336		ret = -EINVAL;
2337		goto cleanup;
2338	}
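	/* prefer 64-bit DMA addressing; fall back to 32-bit if unsupported */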
2339	if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
2340				      DMA_BIT_MASK(64))) {
2341		if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
2342					      DMA_BIT_MASK(32))) {
2343			dev_warn(&rx->adapter->pdev->dev,
2344				 "lan743x_: No suitable DMA available\n");
2345			ret = -ENOMEM;
2346			goto cleanup;
2347		}
2348	}
2349	ring_allocation_size = ALIGN(rx->ring_size *
2350				     sizeof(struct lan743x_rx_descriptor),
2351				     PAGE_SIZE);
2352	dma_ptr = 0;
2353	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
2354				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
2355	if (!cpu_ptr) {
2356		ret = -ENOMEM;
2357		goto cleanup;
2358	}
2359	rx->ring_allocation_size = ring_allocation_size;
2360	rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr;
2361	rx->ring_dma_ptr = dma_ptr;
2362
2363	cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info),
2364			  GFP_KERNEL);
2365	if (!cpu_ptr) {
2366		ret = -ENOMEM;
2367		goto cleanup;
2368	}
2369	rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr;
2370	dma_ptr = 0;
2371	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
2372				     sizeof(*rx->head_cpu_ptr), &dma_ptr,
2373				     GFP_KERNEL);
2374	if (!cpu_ptr) {
2375		ret = -ENOMEM;
2376		goto cleanup;
2377	}
2378
2379	rx->head_cpu_ptr = cpu_ptr;
2380	rx->head_dma_ptr = dma_ptr;
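	/* the head writeback address must be at least DWORD aligned */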
2381	if (rx->head_dma_ptr & 0x3) {
2382		ret = -ENOMEM;
2383		goto cleanup;
2384	}
2385
2386	rx->last_head = 0;
2387	for (index = 0; index < rx->ring_size; index++) {
2388		struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx,
2389								   GFP_KERNEL);
2390
2391		ret = lan743x_rx_init_ring_element(rx, index, new_skb);
2392		if (ret)
2393			goto cleanup;
2394	}
2395	return 0;
2396
2397cleanup:
2398	lan743x_rx_ring_cleanup(rx);
2399	return ret;
2400}
2401
2402static void lan743x_rx_close(struct lan743x_rx *rx)
2403{
2404	struct lan743x_adapter *adapter = rx->adapter;
2405
2406	lan743x_csr_write(adapter, FCT_RX_CTL,
2407			  FCT_RX_CTL_DIS_(rx->channel_number));
2408	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
2409				 FCT_RX_CTL_EN_(rx->channel_number),
2410				 0, 1000, 20000, 100);
2411
2412	lan743x_csr_write(adapter, DMAC_CMD,
2413			  DMAC_CMD_STOP_R_(rx->channel_number));
2414	lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number);
2415
2416	lan743x_csr_write(adapter, DMAC_INT_EN_CLR,
2417			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2418	lan743x_csr_write(adapter, INT_EN_CLR,
2419			  INT_BIT_DMA_RX_(rx->channel_number));
2420	napi_disable(&rx->napi);
2421
2422	netif_napi_del(&rx->napi);
2423
2424	lan743x_rx_ring_cleanup(rx);
2425}
2426
2427static int lan743x_rx_open(struct lan743x_rx *rx)
2428{
2429	struct lan743x_adapter *adapter = rx->adapter;
2430	u32 data = 0;
2431	int ret;
2432
2433	rx->frame_count = 0;
2434	ret = lan743x_rx_ring_init(rx);
2435	if (ret)
2436		goto return_error;
2437
2438	netif_napi_add(adapter->netdev,
2439		       &rx->napi, lan743x_rx_napi_poll,
2440		       NAPI_POLL_WEIGHT);
2441
2442	lan743x_csr_write(adapter, DMAC_CMD,
2443			  DMAC_CMD_RX_SWR_(rx->channel_number));
2444	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
2445				 DMAC_CMD_RX_SWR_(rx->channel_number),
2446				 0, 1000, 20000, 100);
2447
2448	/* set ring base address */
2449	lan743x_csr_write(adapter,
2450			  RX_BASE_ADDRH(rx->channel_number),
2451			  DMA_ADDR_HIGH32(rx->ring_dma_ptr));
2452	lan743x_csr_write(adapter,
2453			  RX_BASE_ADDRL(rx->channel_number),
2454			  DMA_ADDR_LOW32(rx->ring_dma_ptr));
2455
2456	/* set rx write back address */
2457	lan743x_csr_write(adapter,
2458			  RX_HEAD_WRITEBACK_ADDRH(rx->channel_number),
2459			  DMA_ADDR_HIGH32(rx->head_dma_ptr));
2460	lan743x_csr_write(adapter,
2461			  RX_HEAD_WRITEBACK_ADDRL(rx->channel_number),
2462			  DMA_ADDR_LOW32(rx->head_dma_ptr));
2463	data = RX_CFG_A_RX_HP_WB_EN_;
2464	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
2465		data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ |
2466			RX_CFG_A_RX_WB_THRES_SET_(0x7) |
2467			RX_CFG_A_RX_PF_THRES_SET_(16) |
2468			RX_CFG_A_RX_PF_PRI_THRES_SET_(4));
2469	}
2470
2471	/* set RX_CFG_A */
2472	lan743x_csr_write(adapter,
2473			  RX_CFG_A(rx->channel_number), data);
2474
2475	/* set RX_CFG_B */
2476	data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number));
2477	data &= ~RX_CFG_B_RX_PAD_MASK_;
2478	if (!RX_HEAD_PADDING)
2479		data |= RX_CFG_B_RX_PAD_0_;
2480	else
2481		data |= RX_CFG_B_RX_PAD_2_;
2482	data &= ~RX_CFG_B_RX_RING_LEN_MASK_;
2483	data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_);
2484	data |= RX_CFG_B_TS_ALL_RX_;
2485	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
2486		data |= RX_CFG_B_RDMABL_512_;
2487
2488	lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data);
2489	rx->vector_flags = lan743x_intr_get_vector_flags(adapter,
2490							 INT_BIT_DMA_RX_
2491							 (rx->channel_number));
2492
2493	/* set RX_CFG_C */
2494	data = 0;
2495	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
2496		data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_;
2497	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
2498		data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_;
2499	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
2500		data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_;
2501	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
2502		data |= RX_CFG_C_RX_INT_EN_R2C_;
2503	lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data);
2504
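	/* point the tail at the last descriptor so the whole ring is
	 * available to the hardware
	 */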
2505	rx->last_tail = ((u32)(rx->ring_size - 1));
2506	lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
2507			  rx->last_tail);
2508	rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number));
2509	if (rx->last_head) {
2510		ret = -EIO;
2511		goto napi_delete;
2512	}
2513
2514	napi_enable(&rx->napi);
2515
2516	lan743x_csr_write(adapter, INT_EN_SET,
2517			  INT_BIT_DMA_RX_(rx->channel_number));
2518	lan743x_csr_write(adapter, DMAC_INT_STS,
2519			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2520	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
2521			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2522	lan743x_csr_write(adapter, DMAC_CMD,
2523			  DMAC_CMD_START_R_(rx->channel_number));
2524
2525	/* initialize fifo */
2526	lan743x_csr_write(adapter, FCT_RX_CTL,
2527			  FCT_RX_CTL_RESET_(rx->channel_number));
2528	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
2529				 FCT_RX_CTL_RESET_(rx->channel_number),
2530				 0, 1000, 20000, 100);
2531	lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number),
2532			  FCT_FLOW_CTL_REQ_EN_ |
2533			  FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) |
2534			  FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA));
2535
2536	/* enable fifo */
2537	lan743x_csr_write(adapter, FCT_RX_CTL,
2538			  FCT_RX_CTL_EN_(rx->channel_number));
2539	return 0;
2540
2541napi_delete:
2542	netif_napi_del(&rx->napi);
2543	lan743x_rx_ring_cleanup(rx);
2544
2545return_error:
2546	return ret;
2547}
2548
2549static int lan743x_netdev_close(struct net_device *netdev)
2550{
2551	struct lan743x_adapter *adapter = netdev_priv(netdev);
2552	int index;
2553
2554	lan743x_tx_close(&adapter->tx[0]);
2555
2556	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
2557		lan743x_rx_close(&adapter->rx[index]);
2558
2559	lan743x_ptp_close(adapter);
2560
2561	lan743x_phy_close(adapter);
2562
2563	lan743x_mac_close(adapter);
2564
2565	lan743x_intr_close(adapter);
2566
2567	return 0;
2568}
2569
2570static int lan743x_netdev_open(struct net_device *netdev)
2571{
2572	struct lan743x_adapter *adapter = netdev_priv(netdev);
2573	int index;
2574	int ret;
2575
2576	ret = lan743x_intr_open(adapter);
2577	if (ret)
2578		goto return_error;
2579
2580	ret = lan743x_mac_open(adapter);
2581	if (ret)
2582		goto close_intr;
2583
2584	ret = lan743x_phy_open(adapter);
2585	if (ret)
2586		goto close_mac;
2587
2588	ret = lan743x_ptp_open(adapter);
2589	if (ret)
2590		goto close_phy;
2591
2592	lan743x_rfe_open(adapter);
2593
2594	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
2595		ret = lan743x_rx_open(&adapter->rx[index]);
2596		if (ret)
2597			goto close_rx;
2598	}
2599
2600	ret = lan743x_tx_open(&adapter->tx[0]);
2601	if (ret)
2602		goto close_rx;
2603
2604	return 0;
2605
2606close_rx:
2607	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
2608		if (adapter->rx[index].ring_cpu_ptr)
2609			lan743x_rx_close(&adapter->rx[index]);
2610	}
2611	lan743x_ptp_close(adapter);
2612
2613close_phy:
2614	lan743x_phy_close(adapter);
2615
2616close_mac:
2617	lan743x_mac_close(adapter);
2618
2619close_intr:
2620	lan743x_intr_close(adapter);
2621
2622return_error:
2623	netif_warn(adapter, ifup, adapter->netdev,
2624		   "Error opening LAN743x\n");
2625	return ret;
2626}
2627
2628static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
2629					     struct net_device *netdev)
2630{
2631	struct lan743x_adapter *adapter = netdev_priv(netdev);
2632
2633	return lan743x_tx_xmit_frame(&adapter->tx[0], skb);
2634}
2635
2636static int lan743x_netdev_ioctl(struct net_device *netdev,
2637				struct ifreq *ifr, int cmd)
2638{
2639	if (!netif_running(netdev))
2640		return -EINVAL;
2641	if (cmd == SIOCSHWTSTAMP)
2642		return lan743x_ptp_ioctl(netdev, ifr, cmd);
2643	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
2644}
2645
2646static void lan743x_netdev_set_multicast(struct net_device *netdev)
2647{
2648	struct lan743x_adapter *adapter = netdev_priv(netdev);
2649
2650	lan743x_rfe_set_multicast(adapter);
2651}
2652
2653static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu)
2654{
2655	struct lan743x_adapter *adapter = netdev_priv(netdev);
2656	int ret = 0;
2657
2658	ret = lan743x_mac_set_mtu(adapter, new_mtu);
2659	if (!ret)
2660		netdev->mtu = new_mtu;
2661	return ret;
2662}
2663
2664static void lan743x_netdev_get_stats64(struct net_device *netdev,
2665				       struct rtnl_link_stats64 *stats)
2666{
2667	struct lan743x_adapter *adapter = netdev_priv(netdev);
2668
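	/* statistics are read directly from the hardware counters on each call */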
2669	stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES);
2670	stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES);
2671	stats->rx_bytes = lan743x_csr_read(adapter,
2672					   STAT_RX_UNICAST_BYTE_COUNT) +
2673			  lan743x_csr_read(adapter,
2674					   STAT_RX_BROADCAST_BYTE_COUNT) +
2675			  lan743x_csr_read(adapter,
2676					   STAT_RX_MULTICAST_BYTE_COUNT);
2677	stats->tx_bytes = lan743x_csr_read(adapter,
2678					   STAT_TX_UNICAST_BYTE_COUNT) +
2679			  lan743x_csr_read(adapter,
2680					   STAT_TX_BROADCAST_BYTE_COUNT) +
2681			  lan743x_csr_read(adapter,
2682					   STAT_TX_MULTICAST_BYTE_COUNT);
2683	stats->rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) +
2684			   lan743x_csr_read(adapter,
2685					    STAT_RX_ALIGNMENT_ERRORS) +
2686			   lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) +
2687			   lan743x_csr_read(adapter,
2688					    STAT_RX_UNDERSIZE_FRAME_ERRORS) +
2689			   lan743x_csr_read(adapter,
2690					    STAT_RX_OVERSIZE_FRAME_ERRORS);
2691	stats->tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) +
2692			   lan743x_csr_read(adapter,
2693					    STAT_TX_EXCESS_DEFERRAL_ERRORS) +
2694			   lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS);
2695	stats->rx_dropped = lan743x_csr_read(adapter,
2696					     STAT_RX_DROPPED_FRAMES);
2697	stats->tx_dropped = lan743x_csr_read(adapter,
2698					     STAT_TX_EXCESSIVE_COLLISION);
2699	stats->multicast = lan743x_csr_read(adapter,
2700					    STAT_RX_MULTICAST_FRAMES) +
2701			   lan743x_csr_read(adapter,
2702					    STAT_TX_MULTICAST_FRAMES);
2703	stats->collisions = lan743x_csr_read(adapter,
2704					     STAT_TX_SINGLE_COLLISIONS) +
2705			    lan743x_csr_read(adapter,
2706					     STAT_TX_MULTIPLE_COLLISIONS) +
2707			    lan743x_csr_read(adapter,
2708					     STAT_TX_LATE_COLLISIONS);
2709}
2710
2711static int lan743x_netdev_set_mac_address(struct net_device *netdev,
2712					  void *addr)
2713{
2714	struct lan743x_adapter *adapter = netdev_priv(netdev);
2715	struct sockaddr *sock_addr = addr;
2716	int ret;
2717
2718	ret = eth_prepare_mac_addr_change(netdev, sock_addr);
2719	if (ret)
2720		return ret;
2721	ether_addr_copy(netdev->dev_addr, sock_addr->sa_data);
2722	lan743x_mac_set_address(adapter, sock_addr->sa_data);
2723	lan743x_rfe_update_mac_address(adapter);
2724	return 0;
2725}
2726
2727static const struct net_device_ops lan743x_netdev_ops = {
2728	.ndo_open		= lan743x_netdev_open,
2729	.ndo_stop		= lan743x_netdev_close,
2730	.ndo_start_xmit		= lan743x_netdev_xmit_frame,
2731	.ndo_do_ioctl		= lan743x_netdev_ioctl,
2732	.ndo_set_rx_mode	= lan743x_netdev_set_multicast,
2733	.ndo_change_mtu		= lan743x_netdev_change_mtu,
2734	.ndo_get_stats64	= lan743x_netdev_get_stats64,
2735	.ndo_set_mac_address	= lan743x_netdev_set_mac_address,
2736};
2737
2738static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
2739{
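	/* mask all interrupt sources */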
2740	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
2741}
2742
2743static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
2744{
2745	mdiobus_unregister(adapter->mdiobus);
2746}
2747
2748static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
2749{
2750	unregister_netdev(adapter->netdev);
2751
2752	lan743x_mdiobus_cleanup(adapter);
2753	lan743x_hardware_cleanup(adapter);
2754	lan743x_pci_cleanup(adapter);
2755}
2756
2757static int lan743x_hardware_init(struct lan743x_adapter *adapter,
2758				 struct pci_dev *pdev)
2759{
2760	struct lan743x_tx *tx;
2761	int index;
2762	int ret;
2763
2764	adapter->intr.irq = adapter->pdev->irq;
2765	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
2766
2767	ret = lan743x_gpio_init(adapter);
2768	if (ret)
2769		return ret;
2770
2771	ret = lan743x_mac_init(adapter);
2772	if (ret)
2773		return ret;
2774
2775	ret = lan743x_phy_init(adapter);
2776	if (ret)
2777		return ret;
2778
2779	ret = lan743x_ptp_init(adapter);
2780	if (ret)
2781		return ret;
2782
2783	lan743x_rfe_update_mac_address(adapter);
2784
2785	ret = lan743x_dmac_init(adapter);
2786	if (ret)
2787		return ret;
2788
2789	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
2790		adapter->rx[index].adapter = adapter;
2791		adapter->rx[index].channel_number = index;
2792	}
2793
2794	tx = &adapter->tx[0];
2795	tx->adapter = adapter;
2796	tx->channel_number = 0;
2797	spin_lock_init(&tx->ring_lock);
2798	return 0;
2799}
2800
2801static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
2802{
2803	int ret;
2804
2805	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
2806	if (!(adapter->mdiobus)) {
2807		ret = -ENOMEM;
2808		goto return_error;
2809	}
2810
2811	adapter->mdiobus->priv = (void *)adapter;
2812	adapter->mdiobus->read = lan743x_mdiobus_read;
2813	adapter->mdiobus->write = lan743x_mdiobus_write;
2814	adapter->mdiobus->name = "lan743x-mdiobus";
2815	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
2816		 "pci-%s", pci_name(adapter->pdev));
2817
2818	if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
2819		/* LAN7430 uses internal phy at address 1 */
2820		adapter->mdiobus->phy_mask = ~(u32)BIT(1);
2821
2822	/* register mdiobus */
2823	ret = mdiobus_register(adapter->mdiobus);
2824	if (ret < 0)
2825		goto return_error;
2826	return 0;
2827
2828return_error:
2829	return ret;
2830}
2831
2832/* lan743x_pcidev_probe - Device Initialization Routine
2833 * @pdev: PCI device information struct
2834 * @id: entry in lan743x_pcidev_tbl
2835 *
2836 * Returns 0 on success, negative on failure
2837 *
2838 * Initializes an adapter identified by a pci_dev structure.
2839 * The OS initialization, configuring of the adapter private structure,
2840 * and a hardware reset occur.
2841 **/
2842static int lan743x_pcidev_probe(struct pci_dev *pdev,
2843				const struct pci_device_id *id)
2844{
2845	struct lan743x_adapter *adapter = NULL;
2846	struct net_device *netdev = NULL;
2847	const void *mac_addr;
2848	int ret = -ENODEV;
2849
2850	netdev = devm_alloc_etherdev(&pdev->dev,
2851				     sizeof(struct lan743x_adapter));
2852	if (!netdev)
2853		goto return_error;
2854
2855	SET_NETDEV_DEV(netdev, &pdev->dev);
2856	pci_set_drvdata(pdev, netdev);
2857	adapter = netdev_priv(netdev);
2858	adapter->netdev = netdev;
2859	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
2860			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
2861			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
2862	netdev->max_mtu = LAN743X_MAX_FRAME_SIZE;
2863
2864	mac_addr = of_get_mac_address(pdev->dev.of_node);
2865	if (!IS_ERR(mac_addr))
2866		ether_addr_copy(adapter->mac_address, mac_addr);
2867
2868	ret = lan743x_pci_init(adapter, pdev);
2869	if (ret)
2870		goto return_error;
2871
2872	ret = lan743x_csr_init(adapter);
2873	if (ret)
2874		goto cleanup_pci;
2875
2876	ret = lan743x_hardware_init(adapter, pdev);
2877	if (ret)
2878		goto cleanup_pci;
2879
2880	ret = lan743x_mdiobus_init(adapter);
2881	if (ret)
2882		goto cleanup_hardware;
2883
2884	adapter->netdev->netdev_ops = &lan743x_netdev_ops;
2885	adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
2886	adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2887	adapter->netdev->hw_features = adapter->netdev->features;
2888
2889	/* carrier off reporting is important to ethtool even BEFORE open */
2890	netif_carrier_off(netdev);
2891
2892	ret = register_netdev(adapter->netdev);
2893	if (ret < 0)
2894		goto cleanup_mdiobus;
2895	return 0;
2896
2897cleanup_mdiobus:
2898	lan743x_mdiobus_cleanup(adapter);
2899
2900cleanup_hardware:
2901	lan743x_hardware_cleanup(adapter);
2902
2903cleanup_pci:
2904	lan743x_pci_cleanup(adapter);
2905
2906return_error:
2907	pr_warn("Initialization failed\n");
2908	return ret;
2909}
2910
2911/**
2912 * lan743x_pcidev_remove - Device Removal Routine
2913 * @pdev: PCI device information struct
2914 *
2915 * This is called by the PCI subsystem to alert the driver
2916 * that it should release a PCI device.  This could be caused by a
2917 * Hot-Plug event, or because the driver is going to be removed from
2918 * memory.
2919 **/
2920static void lan743x_pcidev_remove(struct pci_dev *pdev)
2921{
2922	struct net_device *netdev = pci_get_drvdata(pdev);
2923	struct lan743x_adapter *adapter = netdev_priv(netdev);
2924
2925	lan743x_full_cleanup(adapter);
2926}
2927
2928static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
2929{
2930	struct net_device *netdev = pci_get_drvdata(pdev);
2931	struct lan743x_adapter *adapter = netdev_priv(netdev);
2932
2933	rtnl_lock();
2934	netif_device_detach(netdev);
2935
2936	/* close the netdev only if it is running.
2937	 * For example, it is still running when the system suspends via
2938	 * pm-suspend, but not when it suspends from the desktop suspend menu.
2939	 */
2940	if (netif_running(netdev))
2941		lan743x_netdev_close(netdev);
2942	rtnl_unlock();
2943
2944#ifdef CONFIG_PM
2945	pci_save_state(pdev);
2946#endif
2947
2948	/* clean up lan743x portion */
2949	lan743x_hardware_cleanup(adapter);
2950}
2951
2952#ifdef CONFIG_PM_SLEEP
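/* compute the bit-reversed CRC-16 expected by the wake-up frame filters */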
2953static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
2954{
2955	return bitrev16(crc16(0xFFFF, buf, len));
2956}
2957
2958static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
2959{
2960	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
2961	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
2962	const u8 arp_type[2] = { 0x08, 0x06 };
2963	int mask_index;
2964	u32 pmtctl;
2965	u32 wucsr;
2966	u32 macrx;
2967	u16 crc;
2968
2969	for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++)
2970		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0);
2971
2972	/* clear wake settings */
2973	pmtctl = lan743x_csr_read(adapter, PMT_CTL);
2974	pmtctl |= PMT_CTL_WUPS_MASK_;
2975	pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
2976		PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
2977		PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
2978
2979	macrx = lan743x_csr_read(adapter, MAC_RX);
2980
2981	wucsr = 0;
2982	mask_index = 0;
2983
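	/* keep the PHY powered in D3/D3cold so it can still detect wake events */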
2984	pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
2985
2986	if (adapter->wolopts & WAKE_PHY) {
2987		pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
2988		pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
2989	}
2990	if (adapter->wolopts & WAKE_MAGIC) {
2991		wucsr |= MAC_WUCSR_MPEN_;
2992		macrx |= MAC_RX_RXEN_;
2993		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
2994	}
2995	if (adapter->wolopts & WAKE_UCAST) {
2996		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_;
2997		macrx |= MAC_RX_RXEN_;
2998		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
2999		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3000	}
3001	if (adapter->wolopts & WAKE_BCAST) {
3002		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_;
3003		macrx |= MAC_RX_RXEN_;
3004		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3005		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3006	}
3007	if (adapter->wolopts & WAKE_MCAST) {
3008		/* IPv4 multicast */
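		/* match the first three bytes of the destination MAC address */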
3009		crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3);
3010		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
3011				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
3012				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
3013				  (crc & MAC_WUF_CFG_CRC16_MASK_));
3014		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7);
3015		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
3016		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
3017		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
3018		mask_index++;
3019
3020		/* IPv6 multicast */
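		/* match the first two bytes of the destination MAC address */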
3021		crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2);
3022		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
3023				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
3024				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
3025				  (crc & MAC_WUF_CFG_CRC16_MASK_));
3026		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3);
3027		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
3028		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
3029		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
3030		mask_index++;
3031
3032		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
3033		macrx |= MAC_RX_RXEN_;
3034		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3035		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3036	}
3037	if (adapter->wolopts & WAKE_ARP) {
3038		/* set MAC_WUF_CFG & WUF_MASK
3039		 * for packettype (offset 12,13) = ARP (0x0806)
3040		 */
3041		crc = lan743x_pm_wakeframe_crc16(arp_type, 2);
3042		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
3043				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ |
3044				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
3045				  (crc & MAC_WUF_CFG_CRC16_MASK_));
3046		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000);
3047		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
3048		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
3049		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
3050		mask_index++;
3051
3052		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
3053		macrx |= MAC_RX_RXEN_;
3054		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3055		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3056	}
3057
3058	lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
3059	lan743x_csr_write(adapter, PMT_CTL, pmtctl);
3060	lan743x_csr_write(adapter, MAC_RX, macrx);
3061}
3062
3063static int lan743x_pm_suspend(struct device *dev)
3064{
3065	struct pci_dev *pdev = to_pci_dev(dev);
3066	struct net_device *netdev = pci_get_drvdata(pdev);
3067	struct lan743x_adapter *adapter = netdev_priv(netdev);
3068
3069	lan743x_pcidev_shutdown(pdev);
3070
3071	/* clear all wakes */
3072	lan743x_csr_write(adapter, MAC_WUCSR, 0);
3073	lan743x_csr_write(adapter, MAC_WUCSR2, 0);
3074	lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
3075
3076	if (adapter->wolopts)
3077		lan743x_pm_set_wol(adapter);
3078
3079	/* Host sets PME_En and puts the device into D3hot */
3080	return pci_prepare_to_sleep(pdev);
3081}
3082
3083static int lan743x_pm_resume(struct device *dev)
3084{
3085	struct pci_dev *pdev = to_pci_dev(dev);
3086	struct net_device *netdev = pci_get_drvdata(pdev);
3087	struct lan743x_adapter *adapter = netdev_priv(netdev);
3088	int ret;
3089
3090	pci_set_power_state(pdev, PCI_D0);
3091	pci_restore_state(pdev);
3092	pci_save_state(pdev);
3093
3094	ret = lan743x_hardware_init(adapter, pdev);
3095	if (ret) {
3096		netif_err(adapter, probe, adapter->netdev,
3097			  "lan743x_hardware_init returned %d\n", ret);
3098		lan743x_pci_cleanup(adapter);
3099		return ret;
3100	}
3101
3102	/* reopen the netdev only if it was running before suspend.
3103	 * For example, it was running when the system suspended via
3104	 * pm-suspend, but not when it suspended from the desktop suspend menu.
3105	 */
3106	if (netif_running(netdev))
3107		lan743x_netdev_open(netdev);
3108
3109	netif_device_attach(netdev);
3110
3111	return 0;
3112}
3113
3114static const struct dev_pm_ops lan743x_pm_ops = {
3115	SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
3116};
3117#endif /* CONFIG_PM_SLEEP */
3118
3119static const struct pci_device_id lan743x_pcidev_tbl[] = {
3120	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
3121	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
3122	{ 0, }
3123};
3124
3125MODULE_DEVICE_TABLE(pci, lan743x_pcidev_tbl);
3126
3127static struct pci_driver lan743x_pcidev_driver = {
3128	.name     = DRIVER_NAME,
3129	.id_table = lan743x_pcidev_tbl,
3130	.probe    = lan743x_pcidev_probe,
3131	.remove   = lan743x_pcidev_remove,
3132#ifdef CONFIG_PM_SLEEP
3133	.driver.pm = &lan743x_pm_ops,
3134#endif
3135	.shutdown = lan743x_pcidev_shutdown,
3136};
3137
3138module_pci_driver(lan743x_pcidev_driver);
3139
3140MODULE_AUTHOR(DRIVER_AUTHOR);
3141MODULE_DESCRIPTION(DRIVER_DESC);
3142MODULE_LICENSE("GPL");
3143