// SPDX-License-Identifier: GPL-2.0-only
/*
 *  FUJITSU Extended Socket Network Device driver
 *  Copyright (c) 2015 FUJITSU LIMITED
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/nls.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>

#include "fjes.h"
#include "fjes_trace.h"

#define MAJ 1
#define MIN 2
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
#define DRV_NAME	"fjes"
char fjes_driver_name[] = DRV_NAME;
char fjes_driver_version[] = DRV_VERSION;
static const char fjes_driver_string[] =
		"FUJITSU Extended Socket Network Device Driver";
static const char fjes_copyright[] =
		"Copyright (c) 2015 FUJITSU LIMITED";

MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"

static int fjes_request_irq(struct fjes_adapter *);
static void fjes_free_irq(struct fjes_adapter *);

static int fjes_open(struct net_device *);
static int fjes_close(struct net_device *);
static int fjes_setup_resources(struct fjes_adapter *);
static void fjes_free_resources(struct fjes_adapter *);
static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *);
static void fjes_raise_intr_rxdata_task(struct work_struct *);
static void fjes_tx_stall_task(struct work_struct *);
static void fjes_force_close_task(struct work_struct *);
static irqreturn_t fjes_intr(int, void *);
static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
static int fjes_change_mtu(struct net_device *, int);
static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
static void fjes_tx_retry(struct net_device *, unsigned int txqueue);

static int fjes_acpi_add(struct acpi_device *);
static int fjes_acpi_remove(struct acpi_device *);
static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void *);

static int fjes_probe(struct platform_device *);
static int fjes_remove(struct platform_device *);

static int fjes_sw_init(struct fjes_adapter *);
static void fjes_netdev_setup(struct net_device *);
static void fjes_irq_watch_task(struct work_struct *);
static void fjes_watch_unshare_task(struct work_struct *);
static void fjes_rx_irq(struct fjes_adapter *, int);
static int fjes_poll(struct napi_struct *, int);

static const struct acpi_device_id fjes_acpi_ids[] = {
	{ACPI_MOTHERBOARD_RESOURCE_HID, 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);

static struct acpi_driver fjes_acpi_driver = {
	.name = DRV_NAME,
	.class = DRV_NAME,
	.owner = THIS_MODULE,
	.ids = fjes_acpi_ids,
	.ops = {
		.add = fjes_acpi_add,
		.remove = fjes_acpi_remove,
	},
};

static struct platform_driver fjes_driver = {
	.driver = {
		.name = DRV_NAME,
	},
	.probe = fjes_probe,
	.remove = fjes_remove,
};

static struct resource fjes_resource[] = {
	{
		.flags = IORESOURCE_MEM,
		.start = 0,
		.end = 0,
	},
	{
		.flags = IORESOURCE_IRQ,
		.start = 0,
		.end = 0,
	},
};

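/* is_extended_socket_device - check the ACPI _STR object to confirm that
 * this PNP0C02 node is the FUJITSU Extended Socket device and not some
 * other motherboard resource.
 */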
static bool is_extended_socket_device(struct acpi_device *device)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
	char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
	union acpi_object *str;
	acpi_status status;
	int result;

	status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return false;

	str = buffer.pointer;
	result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
				 str->string.length, UTF16_LITTLE_ENDIAN,
				 str_buf, sizeof(str_buf) - 1);
	str_buf[result] = 0;

	if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
		kfree(buffer.pointer);
		return false;
	}
	kfree(buffer.pointer);

	return true;
}

static int acpi_check_extended_socket_status(struct acpi_device *device)
{
	unsigned long long sta;
	acpi_status status;

	status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
	      (sta & ACPI_STA_DEVICE_ENABLED) &&
	      (sta & ACPI_STA_DEVICE_UI) &&
	      (sta & ACPI_STA_DEVICE_FUNCTIONING)))
		return -ENODEV;

	return 0;
}

static int fjes_acpi_add(struct acpi_device *device)
{
	struct platform_device *plat_dev;
	acpi_status status;

	if (!is_extended_socket_device(device))
		return -ENODEV;

	if (acpi_check_extended_socket_status(device))
		return -ENODEV;

	status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     fjes_get_acpi_resource, fjes_resource);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	/* create platform_device */
	plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
						   ARRAY_SIZE(fjes_resource));
	if (IS_ERR(plat_dev))
		return PTR_ERR(plat_dev);

	device->driver_data = plat_dev;

	return 0;
}

static int fjes_acpi_remove(struct acpi_device *device)
{
	struct platform_device *plat_dev;

	plat_dev = (struct platform_device *)acpi_driver_data(device);
	platform_device_unregister(plat_dev);

	return 0;
}

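/* fjes_get_acpi_resource - _CRS walk callback that records the device's
 * 32bit memory window in fjes_resource[0] and its single IRQ in
 * fjes_resource[1].
 */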
static acpi_status
fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
{
	struct acpi_resource_address32 *addr;
	struct acpi_resource_irq *irq;
	struct resource *res = data;

	switch (acpi_res->type) {
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		addr = &acpi_res->data.address32;
		res[0].start = addr->address.minimum;
		res[0].end = addr->address.minimum +
			addr->address.address_length - 1;
		break;

	case ACPI_RESOURCE_TYPE_IRQ:
		irq = &acpi_res->data.irq;
		if (irq->interrupt_count != 1)
			return AE_ERROR;
		res[1].start = irq->interrupts[0];
		res[1].end = irq->interrupts[0];
		break;

	default:
		break;
	}

	return AE_OK;
}

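/* fjes_request_irq - arm the IRQ watch work and register the shared
 * interrupt handler if it is not registered yet.
 */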
static int fjes_request_irq(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int result = -1;

	adapter->interrupt_watch_enable = true;
	if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
		queue_delayed_work(adapter->control_wq,
				   &adapter->interrupt_watch_task,
				   FJES_IRQ_WATCH_DELAY);
	}

	if (!adapter->irq_registered) {
		result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
				     IRQF_SHARED, netdev->name, adapter);
		if (result)
			adapter->irq_registered = false;
		else
			adapter->irq_registered = true;
	}

	return result;
}

static void fjes_free_irq(struct fjes_adapter *adapter)
{
	struct fjes_hw *hw = &adapter->hw;

	adapter->interrupt_watch_enable = false;
	cancel_delayed_work_sync(&adapter->interrupt_watch_task);

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);

	if (adapter->irq_registered) {
		free_irq(adapter->hw.hw_res.irq, adapter);
		adapter->irq_registered = false;
	}
}

static const struct net_device_ops fjes_netdev_ops = {
	.ndo_open		= fjes_open,
	.ndo_stop		= fjes_close,
	.ndo_start_xmit		= fjes_xmit_frame,
	.ndo_get_stats64	= fjes_get_stats64,
	.ndo_change_mtu		= fjes_change_mtu,
	.ndo_tx_timeout		= fjes_tx_retry,
	.ndo_vlan_rx_add_vid	= fjes_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= fjes_vlan_rx_kill_vid,
};

/* fjes_open - Called when a network interface is made active */
static int fjes_open(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	int result;

	if (adapter->open_guard)
		return -ENXIO;

	result = fjes_setup_resources(adapter);
	if (result)
		goto err_setup_res;

	hw->txrx_stop_req_bit = 0;
	hw->epstop_req_bit = 0;

	napi_enable(&adapter->napi);

	fjes_hw_capture_interrupt_status(hw);

	result = fjes_request_irq(adapter);
	if (result)
		goto err_req_irq;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);

	netif_tx_start_all_queues(netdev);
	netif_carrier_on(netdev);

	return 0;

err_req_irq:
	fjes_free_irq(adapter);
	napi_disable(&adapter->napi);

err_setup_res:
	fjes_free_resources(adapter);
	return result;
}

/* fjes_close - Disables a network interface */
static int fjes_close(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int epidx;

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	fjes_hw_raise_epstop(hw);

	napi_disable(&adapter->napi);

	spin_lock_irqsave(&hw->rx_status_lock, flags);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status &=
				~FJES_RX_POLL_WORK;
	}
	spin_unlock_irqrestore(&hw->rx_status_lock, flags);

	fjes_free_irq(adapter);

	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	adapter->unshare_watch_bitmask = 0;
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);

	cancel_work_sync(&hw->update_zone_task);
	cancel_work_sync(&hw->epstop_task);

	fjes_hw_wait_epstop(hw);

	fjes_free_resources(adapter);

	return 0;
}

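/* fjes_setup_resources - query zoning information from the device, notify
 * the other EPs of the zone update, then set up and register the shared
 * TX buffer of every partner EP in the same zone.
 */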
static int fjes_setup_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int result;
	int epidx;

	mutex_lock(&hw->hw_info.lock);
	result = fjes_hw_request_info(hw);
	switch (result) {
	case 0:
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			hw->ep_shm_info[epidx].es_status =
			    hw->hw_info.res_buf->info.info[epidx].es_status;
			hw->ep_shm_info[epidx].zone =
			    hw->hw_info.res_buf->info.info[epidx].zone;
		}
		break;
	default:
	case -ENOMSG:
	case -EBUSY:
		adapter->force_reset = true;

		mutex_unlock(&hw->hw_info.lock);
		return result;
	}
	mutex_unlock(&hw->hw_info.lock);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if ((epidx != hw->my_epid) &&
		    (hw->ep_shm_info[epidx].es_status ==
		     FJES_ZONING_STATUS_ENABLE)) {
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_INFO_UPDATE);
			hw->ep_shm_info[epidx].ep_stats
				.send_intr_zoneupdate += 1;
		}
	}

	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if (epidx == hw->my_epid)
			continue;

		buf_pair = &hw->ep_shm_info[epidx];

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
				    netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
			mutex_lock(&hw->hw_info.lock);
			result =
			fjes_hw_register_buff_addr(hw, epidx, buf_pair);
			mutex_unlock(&hw->hw_info.lock);

			switch (result) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				adapter->force_reset = true;
				return result;
			}

			hw->ep_shm_info[epidx].ep_stats
				.com_regist_buf_exec += 1;
		}
	}

	return 0;
}

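/* fjes_free_resources - unregister the shared buffers of all partner EPs
 * and, if anything failed or a reset was forced, reset the device and
 * reinitialize the command registers.
 */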
static void fjes_free_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct fjes_device_command_param param;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	bool reset_flag = false;
	unsigned long flags;
	int result;
	int epidx;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		mutex_lock(&hw->hw_info.lock);
		result = fjes_hw_unregister_buff_addr(hw, epidx);
		mutex_unlock(&hw->hw_info.lock);

		hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;

		if (result)
			reset_flag = true;

		buf_pair = &hw->ep_shm_info[epidx];

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx,
				    netdev->dev_addr, netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		clear_bit(epidx, &hw->txrx_stop_req_bit);
	}

	if (reset_flag || adapter->force_reset) {
		result = fjes_hw_reset(hw);

		adapter->force_reset = false;

		if (result)
			adapter->open_guard = true;

		hw->hw_info.buffer_share_bit = 0;

		memset((void *)&param, 0, sizeof(param));

		param.req_len = hw->hw_info.req_buf_size;
		param.req_start = __pa(hw->hw_info.req_buf);
		param.res_len = hw->hw_info.res_buf_size;
		param.res_start = __pa(hw->hw_info.res_buf);
		param.share_start = __pa(hw->hw_info.share->ep_status);

		fjes_hw_init_command_registers(hw, &param);
	}
}

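/* fjes_tx_stall_task - wake the TX queue once every sendable partner EP
 * has room in its ring again, or immediately if the stall timeout has
 * expired; otherwise requeue itself.
 */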
static void fjes_tx_stall_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, tx_stall_task);
	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;
	int all_queue_available, sendable;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;
	union ep_buffer_info *info;
	int i;

	if (((long)jiffies -
		dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
		netif_wake_queue(netdev);
		return;
	}

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (i = 0; i < 5; i++) {
		all_queue_available = 1;

		for (epid = 0; epid < max_epid; epid++) {
			if (my_epid == epid)
				continue;

			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
			sendable = (pstatus == EP_PARTNER_SHARED);
			if (!sendable)
				continue;

			info = adapter->hw.ep_shm_info[epid].tx.info;

			if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
				return;

			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
					 info->v1i.count_max)) {
				all_queue_available = 0;
				break;
			}
		}

		if (all_queue_available) {
			netif_wake_queue(netdev);
			return;
		}
	}

	usleep_range(50, 100);

	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
}

static void fjes_force_close_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, force_close_task);
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	dev_close(netdev);
	rtnl_unlock();
}

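/* fjes_raise_intr_rxdata_task - for every shared partner EP with a pending
 * delayed send, clear the pending flag and raise an RX_DATA interrupt
 * unless the peer is already polling.
 */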
static void fjes_raise_intr_rxdata_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, raise_intr_rxdata_task);
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (epid = 0; epid < max_epid; epid++)
		hw->ep_shm_info[epid].tx_status_work = 0;

	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if (pstatus == EP_PARTNER_SHARED) {
			hw->ep_shm_info[epid].tx_status_work =
				hw->ep_shm_info[epid].tx.info->v1i.tx_status;

			if (hw->ep_shm_info[epid].tx_status_work ==
				FJES_TX_DELAY_SEND_PENDING) {
				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
					FJES_TX_DELAY_SEND_NONE;
			}
		}
	}

	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if ((hw->ep_shm_info[epid].tx_status_work ==
		     FJES_TX_DELAY_SEND_PENDING) &&
		    (pstatus == EP_PARTNER_SHARED) &&
		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
		      FJES_RX_POLL_WORK)) {
			fjes_hw_raise_interrupt(hw, epid,
						REG_ICTL_MASK_RX_DATA);
			hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
		}
	}

	usleep_range(500, 1000);
}

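/* fjes_tx_send - copy one frame into the destination EP's TX ring and
 * schedule the deferred work that raises the RX_DATA interrupt on the peer.
 */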
static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
			void *data, size_t len)
{
	int retval;

	retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
					   data, len);
	if (retval)
		return retval;

	adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
		FJES_TX_DELAY_SEND_PENDING;
	if (!work_pending(&adapter->raise_intr_rxdata_task))
		queue_work(adapter->txrx_wq,
			   &adapter->raise_intr_rxdata_task);

	retval = 0;
	return retval;
}

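/* fjes_xmit_frame - main transmit path: resolve the destination EP (or all
 * EPs for multicast), validate the peer's buffer version, MTU and VLAN ID,
 * then hand the frame to fjes_tx_send(). Returns NETDEV_TX_BUSY only while
 * a unicast peer's ring is full and the retry timeout has not yet expired.
 */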
static netdev_tx_t
fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	int max_epid, my_epid, dest_epid;
	enum ep_partner_status pstatus;
	struct netdev_queue *cur_queue;
	char shortpkt[VLAN_ETH_HLEN];
	bool is_multi, vlan;
	struct ethhdr *eth;
	u16 queue_no = 0;
	u16 vlan_id = 0;
	netdev_tx_t ret;
	char *data;
	int len;

	ret = NETDEV_TX_OK;
	is_multi = false;
	cur_queue = netdev_get_tx_queue(netdev, queue_no);

	eth = (struct ethhdr *)skb->data;
	my_epid = hw->my_epid;

	vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;

	data = skb->data;
	len = skb->len;

	if (is_multicast_ether_addr(eth->h_dest)) {
		dest_epid = 0;
		max_epid = hw->max_epid;
		is_multi = true;
	} else if (is_local_ether_addr(eth->h_dest)) {
		dest_epid = eth->h_dest[ETH_ALEN - 1];
		max_epid = dest_epid + 1;

		if ((eth->h_dest[0] == 0x02) &&
		    (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
			      eth->h_dest[3] | eth->h_dest[4])) &&
		    (dest_epid < hw->max_epid)) {
			;
		} else {
			dest_epid = 0;
			max_epid = 0;
			ret = NETDEV_TX_OK;

			adapter->stats64.tx_packets += 1;
			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
			adapter->stats64.tx_bytes += len;
			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
		}
	} else {
		dest_epid = 0;
		max_epid = 0;
		ret = NETDEV_TX_OK;

		adapter->stats64.tx_packets += 1;
		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
		adapter->stats64.tx_bytes += len;
		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
	}

	for (; dest_epid < max_epid; dest_epid++) {
		if (my_epid == dest_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
		if (pstatus != EP_PARTNER_SHARED) {
			if (!is_multi)
				hw->ep_shm_info[dest_epid].ep_stats
					.tx_dropped_not_shared += 1;
			ret = NETDEV_TX_OK;
		} else if (!fjes_hw_check_epbuf_version(
				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
			/* version is NOT 0 */
			adapter->stats64.tx_carrier_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats
						.tx_carrier_errors += 1;
			hw->ep_shm_info[dest_epid].ep_stats
					.tx_dropped_ver_mismatch += 1;

			ret = NETDEV_TX_OK;
		} else if (!fjes_hw_check_mtu(
				&adapter->hw.ep_shm_info[dest_epid].rx,
				netdev->mtu)) {
			adapter->stats64.tx_dropped += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
			adapter->stats64.tx_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
			hw->ep_shm_info[dest_epid].ep_stats
					.tx_dropped_buf_size_mismatch += 1;

			ret = NETDEV_TX_OK;
		} else if (vlan &&
			   !fjes_hw_check_vlan_id(
				&adapter->hw.ep_shm_info[dest_epid].rx,
				vlan_id)) {
			hw->ep_shm_info[dest_epid].ep_stats
				.tx_dropped_vlanid_mismatch += 1;
			ret = NETDEV_TX_OK;
		} else {
			if (len < VLAN_ETH_HLEN) {
				memset(shortpkt, 0, VLAN_ETH_HLEN);
				memcpy(shortpkt, skb->data, skb->len);
				len = VLAN_ETH_HLEN;
				data = shortpkt;
			}

			if (adapter->tx_retry_count == 0) {
				adapter->tx_start_jiffies = jiffies;
				adapter->tx_retry_count = 1;
			} else {
				adapter->tx_retry_count++;
			}

			if (fjes_tx_send(adapter, dest_epid, data, len)) {
				if (is_multi) {
					ret = NETDEV_TX_OK;
				} else if (
					   ((long)jiffies -
					    (long)adapter->tx_start_jiffies) >=
					    FJES_TX_RETRY_TIMEOUT) {
					adapter->stats64.tx_fifo_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats
								.tx_fifo_errors += 1;
					adapter->stats64.tx_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats
								.tx_errors += 1;

					ret = NETDEV_TX_OK;
				} else {
					netif_trans_update(netdev);
					hw->ep_shm_info[dest_epid].ep_stats
						.tx_buffer_full += 1;
					netif_tx_stop_queue(cur_queue);

					if (!work_pending(&adapter->tx_stall_task))
						queue_work(adapter->txrx_wq,
							   &adapter->tx_stall_task);

					ret = NETDEV_TX_BUSY;
				}
			} else {
				if (!is_multi) {
					adapter->stats64.tx_packets += 1;
					hw->ep_shm_info[dest_epid].net_stats
								.tx_packets += 1;
					adapter->stats64.tx_bytes += len;
					hw->ep_shm_info[dest_epid].net_stats
								.tx_bytes += len;
				}

				adapter->tx_retry_count = 0;
				ret = NETDEV_TX_OK;
			}
		}
	}

	if (ret == NETDEV_TX_OK) {
		dev_kfree_skb(skb);
		if (is_multi) {
			adapter->stats64.tx_packets += 1;
			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
			adapter->stats64.tx_bytes += len;
			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
		}
	}

	return ret;
}

static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
{
	struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);

	netif_tx_wake_queue(queue);
}

static void
fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);

	memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
}

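/* fjes_change_mtu - round the requested MTU up to the next supported value,
 * quiesce the interface while the shared buffers are reinitialized with the
 * new size, then bring TX/RX back up.
 */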
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool running = netif_running(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int ret = -EINVAL;
	int idx, epidx;

	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
		if (new_mtu <= fjes_support_mtu[idx]) {
			new_mtu = fjes_support_mtu[idx];
			if (new_mtu == netdev->mtu)
				return 0;

			ret = 0;
			break;
		}
	}

	if (ret)
		return ret;

	if (running) {
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
				~FJES_RX_MTU_CHANGING_DONE;
		}
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
		cancel_work_sync(&adapter->tx_stall_task);
		napi_disable(&adapter->napi);

		msleep(1000);

		netif_tx_stop_all_queues(netdev);
	}

	netdev->mtu = new_mtu;

	if (running) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr,
					    netdev->mtu);

			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_MTU_CHANGING_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}

		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		napi_enable(&adapter->napi);
		napi_schedule(&adapter->napi);
	}

	return ret;
}

static int fjes_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool ret = true;
	int epid;

	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
		if (epid == adapter->hw.my_epid)
			continue;

		if (!fjes_hw_check_vlan_id(
			&adapter->hw.ep_shm_info[epid].tx, vid))
			ret = fjes_hw_set_vlan_id(
				&adapter->hw.ep_shm_info[epid].tx, vid);
	}

	return ret ? 0 : -ENOSPC;
}

static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	int epid;

	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
		if (epid == adapter->hw.my_epid)
			continue;

		fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
	}

	return 0;
}

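/* fjes_txrx_stop_req_irq - handle a TXRX stop request from src_epid: either
 * acknowledge the request and schedule the unshare watch work, or queue the
 * epstop work when the peer is still shared.
 */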
static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
				   int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
	switch (status) {
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		break;
	case EP_PARTNER_WAITING:
		if (src_epid < hw->my_epid) {
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(src_epid, &hw->txrx_stop_req_bit);
			set_bit(src_epid, &adapter->unshare_watch_bitmask);

			if (!work_pending(&adapter->unshare_watch_task))
				queue_work(adapter->control_wq,
					   &adapter->unshare_watch_task);
		}
		break;
	case EP_PARTNER_SHARED:
		if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
		    FJES_RX_STOP_REQ_REQUEST) {
			set_bit(src_epid, &hw->epstop_req_bit);
			if (!work_pending(&hw->epstop_task))
				queue_work(adapter->control_wq,
					   &hw->epstop_task);
		}
		break;
	}
	trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
}

static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_stop_req_irq_pre(hw, src_epid, status);
	switch (status) {
	case EP_PARTNER_WAITING:
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		clear_bit(src_epid, &hw->txrx_stop_req_bit);
		fallthrough;
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		set_bit(src_epid, &adapter->unshare_watch_bitmask);
		if (!work_pending(&adapter->unshare_watch_task))
			queue_work(adapter->control_wq,
				   &adapter->unshare_watch_task);
		break;
	case EP_PARTNER_SHARED:
		set_bit(src_epid, &hw->epstop_req_bit);

		if (!work_pending(&hw->epstop_task))
			queue_work(adapter->control_wq, &hw->epstop_task);
		break;
	}
	trace_fjes_stop_req_irq_post(hw, src_epid);
}

static void fjes_update_zone_irq(struct fjes_adapter *adapter,
				 int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;

	if (!work_pending(&hw->update_zone_task))
		queue_work(adapter->control_wq, &hw->update_zone_task);
}

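/* fjes_intr - interrupt handler: decode the interrupt cause register and
 * dispatch RX data, stop requests, unshare requests and zone updates.
 */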
static irqreturn_t fjes_intr(int irq, void *data)
{
	struct fjes_adapter *adapter = data;
	struct fjes_hw *hw = &adapter->hw;
	irqreturn_t ret;
	u32 icr;

	icr = fjes_hw_capture_interrupt_status(hw);

	if (icr & REG_IS_MASK_IS_ASSERT) {
		if (icr & REG_ICTL_MASK_RX_DATA) {
			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_rx += 1;
		}

		if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
			fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_stop += 1;
		}

		if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
			fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_unshare += 1;
		}

		if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
			fjes_hw_set_irqmask(hw,
					    REG_ICTL_MASK_TXRX_STOP_DONE, true);

		if (icr & REG_ICTL_MASK_INFO_UPDATE) {
			fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_zoneupdate += 1;
		}

		ret = IRQ_HANDLED;
	} else {
		ret = IRQ_NONE;
	}

	return ret;
}

static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
				     int start_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, cur_epid;
	int i;

	max_epid = hw->max_epid;
	start_epid = (start_epid + 1 + max_epid) % max_epid;

	for (i = 0; i < max_epid; i++) {
		cur_epid = (start_epid + i) % max_epid;
		if (cur_epid == hw->my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
		if (pstatus == EP_PARTNER_SHARED) {
			if (!fjes_hw_epbuf_rx_is_empty(
				&hw->ep_shm_info[cur_epid].rx))
				return cur_epid;
		}
	}
	return -1;
}

static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
			      int *cur_epid)
{
	void *frame;

	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
	if (*cur_epid < 0)
		return NULL;

	frame =
	fjes_hw_epbuf_rx_curpkt_get_addr(
		&adapter->hw.ep_shm_info[*cur_epid].rx, psize);

	return frame;
}

static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
{
	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
}

static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);

	adapter->unset_rx_last = true;
	napi_schedule(&adapter->napi);
}

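/* fjes_poll - NAPI poll: mark all shared partner EPs as being polled, drain
 * their RX rings round-robin up to the budget, and re-enable the RX_DATA
 * interrupt once the rings have stayed empty for a few jiffies.
 */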
static int fjes_poll(struct napi_struct *napi, int budget)
{
	struct fjes_adapter *adapter =
			container_of(napi, struct fjes_adapter, napi);
	struct net_device *netdev = napi->dev;
	struct fjes_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	int work_done = 0;
	int cur_epid = 0;
	int epidx;
	size_t frame_len;
	void *frame;

	spin_lock(&hw->rx_status_lock);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
	}
	spin_unlock(&hw->rx_status_lock);

	while (work_done < budget) {
		prefetch(&adapter->hw);
		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);

		if (frame) {
			skb = napi_alloc_skb(napi, frame_len);
			if (!skb) {
				adapter->stats64.rx_dropped += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_dropped += 1;
				adapter->stats64.rx_errors += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_errors += 1;
			} else {
				skb_put_data(skb, frame, frame_len);
				skb->protocol = eth_type_trans(skb, netdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				netif_receive_skb(skb);

				work_done++;

				adapter->stats64.rx_packets += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_packets += 1;
				adapter->stats64.rx_bytes += frame_len;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_bytes += frame_len;

				if (is_multicast_ether_addr(
					((struct ethhdr *)frame)->h_dest)) {
					adapter->stats64.multicast += 1;
					hw->ep_shm_info[cur_epid].net_stats
								 .multicast += 1;
				}
			}

			fjes_rxframe_release(adapter, cur_epid);
			adapter->unset_rx_last = true;
		} else {
			break;
		}
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		if (adapter->unset_rx_last) {
			adapter->rx_last_jiffies = jiffies;
			adapter->unset_rx_last = false;
		}

		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
			napi_reschedule(napi);
		} else {
			spin_lock(&hw->rx_status_lock);
			for (epidx = 0; epidx < hw->max_epid; epidx++) {
				if (epidx == hw->my_epid)
					continue;
				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
				    EP_PARTNER_SHARED)
					adapter->hw.ep_shm_info[epidx].tx
						   .info->v1i.rx_status &=
						~FJES_RX_POLL_WORK;
			}
			spin_unlock(&hw->rx_status_lock);

			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
		}
	}

	return work_done;
}

/* fjes_probe - Device Initialization Routine */
static int fjes_probe(struct platform_device *plat_dev)
{
	struct fjes_adapter *adapter;
	struct net_device *netdev;
	struct resource *res;
	struct fjes_hw *hw;
	int err;

	err = -ENOMEM;
	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
				 NET_NAME_UNKNOWN, fjes_netdev_setup,
				 FJES_MAX_QUEUES);

	if (!netdev)
		goto err_out;

	SET_NETDEV_DEV(netdev, &plat_dev->dev);

	dev_set_drvdata(&plat_dev->dev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->plat_dev = plat_dev;
	hw = &adapter->hw;
	hw->back = adapter;

	/* setup the private structure */
	err = fjes_sw_init(adapter);
	if (err)
		goto err_free_netdev;

	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
	adapter->force_reset = false;
	adapter->open_guard = false;

	adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
	if (unlikely(!adapter->txrx_wq)) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
					      WQ_MEM_RECLAIM, 0);
	if (unlikely(!adapter->control_wq)) {
		err = -ENOMEM;
		goto err_free_txrx_wq;
	}

	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
	INIT_WORK(&adapter->raise_intr_rxdata_task,
		  fjes_raise_intr_rxdata_task);
	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
	adapter->unshare_watch_bitmask = 0;

	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
	adapter->interrupt_watch_enable = false;

	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -EINVAL;
		goto err_free_control_wq;
	}
	hw->hw_res.start = res->start;
	hw->hw_res.size = resource_size(res);
	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
	if (hw->hw_res.irq < 0) {
		err = hw->hw_res.irq;
		goto err_free_control_wq;
	}

	err = fjes_hw_init(&adapter->hw);
	if (err)
		goto err_free_control_wq;

	/* setup MAC address (02:00:00:00:00:[epid])*/
	netdev->dev_addr[0] = 2;
	netdev->dev_addr[1] = 0;
	netdev->dev_addr[2] = 0;
	netdev->dev_addr[3] = 0;
	netdev->dev_addr[4] = 0;
	netdev->dev_addr[5] = hw->my_epid; /* EPID */

	err = register_netdev(netdev);
	if (err)
		goto err_hw_exit;

	netif_carrier_off(netdev);

	fjes_dbg_adapter_init(adapter);

	return 0;

err_hw_exit:
	fjes_hw_exit(&adapter->hw);
err_free_control_wq:
	destroy_workqueue(adapter->control_wq);
err_free_txrx_wq:
	destroy_workqueue(adapter->txrx_wq);
err_free_netdev:
	free_netdev(netdev);
err_out:
	return err;
}

/* fjes_remove - Device Removal Routine */
static int fjes_remove(struct platform_device *plat_dev)
{
	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	fjes_dbg_adapter_exit(adapter);

	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);
	if (adapter->control_wq)
		destroy_workqueue(adapter->control_wq);
	if (adapter->txrx_wq)
		destroy_workqueue(adapter->txrx_wq);

	unregister_netdev(netdev);

	fjes_hw_exit(hw);

	netif_napi_del(&adapter->napi);

	free_netdev(netdev);

	return 0;
}

static int fjes_sw_init(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);

	return 0;
}

/* fjes_netdev_setup - netdevice initialization routine */
static void fjes_netdev_setup(struct net_device *netdev)
{
	ether_setup(netdev);

	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
	netdev->netdev_ops = &fjes_netdev_ops;
	fjes_set_ethtool_ops(netdev);
	netdev->mtu = fjes_support_mtu[3];
	netdev->min_mtu = fjes_support_mtu[0];
	netdev->max_mtu = fjes_support_mtu[3];
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}

static void fjes_irq_watch_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(to_delayed_work(work),
			struct fjes_adapter, interrupt_watch_task);

	local_irq_disable();
	fjes_intr(adapter->hw.hw_res.irq, adapter);
	local_irq_enable();

	if (fjes_rxframe_search_exist(adapter, 0) >= 0)
		napi_schedule(&adapter->napi);

	if (adapter->interrupt_watch_enable) {
		if (!delayed_work_pending(&adapter->interrupt_watch_task))
			queue_delayed_work(adapter->control_wq,
					   &adapter->interrupt_watch_task,
					   FJES_IRQ_WATCH_DELAY);
	}
}

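/* fjes_watch_unshare_task - wait up to three seconds for partner EPs to
 * finish the stop handshake and unregister their shared buffers; schedule a
 * forced close if unregistering fails, and clean up any EPs still reserved
 * in a final pass.
 */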
static void fjes_watch_unshare_task(struct work_struct *work)
{
	struct fjes_adapter *adapter =
	container_of(work, struct fjes_adapter, unshare_watch_task);

	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;

	int unshare_watch, unshare_reserve;
	int max_epid, my_epid, epidx;
	int stop_req, stop_req_done;
	ulong unshare_watch_bitmask;
	unsigned long flags;
	int wait_time = 0;
	int is_shared;
	int ret;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
	adapter->unshare_watch_bitmask = 0;

	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
	       (wait_time < 3000)) {
		for (epidx = 0; epidx < max_epid; epidx++) {
			if (epidx == my_epid)
				continue;

			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
							   epidx);

			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);

			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
					FJES_RX_STOP_REQ_DONE;

			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);

			unshare_reserve = test_bit(epidx,
						   &hw->hw_info.buffer_unshare_reserve_bit);

			if ((!stop_req ||
			     (is_shared && (!is_shared || !stop_req_done))) &&
			    (is_shared || !unshare_watch || !unshare_reserve))
				continue;

			mutex_lock(&hw->hw_info.lock);
			ret = fjes_hw_unregister_buff_addr(hw, epidx);
			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				if (!work_pending(
					&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
						&adapter->force_close_task);
				}
				break;
			}
			mutex_unlock(&hw->hw_info.lock);
			hw->ep_shm_info[epidx].ep_stats
					.com_unregist_buf_exec += 1;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr, netdev->mtu);
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(epidx, &hw->txrx_stop_req_bit);
			clear_bit(epidx, &unshare_watch_bitmask);
			clear_bit(epidx,
				  &hw->hw_info.buffer_unshare_reserve_bit);
		}

		msleep(100);
		wait_time += 100;
	}

	if (hw->hw_info.buffer_unshare_reserve_bit) {
		for (epidx = 0; epidx < max_epid; epidx++) {
			if (epidx == my_epid)
				continue;

			if (test_bit(epidx,
				     &hw->hw_info.buffer_unshare_reserve_bit)) {
				mutex_lock(&hw->hw_info.lock);

				ret = fjes_hw_unregister_buff_addr(hw, epidx);
				switch (ret) {
				case 0:
					break;
				case -ENOMSG:
				case -EBUSY:
				default:
					if (!work_pending(
						&adapter->force_close_task)) {
						adapter->force_reset = true;
						schedule_work(
							&adapter->force_close_task);
					}
					break;
				}
				mutex_unlock(&hw->hw_info.lock);

				hw->ep_shm_info[epidx].ep_stats
					.com_unregist_buf_exec += 1;

				spin_lock_irqsave(&hw->rx_status_lock, flags);
				fjes_hw_setup_epbuf(
					&hw->ep_shm_info[epidx].tx,
					netdev->dev_addr, netdev->mtu);
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);

				clear_bit(epidx, &hw->txrx_stop_req_bit);
				clear_bit(epidx, &unshare_watch_bitmask);
				clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
			}

			if (test_bit(epidx, &unshare_watch_bitmask)) {
				spin_lock_irqsave(&hw->rx_status_lock, flags);
				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
						~FJES_RX_STOP_REQ_DONE;
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);
			}
		}
	}
}

static acpi_status
acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
				 void *context, void **return_value)
{
	struct acpi_device *device;
	bool *found = context;
	int result;

	result = acpi_bus_get_device(obj_handle, &device);
	if (result)
		return AE_OK;

	if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
		return AE_OK;

	if (!is_extended_socket_device(device))
		return AE_OK;

	if (acpi_check_extended_socket_status(device))
		return AE_OK;

	*found = true;
	return AE_CTRL_TERMINATE;
}

/* fjes_init_module - Driver Registration Routine */
static int __init fjes_init_module(void)
{
	bool found = false;
	int result;

	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
			    acpi_find_extended_socket_device, NULL, &found,
			    NULL);

	if (!found)
		return -ENODEV;

	pr_info("%s - version %s - %s\n",
		fjes_driver_string, fjes_driver_version, fjes_copyright);

	fjes_dbg_init();

	result = platform_driver_register(&fjes_driver);
	if (result < 0) {
		fjes_dbg_exit();
		return result;
	}

	result = acpi_bus_register_driver(&fjes_acpi_driver);
	if (result < 0)
		goto fail_acpi_driver;

	return 0;

fail_acpi_driver:
	platform_driver_unregister(&fjes_driver);
	fjes_dbg_exit();
	return result;
}

module_init(fjes_init_module);

/* fjes_exit_module - Driver Exit Cleanup Routine */
static void __exit fjes_exit_module(void)
{
	acpi_bus_unregister_driver(&fjes_acpi_driver);
	platform_driver_unregister(&fjes_driver);
	fjes_dbg_exit();
}

module_exit(fjes_exit_module);