1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4/* ethtool support for ixgbe */
5
6#include <linux/interrupt.h>
7#include <linux/types.h>
8#include <linux/module.h>
9#include <linux/slab.h>
10#include <linux/pci.h>
11#include <linux/netdevice.h>
12#include <linux/ethtool.h>
13#include <linux/vmalloc.h>
14#include <linux/highmem.h>
15#include <linux/uaccess.h>
16
17#include "ixgbe.h"
18#include "ixgbe_phy.h"
19
20
21#define IXGBE_ALL_RAR_ENTRIES 16
22
23enum {NETDEV_STATS, IXGBE_STATS};
24
25struct ixgbe_stats {
26	char stat_string[ETH_GSTRING_LEN];
27	int type;
28	int sizeof_stat;
29	int stat_offset;
30};
31
32#define IXGBE_STAT(m)		IXGBE_STATS, \
33				sizeof(((struct ixgbe_adapter *)0)->m), \
34				offsetof(struct ixgbe_adapter, m)
35#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
36				sizeof(((struct rtnl_link_stats64 *)0)->m), \
37				offsetof(struct rtnl_link_stats64, m)
38
39static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
40	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
41	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
42	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
43	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
44	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
45	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
46	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
47	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
48	{"lsc_int", IXGBE_STAT(lsc_int)},
49	{"tx_busy", IXGBE_STAT(tx_busy)},
50	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
51	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
52	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
53	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
54	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
55	{"multicast", IXGBE_NETDEV_STAT(multicast)},
56	{"broadcast", IXGBE_STAT(stats.bprc)},
57	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
58	{"collisions", IXGBE_NETDEV_STAT(collisions)},
59	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
60	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
61	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
62	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
63	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
64	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
65	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
66	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
67	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
68	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
69	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
70	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
71	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
72	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
73	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
74	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
75	{"rx_length_errors", IXGBE_STAT(stats.rlec)},
76	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
77	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
78	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
79	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
80	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
81	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
82	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
83	{"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
84	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
85	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
86	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
87	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
88	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
89	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
90	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
91	{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
92	{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
93	{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
94	{"tx_ipsec", IXGBE_STAT(tx_ipsec)},
95	{"rx_ipsec", IXGBE_STAT(rx_ipsec)},
96#ifdef IXGBE_FCOE
97	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
98	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
99	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
100	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
101	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
102	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
103	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
104	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
105#endif /* IXGBE_FCOE */
106};
107
108/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
109 * we set the num_rx_queues to evaluate to num_tx_queues. This is
110 * used because we do not have a good way to get the max number of
111 * rx queues with CONFIG_RPS disabled.
112 */
113#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
114
115#define IXGBE_QUEUE_STATS_LEN ( \
116	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
117	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
118#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
119#define IXGBE_PB_STATS_LEN ( \
120			(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
121			 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
122			 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
123			 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
124			/ sizeof(u64))
125#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
126			 IXGBE_PB_STATS_LEN + \
127			 IXGBE_QUEUE_STATS_LEN)
128
129static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
130	"Register test  (offline)", "Eeprom test    (offline)",
131	"Interrupt test (offline)", "Loopback test  (offline)",
132	"Link test   (on/offline)"
133};
134#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
135
136static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
137#define IXGBE_PRIV_FLAGS_LEGACY_RX	BIT(0)
138	"legacy-rx",
139#define IXGBE_PRIV_FLAGS_VF_IPSEC_EN	BIT(1)
140	"vf-ipsec",
141#define IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF	BIT(2)
142	"mdd-disable-vf",
143};
144
145#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
146
147#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)
148
149static void ixgbe_set_supported_10gtypes(struct ixgbe_hw *hw,
150					 struct ethtool_link_ksettings *cmd)
151{
152	if (!ixgbe_isbackplane(hw->phy.media_type)) {
153		ethtool_link_ksettings_add_link_mode(cmd, supported,
154						     10000baseT_Full);
155		return;
156	}
157
158	switch (hw->device_id) {
159	case IXGBE_DEV_ID_82598:
160	case IXGBE_DEV_ID_82599_KX4:
161	case IXGBE_DEV_ID_82599_KX4_MEZZ:
162	case IXGBE_DEV_ID_X550EM_X_KX4:
163		ethtool_link_ksettings_add_link_mode
164			(cmd, supported, 10000baseKX4_Full);
165		break;
166	case IXGBE_DEV_ID_82598_BX:
167	case IXGBE_DEV_ID_82599_KR:
168	case IXGBE_DEV_ID_X550EM_X_KR:
169	case IXGBE_DEV_ID_X550EM_X_XFI:
170		ethtool_link_ksettings_add_link_mode
171			(cmd, supported, 10000baseKR_Full);
172		break;
173	default:
174		ethtool_link_ksettings_add_link_mode
175			(cmd, supported, 10000baseKX4_Full);
176		ethtool_link_ksettings_add_link_mode
177			(cmd, supported, 10000baseKR_Full);
178		break;
179	}
180}
181
182static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw,
183					   struct ethtool_link_ksettings *cmd)
184{
185	if (!ixgbe_isbackplane(hw->phy.media_type)) {
186		ethtool_link_ksettings_add_link_mode(cmd, advertising,
187						     10000baseT_Full);
188		return;
189	}
190
191	switch (hw->device_id) {
192	case IXGBE_DEV_ID_82598:
193	case IXGBE_DEV_ID_82599_KX4:
194	case IXGBE_DEV_ID_82599_KX4_MEZZ:
195	case IXGBE_DEV_ID_X550EM_X_KX4:
196		ethtool_link_ksettings_add_link_mode
197			(cmd, advertising, 10000baseKX4_Full);
198		break;
199	case IXGBE_DEV_ID_82598_BX:
200	case IXGBE_DEV_ID_82599_KR:
201	case IXGBE_DEV_ID_X550EM_X_KR:
202	case IXGBE_DEV_ID_X550EM_X_XFI:
203		ethtool_link_ksettings_add_link_mode
204			(cmd, advertising, 10000baseKR_Full);
205		break;
206	default:
207		ethtool_link_ksettings_add_link_mode
208			(cmd, advertising, 10000baseKX4_Full);
209		ethtool_link_ksettings_add_link_mode
210			(cmd, advertising, 10000baseKR_Full);
211		break;
212	}
213}
214
215static int ixgbe_get_link_ksettings(struct net_device *netdev,
216				    struct ethtool_link_ksettings *cmd)
217{
218	struct ixgbe_adapter *adapter = netdev_priv(netdev);
219	struct ixgbe_hw *hw = &adapter->hw;
220	ixgbe_link_speed supported_link;
221	bool autoneg = false;
222
223	ethtool_link_ksettings_zero_link_mode(cmd, supported);
224	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
225
226	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
227
228	/* set the supported link speeds */
229	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) {
230		ixgbe_set_supported_10gtypes(hw, cmd);
231		ixgbe_set_advertising_10gtypes(hw, cmd);
232	}
233	if (supported_link & IXGBE_LINK_SPEED_5GB_FULL)
234		ethtool_link_ksettings_add_link_mode(cmd, supported,
235						     5000baseT_Full);
236
237	if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL)
238		ethtool_link_ksettings_add_link_mode(cmd, supported,
239						     2500baseT_Full);
240
241	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) {
242		if (ixgbe_isbackplane(hw->phy.media_type)) {
243			ethtool_link_ksettings_add_link_mode(cmd, supported,
244							     1000baseKX_Full);
245			ethtool_link_ksettings_add_link_mode(cmd, advertising,
246							     1000baseKX_Full);
247		} else {
248			ethtool_link_ksettings_add_link_mode(cmd, supported,
249							     1000baseT_Full);
250			ethtool_link_ksettings_add_link_mode(cmd, advertising,
251							     1000baseT_Full);
252		}
253	}
254	if (supported_link & IXGBE_LINK_SPEED_100_FULL) {
255		ethtool_link_ksettings_add_link_mode(cmd, supported,
256						     100baseT_Full);
257		ethtool_link_ksettings_add_link_mode(cmd, advertising,
258						     100baseT_Full);
259	}
260	if (supported_link & IXGBE_LINK_SPEED_10_FULL) {
261		ethtool_link_ksettings_add_link_mode(cmd, supported,
262						     10baseT_Full);
263		ethtool_link_ksettings_add_link_mode(cmd, advertising,
264						     10baseT_Full);
265	}
266
267	/* set the advertised speeds */
268	if (hw->phy.autoneg_advertised) {
269		ethtool_link_ksettings_zero_link_mode(cmd, advertising);
270		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
271			ethtool_link_ksettings_add_link_mode(cmd, advertising,
272							     10baseT_Full);
273		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
274			ethtool_link_ksettings_add_link_mode(cmd, advertising,
275							     100baseT_Full);
276		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
277			ixgbe_set_advertising_10gtypes(hw, cmd);
278		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
279			if (ethtool_link_ksettings_test_link_mode
280				(cmd, supported, 1000baseKX_Full))
281				ethtool_link_ksettings_add_link_mode
282					(cmd, advertising, 1000baseKX_Full);
283			else
284				ethtool_link_ksettings_add_link_mode
285					(cmd, advertising, 1000baseT_Full);
286		}
287		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL)
288			ethtool_link_ksettings_add_link_mode(cmd, advertising,
289							     5000baseT_Full);
290		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
291			ethtool_link_ksettings_add_link_mode(cmd, advertising,
292							     2500baseT_Full);
293	} else {
294		if (hw->phy.multispeed_fiber && !autoneg) {
295			if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
296				ethtool_link_ksettings_add_link_mode
297					(cmd, advertising, 10000baseT_Full);
298		}
299	}
300
301	if (autoneg) {
302		ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
303		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
304		cmd->base.autoneg = AUTONEG_ENABLE;
305	} else
306		cmd->base.autoneg = AUTONEG_DISABLE;
307
308	/* Determine the remaining settings based on the PHY type. */
309	switch (adapter->hw.phy.type) {
310	case ixgbe_phy_tn:
311	case ixgbe_phy_aq:
312	case ixgbe_phy_x550em_ext_t:
313	case ixgbe_phy_fw:
314	case ixgbe_phy_cu_unknown:
315		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
316		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
317		cmd->base.port = PORT_TP;
318		break;
319	case ixgbe_phy_qt:
320		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
321		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
322		cmd->base.port = PORT_FIBRE;
323		break;
324	case ixgbe_phy_nl:
325	case ixgbe_phy_sfp_passive_tyco:
326	case ixgbe_phy_sfp_passive_unknown:
327	case ixgbe_phy_sfp_ftl:
328	case ixgbe_phy_sfp_avago:
329	case ixgbe_phy_sfp_intel:
330	case ixgbe_phy_sfp_unknown:
331	case ixgbe_phy_qsfp_passive_unknown:
332	case ixgbe_phy_qsfp_active_unknown:
333	case ixgbe_phy_qsfp_intel:
334	case ixgbe_phy_qsfp_unknown:
335		/* SFP+ devices, further checking needed */
336		switch (adapter->hw.phy.sfp_type) {
337		case ixgbe_sfp_type_da_cu:
338		case ixgbe_sfp_type_da_cu_core0:
339		case ixgbe_sfp_type_da_cu_core1:
340			ethtool_link_ksettings_add_link_mode(cmd, supported,
341							     FIBRE);
342			ethtool_link_ksettings_add_link_mode(cmd, advertising,
343							     FIBRE);
344			cmd->base.port = PORT_DA;
345			break;
346		case ixgbe_sfp_type_sr:
347		case ixgbe_sfp_type_lr:
348		case ixgbe_sfp_type_srlr_core0:
349		case ixgbe_sfp_type_srlr_core1:
350		case ixgbe_sfp_type_1g_sx_core0:
351		case ixgbe_sfp_type_1g_sx_core1:
352		case ixgbe_sfp_type_1g_lx_core0:
353		case ixgbe_sfp_type_1g_lx_core1:
354			ethtool_link_ksettings_add_link_mode(cmd, supported,
355							     FIBRE);
356			ethtool_link_ksettings_add_link_mode(cmd, advertising,
357							     FIBRE);
358			cmd->base.port = PORT_FIBRE;
359			break;
360		case ixgbe_sfp_type_not_present:
361			ethtool_link_ksettings_add_link_mode(cmd, supported,
362							     FIBRE);
363			ethtool_link_ksettings_add_link_mode(cmd, advertising,
364							     FIBRE);
365			cmd->base.port = PORT_NONE;
366			break;
367		case ixgbe_sfp_type_1g_cu_core0:
368		case ixgbe_sfp_type_1g_cu_core1:
369			ethtool_link_ksettings_add_link_mode(cmd, supported,
370							     TP);
371			ethtool_link_ksettings_add_link_mode(cmd, advertising,
372							     TP);
373			cmd->base.port = PORT_TP;
374			break;
375		case ixgbe_sfp_type_unknown:
376		default:
377			ethtool_link_ksettings_add_link_mode(cmd, supported,
378							     FIBRE);
379			ethtool_link_ksettings_add_link_mode(cmd, advertising,
380							     FIBRE);
381			cmd->base.port = PORT_OTHER;
382			break;
383		}
384		break;
385	case ixgbe_phy_xaui:
386		ethtool_link_ksettings_add_link_mode(cmd, supported,
387						     FIBRE);
388		ethtool_link_ksettings_add_link_mode(cmd, advertising,
389						     FIBRE);
390		cmd->base.port = PORT_NONE;
391		break;
392	case ixgbe_phy_unknown:
393	case ixgbe_phy_generic:
394	case ixgbe_phy_sfp_unsupported:
395	default:
396		ethtool_link_ksettings_add_link_mode(cmd, supported,
397						     FIBRE);
398		ethtool_link_ksettings_add_link_mode(cmd, advertising,
399						     FIBRE);
400		cmd->base.port = PORT_OTHER;
401		break;
402	}
403
404	/* Indicate pause support */
405	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
406
407	switch (hw->fc.requested_mode) {
408	case ixgbe_fc_full:
409		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
410		break;
411	case ixgbe_fc_rx_pause:
412		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
413		ethtool_link_ksettings_add_link_mode(cmd, advertising,
414						     Asym_Pause);
415		break;
416	case ixgbe_fc_tx_pause:
417		ethtool_link_ksettings_add_link_mode(cmd, advertising,
418						     Asym_Pause);
419		break;
420	default:
421		ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause);
422		ethtool_link_ksettings_del_link_mode(cmd, advertising,
423						     Asym_Pause);
424	}
425
426	if (netif_carrier_ok(netdev)) {
427		switch (adapter->link_speed) {
428		case IXGBE_LINK_SPEED_10GB_FULL:
429			cmd->base.speed = SPEED_10000;
430			break;
431		case IXGBE_LINK_SPEED_5GB_FULL:
432			cmd->base.speed = SPEED_5000;
433			break;
434		case IXGBE_LINK_SPEED_2_5GB_FULL:
435			cmd->base.speed = SPEED_2500;
436			break;
437		case IXGBE_LINK_SPEED_1GB_FULL:
438			cmd->base.speed = SPEED_1000;
439			break;
440		case IXGBE_LINK_SPEED_100_FULL:
441			cmd->base.speed = SPEED_100;
442			break;
443		case IXGBE_LINK_SPEED_10_FULL:
444			cmd->base.speed = SPEED_10;
445			break;
446		default:
447			break;
448		}
449		cmd->base.duplex = DUPLEX_FULL;
450	} else {
451		cmd->base.speed = SPEED_UNKNOWN;
452		cmd->base.duplex = DUPLEX_UNKNOWN;
453	}
454
455	return 0;
456}
457
458static int ixgbe_set_link_ksettings(struct net_device *netdev,
459				    const struct ethtool_link_ksettings *cmd)
460{
461	struct ixgbe_adapter *adapter = netdev_priv(netdev);
462	struct ixgbe_hw *hw = &adapter->hw;
463	u32 advertised, old;
464	s32 err = 0;
465
466	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
467	    (hw->phy.multispeed_fiber)) {
468		/*
469		 * this function does not support duplex forcing, but can
470		 * limit the advertising of the adapter to the specified speed
471		 */
472		if (!bitmap_subset(cmd->link_modes.advertising,
473				   cmd->link_modes.supported,
474				   __ETHTOOL_LINK_MODE_MASK_NBITS))
475			return -EINVAL;
476
477		/* only allow one speed at a time if no autoneg */
478		if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
479			if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
480								  10000baseT_Full) &&
481			    ethtool_link_ksettings_test_link_mode(cmd, advertising,
482								  1000baseT_Full))
483				return -EINVAL;
484		}
485
486		old = hw->phy.autoneg_advertised;
487		advertised = 0;
488		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
489							  10000baseT_Full))
490			advertised |= IXGBE_LINK_SPEED_10GB_FULL;
491		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
492							  5000baseT_Full))
493			advertised |= IXGBE_LINK_SPEED_5GB_FULL;
494		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
495							  2500baseT_Full))
496			advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
497		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
498							  1000baseT_Full))
499			advertised |= IXGBE_LINK_SPEED_1GB_FULL;
500
501		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
502							  100baseT_Full))
503			advertised |= IXGBE_LINK_SPEED_100_FULL;
504
505		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
506							  10baseT_Full))
507			advertised |= IXGBE_LINK_SPEED_10_FULL;
508
509		if (old == advertised)
510			return err;
511		/* this sets the link speed and restarts auto-neg */
512		while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
513			usleep_range(1000, 2000);
514
515		hw->mac.autotry_restart = true;
516		err = hw->mac.ops.setup_link(hw, advertised, true);
517		if (err) {
518			e_info(probe, "setup link failed with code %d\n", err);
519			hw->mac.ops.setup_link(hw, old, true);
520		}
521		clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
522	} else {
523		/* in this case we currently only support 10Gb/FULL */
524		u32 speed = cmd->base.speed;
525
526		if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
527		    (!ethtool_link_ksettings_test_link_mode(cmd, advertising,
528							    10000baseT_Full)) ||
529		    (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
530			return -EINVAL;
531	}
532
533	return err;
534}
535
536static void ixgbe_get_pause_stats(struct net_device *netdev,
537				  struct ethtool_pause_stats *stats)
538{
539	struct ixgbe_adapter *adapter = netdev_priv(netdev);
540	struct ixgbe_hw_stats *hwstats = &adapter->stats;
541
542	stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
543	stats->rx_pause_frames = hwstats->lxonrxc + hwstats->lxoffrxc;
544}
545
546static void ixgbe_get_pauseparam(struct net_device *netdev,
547				 struct ethtool_pauseparam *pause)
548{
549	struct ixgbe_adapter *adapter = netdev_priv(netdev);
550	struct ixgbe_hw *hw = &adapter->hw;
551
552	if (ixgbe_device_supports_autoneg_fc(hw) &&
553	    !hw->fc.disable_fc_autoneg)
554		pause->autoneg = 1;
555	else
556		pause->autoneg = 0;
557
558	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
559		pause->rx_pause = 1;
560	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
561		pause->tx_pause = 1;
562	} else if (hw->fc.current_mode == ixgbe_fc_full) {
563		pause->rx_pause = 1;
564		pause->tx_pause = 1;
565	}
566}
567
568static int ixgbe_set_pauseparam(struct net_device *netdev,
569				struct ethtool_pauseparam *pause)
570{
571	struct ixgbe_adapter *adapter = netdev_priv(netdev);
572	struct ixgbe_hw *hw = &adapter->hw;
573	struct ixgbe_fc_info fc = hw->fc;
574
575	/* 82598 does no support link flow control with DCB enabled */
576	if ((hw->mac.type == ixgbe_mac_82598EB) &&
577	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
578		return -EINVAL;
579
580	/* some devices do not support autoneg of link flow control */
581	if ((pause->autoneg == AUTONEG_ENABLE) &&
582	    !ixgbe_device_supports_autoneg_fc(hw))
583		return -EINVAL;
584
585	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
586
587	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
588		fc.requested_mode = ixgbe_fc_full;
589	else if (pause->rx_pause && !pause->tx_pause)
590		fc.requested_mode = ixgbe_fc_rx_pause;
591	else if (!pause->rx_pause && pause->tx_pause)
592		fc.requested_mode = ixgbe_fc_tx_pause;
593	else
594		fc.requested_mode = ixgbe_fc_none;
595
596	/* if the thing changed then we'll update and use new autoneg */
597	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
598		hw->fc = fc;
599		if (netif_running(netdev))
600			ixgbe_reinit_locked(adapter);
601		else
602			ixgbe_reset(adapter);
603	}
604
605	return 0;
606}
607
608static u32 ixgbe_get_msglevel(struct net_device *netdev)
609{
610	struct ixgbe_adapter *adapter = netdev_priv(netdev);
611	return adapter->msg_enable;
612}
613
614static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
615{
616	struct ixgbe_adapter *adapter = netdev_priv(netdev);
617	adapter->msg_enable = data;
618}
619
620static int ixgbe_get_regs_len(struct net_device *netdev)
621{
622#define IXGBE_REGS_LEN  1145
623	return IXGBE_REGS_LEN * sizeof(u32);
624}
625
626#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
627
628static void ixgbe_get_regs(struct net_device *netdev,
629			   struct ethtool_regs *regs, void *p)
630{
631	struct ixgbe_adapter *adapter = netdev_priv(netdev);
632	struct ixgbe_hw *hw = &adapter->hw;
633	u32 *regs_buff = p;
634	u8 i;
635
636	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
637
638	regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
639			hw->device_id;
640
641	/* General Registers */
642	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
643	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
644	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
645	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
646	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
647	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
648	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
649	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
650
651	/* NVM Register */
652	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
653	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
654	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
655	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
656	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
657	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
658	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
659	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
660	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
661	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));
662
663	/* Interrupt */
664	/* don't read EICR because it can clear interrupt causes, instead
665	 * read EICS which is a shadow but doesn't clear EICR */
666	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
667	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
668	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
669	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
670	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
671	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
672	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
673	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
674	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
675	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
676	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
677	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
678
679	/* Flow Control */
680	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
681	for (i = 0; i < 4; i++)
682		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
683	for (i = 0; i < 8; i++) {
684		switch (hw->mac.type) {
685		case ixgbe_mac_82598EB:
686			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
687			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
688			break;
689		case ixgbe_mac_82599EB:
690		case ixgbe_mac_X540:
691		case ixgbe_mac_X550:
692		case ixgbe_mac_X550EM_x:
693		case ixgbe_mac_x550em_a:
694			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
695			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
696			break;
697		default:
698			break;
699		}
700	}
701	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
702	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
703
704	/* Receive DMA */
705	for (i = 0; i < 64; i++)
706		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
707	for (i = 0; i < 64; i++)
708		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
709	for (i = 0; i < 64; i++)
710		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
711	for (i = 0; i < 64; i++)
712		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
713	for (i = 0; i < 64; i++)
714		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
715	for (i = 0; i < 64; i++)
716		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
717	for (i = 0; i < 16; i++)
718		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
719	for (i = 0; i < 16; i++)
720		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
721	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
722	for (i = 0; i < 8; i++)
723		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
724	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
725	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
726
727	/* Receive */
728	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
729	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
730	for (i = 0; i < 16; i++)
731		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
732	for (i = 0; i < 16; i++)
733		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
734	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
735	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
736	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
737	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
738	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
739	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
740	for (i = 0; i < 8; i++)
741		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
742	for (i = 0; i < 8; i++)
743		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
744	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
745
746	/* Transmit */
747	for (i = 0; i < 32; i++)
748		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
749	for (i = 0; i < 32; i++)
750		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
751	for (i = 0; i < 32; i++)
752		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
753	for (i = 0; i < 32; i++)
754		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
755	for (i = 0; i < 32; i++)
756		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
757	for (i = 0; i < 32; i++)
758		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
759	for (i = 0; i < 32; i++)
760		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
761	for (i = 0; i < 32; i++)
762		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
763	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
764	for (i = 0; i < 16; i++)
765		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
766	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
767	for (i = 0; i < 8; i++)
768		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
769	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
770
771	/* Wake Up */
772	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
773	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
774	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
775	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
776	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
777	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
778	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
779	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
780	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
781
782	/* DCB */
783	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
784	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
785
786	switch (hw->mac.type) {
787	case ixgbe_mac_82598EB:
788		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
789		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
790		for (i = 0; i < 8; i++)
791			regs_buff[833 + i] =
792				IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
793		for (i = 0; i < 8; i++)
794			regs_buff[841 + i] =
795				IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
796		for (i = 0; i < 8; i++)
797			regs_buff[849 + i] =
798				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
799		for (i = 0; i < 8; i++)
800			regs_buff[857 + i] =
801				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
802		break;
803	case ixgbe_mac_82599EB:
804	case ixgbe_mac_X540:
805	case ixgbe_mac_X550:
806	case ixgbe_mac_X550EM_x:
807	case ixgbe_mac_x550em_a:
808		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
809		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
810		for (i = 0; i < 8; i++)
811			regs_buff[833 + i] =
812				IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
813		for (i = 0; i < 8; i++)
814			regs_buff[841 + i] =
815				IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
816		for (i = 0; i < 8; i++)
817			regs_buff[849 + i] =
818				IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
819		for (i = 0; i < 8; i++)
820			regs_buff[857 + i] =
821				IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
822		break;
823	default:
824		break;
825	}
826
827	for (i = 0; i < 8; i++)
828		regs_buff[865 + i] =
829		IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
830	for (i = 0; i < 8; i++)
831		regs_buff[873 + i] =
832		IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
833
834	/* Statistics */
835	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
836	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
837	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
838	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
839	for (i = 0; i < 8; i++)
840		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
841	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
842	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
843	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
844	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
845	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
846	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
847	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
848	for (i = 0; i < 8; i++)
849		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
850	for (i = 0; i < 8; i++)
851		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
852	for (i = 0; i < 8; i++)
853		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
854	for (i = 0; i < 8; i++)
855		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
856	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
857	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
858	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
859	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
860	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
861	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
862	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
863	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
864	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
865	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
866	regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
867	regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
868	regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
869	regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
870	for (i = 0; i < 8; i++)
871		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
872	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
873	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
874	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
875	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
876	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
877	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
878	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
879	regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
880	regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
881	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
882	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
883	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
884	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
885	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
886	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
887	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
888	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
889	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
890	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
891	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
892	for (i = 0; i < 16; i++)
893		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
894	for (i = 0; i < 16; i++)
895		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
896	for (i = 0; i < 16; i++)
897		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
898	for (i = 0; i < 16; i++)
899		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
900
901	/* MAC */
902	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
903	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
904	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
905	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
906	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
907	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
908	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
909	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
910	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
911	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
912	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
913	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
914	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
915	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
916	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
917	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
918	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
919	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
920	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
921	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
922	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
923	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
924	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
925	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
926	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
927	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
928	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
929	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
930	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
931	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
932	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
933	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
934	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
935
936	/* Diagnostic */
937	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
938	for (i = 0; i < 8; i++)
939		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
940	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
941	for (i = 0; i < 4; i++)
942		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
943	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
944	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
945	for (i = 0; i < 8; i++)
946		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
947	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
948	for (i = 0; i < 4; i++)
949		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
950	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
951	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
952	for (i = 0; i < 4; i++)
953		regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
954	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
955	for (i = 0; i < 4; i++)
956		regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
957	for (i = 0; i < 8; i++)
958		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
959	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
960	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
961	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
962	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
963	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
964	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
965	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
966	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
967	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
968
969	/* 82599 X540 specific registers  */
970	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
971
972	/* 82599 X540 specific DCB registers  */
973	regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
974	regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
975	for (i = 0; i < 4; i++)
976		regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
977	regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
978					/* same as RTTQCNRM */
979	regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
980					/* same as RTTQCNRR */
981
982	/* X540 specific DCB registers  */
983	regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
984	regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
985
986	/* Security config registers */
987	regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
988	regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
989	regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
990	regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
991	regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
992	regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
993}
994
995static int ixgbe_get_eeprom_len(struct net_device *netdev)
996{
997	struct ixgbe_adapter *adapter = netdev_priv(netdev);
998	return adapter->hw.eeprom.word_size * 2;
999}
1000
1001static int ixgbe_get_eeprom(struct net_device *netdev,
1002			    struct ethtool_eeprom *eeprom, u8 *bytes)
1003{
1004	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1005	struct ixgbe_hw *hw = &adapter->hw;
1006	u16 *eeprom_buff;
1007	int first_word, last_word, eeprom_len;
1008	int ret_val = 0;
1009	u16 i;
1010
1011	if (eeprom->len == 0)
1012		return -EINVAL;
1013
1014	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
1015
1016	first_word = eeprom->offset >> 1;
1017	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
1018	eeprom_len = last_word - first_word + 1;
1019
1020	eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL);
1021	if (!eeprom_buff)
1022		return -ENOMEM;
1023
1024	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
1025					     eeprom_buff);
1026
1027	/* Device's eeprom is always little-endian, word addressable */
1028	for (i = 0; i < eeprom_len; i++)
1029		le16_to_cpus(&eeprom_buff[i]);
1030
1031	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
1032	kfree(eeprom_buff);
1033
1034	return ret_val;
1035}
1036
1037static int ixgbe_set_eeprom(struct net_device *netdev,
1038			    struct ethtool_eeprom *eeprom, u8 *bytes)
1039{
1040	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1041	struct ixgbe_hw *hw = &adapter->hw;
1042	u16 *eeprom_buff;
1043	void *ptr;
1044	int max_len, first_word, last_word, ret_val = 0;
1045	u16 i;
1046
1047	if (eeprom->len == 0)
1048		return -EINVAL;
1049
1050	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
1051		return -EINVAL;
1052
1053	max_len = hw->eeprom.word_size * 2;
1054
1055	first_word = eeprom->offset >> 1;
1056	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
1057	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
1058	if (!eeprom_buff)
1059		return -ENOMEM;
1060
1061	ptr = eeprom_buff;
1062
1063	if (eeprom->offset & 1) {
1064		/*
1065		 * need read/modify/write of first changed EEPROM word
1066		 * only the second byte of the word is being modified
1067		 */
1068		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
1069		if (ret_val)
1070			goto err;
1071
1072		ptr++;
1073	}
1074	if ((eeprom->offset + eeprom->len) & 1) {
1075		/*
1076		 * need read/modify/write of last changed EEPROM word
1077		 * only the first byte of the word is being modified
1078		 */
1079		ret_val = hw->eeprom.ops.read(hw, last_word,
1080					  &eeprom_buff[last_word - first_word]);
1081		if (ret_val)
1082			goto err;
1083	}
1084
1085	/* Device's eeprom is always little-endian, word addressable */
1086	for (i = 0; i < last_word - first_word + 1; i++)
1087		le16_to_cpus(&eeprom_buff[i]);
1088
1089	memcpy(ptr, bytes, eeprom->len);
1090
1091	for (i = 0; i < last_word - first_word + 1; i++)
1092		cpu_to_le16s(&eeprom_buff[i]);
1093
1094	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
1095					      last_word - first_word + 1,
1096					      eeprom_buff);
1097
1098	/* Update the checksum */
1099	if (ret_val == 0)
1100		hw->eeprom.ops.update_checksum(hw);
1101
1102err:
1103	kfree(eeprom_buff);
1104	return ret_val;
1105}
1106
1107static void ixgbe_get_drvinfo(struct net_device *netdev,
1108			      struct ethtool_drvinfo *drvinfo)
1109{
1110	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1111
1112	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
1113
1114	strlcpy(drvinfo->fw_version, adapter->eeprom_id,
1115		sizeof(drvinfo->fw_version));
1116
1117	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
1118		sizeof(drvinfo->bus_info));
1119
1120	drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
1121}
1122
1123static void ixgbe_get_ringparam(struct net_device *netdev,
1124				struct ethtool_ringparam *ring)
1125{
1126	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1127	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
1128	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
1129
1130	ring->rx_max_pending = IXGBE_MAX_RXD;
1131	ring->tx_max_pending = IXGBE_MAX_TXD;
1132	ring->rx_pending = rx_ring->count;
1133	ring->tx_pending = tx_ring->count;
1134}
1135
1136static int ixgbe_set_ringparam(struct net_device *netdev,
1137			       struct ethtool_ringparam *ring)
1138{
1139	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1140	struct ixgbe_ring *temp_ring;
1141	int i, j, err = 0;
1142	u32 new_rx_count, new_tx_count;
1143
1144	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1145		return -EINVAL;
1146
1147	new_tx_count = clamp_t(u32, ring->tx_pending,
1148			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
1149	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
1150
1151	new_rx_count = clamp_t(u32, ring->rx_pending,
1152			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
1153	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
1154
1155	if ((new_tx_count == adapter->tx_ring_count) &&
1156	    (new_rx_count == adapter->rx_ring_count)) {
1157		/* nothing to do */
1158		return 0;
1159	}
1160
1161	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
1162		usleep_range(1000, 2000);
1163
1164	if (!netif_running(adapter->netdev)) {
1165		for (i = 0; i < adapter->num_tx_queues; i++)
1166			adapter->tx_ring[i]->count = new_tx_count;
1167		for (i = 0; i < adapter->num_xdp_queues; i++)
1168			adapter->xdp_ring[i]->count = new_tx_count;
1169		for (i = 0; i < adapter->num_rx_queues; i++)
1170			adapter->rx_ring[i]->count = new_rx_count;
1171		adapter->tx_ring_count = new_tx_count;
1172		adapter->xdp_ring_count = new_tx_count;
1173		adapter->rx_ring_count = new_rx_count;
1174		goto clear_reset;
1175	}
1176
1177	/* allocate temporary buffer to store rings in */
1178	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
1179		  adapter->num_rx_queues);
1180	temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));
1181
1182	if (!temp_ring) {
1183		err = -ENOMEM;
1184		goto clear_reset;
1185	}
1186
1187	ixgbe_down(adapter);
1188
1189	/*
1190	 * Setup new Tx resources and free the old Tx resources in that order.
1191	 * We can then assign the new resources to the rings via a memcpy.
1192	 * The advantage to this approach is that we are guaranteed to still
1193	 * have resources even in the case of an allocation failure.
1194	 */
1195	if (new_tx_count != adapter->tx_ring_count) {
1196		for (i = 0; i < adapter->num_tx_queues; i++) {
1197			memcpy(&temp_ring[i], adapter->tx_ring[i],
1198			       sizeof(struct ixgbe_ring));
1199
1200			temp_ring[i].count = new_tx_count;
1201			err = ixgbe_setup_tx_resources(&temp_ring[i]);
1202			if (err) {
1203				while (i) {
1204					i--;
1205					ixgbe_free_tx_resources(&temp_ring[i]);
1206				}
1207				goto err_setup;
1208			}
1209		}
1210
1211		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
1212			memcpy(&temp_ring[i], adapter->xdp_ring[j],
1213			       sizeof(struct ixgbe_ring));
1214
1215			temp_ring[i].count = new_tx_count;
1216			err = ixgbe_setup_tx_resources(&temp_ring[i]);
1217			if (err) {
1218				while (i) {
1219					i--;
1220					ixgbe_free_tx_resources(&temp_ring[i]);
1221				}
1222				goto err_setup;
1223			}
1224		}
1225
1226		for (i = 0; i < adapter->num_tx_queues; i++) {
1227			ixgbe_free_tx_resources(adapter->tx_ring[i]);
1228
1229			memcpy(adapter->tx_ring[i], &temp_ring[i],
1230			       sizeof(struct ixgbe_ring));
1231		}
1232		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
1233			ixgbe_free_tx_resources(adapter->xdp_ring[j]);
1234
1235			memcpy(adapter->xdp_ring[j], &temp_ring[i],
1236			       sizeof(struct ixgbe_ring));
1237		}
1238
1239		adapter->tx_ring_count = new_tx_count;
1240	}
1241
1242	/* Repeat the process for the Rx rings if needed */
1243	if (new_rx_count != adapter->rx_ring_count) {
1244		for (i = 0; i < adapter->num_rx_queues; i++) {
1245			memcpy(&temp_ring[i], adapter->rx_ring[i],
1246			       sizeof(struct ixgbe_ring));
1247
1248			/* Clear copied XDP RX-queue info */
1249			memset(&temp_ring[i].xdp_rxq, 0,
1250			       sizeof(temp_ring[i].xdp_rxq));
1251
1252			temp_ring[i].count = new_rx_count;
1253			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
1254			if (err) {
1255				while (i) {
1256					i--;
1257					ixgbe_free_rx_resources(&temp_ring[i]);
1258				}
1259				goto err_setup;
1260			}
1261
1262		}
1263
1264		for (i = 0; i < adapter->num_rx_queues; i++) {
1265			ixgbe_free_rx_resources(adapter->rx_ring[i]);
1266
1267			memcpy(adapter->rx_ring[i], &temp_ring[i],
1268			       sizeof(struct ixgbe_ring));
1269		}
1270
1271		adapter->rx_ring_count = new_rx_count;
1272	}
1273
1274err_setup:
1275	ixgbe_up(adapter);
1276	vfree(temp_ring);
1277clear_reset:
1278	clear_bit(__IXGBE_RESETTING, &adapter->state);
1279	return err;
1280}
1281
1282static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1283{
1284	switch (sset) {
1285	case ETH_SS_TEST:
1286		return IXGBE_TEST_LEN;
1287	case ETH_SS_STATS:
1288		return IXGBE_STATS_LEN;
1289	case ETH_SS_PRIV_FLAGS:
1290		return IXGBE_PRIV_FLAGS_STR_LEN;
1291	default:
1292		return -EOPNOTSUPP;
1293	}
1294}
1295
1296static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1297				    struct ethtool_stats *stats, u64 *data)
1298{
1299	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1300	struct rtnl_link_stats64 temp;
1301	const struct rtnl_link_stats64 *net_stats;
1302	unsigned int start;
1303	struct ixgbe_ring *ring;
1304	int i, j;
1305	char *p = NULL;
1306
1307	ixgbe_update_stats(adapter);
1308	net_stats = dev_get_stats(netdev, &temp);
1309	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1310		switch (ixgbe_gstrings_stats[i].type) {
1311		case NETDEV_STATS:
1312			p = (char *) net_stats +
1313					ixgbe_gstrings_stats[i].stat_offset;
1314			break;
1315		case IXGBE_STATS:
1316			p = (char *) adapter +
1317					ixgbe_gstrings_stats[i].stat_offset;
1318			break;
1319		default:
1320			data[i] = 0;
1321			continue;
1322		}
1323
1324		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1325			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1326	}
1327	for (j = 0; j < netdev->num_tx_queues; j++) {
1328		ring = adapter->tx_ring[j];
1329		if (!ring) {
1330			data[i] = 0;
1331			data[i+1] = 0;
1332			i += 2;
1333			continue;
1334		}
1335
1336		do {
1337			start = u64_stats_fetch_begin_irq(&ring->syncp);
1338			data[i]   = ring->stats.packets;
1339			data[i+1] = ring->stats.bytes;
1340		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1341		i += 2;
1342	}
1343	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1344		ring = adapter->rx_ring[j];
1345		if (!ring) {
1346			data[i] = 0;
1347			data[i+1] = 0;
1348			i += 2;
1349			continue;
1350		}
1351
1352		do {
1353			start = u64_stats_fetch_begin_irq(&ring->syncp);
1354			data[i]   = ring->stats.packets;
1355			data[i+1] = ring->stats.bytes;
1356		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1357		i += 2;
1358	}
1359
1360	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1361		data[i++] = adapter->stats.pxontxc[j];
1362		data[i++] = adapter->stats.pxofftxc[j];
1363	}
1364	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1365		data[i++] = adapter->stats.pxonrxc[j];
1366		data[i++] = adapter->stats.pxoffrxc[j];
1367	}
1368}
1369
1370static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1371			      u8 *data)
1372{
1373	char *p = (char *)data;
1374	unsigned int i;
1375
1376	switch (stringset) {
1377	case ETH_SS_TEST:
1378		for (i = 0; i < IXGBE_TEST_LEN; i++) {
1379			memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
1380			data += ETH_GSTRING_LEN;
1381		}
1382		break;
1383	case ETH_SS_STATS:
1384		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1385			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
1386			       ETH_GSTRING_LEN);
1387			p += ETH_GSTRING_LEN;
1388		}
1389		for (i = 0; i < netdev->num_tx_queues; i++) {
1390			sprintf(p, "tx_queue_%u_packets", i);
1391			p += ETH_GSTRING_LEN;
1392			sprintf(p, "tx_queue_%u_bytes", i);
1393			p += ETH_GSTRING_LEN;
1394		}
1395		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
1396			sprintf(p, "rx_queue_%u_packets", i);
1397			p += ETH_GSTRING_LEN;
1398			sprintf(p, "rx_queue_%u_bytes", i);
1399			p += ETH_GSTRING_LEN;
1400		}
1401		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1402			sprintf(p, "tx_pb_%u_pxon", i);
1403			p += ETH_GSTRING_LEN;
1404			sprintf(p, "tx_pb_%u_pxoff", i);
1405			p += ETH_GSTRING_LEN;
1406		}
1407		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1408			sprintf(p, "rx_pb_%u_pxon", i);
1409			p += ETH_GSTRING_LEN;
1410			sprintf(p, "rx_pb_%u_pxoff", i);
1411			p += ETH_GSTRING_LEN;
1412		}
1413		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1414		break;
1415	case ETH_SS_PRIV_FLAGS:
1416		memcpy(data, ixgbe_priv_flags_strings,
1417		       IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
1418	}
1419}
1420
1421static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1422{
1423	struct ixgbe_hw *hw = &adapter->hw;
1424	bool link_up;
1425	u32 link_speed = 0;
1426
1427	if (ixgbe_removed(hw->hw_addr)) {
1428		*data = 1;
1429		return 1;
1430	}
1431	*data = 0;
1432
1433	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1434	if (link_up)
1435		return *data;
1436	else
1437		*data = 1;
1438	return *data;
1439}
1440
1441/* ethtool register test data */
1442struct ixgbe_reg_test {
1443	u16 reg;
1444	u8  array_len;
1445	u8  test_type;
1446	u32 mask;
1447	u32 write;
1448};
1449
1450/* In the hardware, registers are laid out either singly, in arrays
1451 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
1452 * most tests take place on arrays or single registers (handled
1453 * as a single-element array) and special-case the tables.
1454 * Table tests are always pattern tests.
1455 *
1456 * We also make provision for some required setup steps by specifying
1457 * registers to be written without any read-back testing.
1458 */
1459
1460#define PATTERN_TEST	1
1461#define SET_READ_TEST	2
1462#define WRITE_NO_TEST	3
1463#define TABLE32_TEST	4
1464#define TABLE64_TEST_LO	5
1465#define TABLE64_TEST_HI	6
1466
1467/* default 82599 register test */
1468static const struct ixgbe_reg_test reg_test_82599[] = {
1469	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1470	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1471	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1472	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1473	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
1474	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1475	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1476	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1477	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1478	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1479	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1480	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1481	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1482	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1483	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
1484	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
1485	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1486	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
1487	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1488	{ .reg = 0 }
1489};
1490
1491/* default 82598 register test */
1492static const struct ixgbe_reg_test reg_test_82598[] = {
1493	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1494	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1495	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1496	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1497	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1498	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1499	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1500	/* Enable all four RX queues before testing. */
1501	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1502	/* RDH is read-only for 82598, only test RDT. */
1503	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1504	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1505	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1506	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1507	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
1508	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1509	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1510	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1511	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
1512	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
1513	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1514	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
1515	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1516	{ .reg = 0 }
1517};
1518
1519static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1520			     u32 mask, u32 write)
1521{
1522	u32 pat, val, before;
1523	static const u32 test_pattern[] = {
1524		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1525
1526	if (ixgbe_removed(adapter->hw.hw_addr)) {
1527		*data = 1;
1528		return true;
1529	}
1530	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1531		before = ixgbe_read_reg(&adapter->hw, reg);
1532		ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
1533		val = ixgbe_read_reg(&adapter->hw, reg);
1534		if (val != (test_pattern[pat] & write & mask)) {
1535			e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1536			      reg, val, (test_pattern[pat] & write & mask));
1537			*data = reg;
1538			ixgbe_write_reg(&adapter->hw, reg, before);
1539			return true;
1540		}
1541		ixgbe_write_reg(&adapter->hw, reg, before);
1542	}
1543	return false;
1544}
1545
1546static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1547			      u32 mask, u32 write)
1548{
1549	u32 val, before;
1550
1551	if (ixgbe_removed(adapter->hw.hw_addr)) {
1552		*data = 1;
1553		return true;
1554	}
1555	before = ixgbe_read_reg(&adapter->hw, reg);
1556	ixgbe_write_reg(&adapter->hw, reg, write & mask);
1557	val = ixgbe_read_reg(&adapter->hw, reg);
1558	if ((write & mask) != (val & mask)) {
1559		e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
1560		      reg, (val & mask), (write & mask));
1561		*data = reg;
1562		ixgbe_write_reg(&adapter->hw, reg, before);
1563		return true;
1564	}
1565	ixgbe_write_reg(&adapter->hw, reg, before);
1566	return false;
1567}
1568
1569static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1570{
1571	const struct ixgbe_reg_test *test;
1572	u32 value, before, after;
1573	u32 i, toggle;
1574
1575	if (ixgbe_removed(adapter->hw.hw_addr)) {
1576		e_err(drv, "Adapter removed - register test blocked\n");
1577		*data = 1;
1578		return 1;
1579	}
1580	switch (adapter->hw.mac.type) {
1581	case ixgbe_mac_82598EB:
1582		toggle = 0x7FFFF3FF;
1583		test = reg_test_82598;
1584		break;
1585	case ixgbe_mac_82599EB:
1586	case ixgbe_mac_X540:
1587	case ixgbe_mac_X550:
1588	case ixgbe_mac_X550EM_x:
1589	case ixgbe_mac_x550em_a:
1590		toggle = 0x7FFFF30F;
1591		test = reg_test_82599;
1592		break;
1593	default:
1594		*data = 1;
1595		return 1;
1596	}
1597
1598	/*
1599	 * Because the status register is such a special case,
1600	 * we handle it separately from the rest of the register
1601	 * tests.  Some bits are read-only, some toggle, and some
1602	 * are writeable on newer MACs.
1603	 */
1604	before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
1605	value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
1606	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
1607	after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
1608	if (value != after) {
1609		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
1610		      after, value);
1611		*data = 1;
1612		return 1;
1613	}
1614	/* restore previous status */
1615	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);
1616
1617	/*
1618	 * Perform the remainder of the register test, looping through
1619	 * the test table until we either fail or reach the null entry.
1620	 */
1621	while (test->reg) {
1622		for (i = 0; i < test->array_len; i++) {
1623			bool b = false;
1624
1625			switch (test->test_type) {
1626			case PATTERN_TEST:
1627				b = reg_pattern_test(adapter, data,
1628						     test->reg + (i * 0x40),
1629						     test->mask,
1630						     test->write);
1631				break;
1632			case SET_READ_TEST:
1633				b = reg_set_and_check(adapter, data,
1634						      test->reg + (i * 0x40),
1635						      test->mask,
1636						      test->write);
1637				break;
1638			case WRITE_NO_TEST:
1639				ixgbe_write_reg(&adapter->hw,
1640						test->reg + (i * 0x40),
1641						test->write);
1642				break;
1643			case TABLE32_TEST:
1644				b = reg_pattern_test(adapter, data,
1645						     test->reg + (i * 4),
1646						     test->mask,
1647						     test->write);
1648				break;
1649			case TABLE64_TEST_LO:
1650				b = reg_pattern_test(adapter, data,
1651						     test->reg + (i * 8),
1652						     test->mask,
1653						     test->write);
1654				break;
1655			case TABLE64_TEST_HI:
1656				b = reg_pattern_test(adapter, data,
1657						     (test->reg + 4) + (i * 8),
1658						     test->mask,
1659						     test->write);
1660				break;
1661			}
1662			if (b)
1663				return 1;
1664		}
1665		test++;
1666	}
1667
1668	*data = 0;
1669	return 0;
1670}
1671
1672static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1673{
1674	struct ixgbe_hw *hw = &adapter->hw;
1675	if (hw->eeprom.ops.validate_checksum(hw, NULL))
1676		*data = 1;
1677	else
1678		*data = 0;
1679	return *data;
1680}
1681
1682static irqreturn_t ixgbe_test_intr(int irq, void *data)
1683{
1684	struct net_device *netdev = (struct net_device *) data;
1685	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1686
1687	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1688
1689	return IRQ_HANDLED;
1690}
1691
1692static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1693{
1694	struct net_device *netdev = adapter->netdev;
1695	u32 mask, i = 0, shared_int = true;
1696	u32 irq = adapter->pdev->irq;
1697
1698	*data = 0;
1699
1700	/* Hook up test interrupt handler just for this test */
1701	if (adapter->msix_entries) {
1702		/* NOTE: we don't test MSI-X interrupts here, yet */
1703		return 0;
1704	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1705		shared_int = false;
1706		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
1707				netdev)) {
1708			*data = 1;
1709			return -1;
1710		}
1711	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1712				netdev->name, netdev)) {
1713		shared_int = false;
1714	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1715			       netdev->name, netdev)) {
1716		*data = 1;
1717		return -1;
1718	}
1719	e_info(hw, "testing %s interrupt\n", shared_int ?
1720	       "shared" : "unshared");
1721
1722	/* Disable all the interrupts */
1723	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1724	IXGBE_WRITE_FLUSH(&adapter->hw);
1725	usleep_range(10000, 20000);
1726
1727	/* Test each interrupt */
1728	for (; i < 10; i++) {
1729		/* Interrupt to test */
1730		mask = BIT(i);
1731
1732		if (!shared_int) {
1733			/*
1734			 * Disable the interrupts to be reported in
1735			 * the cause register and then force the same
1736			 * interrupt and see if one gets posted.  If
1737			 * an interrupt was posted to the bus, the
1738			 * test failed.
1739			 */
1740			adapter->test_icr = 0;
1741			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1742					~mask & 0x00007FFF);
1743			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1744					~mask & 0x00007FFF);
1745			IXGBE_WRITE_FLUSH(&adapter->hw);
1746			usleep_range(10000, 20000);
1747
1748			if (adapter->test_icr & mask) {
1749				*data = 3;
1750				break;
1751			}
1752		}
1753
1754		/*
1755		 * Enable the interrupt to be reported in the cause
1756		 * register and then force the same interrupt and see
1757		 * if one gets posted.  If an interrupt was not posted
1758		 * to the bus, the test failed.
1759		 */
1760		adapter->test_icr = 0;
1761		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1762		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1763		IXGBE_WRITE_FLUSH(&adapter->hw);
1764		usleep_range(10000, 20000);
1765
1766		if (!(adapter->test_icr & mask)) {
1767			*data = 4;
1768			break;
1769		}
1770
1771		if (!shared_int) {
1772			/*
1773			 * Disable the other interrupts to be reported in
1774			 * the cause register and then force the other
1775			 * interrupts and see if any get posted.  If
1776			 * an interrupt was posted to the bus, the
1777			 * test failed.
1778			 */
1779			adapter->test_icr = 0;
1780			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1781					~mask & 0x00007FFF);
1782			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1783					~mask & 0x00007FFF);
1784			IXGBE_WRITE_FLUSH(&adapter->hw);
1785			usleep_range(10000, 20000);
1786
1787			if (adapter->test_icr) {
1788				*data = 5;
1789				break;
1790			}
1791		}
1792	}
1793
1794	/* Disable all the interrupts */
1795	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1796	IXGBE_WRITE_FLUSH(&adapter->hw);
1797	usleep_range(10000, 20000);
1798
1799	/* Unhook test interrupt handler */
1800	free_irq(irq, netdev);
1801
1802	return *data;
1803}
1804
1805static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1806{
1807	/* Shut down the DMA engines now so they can be reinitialized later,
1808	 * since the test rings and normally used rings should overlap on
1809	 * queue 0 we can just use the standard disable Rx/Tx calls and they
1810	 * will take care of disabling the test rings for us.
1811	 */
1812
1813	/* first Rx */
1814	ixgbe_disable_rx(adapter);
1815
1816	/* now Tx */
1817	ixgbe_disable_tx(adapter);
1818
1819	ixgbe_reset(adapter);
1820
1821	ixgbe_free_tx_resources(&adapter->test_tx_ring);
1822	ixgbe_free_rx_resources(&adapter->test_rx_ring);
1823}
1824
1825static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1826{
1827	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1828	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1829	struct ixgbe_hw *hw = &adapter->hw;
1830	u32 rctl, reg_data;
1831	int ret_val;
1832	int err;
1833
1834	/* Setup Tx descriptor ring and Tx buffers */
1835	tx_ring->count = IXGBE_DEFAULT_TXD;
1836	tx_ring->queue_index = 0;
1837	tx_ring->dev = &adapter->pdev->dev;
1838	tx_ring->netdev = adapter->netdev;
1839	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1840
1841	err = ixgbe_setup_tx_resources(tx_ring);
1842	if (err)
1843		return 1;
1844
1845	switch (adapter->hw.mac.type) {
1846	case ixgbe_mac_82599EB:
1847	case ixgbe_mac_X540:
1848	case ixgbe_mac_X550:
1849	case ixgbe_mac_X550EM_x:
1850	case ixgbe_mac_x550em_a:
1851		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1852		reg_data |= IXGBE_DMATXCTL_TE;
1853		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1854		break;
1855	default:
1856		break;
1857	}
1858
1859	ixgbe_configure_tx_ring(adapter, tx_ring);
1860
1861	/* Setup Rx Descriptor ring and Rx buffers */
1862	rx_ring->count = IXGBE_DEFAULT_RXD;
1863	rx_ring->queue_index = 0;
1864	rx_ring->dev = &adapter->pdev->dev;
1865	rx_ring->netdev = adapter->netdev;
1866	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1867
1868	err = ixgbe_setup_rx_resources(adapter, rx_ring);
1869	if (err) {
1870		ret_val = 4;
1871		goto err_nomem;
1872	}
1873
1874	hw->mac.ops.disable_rx(hw);
1875
1876	ixgbe_configure_rx_ring(adapter, rx_ring);
1877
1878	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1879	rctl |= IXGBE_RXCTRL_DMBYPS;
1880	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1881
1882	hw->mac.ops.enable_rx(hw);
1883
1884	return 0;
1885
1886err_nomem:
1887	ixgbe_free_desc_rings(adapter);
1888	return ret_val;
1889}
1890
1891static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1892{
1893	struct ixgbe_hw *hw = &adapter->hw;
1894	u32 reg_data;
1895
1896
1897	/* Setup MAC loopback */
1898	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1899	reg_data |= IXGBE_HLREG0_LPBK;
1900	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
1901
1902	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1903	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1904	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
1905
1906	/* X540 and X550 needs to set the MACC.FLU bit to force link up */
1907	switch (adapter->hw.mac.type) {
1908	case ixgbe_mac_X540:
1909	case ixgbe_mac_X550:
1910	case ixgbe_mac_X550EM_x:
1911	case ixgbe_mac_x550em_a:
1912		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1913		reg_data |= IXGBE_MACC_FLU;
1914		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
1915		break;
1916	default:
1917		if (hw->mac.orig_autoc) {
1918			reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
1919			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
1920		} else {
1921			return 10;
1922		}
1923	}
1924	IXGBE_WRITE_FLUSH(hw);
1925	usleep_range(10000, 20000);
1926
1927	/* Disable Atlas Tx lanes; re-enabled in reset path */
1928	if (hw->mac.type == ixgbe_mac_82598EB) {
1929		u8 atlas;
1930
1931		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1932		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1933		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1934
1935		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1936		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1937		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1938
1939		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1940		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1941		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1942
1943		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1944		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1945		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1946	}
1947
1948	return 0;
1949}
1950
1951static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1952{
1953	u32 reg_data;
1954
1955	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1956	reg_data &= ~IXGBE_HLREG0_LPBK;
1957	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1958}
1959
1960static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1961				      unsigned int frame_size)
1962{
1963	memset(skb->data, 0xFF, frame_size);
1964	frame_size >>= 1;
1965	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
1966	skb->data[frame_size + 10] = 0xBE;
1967	skb->data[frame_size + 12] = 0xAF;
1968}
1969
1970static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
1971				     unsigned int frame_size)
1972{
1973	unsigned char *data;
1974	bool match = true;
1975
1976	frame_size >>= 1;
1977
1978	data = kmap(rx_buffer->page) + rx_buffer->page_offset;
1979
1980	if (data[3] != 0xFF ||
1981	    data[frame_size + 10] != 0xBE ||
1982	    data[frame_size + 12] != 0xAF)
1983		match = false;
1984
1985	kunmap(rx_buffer->page);
1986
1987	return match;
1988}
1989
1990static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1991				  struct ixgbe_ring *tx_ring,
1992				  unsigned int size)
1993{
1994	union ixgbe_adv_rx_desc *rx_desc;
1995	u16 rx_ntc, tx_ntc, count = 0;
1996
1997	/* initialize next to clean and descriptor values */
1998	rx_ntc = rx_ring->next_to_clean;
1999	tx_ntc = tx_ring->next_to_clean;
2000	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
2001
2002	while (tx_ntc != tx_ring->next_to_use) {
2003		union ixgbe_adv_tx_desc *tx_desc;
2004		struct ixgbe_tx_buffer *tx_buffer;
2005
2006		tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);
2007
2008		/* if DD is not set transmit has not completed */
2009		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
2010			return count;
2011
2012		/* unmap buffer on Tx side */
2013		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
2014
2015		/* Free all the Tx ring sk_buffs */
2016		dev_kfree_skb_any(tx_buffer->skb);
2017
2018		/* unmap skb header data */
2019		dma_unmap_single(tx_ring->dev,
2020				 dma_unmap_addr(tx_buffer, dma),
2021				 dma_unmap_len(tx_buffer, len),
2022				 DMA_TO_DEVICE);
2023		dma_unmap_len_set(tx_buffer, len, 0);
2024
2025		/* increment Tx next to clean counter */
2026		tx_ntc++;
2027		if (tx_ntc == tx_ring->count)
2028			tx_ntc = 0;
2029	}
2030
2031	while (rx_desc->wb.upper.length) {
2032		struct ixgbe_rx_buffer *rx_buffer;
2033
2034		/* check Rx buffer */
2035		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
2036
2037		/* sync Rx buffer for CPU read */
2038		dma_sync_single_for_cpu(rx_ring->dev,
2039					rx_buffer->dma,
2040					ixgbe_rx_bufsz(rx_ring),
2041					DMA_FROM_DEVICE);
2042
2043		/* verify contents of skb */
2044		if (ixgbe_check_lbtest_frame(rx_buffer, size))
2045			count++;
2046		else
2047			break;
2048
2049		/* sync Rx buffer for device write */
2050		dma_sync_single_for_device(rx_ring->dev,
2051					   rx_buffer->dma,
2052					   ixgbe_rx_bufsz(rx_ring),
2053					   DMA_FROM_DEVICE);
2054
2055		/* increment Rx next to clean counter */
2056		rx_ntc++;
2057		if (rx_ntc == rx_ring->count)
2058			rx_ntc = 0;
2059
2060		/* fetch next descriptor */
2061		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
2062	}
2063
2064	netdev_tx_reset_queue(txring_txq(tx_ring));
2065
2066	/* re-map buffers to ring, store next to clean values */
2067	ixgbe_alloc_rx_buffers(rx_ring, count);
2068	rx_ring->next_to_clean = rx_ntc;
2069	tx_ring->next_to_clean = tx_ntc;
2070
2071	return count;
2072}
2073
2074static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
2075{
2076	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
2077	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
2078	int i, j, lc, good_cnt, ret_val = 0;
2079	unsigned int size = 1024;
2080	netdev_tx_t tx_ret_val;
2081	struct sk_buff *skb;
2082	u32 flags_orig = adapter->flags;
2083
2084	/* DCB can modify the frames on Tx */
2085	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
2086
2087	/* allocate test skb */
2088	skb = alloc_skb(size, GFP_KERNEL);
2089	if (!skb)
2090		return 11;
2091
2092	/* place data into test skb */
2093	ixgbe_create_lbtest_frame(skb, size);
2094	skb_put(skb, size);
2095
2096	/*
2097	 * Calculate the loop count based on the largest descriptor ring
2098	 * The idea is to wrap the largest ring a number of times using 64
2099	 * send/receive pairs during each loop
2100	 */
2101
2102	if (rx_ring->count <= tx_ring->count)
2103		lc = ((tx_ring->count / 64) * 2) + 1;
2104	else
2105		lc = ((rx_ring->count / 64) * 2) + 1;
2106
2107	for (j = 0; j <= lc; j++) {
2108		/* reset count of good packets */
2109		good_cnt = 0;
2110
2111		/* place 64 packets on the transmit queue*/
2112		for (i = 0; i < 64; i++) {
2113			skb_get(skb);
2114			tx_ret_val = ixgbe_xmit_frame_ring(skb,
2115							   adapter,
2116							   tx_ring);
2117			if (tx_ret_val == NETDEV_TX_OK)
2118				good_cnt++;
2119		}
2120
2121		if (good_cnt != 64) {
2122			ret_val = 12;
2123			break;
2124		}
2125
2126		/* allow 200 milliseconds for packets to go from Tx to Rx */
2127		msleep(200);
2128
2129		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
2130		if (good_cnt != 64) {
2131			ret_val = 13;
2132			break;
2133		}
2134	}
2135
2136	/* free the original skb */
2137	kfree_skb(skb);
2138	adapter->flags = flags_orig;
2139
2140	return ret_val;
2141}
2142
2143static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
2144{
2145	*data = ixgbe_setup_desc_rings(adapter);
2146	if (*data)
2147		goto out;
2148	*data = ixgbe_setup_loopback_test(adapter);
2149	if (*data)
2150		goto err_loopback;
2151	*data = ixgbe_run_loopback_test(adapter);
2152	ixgbe_loopback_cleanup(adapter);
2153
2154err_loopback:
2155	ixgbe_free_desc_rings(adapter);
2156out:
2157	return *data;
2158}
2159
2160static void ixgbe_diag_test(struct net_device *netdev,
2161			    struct ethtool_test *eth_test, u64 *data)
2162{
2163	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2164	bool if_running = netif_running(netdev);
2165
2166	if (ixgbe_removed(adapter->hw.hw_addr)) {
2167		e_err(hw, "Adapter removed - test blocked\n");
2168		data[0] = 1;
2169		data[1] = 1;
2170		data[2] = 1;
2171		data[3] = 1;
2172		data[4] = 1;
2173		eth_test->flags |= ETH_TEST_FL_FAILED;
2174		return;
2175	}
2176	set_bit(__IXGBE_TESTING, &adapter->state);
2177	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
2178		struct ixgbe_hw *hw = &adapter->hw;
2179
2180		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2181			int i;
2182			for (i = 0; i < adapter->num_vfs; i++) {
2183				if (adapter->vfinfo[i].clear_to_send) {
2184					netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
2185					data[0] = 1;
2186					data[1] = 1;
2187					data[2] = 1;
2188					data[3] = 1;
2189					data[4] = 1;
2190					eth_test->flags |= ETH_TEST_FL_FAILED;
2191					clear_bit(__IXGBE_TESTING,
2192						  &adapter->state);
2193					return;
2194				}
2195			}
2196		}
2197
2198		/* Offline tests */
2199		e_info(hw, "offline testing starting\n");
2200
2201		/* Link test performed before hardware reset so autoneg doesn't
2202		 * interfere with test result
2203		 */
2204		if (ixgbe_link_test(adapter, &data[4]))
2205			eth_test->flags |= ETH_TEST_FL_FAILED;
2206
2207		if (if_running)
2208			/* indicate we're in test mode */
2209			ixgbe_close(netdev);
2210		else
2211			ixgbe_reset(adapter);
2212
2213		e_info(hw, "register testing starting\n");
2214		if (ixgbe_reg_test(adapter, &data[0]))
2215			eth_test->flags |= ETH_TEST_FL_FAILED;
2216
2217		ixgbe_reset(adapter);
2218		e_info(hw, "eeprom testing starting\n");
2219		if (ixgbe_eeprom_test(adapter, &data[1]))
2220			eth_test->flags |= ETH_TEST_FL_FAILED;
2221
2222		ixgbe_reset(adapter);
2223		e_info(hw, "interrupt testing starting\n");
2224		if (ixgbe_intr_test(adapter, &data[2]))
2225			eth_test->flags |= ETH_TEST_FL_FAILED;
2226
2227		/* If SRIOV or VMDq is enabled then skip MAC
2228		 * loopback diagnostic. */
2229		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
2230				      IXGBE_FLAG_VMDQ_ENABLED)) {
2231			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
2232			data[3] = 0;
2233			goto skip_loopback;
2234		}
2235
2236		ixgbe_reset(adapter);
2237		e_info(hw, "loopback testing starting\n");
2238		if (ixgbe_loopback_test(adapter, &data[3]))
2239			eth_test->flags |= ETH_TEST_FL_FAILED;
2240
2241skip_loopback:
2242		ixgbe_reset(adapter);
2243
2244		/* clear testing bit and return adapter to previous state */
2245		clear_bit(__IXGBE_TESTING, &adapter->state);
2246		if (if_running)
2247			ixgbe_open(netdev);
2248		else if (hw->mac.ops.disable_tx_laser)
2249			hw->mac.ops.disable_tx_laser(hw);
2250	} else {
2251		e_info(hw, "online testing starting\n");
2252
2253		/* Online tests */
2254		if (ixgbe_link_test(adapter, &data[4]))
2255			eth_test->flags |= ETH_TEST_FL_FAILED;
2256
2257		/* Offline tests aren't run; pass by default */
2258		data[0] = 0;
2259		data[1] = 0;
2260		data[2] = 0;
2261		data[3] = 0;
2262
2263		clear_bit(__IXGBE_TESTING, &adapter->state);
2264	}
2265}
2266
2267static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
2268			       struct ethtool_wolinfo *wol)
2269{
2270	struct ixgbe_hw *hw = &adapter->hw;
2271	int retval = 0;
2272
2273	/* WOL not supported for all devices */
2274	if (!ixgbe_wol_supported(adapter, hw->device_id,
2275				 hw->subsystem_device_id)) {
2276		retval = 1;
2277		wol->supported = 0;
2278	}
2279
2280	return retval;
2281}
2282
2283static void ixgbe_get_wol(struct net_device *netdev,
2284			  struct ethtool_wolinfo *wol)
2285{
2286	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2287
2288	wol->supported = WAKE_UCAST | WAKE_MCAST |
2289			 WAKE_BCAST | WAKE_MAGIC;
2290	wol->wolopts = 0;
2291
2292	if (ixgbe_wol_exclusion(adapter, wol) ||
2293	    !device_can_wakeup(&adapter->pdev->dev))
2294		return;
2295
2296	if (adapter->wol & IXGBE_WUFC_EX)
2297		wol->wolopts |= WAKE_UCAST;
2298	if (adapter->wol & IXGBE_WUFC_MC)
2299		wol->wolopts |= WAKE_MCAST;
2300	if (adapter->wol & IXGBE_WUFC_BC)
2301		wol->wolopts |= WAKE_BCAST;
2302	if (adapter->wol & IXGBE_WUFC_MAG)
2303		wol->wolopts |= WAKE_MAGIC;
2304}
2305
2306static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2307{
2308	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2309
2310	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
2311			    WAKE_FILTER))
2312		return -EOPNOTSUPP;
2313
2314	if (ixgbe_wol_exclusion(adapter, wol))
2315		return wol->wolopts ? -EOPNOTSUPP : 0;
2316
2317	adapter->wol = 0;
2318
2319	if (wol->wolopts & WAKE_UCAST)
2320		adapter->wol |= IXGBE_WUFC_EX;
2321	if (wol->wolopts & WAKE_MCAST)
2322		adapter->wol |= IXGBE_WUFC_MC;
2323	if (wol->wolopts & WAKE_BCAST)
2324		adapter->wol |= IXGBE_WUFC_BC;
2325	if (wol->wolopts & WAKE_MAGIC)
2326		adapter->wol |= IXGBE_WUFC_MAG;
2327
2328	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
2329
2330	return 0;
2331}
2332
2333static int ixgbe_nway_reset(struct net_device *netdev)
2334{
2335	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2336
2337	if (netif_running(netdev))
2338		ixgbe_reinit_locked(adapter);
2339
2340	return 0;
2341}
2342
2343static int ixgbe_set_phys_id(struct net_device *netdev,
2344			     enum ethtool_phys_id_state state)
2345{
2346	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2347	struct ixgbe_hw *hw = &adapter->hw;
2348
2349	if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
2350		return -EOPNOTSUPP;
2351
2352	switch (state) {
2353	case ETHTOOL_ID_ACTIVE:
2354		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2355		return 2;
2356
2357	case ETHTOOL_ID_ON:
2358		hw->mac.ops.led_on(hw, hw->mac.led_link_act);
2359		break;
2360
2361	case ETHTOOL_ID_OFF:
2362		hw->mac.ops.led_off(hw, hw->mac.led_link_act);
2363		break;
2364
2365	case ETHTOOL_ID_INACTIVE:
2366		/* Restore LED settings */
2367		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
2368		break;
2369	}
2370
2371	return 0;
2372}
2373
2374static int ixgbe_get_coalesce(struct net_device *netdev,
2375			      struct ethtool_coalesce *ec)
2376{
2377	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2378
2379	/* only valid if in constant ITR mode */
2380	if (adapter->rx_itr_setting <= 1)
2381		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
2382	else
2383		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2384
2385	/* if in mixed tx/rx queues per vector mode, report only rx settings */
2386	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2387		return 0;
2388
2389	/* only valid if in constant ITR mode */
2390	if (adapter->tx_itr_setting <= 1)
2391		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
2392	else
2393		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
2394
2395	return 0;
2396}
2397
2398/*
2399 * this function must be called before setting the new value of
2400 * rx_itr_setting
2401 */
2402static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
2403{
2404	struct net_device *netdev = adapter->netdev;
2405
2406	/* nothing to do if LRO or RSC are not enabled */
2407	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
2408	    !(netdev->features & NETIF_F_LRO))
2409		return false;
2410
2411	/* check the feature flag value and enable RSC if necessary */
2412	if (adapter->rx_itr_setting == 1 ||
2413	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
2414		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2415			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2416			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
2417			return true;
2418		}
2419	/* if interrupt rate is too high then disable RSC */
2420	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2421		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2422		e_info(probe, "rx-usecs set too low, disabling RSC\n");
2423		return true;
2424	}
2425	return false;
2426}
2427
2428static int ixgbe_set_coalesce(struct net_device *netdev,
2429			      struct ethtool_coalesce *ec)
2430{
2431	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2432	struct ixgbe_q_vector *q_vector;
2433	int i;
2434	u16 tx_itr_param, rx_itr_param, tx_itr_prev;
2435	bool need_reset = false;
2436
2437	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
2438		/* reject Tx specific changes in case of mixed RxTx vectors */
2439		if (ec->tx_coalesce_usecs)
2440			return -EINVAL;
2441		tx_itr_prev = adapter->rx_itr_setting;
2442	} else {
2443		tx_itr_prev = adapter->tx_itr_setting;
2444	}
2445
2446	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
2447	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
2448		return -EINVAL;
2449
2450	if (ec->rx_coalesce_usecs > 1)
2451		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
2452	else
2453		adapter->rx_itr_setting = ec->rx_coalesce_usecs;
2454
2455	if (adapter->rx_itr_setting == 1)
2456		rx_itr_param = IXGBE_20K_ITR;
2457	else
2458		rx_itr_param = adapter->rx_itr_setting;
2459
2460	if (ec->tx_coalesce_usecs > 1)
2461		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
2462	else
2463		adapter->tx_itr_setting = ec->tx_coalesce_usecs;
2464
2465	if (adapter->tx_itr_setting == 1)
2466		tx_itr_param = IXGBE_12K_ITR;
2467	else
2468		tx_itr_param = adapter->tx_itr_setting;
2469
2470	/* mixed Rx/Tx */
2471	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2472		adapter->tx_itr_setting = adapter->rx_itr_setting;
2473
2474	/* detect ITR changes that require update of TXDCTL.WTHRESH */
2475	if ((adapter->tx_itr_setting != 1) &&
2476	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
2477		if ((tx_itr_prev == 1) ||
2478		    (tx_itr_prev >= IXGBE_100K_ITR))
2479			need_reset = true;
2480	} else {
2481		if ((tx_itr_prev != 1) &&
2482		    (tx_itr_prev < IXGBE_100K_ITR))
2483			need_reset = true;
2484	}
2485
2486	/* check the old value and enable RSC if necessary */
2487	need_reset |= ixgbe_update_rsc(adapter);
2488
2489	for (i = 0; i < adapter->num_q_vectors; i++) {
2490		q_vector = adapter->q_vector[i];
2491		if (q_vector->tx.count && !q_vector->rx.count)
2492			/* tx only */
2493			q_vector->itr = tx_itr_param;
2494		else
2495			/* rx only or mixed */
2496			q_vector->itr = rx_itr_param;
2497		ixgbe_write_eitr(q_vector);
2498	}
2499
2500	/*
2501	 * do reset here at the end to make sure EITR==0 case is handled
2502	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
2503	 * also locks in RSC enable/disable which requires reset
2504	 */
2505	if (need_reset)
2506		ixgbe_do_reset(netdev);
2507
2508	return 0;
2509}
2510
2511static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2512					struct ethtool_rxnfc *cmd)
2513{
2514	union ixgbe_atr_input *mask = &adapter->fdir_mask;
2515	struct ethtool_rx_flow_spec *fsp =
2516		(struct ethtool_rx_flow_spec *)&cmd->fs;
2517	struct hlist_node *node2;
2518	struct ixgbe_fdir_filter *rule = NULL;
2519
2520	/* report total rule count */
2521	cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2522
2523	hlist_for_each_entry_safe(rule, node2,
2524				  &adapter->fdir_filter_list, fdir_node) {
2525		if (fsp->location <= rule->sw_idx)
2526			break;
2527	}
2528
2529	if (!rule || fsp->location != rule->sw_idx)
2530		return -EINVAL;
2531
2532	/* fill out the flow spec entry */
2533
2534	/* set flow type field */
2535	switch (rule->filter.formatted.flow_type) {
2536	case IXGBE_ATR_FLOW_TYPE_TCPV4:
2537		fsp->flow_type = TCP_V4_FLOW;
2538		break;
2539	case IXGBE_ATR_FLOW_TYPE_UDPV4:
2540		fsp->flow_type = UDP_V4_FLOW;
2541		break;
2542	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2543		fsp->flow_type = SCTP_V4_FLOW;
2544		break;
2545	case IXGBE_ATR_FLOW_TYPE_IPV4:
2546		fsp->flow_type = IP_USER_FLOW;
2547		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
2548		fsp->h_u.usr_ip4_spec.proto = 0;
2549		fsp->m_u.usr_ip4_spec.proto = 0;
2550		break;
2551	default:
2552		return -EINVAL;
2553	}
2554
2555	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
2556	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
2557	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
2558	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
2559	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
2560	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
2561	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
2562	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
2563	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
2564	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
2565	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
2566	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
2567	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
2568	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
2569	fsp->flow_type |= FLOW_EXT;
2570
2571	/* record action */
2572	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
2573		fsp->ring_cookie = RX_CLS_FLOW_DISC;
2574	else
2575		fsp->ring_cookie = rule->action;
2576
2577	return 0;
2578}
2579
2580static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
2581				      struct ethtool_rxnfc *cmd,
2582				      u32 *rule_locs)
2583{
2584	struct hlist_node *node2;
2585	struct ixgbe_fdir_filter *rule;
2586	int cnt = 0;
2587
2588	/* report total rule count */
2589	cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2590
2591	hlist_for_each_entry_safe(rule, node2,
2592				  &adapter->fdir_filter_list, fdir_node) {
2593		if (cnt == cmd->rule_cnt)
2594			return -EMSGSIZE;
2595		rule_locs[cnt] = rule->sw_idx;
2596		cnt++;
2597	}
2598
2599	cmd->rule_cnt = cnt;
2600
2601	return 0;
2602}
2603
2604static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
2605				   struct ethtool_rxnfc *cmd)
2606{
2607	cmd->data = 0;
2608
2609	/* Report default options for RSS on ixgbe */
2610	switch (cmd->flow_type) {
2611	case TCP_V4_FLOW:
2612		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2613		fallthrough;
2614	case UDP_V4_FLOW:
2615		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2616			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2617		fallthrough;
2618	case SCTP_V4_FLOW:
2619	case AH_ESP_V4_FLOW:
2620	case AH_V4_FLOW:
2621	case ESP_V4_FLOW:
2622	case IPV4_FLOW:
2623		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2624		break;
2625	case TCP_V6_FLOW:
2626		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2627		fallthrough;
2628	case UDP_V6_FLOW:
2629		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2630			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2631		fallthrough;
2632	case SCTP_V6_FLOW:
2633	case AH_ESP_V6_FLOW:
2634	case AH_V6_FLOW:
2635	case ESP_V6_FLOW:
2636	case IPV6_FLOW:
2637		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2638		break;
2639	default:
2640		return -EINVAL;
2641	}
2642
2643	return 0;
2644}
2645
2646static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
2647{
2648	if (adapter->hw.mac.type < ixgbe_mac_X550)
2649		return 16;
2650	else
2651		return 64;
2652}
2653
2654static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2655			   u32 *rule_locs)
2656{
2657	struct ixgbe_adapter *adapter = netdev_priv(dev);
2658	int ret = -EOPNOTSUPP;
2659
2660	switch (cmd->cmd) {
2661	case ETHTOOL_GRXRINGS:
2662		cmd->data = min_t(int, adapter->num_rx_queues,
2663				  ixgbe_rss_indir_tbl_max(adapter));
2664		ret = 0;
2665		break;
2666	case ETHTOOL_GRXCLSRLCNT:
2667		cmd->rule_cnt = adapter->fdir_filter_count;
2668		ret = 0;
2669		break;
2670	case ETHTOOL_GRXCLSRULE:
2671		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
2672		break;
2673	case ETHTOOL_GRXCLSRLALL:
2674		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
2675		break;
2676	case ETHTOOL_GRXFH:
2677		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
2678		break;
2679	default:
2680		break;
2681	}
2682
2683	return ret;
2684}
2685
2686int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2687				    struct ixgbe_fdir_filter *input,
2688				    u16 sw_idx)
2689{
2690	struct ixgbe_hw *hw = &adapter->hw;
2691	struct hlist_node *node2;
2692	struct ixgbe_fdir_filter *rule, *parent;
2693	int err = -EINVAL;
2694
2695	parent = NULL;
2696	rule = NULL;
2697
2698	hlist_for_each_entry_safe(rule, node2,
2699				  &adapter->fdir_filter_list, fdir_node) {
2700		/* hash found, or no matching entry */
2701		if (rule->sw_idx >= sw_idx)
2702			break;
2703		parent = rule;
2704	}
2705
2706	/* if there is an old rule occupying our place remove it */
2707	if (rule && (rule->sw_idx == sw_idx)) {
2708		if (!input || (rule->filter.formatted.bkt_hash !=
2709			       input->filter.formatted.bkt_hash)) {
2710			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
2711								&rule->filter,
2712								sw_idx);
2713		}
2714
2715		hlist_del(&rule->fdir_node);
2716		kfree(rule);
2717		adapter->fdir_filter_count--;
2718	}
2719
2720	/*
2721	 * If no input this was a delete, err should be 0 if a rule was
2722	 * successfully found and removed from the list else -EINVAL
2723	 */
2724	if (!input)
2725		return err;
2726
2727	/* initialize node and set software index */
2728	INIT_HLIST_NODE(&input->fdir_node);
2729
2730	/* add filter to the list */
2731	if (parent)
2732		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
2733	else
2734		hlist_add_head(&input->fdir_node,
2735			       &adapter->fdir_filter_list);
2736
2737	/* update counts */
2738	adapter->fdir_filter_count++;
2739
2740	return 0;
2741}
2742
2743static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
2744				       u8 *flow_type)
2745{
2746	switch (fsp->flow_type & ~FLOW_EXT) {
2747	case TCP_V4_FLOW:
2748		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2749		break;
2750	case UDP_V4_FLOW:
2751		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2752		break;
2753	case SCTP_V4_FLOW:
2754		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2755		break;
2756	case IP_USER_FLOW:
2757		switch (fsp->h_u.usr_ip4_spec.proto) {
2758		case IPPROTO_TCP:
2759			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2760			break;
2761		case IPPROTO_UDP:
2762			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2763			break;
2764		case IPPROTO_SCTP:
2765			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2766			break;
2767		case 0:
2768			if (!fsp->m_u.usr_ip4_spec.proto) {
2769				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2770				break;
2771			}
2772			fallthrough;
2773		default:
2774			return 0;
2775		}
2776		break;
2777	default:
2778		return 0;
2779	}
2780
2781	return 1;
2782}
2783
2784static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2785					struct ethtool_rxnfc *cmd)
2786{
2787	struct ethtool_rx_flow_spec *fsp =
2788		(struct ethtool_rx_flow_spec *)&cmd->fs;
2789	struct ixgbe_hw *hw = &adapter->hw;
2790	struct ixgbe_fdir_filter *input;
2791	union ixgbe_atr_input mask;
2792	u8 queue;
2793	int err;
2794
2795	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
2796		return -EOPNOTSUPP;
2797
2798	/* ring_cookie is a masked into a set of queues and ixgbe pools or
2799	 * we use the drop index.
2800	 */
2801	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
2802		queue = IXGBE_FDIR_DROP_QUEUE;
2803	} else {
2804		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
2805		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
2806
2807		if (!vf && (ring >= adapter->num_rx_queues))
2808			return -EINVAL;
2809		else if (vf &&
2810			 ((vf > adapter->num_vfs) ||
2811			   ring >= adapter->num_rx_queues_per_pool))
2812			return -EINVAL;
2813
2814		/* Map the ring onto the absolute queue index */
2815		if (!vf)
2816			queue = adapter->rx_ring[ring]->reg_idx;
2817		else
2818			queue = ((vf - 1) *
2819				adapter->num_rx_queues_per_pool) + ring;
2820	}
2821
2822	/* Don't allow indexes to exist outside of available space */
2823	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
2824		e_err(drv, "Location out of range\n");
2825		return -EINVAL;
2826	}
2827
2828	input = kzalloc(sizeof(*input), GFP_ATOMIC);
2829	if (!input)
2830		return -ENOMEM;
2831
2832	memset(&mask, 0, sizeof(union ixgbe_atr_input));
2833
2834	/* set SW index */
2835	input->sw_idx = fsp->location;
2836
2837	/* record flow type */
2838	if (!ixgbe_flowspec_to_flow_type(fsp,
2839					 &input->filter.formatted.flow_type)) {
2840		e_err(drv, "Unrecognized flow type\n");
2841		goto err_out;
2842	}
2843
2844	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2845				   IXGBE_ATR_L4TYPE_MASK;
2846
2847	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
2848		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
2849
2850	/* Copy input into formatted structures */
2851	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2852	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
2853	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2854	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
2855	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
2856	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
2857	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
2858	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
2859
2860	if (fsp->flow_type & FLOW_EXT) {
2861		input->filter.formatted.vm_pool =
2862				(unsigned char)ntohl(fsp->h_ext.data[1]);
2863		mask.formatted.vm_pool =
2864				(unsigned char)ntohl(fsp->m_ext.data[1]);
2865		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
2866		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
2867		input->filter.formatted.flex_bytes =
2868						fsp->h_ext.vlan_etype;
2869		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
2870	}
2871
2872	/* determine if we need to drop or route the packet */
2873	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
2874		input->action = IXGBE_FDIR_DROP_QUEUE;
2875	else
2876		input->action = fsp->ring_cookie;
2877
2878	spin_lock(&adapter->fdir_perfect_lock);
2879
2880	if (hlist_empty(&adapter->fdir_filter_list)) {
2881		/* save mask and program input mask into HW */
2882		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
2883		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
2884		if (err) {
2885			e_err(drv, "Error writing mask\n");
2886			goto err_out_w_lock;
2887		}
2888	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
2889		e_err(drv, "Only one mask supported per port\n");
2890		goto err_out_w_lock;
2891	}
2892
2893	/* apply mask and compute/store hash */
2894	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
2895
2896	/* program filters to filter memory */
2897	err = ixgbe_fdir_write_perfect_filter_82599(hw,
2898				&input->filter, input->sw_idx, queue);
2899	if (err)
2900		goto err_out_w_lock;
2901
2902	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
2903
2904	spin_unlock(&adapter->fdir_perfect_lock);
2905
2906	return err;
2907err_out_w_lock:
2908	spin_unlock(&adapter->fdir_perfect_lock);
2909err_out:
2910	kfree(input);
2911	return -EINVAL;
2912}
2913
2914static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2915					struct ethtool_rxnfc *cmd)
2916{
2917	struct ethtool_rx_flow_spec *fsp =
2918		(struct ethtool_rx_flow_spec *)&cmd->fs;
2919	int err;
2920
2921	spin_lock(&adapter->fdir_perfect_lock);
2922	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
2923	spin_unlock(&adapter->fdir_perfect_lock);
2924
2925	return err;
2926}
2927
2928#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
2929		       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2930static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
2931				  struct ethtool_rxnfc *nfc)
2932{
2933	u32 flags2 = adapter->flags2;
2934
2935	/*
2936	 * RSS does not support anything other than hashing
2937	 * to queues on src and dst IPs and ports
2938	 */
2939	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2940			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
2941		return -EINVAL;
2942
2943	switch (nfc->flow_type) {
2944	case TCP_V4_FLOW:
2945	case TCP_V6_FLOW:
2946		if (!(nfc->data & RXH_IP_SRC) ||
2947		    !(nfc->data & RXH_IP_DST) ||
2948		    !(nfc->data & RXH_L4_B_0_1) ||
2949		    !(nfc->data & RXH_L4_B_2_3))
2950			return -EINVAL;
2951		break;
2952	case UDP_V4_FLOW:
2953		if (!(nfc->data & RXH_IP_SRC) ||
2954		    !(nfc->data & RXH_IP_DST))
2955			return -EINVAL;
2956		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2957		case 0:
2958			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2959			break;
2960		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2961			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2962			break;
2963		default:
2964			return -EINVAL;
2965		}
2966		break;
2967	case UDP_V6_FLOW:
2968		if (!(nfc->data & RXH_IP_SRC) ||
2969		    !(nfc->data & RXH_IP_DST))
2970			return -EINVAL;
2971		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2972		case 0:
2973			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2974			break;
2975		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2976			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2977			break;
2978		default:
2979			return -EINVAL;
2980		}
2981		break;
2982	case AH_ESP_V4_FLOW:
2983	case AH_V4_FLOW:
2984	case ESP_V4_FLOW:
2985	case SCTP_V4_FLOW:
2986	case AH_ESP_V6_FLOW:
2987	case AH_V6_FLOW:
2988	case ESP_V6_FLOW:
2989	case SCTP_V6_FLOW:
2990		if (!(nfc->data & RXH_IP_SRC) ||
2991		    !(nfc->data & RXH_IP_DST) ||
2992		    (nfc->data & RXH_L4_B_0_1) ||
2993		    (nfc->data & RXH_L4_B_2_3))
2994			return -EINVAL;
2995		break;
2996	default:
2997		return -EINVAL;
2998	}
2999
3000	/* if we changed something we need to update flags */
3001	if (flags2 != adapter->flags2) {
3002		struct ixgbe_hw *hw = &adapter->hw;
3003		u32 mrqc;
3004		unsigned int pf_pool = adapter->num_vfs;
3005
3006		if ((hw->mac.type >= ixgbe_mac_X550) &&
3007		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3008			mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
3009		else
3010			mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
3011
3012		if ((flags2 & UDP_RSS_FLAGS) &&
3013		    !(adapter->flags2 & UDP_RSS_FLAGS))
3014			e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
3015
3016		adapter->flags2 = flags2;
3017
3018		/* Perform hash on these packet types */
3019		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
3020		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3021		      | IXGBE_MRQC_RSS_FIELD_IPV6
3022		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3023
3024		mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
3025			  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
3026
3027		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3028			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3029
3030		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3031			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3032
3033		if ((hw->mac.type >= ixgbe_mac_X550) &&
3034		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3035			IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
3036		else
3037			IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3038	}
3039
3040	return 0;
3041}
3042
3043static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3044{
3045	struct ixgbe_adapter *adapter = netdev_priv(dev);
3046	int ret = -EOPNOTSUPP;
3047
3048	switch (cmd->cmd) {
3049	case ETHTOOL_SRXCLSRLINS:
3050		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
3051		break;
3052	case ETHTOOL_SRXCLSRLDEL:
3053		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
3054		break;
3055	case ETHTOOL_SRXFH:
3056		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
3057		break;
3058	default:
3059		break;
3060	}
3061
3062	return ret;
3063}
3064
3065static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
3066{
3067	return IXGBE_RSS_KEY_SIZE;
3068}
3069
3070static u32 ixgbe_rss_indir_size(struct net_device *netdev)
3071{
3072	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3073
3074	return ixgbe_rss_indir_tbl_entries(adapter);
3075}
3076
3077static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
3078{
3079	int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
3080	u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
3081
3082	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3083		rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
3084
3085	for (i = 0; i < reta_size; i++)
3086		indir[i] = adapter->rss_indir_tbl[i] & rss_m;
3087}
3088
3089static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
3090			  u8 *hfunc)
3091{
3092	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3093
3094	if (hfunc)
3095		*hfunc = ETH_RSS_HASH_TOP;
3096
3097	if (indir)
3098		ixgbe_get_reta(adapter, indir);
3099
3100	if (key)
3101		memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));
3102
3103	return 0;
3104}
3105
3106static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
3107			  const u8 *key, const u8 hfunc)
3108{
3109	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3110	int i;
3111	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3112
3113	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
3114		return -EOPNOTSUPP;
3115
3116	/* Fill out the redirection table */
3117	if (indir) {
3118		int max_queues = min_t(int, adapter->num_rx_queues,
3119				       ixgbe_rss_indir_tbl_max(adapter));
3120
3121		/*Allow at least 2 queues w/ SR-IOV.*/
3122		if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
3123		    (max_queues < 2))
3124			max_queues = 2;
3125
3126		/* Verify user input. */
3127		for (i = 0; i < reta_entries; i++)
3128			if (indir[i] >= max_queues)
3129				return -EINVAL;
3130
3131		for (i = 0; i < reta_entries; i++)
3132			adapter->rss_indir_tbl[i] = indir[i];
3133
3134		ixgbe_store_reta(adapter);
3135	}
3136
3137	/* Fill out the rss hash key */
3138	if (key) {
3139		memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
3140		ixgbe_store_key(adapter);
3141	}
3142
3143	return 0;
3144}
3145
3146static int ixgbe_get_ts_info(struct net_device *dev,
3147			     struct ethtool_ts_info *info)
3148{
3149	struct ixgbe_adapter *adapter = netdev_priv(dev);
3150
3151	/* we always support timestamping disabled */
3152	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
3153
3154	switch (adapter->hw.mac.type) {
3155	case ixgbe_mac_X550:
3156	case ixgbe_mac_X550EM_x:
3157	case ixgbe_mac_x550em_a:
3158		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
3159		break;
3160	case ixgbe_mac_X540:
3161	case ixgbe_mac_82599EB:
3162		info->rx_filters |=
3163			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
3164			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
3165			BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
3166		break;
3167	default:
3168		return ethtool_op_get_ts_info(dev, info);
3169	}
3170
3171	info->so_timestamping =
3172		SOF_TIMESTAMPING_TX_SOFTWARE |
3173		SOF_TIMESTAMPING_RX_SOFTWARE |
3174		SOF_TIMESTAMPING_SOFTWARE |
3175		SOF_TIMESTAMPING_TX_HARDWARE |
3176		SOF_TIMESTAMPING_RX_HARDWARE |
3177		SOF_TIMESTAMPING_RAW_HARDWARE;
3178
3179	if (adapter->ptp_clock)
3180		info->phc_index = ptp_clock_index(adapter->ptp_clock);
3181	else
3182		info->phc_index = -1;
3183
3184	info->tx_types =
3185		BIT(HWTSTAMP_TX_OFF) |
3186		BIT(HWTSTAMP_TX_ON);
3187
3188	return 0;
3189}
3190
3191static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
3192{
3193	unsigned int max_combined;
3194	u8 tcs = adapter->hw_tcs;
3195
3196	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3197		/* We only support one q_vector without MSI-X */
3198		max_combined = 1;
3199	} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3200		/* Limit value based on the queue mask */
3201		max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
3202	} else if (tcs > 1) {
3203		/* For DCB report channels per traffic class */
3204		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3205			/* 8 TC w/ 4 queues per TC */
3206			max_combined = 4;
3207		} else if (tcs > 4) {
3208			/* 8 TC w/ 8 queues per TC */
3209			max_combined = 8;
3210		} else {
3211			/* 4 TC w/ 16 queues per TC */
3212			max_combined = 16;
3213		}
3214	} else if (adapter->atr_sample_rate) {
3215		/* support up to 64 queues with ATR */
3216		max_combined = IXGBE_MAX_FDIR_INDICES;
3217	} else {
3218		/* support up to 16 queues with RSS */
3219		max_combined = ixgbe_max_rss_indices(adapter);
3220	}
3221
3222	return min_t(int, max_combined, num_online_cpus());
3223}
3224
3225static void ixgbe_get_channels(struct net_device *dev,
3226			       struct ethtool_channels *ch)
3227{
3228	struct ixgbe_adapter *adapter = netdev_priv(dev);
3229
3230	/* report maximum channels */
3231	ch->max_combined = ixgbe_max_channels(adapter);
3232
3233	/* report info for other vector */
3234	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3235		ch->max_other = NON_Q_VECTORS;
3236		ch->other_count = NON_Q_VECTORS;
3237	}
3238
3239	/* record RSS queues */
3240	ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
3241
3242	/* nothing else to report if RSS is disabled */
3243	if (ch->combined_count == 1)
3244		return;
3245
3246	/* we do not support ATR queueing if SR-IOV is enabled */
3247	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3248		return;
3249
3250	/* same thing goes for being DCB enabled */
3251	if (adapter->hw_tcs > 1)
3252		return;
3253
3254	/* if ATR is disabled we can exit */
3255	if (!adapter->atr_sample_rate)
3256		return;
3257
3258	/* report flow director queues as maximum channels */
3259	ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
3260}
3261
3262static int ixgbe_set_channels(struct net_device *dev,
3263			      struct ethtool_channels *ch)
3264{
3265	struct ixgbe_adapter *adapter = netdev_priv(dev);
3266	unsigned int count = ch->combined_count;
3267	u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
3268
3269	/* verify they are not requesting separate vectors */
3270	if (!count || ch->rx_count || ch->tx_count)
3271		return -EINVAL;
3272
3273	/* verify other_count has not changed */
3274	if (ch->other_count != NON_Q_VECTORS)
3275		return -EINVAL;
3276
3277	/* verify the number of channels does not exceed hardware limits */
3278	if (count > ixgbe_max_channels(adapter))
3279		return -EINVAL;
3280
3281	/* update feature limits from largest to smallest supported values */
3282	adapter->ring_feature[RING_F_FDIR].limit = count;
3283
3284	/* cap RSS limit */
3285	if (count > max_rss_indices)
3286		count = max_rss_indices;
3287	adapter->ring_feature[RING_F_RSS].limit = count;
3288
3289#ifdef IXGBE_FCOE
3290	/* cap FCoE limit at 8 */
3291	if (count > IXGBE_FCRETA_SIZE)
3292		count = IXGBE_FCRETA_SIZE;
3293	adapter->ring_feature[RING_F_FCOE].limit = count;
3294
3295#endif
3296	/* use setup TC to update any traffic class queue mapping */
3297	return ixgbe_setup_tc(dev, adapter->hw_tcs);
3298}
3299
3300static int ixgbe_get_module_info(struct net_device *dev,
3301				       struct ethtool_modinfo *modinfo)
3302{
3303	struct ixgbe_adapter *adapter = netdev_priv(dev);
3304	struct ixgbe_hw *hw = &adapter->hw;
3305	s32 status;
3306	u8 sff8472_rev, addr_mode;
3307	bool page_swap = false;
3308
3309	if (hw->phy.type == ixgbe_phy_fw)
3310		return -ENXIO;
3311
3312	/* Check whether we support SFF-8472 or not */
3313	status = hw->phy.ops.read_i2c_eeprom(hw,
3314					     IXGBE_SFF_SFF_8472_COMP,
3315					     &sff8472_rev);
3316	if (status)
3317		return -EIO;
3318
3319	/* addressing mode is not supported */
3320	status = hw->phy.ops.read_i2c_eeprom(hw,
3321					     IXGBE_SFF_SFF_8472_SWAP,
3322					     &addr_mode);
3323	if (status)
3324		return -EIO;
3325
3326	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
3327		e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
3328		page_swap = true;
3329	}
3330
3331	if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap ||
3332	    !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) {
3333		/* We have a SFP, but it does not support SFF-8472 */
3334		modinfo->type = ETH_MODULE_SFF_8079;
3335		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
3336	} else {
3337		/* We have a SFP which supports a revision of SFF-8472. */
3338		modinfo->type = ETH_MODULE_SFF_8472;
3339		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
3340	}
3341
3342	return 0;
3343}
3344
3345static int ixgbe_get_module_eeprom(struct net_device *dev,
3346					 struct ethtool_eeprom *ee,
3347					 u8 *data)
3348{
3349	struct ixgbe_adapter *adapter = netdev_priv(dev);
3350	struct ixgbe_hw *hw = &adapter->hw;
3351	s32 status = -EFAULT;
3352	u8 databyte = 0xFF;
3353	int i = 0;
3354
3355	if (ee->len == 0)
3356		return -EINVAL;
3357
3358	if (hw->phy.type == ixgbe_phy_fw)
3359		return -ENXIO;
3360
3361	for (i = ee->offset; i < ee->offset + ee->len; i++) {
3362		/* I2C reads can take long time */
3363		if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
3364			return -EBUSY;
3365
3366		if (i < ETH_MODULE_SFF_8079_LEN)
3367			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
3368		else
3369			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
3370
3371		if (status)
3372			return -EIO;
3373
3374		data[i - ee->offset] = databyte;
3375	}
3376
3377	return 0;
3378}
3379
3380static const struct {
3381	ixgbe_link_speed mac_speed;
3382	u32 supported;
3383} ixgbe_ls_map[] = {
3384	{ IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
3385	{ IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
3386	{ IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
3387	{ IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
3388	{ IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
3389};
3390
3391static const struct {
3392	u32 lp_advertised;
3393	u32 mac_speed;
3394} ixgbe_lp_map[] = {
3395	{ FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
3396	{ FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
3397	{ FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
3398	{ FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
3399	{ FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
3400	{ FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full},
3401};
3402
3403static int
3404ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
3405{
3406	u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
3407	struct ixgbe_hw *hw = &adapter->hw;
3408	s32 rc;
3409	u16 i;
3410
3411	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
3412	if (rc)
3413		return rc;
3414
3415	edata->lp_advertised = 0;
3416	for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
3417		if (info[0] & ixgbe_lp_map[i].lp_advertised)
3418			edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
3419	}
3420
3421	edata->supported = 0;
3422	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3423		if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
3424			edata->supported |= ixgbe_ls_map[i].supported;
3425	}
3426
3427	edata->advertised = 0;
3428	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3429		if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
3430			edata->advertised |= ixgbe_ls_map[i].supported;
3431	}
3432
3433	edata->eee_enabled = !!edata->advertised;
3434	edata->tx_lpi_enabled = edata->eee_enabled;
3435	if (edata->advertised & edata->lp_advertised)
3436		edata->eee_active = true;
3437
3438	return 0;
3439}
3440
3441static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
3442{
3443	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3444	struct ixgbe_hw *hw = &adapter->hw;
3445
3446	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3447		return -EOPNOTSUPP;
3448
3449	if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
3450		return ixgbe_get_eee_fw(adapter, edata);
3451
3452	return -EOPNOTSUPP;
3453}
3454
3455static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
3456{
3457	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3458	struct ixgbe_hw *hw = &adapter->hw;
3459	struct ethtool_eee eee_data;
3460	s32 ret_val;
3461
3462	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3463		return -EOPNOTSUPP;
3464
3465	memset(&eee_data, 0, sizeof(struct ethtool_eee));
3466
3467	ret_val = ixgbe_get_eee(netdev, &eee_data);
3468	if (ret_val)
3469		return ret_val;
3470
3471	if (eee_data.eee_enabled && !edata->eee_enabled) {
3472		if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
3473			e_err(drv, "Setting EEE tx-lpi is not supported\n");
3474			return -EINVAL;
3475		}
3476
3477		if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
3478			e_err(drv,
3479			      "Setting EEE Tx LPI timer is not supported\n");
3480			return -EINVAL;
3481		}
3482
3483		if (eee_data.advertised != edata->advertised) {
3484			e_err(drv,
3485			      "Setting EEE advertised speeds is not supported\n");
3486			return -EINVAL;
3487		}
3488	}
3489
3490	if (eee_data.eee_enabled != edata->eee_enabled) {
3491		if (edata->eee_enabled) {
3492			adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
3493			hw->phy.eee_speeds_advertised =
3494						   hw->phy.eee_speeds_supported;
3495		} else {
3496			adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
3497			hw->phy.eee_speeds_advertised = 0;
3498		}
3499
3500		/* reset link */
3501		if (netif_running(netdev))
3502			ixgbe_reinit_locked(adapter);
3503		else
3504			ixgbe_reset(adapter);
3505	}
3506
3507	return 0;
3508}
3509
3510static u32 ixgbe_get_priv_flags(struct net_device *netdev)
3511{
3512	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3513	u32 priv_flags = 0;
3514
3515	if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
3516		priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;
3517
3518	if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)
3519		priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN;
3520
3521	if (adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF)
3522		priv_flags |= IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF;
3523
3524	return priv_flags;
3525}
3526
3527static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
3528{
3529	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3530	unsigned int flags2 = adapter->flags2;
3531	unsigned int i;
3532
3533	flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
3534	if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
3535		flags2 |= IXGBE_FLAG2_RX_LEGACY;
3536
3537	flags2 &= ~IXGBE_FLAG2_VF_IPSEC_ENABLED;
3538	if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN)
3539		flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED;
3540
3541	flags2 &= ~IXGBE_FLAG2_AUTO_DISABLE_VF;
3542	if (priv_flags & IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF) {
3543		if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
3544			/* Reset primary abort counter */
3545			for (i = 0; i < adapter->num_vfs; i++)
3546				adapter->vfinfo[i].primary_abort_count = 0;
3547
3548			flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
3549		} else {
3550			e_info(probe,
3551			       "Cannot set private flags: Operation not supported\n");
3552			return -EOPNOTSUPP;
3553		}
3554	}
3555
3556	if (flags2 != adapter->flags2) {
3557		adapter->flags2 = flags2;
3558
3559		/* reset interface to repopulate queues */
3560		if (netif_running(netdev))
3561			ixgbe_reinit_locked(adapter);
3562	}
3563
3564	return 0;
3565}
3566
3567static const struct ethtool_ops ixgbe_ethtool_ops = {
3568	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
3569	.get_drvinfo            = ixgbe_get_drvinfo,
3570	.get_regs_len           = ixgbe_get_regs_len,
3571	.get_regs               = ixgbe_get_regs,
3572	.get_wol                = ixgbe_get_wol,
3573	.set_wol                = ixgbe_set_wol,
3574	.nway_reset             = ixgbe_nway_reset,
3575	.get_link               = ethtool_op_get_link,
3576	.get_eeprom_len         = ixgbe_get_eeprom_len,
3577	.get_eeprom             = ixgbe_get_eeprom,
3578	.set_eeprom             = ixgbe_set_eeprom,
3579	.get_ringparam          = ixgbe_get_ringparam,
3580	.set_ringparam          = ixgbe_set_ringparam,
3581	.get_pause_stats	= ixgbe_get_pause_stats,
3582	.get_pauseparam         = ixgbe_get_pauseparam,
3583	.set_pauseparam         = ixgbe_set_pauseparam,
3584	.get_msglevel           = ixgbe_get_msglevel,
3585	.set_msglevel           = ixgbe_set_msglevel,
3586	.self_test              = ixgbe_diag_test,
3587	.get_strings            = ixgbe_get_strings,
3588	.set_phys_id            = ixgbe_set_phys_id,
3589	.get_sset_count         = ixgbe_get_sset_count,
3590	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
3591	.get_coalesce           = ixgbe_get_coalesce,
3592	.set_coalesce           = ixgbe_set_coalesce,
3593	.get_rxnfc		= ixgbe_get_rxnfc,
3594	.set_rxnfc		= ixgbe_set_rxnfc,
3595	.get_rxfh_indir_size	= ixgbe_rss_indir_size,
3596	.get_rxfh_key_size	= ixgbe_get_rxfh_key_size,
3597	.get_rxfh		= ixgbe_get_rxfh,
3598	.set_rxfh		= ixgbe_set_rxfh,
3599	.get_eee		= ixgbe_get_eee,
3600	.set_eee		= ixgbe_set_eee,
3601	.get_channels		= ixgbe_get_channels,
3602	.set_channels		= ixgbe_set_channels,
3603	.get_priv_flags		= ixgbe_get_priv_flags,
3604	.set_priv_flags		= ixgbe_set_priv_flags,
3605	.get_ts_info		= ixgbe_get_ts_info,
3606	.get_module_info	= ixgbe_get_module_info,
3607	.get_module_eeprom	= ixgbe_get_module_eeprom,
3608	.get_link_ksettings     = ixgbe_get_link_ksettings,
3609	.set_link_ksettings     = ixgbe_set_link_ksettings,
3610};
3611
3612void ixgbe_set_ethtool_ops(struct net_device *netdev)
3613{
3614	netdev->ethtool_ops = &ixgbe_ethtool_ops;
3615}
3616