1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4/* ethtool support for iavf */
5#include "iavf.h"
6
7#include <linux/uaccess.h>
8
9/* ethtool statistics helpers */
10
11/**
12 * struct iavf_stats - definition for an ethtool statistic
13 * @stat_string: statistic name to display in ethtool -S output
14 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
15 * @stat_offset: offsetof() the stat from a base pointer
16 *
17 * This structure defines a statistic to be added to the ethtool stats buffer.
18 * It defines a statistic as offset from a common base pointer. Stats should
19 * be defined in constant arrays using the IAVF_STAT macro, with every element
20 * of the array using the same _type for calculating the sizeof_stat and
21 * stat_offset.
22 *
23 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
24 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
25 * the iavf_add_ethtool_stat() helper function.
26 *
27 * The @stat_string is interpreted as a format string, allowing formatted
28 * values to be inserted while looping over multiple structures for a given
29 * statistics array. Thus, every statistic string in an array should have the
30 * same type and number of format specifiers, to be formatted by variadic
31 * arguments to the iavf_add_stat_string() helper function.
32 **/
33struct iavf_stats {
34	char stat_string[ETH_GSTRING_LEN];
35	int sizeof_stat;
36	int stat_offset;
37};
38
39/* Helper macro to define an iavf_stat structure with proper size and type.
40 * Use this when defining constant statistics arrays. Note that @_type expects
41 * only a type name and is used multiple times.
42 */
43#define IAVF_STAT(_type, _name, _stat) { \
44	.stat_string = _name, \
45	.sizeof_stat = sizeof_field(_type, _stat), \
46	.stat_offset = offsetof(_type, _stat) \
47}
48
49/* Helper macro for defining some statistics related to queues */
50#define IAVF_QUEUE_STAT(_name, _stat) \
51	IAVF_STAT(struct iavf_ring, _name, _stat)
52
53/* Stats associated with a Tx or Rx ring */
54static const struct iavf_stats iavf_gstrings_queue_stats[] = {
55	IAVF_QUEUE_STAT("%s-%u.packets", stats.packets),
56	IAVF_QUEUE_STAT("%s-%u.bytes", stats.bytes),
57};
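
/* Note: the "%s-%u" strings above are templates rather than final names.
 * They are expanded by iavf_add_stat_strings() with a prefix and a queue
 * index, so queue 0's Tx ring is reported by "ethtool -S" as
 * "tx-0.packets" and "tx-0.bytes", for example.
 */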
58
59/**
60 * iavf_add_one_ethtool_stat - copy the stat into the supplied buffer
61 * @data: location to store the stat value
62 * @pointer: basis for where to copy from
63 * @stat: the stat definition
64 *
65 * Copies the stat data defined by the pointer and stat structure pair into
66 * the memory supplied as data. Used to implement iavf_add_ethtool_stats and
67 * iavf_add_queue_stats. If the pointer is null, data will be zero'd.
68 */
69static void
70iavf_add_one_ethtool_stat(u64 *data, void *pointer,
71			  const struct iavf_stats *stat)
72{
73	char *p;
74
75	if (!pointer) {
76		/* ensure that the ethtool data buffer is zero'd for any stats
77		 * which don't have a valid pointer.
78		 */
79		*data = 0;
80		return;
81	}
82
83	p = (char *)pointer + stat->stat_offset;
84	switch (stat->sizeof_stat) {
85	case sizeof(u64):
86		*data = *((u64 *)p);
87		break;
88	case sizeof(u32):
89		*data = *((u32 *)p);
90		break;
91	case sizeof(u16):
92		*data = *((u16 *)p);
93		break;
94	case sizeof(u8):
95		*data = *((u8 *)p);
96		break;
97	default:
98		WARN_ONCE(1, "unexpected stat size for %s",
99			  stat->stat_string);
100		*data = 0;
101	}
102}
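
/* Illustrative sketch: a definition such as
 *
 *	IAVF_QUEUE_STAT("%s-%u.packets", stats.packets)
 *
 * stores sizeof_field(struct iavf_ring, stats.packets) and
 * offsetof(struct iavf_ring, stats.packets), so for a u64 counter the
 * switch above effectively performs
 *
 *	*data = *(u64 *)((char *)ring + stat->stat_offset);
 *
 * with no per-stat code needed beyond the table entry.
 */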
103
104/**
105 * __iavf_add_ethtool_stats - copy stats into the ethtool supplied buffer
106 * @data: ethtool stats buffer
107 * @pointer: location to copy stats from
108 * @stats: array of stats to copy
109 * @size: the size of the stats definition
110 *
111 * Copy the stats defined by the stats array using the pointer as a base into
112 * the data buffer supplied by ethtool. Updates the data pointer to point to
113 * the next empty location for successive calls to __iavf_add_ethtool_stats.
114 * If pointer is null, set the data values to zero and update the pointer to
115 * skip these stats.
116 **/
117static void
118__iavf_add_ethtool_stats(u64 **data, void *pointer,
119			 const struct iavf_stats stats[],
120			 const unsigned int size)
121{
122	unsigned int i;
123
124	for (i = 0; i < size; i++)
125		iavf_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
126}
127
128/**
129 * iavf_add_ethtool_stats - copy stats into ethtool supplied buffer
130 * @data: ethtool stats buffer
131 * @pointer: location where stats are stored
132 * @stats: static const array of stat definitions
133 *
134 * Macro to ease the use of __iavf_add_ethtool_stats by taking a static
135 * constant stats array and passing the ARRAY_SIZE(). This avoids typos by
136 * ensuring that we pass the size associated with the given stats array.
137 *
138 * The parameter @stats is evaluated twice, so parameters with side effects
139 * should be avoided.
140 **/
141#define iavf_add_ethtool_stats(data, pointer, stats) \
142	__iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
143
144/**
145 * iavf_add_queue_stats - copy queue statistics into supplied buffer
146 * @data: ethtool stats buffer
147 * @ring: the ring to copy
148 *
149 * Queue statistics must be copied while protected by
150 * u64_stats_fetch_begin, so we can't directly use iavf_add_ethtool_stats.
151 * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the
152 * ring pointer is null, zero out the queue stat values and update the data
153 * pointer. Otherwise safely copy the stats from the ring into the supplied
154 * buffer and update the data pointer when finished.
155 *
156 * This function expects to be called while under rcu_read_lock().
157 **/
158static void
159iavf_add_queue_stats(u64 **data, struct iavf_ring *ring)
160{
161	const unsigned int size = ARRAY_SIZE(iavf_gstrings_queue_stats);
162	const struct iavf_stats *stats = iavf_gstrings_queue_stats;
163	unsigned int start;
164	unsigned int i;
165
166	/* To avoid invalid statistics values, ensure that we keep retrying
167	 * the copy until we get a consistent value according to
168	 * u64_stats_fetch_retry. But first, make sure our ring is
169	 * non-null before attempting to access its syncp.
170	 */
171	do {
172		start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);
173		for (i = 0; i < size; i++)
174			iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]);
175	} while (ring && u64_stats_fetch_retry(&ring->syncp, start));
176
177	/* Once we successfully copy the stats in, update the data pointer */
178	*data += size;
179}
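
/* For context, a sketch of the writer side of this protocol as it is
 * typically done in a ring's hot path (illustrative only, not the exact
 * driver code):
 *
 *	u64_stats_update_begin(&ring->syncp);
 *	ring->stats.packets++;
 *	ring->stats.bytes += total_bytes;
 *	u64_stats_update_end(&ring->syncp);
 *
 * The fetch_begin/fetch_retry loop above then guarantees that packets and
 * bytes are read as a consistent pair even on 32-bit systems where u64
 * loads are not atomic.
 */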
180
181/**
182 * __iavf_add_stat_strings - copy stat strings into ethtool buffer
183 * @p: ethtool supplied buffer
184 * @stats: stat definitions array
185 * @size: size of the stats array
186 *
187 * Format and copy the strings described by stats into the buffer pointed at
188 * by p.
189 **/
190static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[],
191				    const unsigned int size, ...)
192{
193	unsigned int i;
194
195	for (i = 0; i < size; i++) {
196		va_list args;
197
198		va_start(args, size);
199		vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
200		*p += ETH_GSTRING_LEN;
201		va_end(args);
202	}
203}
204
205/**
206 * iavf_add_stat_strings - copy stat strings into ethtool buffer
207 * @p: ethtool supplied buffer
208 * @stats: stat definitions array
209 *
210 * Format and copy the strings described by the const static stats value into
211 * the buffer pointed at by p.
212 *
213 * The parameter @stats is evaluated twice, so parameters with side effects
214 * should be avoided. Additionally, stats must be an array such that
215 * ARRAY_SIZE can be called on it.
216 **/
217#define iavf_add_stat_strings(p, stats, ...) \
218	__iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
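
/* Example usage, mirroring iavf_get_stat_strings() below:
 *
 *	iavf_add_stat_strings(&data, iavf_gstrings_queue_stats, "tx", 0);
 *
 * writes "tx-0.packets" and "tx-0.bytes" into the string buffer and
 * advances data by two ETH_GSTRING_LEN entries.
 */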
219
220#define VF_STAT(_name, _stat) \
221	IAVF_STAT(struct iavf_adapter, _name, _stat)
222
223static const struct iavf_stats iavf_gstrings_stats[] = {
224	VF_STAT("rx_bytes", current_stats.rx_bytes),
225	VF_STAT("rx_unicast", current_stats.rx_unicast),
226	VF_STAT("rx_multicast", current_stats.rx_multicast),
227	VF_STAT("rx_broadcast", current_stats.rx_broadcast),
228	VF_STAT("rx_discards", current_stats.rx_discards),
229	VF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
230	VF_STAT("tx_bytes", current_stats.tx_bytes),
231	VF_STAT("tx_unicast", current_stats.tx_unicast),
232	VF_STAT("tx_multicast", current_stats.tx_multicast),
233	VF_STAT("tx_broadcast", current_stats.tx_broadcast),
234	VF_STAT("tx_discards", current_stats.tx_discards),
235	VF_STAT("tx_errors", current_stats.tx_errors),
236};
237
238#define IAVF_STATS_LEN	ARRAY_SIZE(iavf_gstrings_stats)
239
240#define IAVF_QUEUE_STATS_LEN	ARRAY_SIZE(iavf_gstrings_queue_stats)
241
242/* Private flags. The driver currently exposes a single private flag,
243 * legacy-rx. Each entry in iavf_gstrings_priv_flags below maps an
244 * ethtool private flag string to the corresponding bit in
245 * adapter->flags.
246 */
247struct iavf_priv_flags {
248	char flag_string[ETH_GSTRING_LEN];
249	u32 flag;
250	bool read_only;
251};
252
253#define IAVF_PRIV_FLAG(_name, _flag, _read_only) { \
254	.flag_string = _name, \
255	.flag = _flag, \
256	.read_only = _read_only, \
257}
258
259static const struct iavf_priv_flags iavf_gstrings_priv_flags[] = {
260	IAVF_PRIV_FLAG("legacy-rx", IAVF_FLAG_LEGACY_RX, 0),
261};
262
263#define IAVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(iavf_gstrings_priv_flags)
264
265/**
266 * iavf_get_link_ksettings - Get Link Speed and Duplex settings
267 * @netdev: network interface device structure
268 * @cmd: ethtool command
269 *
270 * Reports speed/duplex settings. Because this is a VF, we don't know what
271 * kind of link we really have, so we fake it.
272 **/
273static int iavf_get_link_ksettings(struct net_device *netdev,
274				   struct ethtool_link_ksettings *cmd)
275{
276	struct iavf_adapter *adapter = netdev_priv(netdev);
277
278	ethtool_link_ksettings_zero_link_mode(cmd, supported);
279	cmd->base.autoneg = AUTONEG_DISABLE;
280	cmd->base.port = PORT_NONE;
281	cmd->base.duplex = DUPLEX_FULL;
282
283	if (ADV_LINK_SUPPORT(adapter)) {
284		if (adapter->link_speed_mbps &&
285		    adapter->link_speed_mbps < U32_MAX)
286			cmd->base.speed = adapter->link_speed_mbps;
287		else
288			cmd->base.speed = SPEED_UNKNOWN;
289
290		return 0;
291	}
292
293	switch (adapter->link_speed) {
294	case VIRTCHNL_LINK_SPEED_40GB:
295		cmd->base.speed = SPEED_40000;
296		break;
297	case VIRTCHNL_LINK_SPEED_25GB:
298		cmd->base.speed = SPEED_25000;
299		break;
300	case VIRTCHNL_LINK_SPEED_20GB:
301		cmd->base.speed = SPEED_20000;
302		break;
303	case VIRTCHNL_LINK_SPEED_10GB:
304		cmd->base.speed = SPEED_10000;
305		break;
306	case VIRTCHNL_LINK_SPEED_5GB:
307		cmd->base.speed = SPEED_5000;
308		break;
309	case VIRTCHNL_LINK_SPEED_2_5GB:
310		cmd->base.speed = SPEED_2500;
311		break;
312	case VIRTCHNL_LINK_SPEED_1GB:
313		cmd->base.speed = SPEED_1000;
314		break;
315	case VIRTCHNL_LINK_SPEED_100MB:
316		cmd->base.speed = SPEED_100;
317		break;
318	default:
319		break;
320	}
321
322	return 0;
323}
324
325/**
326 * iavf_get_sset_count - Get length of string set
327 * @netdev: network interface device structure
328 * @sset: id of string set
329 *
330 * Reports size of various string tables.
331 **/
332static int iavf_get_sset_count(struct net_device *netdev, int sset)
333{
334	/* Report the maximum number of queues, even if not every queue is
335	 * currently configured. Since allocation of queues is in pairs,
336	 * use netdev->real_num_tx_queues * 2. The real_num_tx_queues is set
337	 * at device creation and never changes.
338	 */
339
340	if (sset == ETH_SS_STATS)
341		return IAVF_STATS_LEN +
342			(IAVF_QUEUE_STATS_LEN * 2 *
343			 netdev->real_num_tx_queues);
344	else if (sset == ETH_SS_PRIV_FLAGS)
345		return IAVF_PRIV_FLAGS_STR_LEN;
346	else
347		return -EINVAL;
348}
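
/* Worked example (illustrative): with the two per-queue stats defined in
 * iavf_gstrings_queue_stats and, say, 4 Tx/Rx queue pairs, ETH_SS_STATS
 * reports 12 + 2 * 2 * 4 = 28 entries, i.e. the adapter-wide stats plus a
 * packets/bytes pair for every Tx and every Rx ring.
 */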
349
350/**
351 * iavf_get_ethtool_stats - report device statistics
352 * @netdev: network interface device structure
353 * @stats: ethtool statistics structure
354 * @data: pointer to data buffer
355 *
356 * All statistics are added to the data buffer as an array of u64.
357 **/
358static void iavf_get_ethtool_stats(struct net_device *netdev,
359				   struct ethtool_stats *stats, u64 *data)
360{
361	struct iavf_adapter *adapter = netdev_priv(netdev);
362	unsigned int i;
363
364	/* Explicitly request stats refresh */
365	iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_REQUEST_STATS);
366
367	iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
368
369	rcu_read_lock();
370	/* As num_active_queues describes both tx and rx queues, we can use
371	 * it to iterate over rings' stats.
372	 */
373	for (i = 0; i < adapter->num_active_queues; i++) {
374		struct iavf_ring *ring;
375
376		/* Tx rings stats */
377		ring = &adapter->tx_rings[i];
378		iavf_add_queue_stats(&data, ring);
379
380		/* Rx rings stats */
381		ring = &adapter->rx_rings[i];
382		iavf_add_queue_stats(&data, ring);
383	}
384	rcu_read_unlock();
385}
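
/* Note on ordering: the values written here follow the exact order of the
 * strings built by iavf_get_stat_strings() below: the adapter-wide stats
 * first, then a packets/bytes pair for each Tx ring and each Rx ring,
 * interleaved per queue index.
 */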
386
387/**
388 * iavf_get_priv_flag_strings - Get private flag strings
389 * @netdev: network interface device structure
390 * @data: buffer for string data
391 *
392 * Builds the private flags string table
393 **/
394static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data)
395{
396	unsigned int i;
397
398	for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
399		snprintf(data, ETH_GSTRING_LEN, "%s",
400			 iavf_gstrings_priv_flags[i].flag_string);
401		data += ETH_GSTRING_LEN;
402	}
403}
404
405/**
406 * iavf_get_stat_strings - Get stat strings
407 * @netdev: network interface device structure
408 * @data: buffer for string data
409 *
410 * Builds the statistics string table
411 **/
412static void iavf_get_stat_strings(struct net_device *netdev, u8 *data)
413{
414	unsigned int i;
415
416	iavf_add_stat_strings(&data, iavf_gstrings_stats);
417
418	/* Queues are always allocated in pairs, so we just use
419	 * real_num_tx_queues for both Tx and Rx queues.
420	 */
421	for (i = 0; i < netdev->real_num_tx_queues; i++) {
422		iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
423				      "tx", i);
424		iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
425				      "rx", i);
426	}
427}
428
429/**
430 * iavf_get_strings - Get string set
431 * @netdev: network interface device structure
432 * @sset: id of string set
433 * @data: buffer for string data
434 *
435 * Builds string tables for various string sets
436 **/
437static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
438{
439	switch (sset) {
440	case ETH_SS_STATS:
441		iavf_get_stat_strings(netdev, data);
442		break;
443	case ETH_SS_PRIV_FLAGS:
444		iavf_get_priv_flag_strings(netdev, data);
445		break;
446	default:
447		break;
448	}
449}
450
451/**
452 * iavf_get_priv_flags - report device private flags
453 * @netdev: network interface device structure
454 *
455 * The string set count and the string set itself must match the flags
456 * returned here. Add a new string to the iavf_gstrings_priv_flags array
457 * for each new flag.
458 *
459 * Returns a u32 bitmap of flags.
460 **/
461static u32 iavf_get_priv_flags(struct net_device *netdev)
462{
463	struct iavf_adapter *adapter = netdev_priv(netdev);
464	u32 i, ret_flags = 0;
465
466	for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
467		const struct iavf_priv_flags *priv_flags;
468
469		priv_flags = &iavf_gstrings_priv_flags[i];
470
471		if (priv_flags->flag & adapter->flags)
472			ret_flags |= BIT(i);
473	}
474
475	return ret_flags;
476}
477
478/**
479 * iavf_set_priv_flags - set private flags
480 * @netdev: network interface device structure
481 * @flags: bit flags to be set
482 **/
483static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
484{
485	struct iavf_adapter *adapter = netdev_priv(netdev);
486	u32 orig_flags, new_flags, changed_flags;
487	int ret = 0;
488	u32 i;
489
490	orig_flags = READ_ONCE(adapter->flags);
491	new_flags = orig_flags;
492
493	for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
494		const struct iavf_priv_flags *priv_flags;
495
496		priv_flags = &iavf_gstrings_priv_flags[i];
497
498		if (flags & BIT(i))
499			new_flags |= priv_flags->flag;
500		else
501			new_flags &= ~(priv_flags->flag);
502
503		if (priv_flags->read_only &&
504		    ((orig_flags ^ new_flags) & ~BIT(i)))
505			return -EOPNOTSUPP;
506	}
507
508	/* Before we finalize any flag changes, any checks which we need to
509	 * perform to determine if the new flags will be supported should go
510	 * here...
511	 */
512
513	/* Compare and exchange the new flags into place. If we failed, that
514	 * is if cmpxchg returns anything but the old value, this means
515	 * something else must have modified the flags variable since we
516	 * copied it. We'll just punt with an error and log something in the
517	 * message buffer.
518	 */
519	if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) {
520		dev_warn(&adapter->pdev->dev,
521			 "Unable to update adapter->flags as it was modified by another thread...\n");
522		return -EAGAIN;
523	}
524
525	changed_flags = orig_flags ^ new_flags;
526
527	/* Process any additional changes needed as a result of flag changes.
528	 * The changed_flags value reflects the list of bits that were changed
529	 * in the code above.
530	 */
531
532	/* issue a reset to force legacy-rx change to take effect */
533	if (changed_flags & IAVF_FLAG_LEGACY_RX) {
534		if (netif_running(netdev)) {
535			iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
536			ret = iavf_wait_for_reset(adapter);
537			if (ret)
538				netdev_warn(netdev, "Timed out or interrupted waiting for reset after changing private flags");
539		}
540	}
541
542	return ret;
543}
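
/* Example, assuming the standard ethtool CLI:
 *
 *	ethtool --set-priv-flags <ifname> legacy-rx on
 *
 * toggles IAVF_FLAG_LEGACY_RX through this handler; if the interface is
 * running, the change only takes effect after the reset scheduled above
 * completes.
 */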
544
545/**
546 * iavf_get_msglevel - Get debug message level
547 * @netdev: network interface device structure
548 *
549 * Returns current debug message level.
550 **/
551static u32 iavf_get_msglevel(struct net_device *netdev)
552{
553	struct iavf_adapter *adapter = netdev_priv(netdev);
554
555	return adapter->msg_enable;
556}
557
558/**
559 * iavf_set_msglevel - Set debug message level
560 * @netdev: network interface device structure
561 * @data: message level
562 *
563 * Set current debug message level. Higher values cause the driver to
564 * be noisier.
565 **/
566static void iavf_set_msglevel(struct net_device *netdev, u32 data)
567{
568	struct iavf_adapter *adapter = netdev_priv(netdev);
569
570	if (IAVF_DEBUG_USER & data)
571		adapter->hw.debug_mask = data;
572	adapter->msg_enable = data;
573}
574
575/**
576 * iavf_get_drvinfo - Get driver info
577 * @netdev: network interface device structure
578 * @drvinfo: ethtool driver info structure
579 *
580 * Returns information about the driver and device for display to the user.
581 **/
582static void iavf_get_drvinfo(struct net_device *netdev,
583			     struct ethtool_drvinfo *drvinfo)
584{
585	struct iavf_adapter *adapter = netdev_priv(netdev);
586
587	strscpy(drvinfo->driver, iavf_driver_name, 32);
588	strscpy(drvinfo->fw_version, "N/A", 4);
589	strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
590	drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
591}
592
593/**
594 * iavf_get_ringparam - Get ring parameters
595 * @netdev: network interface device structure
596 * @ring: ethtool ringparam structure
597 * @kernel_ring: ethtool external ringparam structure
598 * @extack: netlink extended ACK report struct
599 *
600 * Returns current ring parameters. TX and RX rings are reported separately,
601 * but the number of rings is not reported.
602 **/
603static void iavf_get_ringparam(struct net_device *netdev,
604			       struct ethtool_ringparam *ring,
605			       struct kernel_ethtool_ringparam *kernel_ring,
606			       struct netlink_ext_ack *extack)
607{
608	struct iavf_adapter *adapter = netdev_priv(netdev);
609
610	ring->rx_max_pending = IAVF_MAX_RXD;
611	ring->tx_max_pending = IAVF_MAX_TXD;
612	ring->rx_pending = adapter->rx_desc_count;
613	ring->tx_pending = adapter->tx_desc_count;
614}
615
616/**
617 * iavf_set_ringparam - Set ring parameters
618 * @netdev: network interface device structure
619 * @ring: ethtool ringparam structure
620 * @kernel_ring: ethtool external ringparam structure
621 * @extack: netlink extended ACK report struct
622 *
623 * Sets ring parameters. TX and RX rings are controlled separately, but the
624 * number of rings is not specified, so all rings get the same settings.
625 **/
626static int iavf_set_ringparam(struct net_device *netdev,
627			      struct ethtool_ringparam *ring,
628			      struct kernel_ethtool_ringparam *kernel_ring,
629			      struct netlink_ext_ack *extack)
630{
631	struct iavf_adapter *adapter = netdev_priv(netdev);
632	u32 new_rx_count, new_tx_count;
633	int ret = 0;
634
635	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
636		return -EINVAL;
637
638	if (ring->tx_pending > IAVF_MAX_TXD ||
639	    ring->tx_pending < IAVF_MIN_TXD ||
640	    ring->rx_pending > IAVF_MAX_RXD ||
641	    ring->rx_pending < IAVF_MIN_RXD) {
642		netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
643			   ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD,
644			   IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE);
645		return -EINVAL;
646	}
647
648	new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
649	if (new_tx_count != ring->tx_pending)
650		netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
651			    new_tx_count);
652
653	new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
654	if (new_rx_count != ring->rx_pending)
655		netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
656			    new_rx_count);
657
658	/* if nothing to do return success */
659	if ((new_tx_count == adapter->tx_desc_count) &&
660	    (new_rx_count == adapter->rx_desc_count)) {
661		netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
662		return 0;
663	}
664
665	if (new_tx_count != adapter->tx_desc_count) {
666		netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n",
667			   adapter->tx_desc_count, new_tx_count);
668		adapter->tx_desc_count = new_tx_count;
669	}
670
671	if (new_rx_count != adapter->rx_desc_count) {
672		netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n",
673			   adapter->rx_desc_count, new_rx_count);
674		adapter->rx_desc_count = new_rx_count;
675	}
676
677	if (netif_running(netdev)) {
678		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
679		ret = iavf_wait_for_reset(adapter);
680		if (ret)
681			netdev_warn(netdev, "Timed out or interrupted waiting for reset after changing ring parameters");
682	}
683
684	return ret;
685}
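
/* Example, assuming the standard ethtool CLI and an
 * IAVF_REQ_DESCRIPTOR_MULTIPLE of 32:
 *
 *	ethtool -G <ifname> rx 500 tx 500
 *
 * is rounded up by the ALIGN() calls above to 512 descriptors per ring
 * before a reset is scheduled to apply the new counts.
 */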
686
687/**
688 * __iavf_get_coalesce - get per-queue coalesce settings
689 * @netdev: the netdev to check
690 * @ec: ethtool coalesce data structure
691 * @queue: which queue to pick
692 *
693 * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
694 * are per queue. If queue is <0 then we default to queue 0 as the
695 * representative value.
696 **/
697static int __iavf_get_coalesce(struct net_device *netdev,
698			       struct ethtool_coalesce *ec, int queue)
699{
700	struct iavf_adapter *adapter = netdev_priv(netdev);
701	struct iavf_ring *rx_ring, *tx_ring;
702
703	/* Rx and Tx usecs are per-queue values. If the user doesn't specify
704	 * a queue, return queue 0's values as the representative.
705	 */
706	if (queue < 0)
707		queue = 0;
708	else if (queue >= adapter->num_active_queues)
709		return -EINVAL;
710
711	rx_ring = &adapter->rx_rings[queue];
712	tx_ring = &adapter->tx_rings[queue];
713
714	if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
715		ec->use_adaptive_rx_coalesce = 1;
716
717	if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
718		ec->use_adaptive_tx_coalesce = 1;
719
720	ec->rx_coalesce_usecs = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
721	ec->tx_coalesce_usecs = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
722
723	return 0;
724}
725
726/**
727 * iavf_get_coalesce - Get interrupt coalescing settings
728 * @netdev: network interface device structure
729 * @ec: ethtool coalesce structure
730 * @kernel_coal: ethtool CQE mode setting structure
731 * @extack: extack for reporting error messages
732 *
733 * Returns current coalescing settings. This is referred to elsewhere in the
734 * driver as Interrupt Throttle Rate, as this is how the hardware describes
735 * this functionality. Note that if per-queue settings have been modified this
736 * only represents the settings of queue 0.
737 **/
738static int iavf_get_coalesce(struct net_device *netdev,
739			     struct ethtool_coalesce *ec,
740			     struct kernel_ethtool_coalesce *kernel_coal,
741			     struct netlink_ext_ack *extack)
742{
743	return __iavf_get_coalesce(netdev, ec, -1);
744}
745
746/**
747 * iavf_get_per_queue_coalesce - get coalesce values for specific queue
748 * @netdev: netdev to read
749 * @ec: coalesce settings from ethtool
750 * @queue: the queue to read
751 *
752 * Read specific queue's coalesce settings.
753 **/
754static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
755				       struct ethtool_coalesce *ec)
756{
757	return __iavf_get_coalesce(netdev, ec, queue);
758}
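
/* Example, assuming the standard ethtool CLI:
 *
 *	ethtool -c <ifname>
 *	ethtool --per-queue <ifname> queue_mask 0x2 --show-coalesce
 *
 * The first form reports queue 0's settings via iavf_get_coalesce(); the
 * second reads queue 1's settings via iavf_get_per_queue_coalesce().
 */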
759
760/**
761 * iavf_set_itr_per_queue - set ITR values for specific queue
762 * @adapter: the VF adapter struct to set values for
763 * @ec: coalesce settings from ethtool
764 * @queue: the queue to modify
765 *
766 * Change the ITR settings for a specific queue.
767 **/
768static int iavf_set_itr_per_queue(struct iavf_adapter *adapter,
769				  struct ethtool_coalesce *ec, int queue)
770{
771	struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
772	struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
773	struct iavf_q_vector *q_vector;
774	u16 itr_setting;
775
776	itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
777
778	if (ec->rx_coalesce_usecs != itr_setting &&
779	    ec->use_adaptive_rx_coalesce) {
780		netif_info(adapter, drv, adapter->netdev,
781			   "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
782		return -EINVAL;
783	}
784
785	itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
786
787	if (ec->tx_coalesce_usecs != itr_setting &&
788	    ec->use_adaptive_tx_coalesce) {
789		netif_info(adapter, drv, adapter->netdev,
790			   "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
791		return -EINVAL;
792	}
793
794	rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
795	tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
796
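	/* Default to adaptive (dynamic) ITR by setting the flag bit, then
	 * clear it again below when the user did not request adaptive
	 * coalescing for that direction.
	 */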
797	rx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
798	if (!ec->use_adaptive_rx_coalesce)
799		rx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;
800
801	tx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
802	if (!ec->use_adaptive_tx_coalesce)
803		tx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;
804
805	q_vector = rx_ring->q_vector;
806	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
807
808	q_vector = tx_ring->q_vector;
809	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
810
811	/* The interrupt handler itself will take care of programming
812	 * the Tx and Rx ITR values based on the values we have entered
813	 * into the q_vector, no need to write the values now.
814	 */
815	return 0;
816}
817
818/**
819 * __iavf_set_coalesce - set coalesce settings for particular queue
820 * @netdev: the netdev to change
821 * @ec: ethtool coalesce settings
822 * @queue: the queue to change
823 *
824 * Sets the coalesce settings for a particular queue.
825 **/
826static int __iavf_set_coalesce(struct net_device *netdev,
827			       struct ethtool_coalesce *ec, int queue)
828{
829	struct iavf_adapter *adapter = netdev_priv(netdev);
830	int i;
831
832	if (ec->rx_coalesce_usecs > IAVF_MAX_ITR) {
833		netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
834		return -EINVAL;
835	} else if (ec->tx_coalesce_usecs > IAVF_MAX_ITR) {
836		netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
837		return -EINVAL;
838	}
839
840	/* Rx and Tx usecs have per-queue values. If the user doesn't specify the
841	 * queue, apply to all queues.
842	 */
843	if (queue < 0) {
844		for (i = 0; i < adapter->num_active_queues; i++)
845			if (iavf_set_itr_per_queue(adapter, ec, i))
846				return -EINVAL;
847	} else if (queue < adapter->num_active_queues) {
848		if (iavf_set_itr_per_queue(adapter, ec, queue))
849			return -EINVAL;
850	} else {
851		netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
852			   adapter->num_active_queues - 1);
853		return -EINVAL;
854	}
855
856	return 0;
857}
858
859/**
860 * iavf_set_coalesce - Set interrupt coalescing settings
861 * @netdev: network interface device structure
862 * @ec: ethtool coalesce structure
863 * @kernel_coal: ethtool CQE mode setting structure
864 * @extack: extack for reporting error messages
865 *
866 * Change current coalescing settings for every queue.
867 **/
868static int iavf_set_coalesce(struct net_device *netdev,
869			     struct ethtool_coalesce *ec,
870			     struct kernel_ethtool_coalesce *kernel_coal,
871			     struct netlink_ext_ack *extack)
872{
873	return __iavf_set_coalesce(netdev, ec, -1);
874}
875
876/**
877 * iavf_set_per_queue_coalesce - set specific queue's coalesce settings
878 * @netdev: the netdev to change
879 * @ec: ethtool's coalesce settings
880 * @queue: the queue to modify
881 *
882 * Modifies a specific queue's coalesce settings.
883 */
884static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
885				       struct ethtool_coalesce *ec)
886{
887	return __iavf_set_coalesce(netdev, ec, queue);
888}
889
890/**
891 * iavf_fltr_to_ethtool_flow - convert filter type values to ethtool
892 * flow type values
893 * @flow: filter type to be converted
894 *
895 * Returns the corresponding ethtool flow type.
896 */
897static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow)
898{
899	switch (flow) {
900	case IAVF_FDIR_FLOW_IPV4_TCP:
901		return TCP_V4_FLOW;
902	case IAVF_FDIR_FLOW_IPV4_UDP:
903		return UDP_V4_FLOW;
904	case IAVF_FDIR_FLOW_IPV4_SCTP:
905		return SCTP_V4_FLOW;
906	case IAVF_FDIR_FLOW_IPV4_AH:
907		return AH_V4_FLOW;
908	case IAVF_FDIR_FLOW_IPV4_ESP:
909		return ESP_V4_FLOW;
910	case IAVF_FDIR_FLOW_IPV4_OTHER:
911		return IPV4_USER_FLOW;
912	case IAVF_FDIR_FLOW_IPV6_TCP:
913		return TCP_V6_FLOW;
914	case IAVF_FDIR_FLOW_IPV6_UDP:
915		return UDP_V6_FLOW;
916	case IAVF_FDIR_FLOW_IPV6_SCTP:
917		return SCTP_V6_FLOW;
918	case IAVF_FDIR_FLOW_IPV6_AH:
919		return AH_V6_FLOW;
920	case IAVF_FDIR_FLOW_IPV6_ESP:
921		return ESP_V6_FLOW;
922	case IAVF_FDIR_FLOW_IPV6_OTHER:
923		return IPV6_USER_FLOW;
924	case IAVF_FDIR_FLOW_NON_IP_L2:
925		return ETHER_FLOW;
926	default:
927		/* 0 is undefined ethtool flow */
928		return 0;
929	}
930}
931
932/**
933 * iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
934 * @eth: Ethtool flow type to be converted
935 *
936 * Returns flow enum
937 */
938static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth)
939{
940	switch (eth) {
941	case TCP_V4_FLOW:
942		return IAVF_FDIR_FLOW_IPV4_TCP;
943	case UDP_V4_FLOW:
944		return IAVF_FDIR_FLOW_IPV4_UDP;
945	case SCTP_V4_FLOW:
946		return IAVF_FDIR_FLOW_IPV4_SCTP;
947	case AH_V4_FLOW:
948		return IAVF_FDIR_FLOW_IPV4_AH;
949	case ESP_V4_FLOW:
950		return IAVF_FDIR_FLOW_IPV4_ESP;
951	case IPV4_USER_FLOW:
952		return IAVF_FDIR_FLOW_IPV4_OTHER;
953	case TCP_V6_FLOW:
954		return IAVF_FDIR_FLOW_IPV6_TCP;
955	case UDP_V6_FLOW:
956		return IAVF_FDIR_FLOW_IPV6_UDP;
957	case SCTP_V6_FLOW:
958		return IAVF_FDIR_FLOW_IPV6_SCTP;
959	case AH_V6_FLOW:
960		return IAVF_FDIR_FLOW_IPV6_AH;
961	case ESP_V6_FLOW:
962		return IAVF_FDIR_FLOW_IPV6_ESP;
963	case IPV6_USER_FLOW:
964		return IAVF_FDIR_FLOW_IPV6_OTHER;
965	case ETHER_FLOW:
966		return IAVF_FDIR_FLOW_NON_IP_L2;
967	default:
968		return IAVF_FDIR_FLOW_NONE;
969	}
970}
971
972/**
973 * iavf_is_mask_valid - check mask field set
974 * @mask: full mask to check
975 * @field: field for which mask should be valid
976 *
977 * If the mask is fully set return true. If it is not valid for field return
978 * false.
979 */
980static bool iavf_is_mask_valid(u64 mask, u64 field)
981{
982	return (mask & field) == field;
983}
984
985/**
986 * iavf_parse_rx_flow_user_data - deconstruct user-defined data
987 * @fsp: pointer to ethtool Rx flow specification
988 * @fltr: pointer to Flow Director filter for userdef data storage
989 *
990 * Returns 0 on success, negative error value on failure
991 */
992static int
993iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
994			     struct iavf_fdir_fltr *fltr)
995{
996	struct iavf_flex_word *flex;
997	int i, cnt = 0;
998
999	if (!(fsp->flow_type & FLOW_EXT))
1000		return 0;
1001
1002	for (i = 0; i < IAVF_FLEX_WORD_NUM; i++) {
1003#define IAVF_USERDEF_FLEX_WORD_M	GENMASK(15, 0)
1004#define IAVF_USERDEF_FLEX_OFFS_S	16
1005#define IAVF_USERDEF_FLEX_OFFS_M	GENMASK(31, IAVF_USERDEF_FLEX_OFFS_S)
1006#define IAVF_USERDEF_FLEX_FLTR_M	GENMASK(31, 0)
1007		u32 value = be32_to_cpu(fsp->h_ext.data[i]);
1008		u32 mask = be32_to_cpu(fsp->m_ext.data[i]);
1009
1010		if (!value || !mask)
1011			continue;
1012
1013		if (!iavf_is_mask_valid(mask, IAVF_USERDEF_FLEX_FLTR_M))
1014			return -EINVAL;
1015
1016		/* 504 is the maximum value for offsets, and offset is measured
1017		 * from the start of the MAC address.
1018		 */
1019#define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504
1020		flex = &fltr->flex_words[cnt++];
1021		flex->word = value & IAVF_USERDEF_FLEX_WORD_M;
1022		flex->offset = (value & IAVF_USERDEF_FLEX_OFFS_M) >>
1023			     IAVF_USERDEF_FLEX_OFFS_S;
1024		if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL)
1025			return -EINVAL;
1026	}
1027
1028	fltr->flex_cnt = cnt;
1029
1030	return 0;
1031}
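
/* Layout of each 32-bit user-def word parsed above, for illustration:
 *
 *	bits 31..16  flex payload offset in bytes (at most
 *	             IAVF_USERDEF_FLEX_MAX_OFFS_VAL)
 *	bits 15..0   16-bit flex word value to match
 *
 * For example, a user-def value of 0x001C0800 requests matching the word
 * 0x0800 at offset 28, provided the corresponding 32-bit mask is fully
 * set.
 */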
1032
1033/**
1034 * iavf_fill_rx_flow_ext_data - fill the additional data
1035 * @fsp: pointer to ethtool Rx flow specification
1036 * @fltr: pointer to Flow Director filter to get additional data
1037 */
1038static void
1039iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp,
1040			   struct iavf_fdir_fltr *fltr)
1041{
1042	if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1])
1043		return;
1044
1045	fsp->flow_type |= FLOW_EXT;
1046
1047	memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data));
1048	memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data));
1049}
1050
1051/**
1052 * iavf_get_ethtool_fdir_entry - fill ethtool structure with Flow Director filter data
1053 * @adapter: the VF adapter structure that contains filter list
1054 * @cmd: ethtool command data structure to receive the filter data
1055 *
1056 * Returns 0 as expected for success by ethtool
1057 */
1058static int
1059iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
1060			    struct ethtool_rxnfc *cmd)
1061{
1062	struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
1063	struct iavf_fdir_fltr *rule = NULL;
1064	int ret = 0;
1065
1066	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
1067		return -EOPNOTSUPP;
1068
1069	spin_lock_bh(&adapter->fdir_fltr_lock);
1070
1071	rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
1072	if (!rule) {
1073		ret = -EINVAL;
1074		goto release_lock;
1075	}
1076
1077	fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type);
1078
1079	memset(&fsp->m_u, 0, sizeof(fsp->m_u));
1080	memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
1081
1082	switch (fsp->flow_type) {
1083	case TCP_V4_FLOW:
1084	case UDP_V4_FLOW:
1085	case SCTP_V4_FLOW:
1086		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
1087		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
1088		fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port;
1089		fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port;
1090		fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos;
1091		fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
1092		fsp->m_u.tcp_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
1093		fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port;
1094		fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port;
1095		fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos;
1096		break;
1097	case AH_V4_FLOW:
1098	case ESP_V4_FLOW:
1099		fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
1100		fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
1101		fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi;
1102		fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos;
1103		fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
1104		fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
1105		fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi;
1106		fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos;
1107		break;
1108	case IPV4_USER_FLOW:
1109		fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
1110		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
1111		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header;
1112		fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos;
1113		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
1114		fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto;
1115		fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
1116		fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
1117		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header;
1118		fsp->m_u.usr_ip4_spec.tos = rule->ip_mask.tos;
1119		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
1120		fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto;
1121		break;
1122	case TCP_V6_FLOW:
1123	case UDP_V6_FLOW:
1124	case SCTP_V6_FLOW:
1125		memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
1126		       sizeof(struct in6_addr));
1127		memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
1128		       sizeof(struct in6_addr));
1129		fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port;
1130		fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port;
1131		fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass;
1132		memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
1133		       sizeof(struct in6_addr));
1134		memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
1135		       sizeof(struct in6_addr));
1136		fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port;
1137		fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port;
1138		fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass;
1139		break;
1140	case AH_V6_FLOW:
1141	case ESP_V6_FLOW:
1142		memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
1143		       sizeof(struct in6_addr));
1144		memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
1145		       sizeof(struct in6_addr));
1146		fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi;
1147		fsp->h_u.ah_ip6_spec.tclass = rule->ip_data.tclass;
1148		memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
1149		       sizeof(struct in6_addr));
1150		memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
1151		       sizeof(struct in6_addr));
1152		fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi;
1153		fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass;
1154		break;
1155	case IPV6_USER_FLOW:
1156		memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
1157		       sizeof(struct in6_addr));
1158		memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
1159		       sizeof(struct in6_addr));
1160		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header;
1161		fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass;
1162		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto;
1163		memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
1164		       sizeof(struct in6_addr));
1165		memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
1166		       sizeof(struct in6_addr));
1167		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header;
1168		fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass;
1169		fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto;
1170		break;
1171	case ETHER_FLOW:
1172		fsp->h_u.ether_spec.h_proto = rule->eth_data.etype;
1173		fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype;
1174		break;
1175	default:
1176		ret = -EINVAL;
1177		break;
1178	}
1179
1180	iavf_fill_rx_flow_ext_data(fsp, rule);
1181
1182	if (rule->action == VIRTCHNL_ACTION_DROP)
1183		fsp->ring_cookie = RX_CLS_FLOW_DISC;
1184	else
1185		fsp->ring_cookie = rule->q_index;
1186
1187release_lock:
1188	spin_unlock_bh(&adapter->fdir_fltr_lock);
1189	return ret;
1190}
1191
1192/**
1193 * iavf_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
1194 * @adapter: the VF adapter structure containing the filter list
1195 * @cmd: ethtool command data structure
1196 * @rule_locs: ethtool array passed in from OS to receive filter IDs
1197 *
1198 * Returns 0 as expected for success by ethtool
1199 */
1200static int
1201iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
1202		       u32 *rule_locs)
1203{
1204	struct iavf_fdir_fltr *fltr;
1205	unsigned int cnt = 0;
1206	int val = 0;
1207
1208	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
1209		return -EOPNOTSUPP;
1210
1211	cmd->data = IAVF_MAX_FDIR_FILTERS;
1212
1213	spin_lock_bh(&adapter->fdir_fltr_lock);
1214
1215	list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
1216		if (cnt == cmd->rule_cnt) {
1217			val = -EMSGSIZE;
1218			goto release_lock;
1219		}
1220		rule_locs[cnt] = fltr->loc;
1221		cnt++;
1222	}
1223
1224release_lock:
1225	spin_unlock_bh(&adapter->fdir_fltr_lock);
1226	if (!val)
1227		cmd->rule_cnt = cnt;
1228
1229	return val;
1230}
1231
1232/**
1233 * iavf_add_fdir_fltr_info - Set the input set for Flow Director filter
1234 * @adapter: pointer to the VF adapter structure
1235 * @fsp: pointer to ethtool Rx flow specification
1236 * @fltr: filter structure
1237 */
1238static int
1239iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp,
1240			struct iavf_fdir_fltr *fltr)
1241{
1242	u32 flow_type, q_index = 0;
1243	enum virtchnl_action act;
1244	int err;
1245
1246	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
1247		act = VIRTCHNL_ACTION_DROP;
1248	} else {
1249		q_index = fsp->ring_cookie;
1250		if (q_index >= adapter->num_active_queues)
1251			return -EINVAL;
1252
1253		act = VIRTCHNL_ACTION_QUEUE;
1254	}
1255
1256	fltr->action = act;
1257	fltr->loc = fsp->location;
1258	fltr->q_index = q_index;
1259
1260	if (fsp->flow_type & FLOW_EXT) {
1261		memcpy(fltr->ext_data.usr_def, fsp->h_ext.data,
1262		       sizeof(fltr->ext_data.usr_def));
1263		memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data,
1264		       sizeof(fltr->ext_mask.usr_def));
1265	}
1266
1267	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
1268	fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type);
1269
1270	switch (flow_type) {
1271	case TCP_V4_FLOW:
1272	case UDP_V4_FLOW:
1273	case SCTP_V4_FLOW:
1274		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
1275		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
1276		fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc;
1277		fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
1278		fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos;
1279		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
1280		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
1281		fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
1282		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
1283		fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
1284		fltr->ip_ver = 4;
1285		break;
1286	case AH_V4_FLOW:
1287	case ESP_V4_FLOW:
1288		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src;
1289		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst;
1290		fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi;
1291		fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos;
1292		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src;
1293		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
1294		fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
1295		fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
1296		fltr->ip_ver = 4;
1297		break;
1298	case IPV4_USER_FLOW:
1299		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
1300		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
1301		fltr->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
1302		fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos;
1303		fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto;
1304		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
1305		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
1306		fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
1307		fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
1308		fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
1309		fltr->ip_ver = 4;
1310		break;
1311	case TCP_V6_FLOW:
1312	case UDP_V6_FLOW:
1313	case SCTP_V6_FLOW:
1314		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1315		       sizeof(struct in6_addr));
1316		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1317		       sizeof(struct in6_addr));
1318		fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc;
1319		fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
1320		fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass;
1321		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
1322		       sizeof(struct in6_addr));
1323		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
1324		       sizeof(struct in6_addr));
1325		fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
1326		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
1327		fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
1328		fltr->ip_ver = 6;
1329		break;
1330	case AH_V6_FLOW:
1331	case ESP_V6_FLOW:
1332		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.ah_ip6_spec.ip6src,
1333		       sizeof(struct in6_addr));
1334		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst,
1335		       sizeof(struct in6_addr));
1336		fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi;
1337		fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass;
1338		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src,
1339		       sizeof(struct in6_addr));
1340		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst,
1341		       sizeof(struct in6_addr));
1342		fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
1343		fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
1344		fltr->ip_ver = 6;
1345		break;
1346	case IPV6_USER_FLOW:
1347		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1348		       sizeof(struct in6_addr));
1349		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1350		       sizeof(struct in6_addr));
1351		fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
1352		fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass;
1353		fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto;
1354		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
1355		       sizeof(struct in6_addr));
1356		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
1357		       sizeof(struct in6_addr));
1358		fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
1359		fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
1360		fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
1361		fltr->ip_ver = 6;
1362		break;
1363	case ETHER_FLOW:
1364		fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
1365		fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto;
1366		break;
1367	default:
1368		/* not doing un-parsed flow types */
1369		return -EINVAL;
1370	}
1371
1372	err = iavf_validate_fdir_fltr_masks(adapter, fltr);
1373	if (err)
1374		return err;
1375
1376	if (iavf_fdir_is_dup_fltr(adapter, fltr))
1377		return -EEXIST;
1378
1379	err = iavf_parse_rx_flow_user_data(fsp, fltr);
1380	if (err)
1381		return err;
1382
1383	return iavf_fill_fdir_add_msg(adapter, fltr);
1384}
1385
1386/**
1387 * iavf_add_fdir_ethtool - add Flow Director filter
1388 * @adapter: pointer to the VF adapter structure
1389 * @cmd: command to add Flow Director filter
1390 *
1391 * Returns 0 on success and negative values for failure
1392 */
1393static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
1394{
1395	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
1396	struct iavf_fdir_fltr *fltr;
1397	int count = 50;
1398	int err;
1399
1400	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
1401		return -EOPNOTSUPP;
1402
1403	if (fsp->flow_type & FLOW_MAC_EXT)
1404		return -EINVAL;
1405
1406	spin_lock_bh(&adapter->fdir_fltr_lock);
1407	if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) {
1408		spin_unlock_bh(&adapter->fdir_fltr_lock);
1409		dev_err(&adapter->pdev->dev,
1410			"Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n",
1411			IAVF_MAX_FDIR_FILTERS);
1412		return -ENOSPC;
1413	}
1414
1415	if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) {
1416		dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
1417		spin_unlock_bh(&adapter->fdir_fltr_lock);
1418		return -EEXIST;
1419	}
1420	spin_unlock_bh(&adapter->fdir_fltr_lock);
1421
1422	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
1423	if (!fltr)
1424		return -ENOMEM;
1425
1426	while (!mutex_trylock(&adapter->crit_lock)) {
1427		if (--count == 0) {
1428			kfree(fltr);
1429			return -EINVAL;
1430		}
1431		udelay(1);
1432	}
1433
1434	err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
1435	if (err)
1436		goto ret;
1437
1438	spin_lock_bh(&adapter->fdir_fltr_lock);
1439	iavf_fdir_list_add_fltr(adapter, fltr);
1440	adapter->fdir_active_fltr++;
1441	if (adapter->link_up) {
1442		fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
1443		adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
1444	} else {
1445		fltr->state = IAVF_FDIR_FLTR_INACTIVE;
1446	}
1447	spin_unlock_bh(&adapter->fdir_fltr_lock);
1448
1449	if (adapter->link_up)
1450		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1451ret:
1452	if (err && fltr)
1453		kfree(fltr);
1454
1455	mutex_unlock(&adapter->crit_lock);
1456	return err;
1457}
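
/* Example, assuming the standard ethtool CLI:
 *
 *	ethtool -N <ifname> flow-type tcp4 dst-port 80 action 2 loc 5
 *
 * adds a TCP/IPv4 filter at location 5 that steers matching packets to
 * queue 2; "action -1" would drop them instead (RX_CLS_FLOW_DISC above).
 */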
1458
1459/**
1460 * iavf_del_fdir_ethtool - delete Flow Director filter
1461 * @adapter: pointer to the VF adapter structure
1462 * @cmd: command to delete Flow Director filter
1463 *
1464 * Returns 0 on success and negative values for failure
1465 */
1466static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
1467{
1468	struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
1469	struct iavf_fdir_fltr *fltr = NULL;
1470	int err = 0;
1471
1472	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
1473		return -EOPNOTSUPP;
1474
1475	spin_lock_bh(&adapter->fdir_fltr_lock);
1476	fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
1477	if (fltr) {
1478		if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
1479			fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
1480			adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1481		} else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
1482			list_del(&fltr->list);
1483			kfree(fltr);
1484			adapter->fdir_active_fltr--;
1485			fltr = NULL;
1486		} else {
1487			err = -EBUSY;
1488		}
1489	} else if (adapter->fdir_active_fltr) {
1490		err = -EINVAL;
1491	}
1492	spin_unlock_bh(&adapter->fdir_fltr_lock);
1493
1494	if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
1495		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1496
1497	return err;
1498}
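
/* Example, assuming the standard ethtool CLI:
 *
 *	ethtool -N <ifname> delete 5
 *
 * removes the filter previously installed at location 5.
 */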
1499
1500/**
1501 * iavf_adv_rss_parse_hdrs - parses headers from RSS hash input
1502 * @cmd: ethtool rxnfc command
1503 *
1504 * This function parses the rxnfc command and returns intended
1505 * header types for RSS configuration
1506 */
1507static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
1508{
1509	u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE;
1510
1511	switch (cmd->flow_type) {
1512	case TCP_V4_FLOW:
1513		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
1514			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1515		break;
1516	case UDP_V4_FLOW:
1517		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
1518			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1519		break;
1520	case SCTP_V4_FLOW:
1521		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
1522			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1523		break;
1524	case TCP_V6_FLOW:
1525		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
1526			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1527		break;
1528	case UDP_V6_FLOW:
1529		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
1530			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1531		break;
1532	case SCTP_V6_FLOW:
1533		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
1534			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1535		break;
1536	default:
1537		break;
1538	}
1539
1540	return hdrs;
1541}
1542
1543/**
1544 * iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input
1545 * @cmd: ethtool rxnfc command
1546 *
1547 * This function parses the rxnfc command and returns intended hash fields for
1548 * RSS configuration
1549 */
1550static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd)
1551{
1552	u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
1553
1554	if (cmd->data & RXH_IP_SRC || cmd->data & RXH_IP_DST) {
1555		switch (cmd->flow_type) {
1556		case TCP_V4_FLOW:
1557		case UDP_V4_FLOW:
1558		case SCTP_V4_FLOW:
1559			if (cmd->data & RXH_IP_SRC)
1560				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA;
1561			if (cmd->data & RXH_IP_DST)
1562				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_DA;
1563			break;
1564		case TCP_V6_FLOW:
1565		case UDP_V6_FLOW:
1566		case SCTP_V6_FLOW:
1567			if (cmd->data & RXH_IP_SRC)
1568				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA;
1569			if (cmd->data & RXH_IP_DST)
1570				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_DA;
1571			break;
1572		default:
1573			break;
1574		}
1575	}
1576
1577	if (cmd->data & RXH_L4_B_0_1 || cmd->data & RXH_L4_B_2_3) {
1578		switch (cmd->flow_type) {
1579		case TCP_V4_FLOW:
1580		case TCP_V6_FLOW:
1581			if (cmd->data & RXH_L4_B_0_1)
1582				hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT;
1583			if (cmd->data & RXH_L4_B_2_3)
1584				hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT;
1585			break;
1586		case UDP_V4_FLOW:
1587		case UDP_V6_FLOW:
1588			if (cmd->data & RXH_L4_B_0_1)
1589				hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT;
1590			if (cmd->data & RXH_L4_B_2_3)
1591				hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT;
1592			break;
1593		case SCTP_V4_FLOW:
1594		case SCTP_V6_FLOW:
1595			if (cmd->data & RXH_L4_B_0_1)
1596				hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT;
1597			if (cmd->data & RXH_L4_B_2_3)
1598				hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT;
1599			break;
1600		default:
1601			break;
1602		}
1603	}
1604
1605	return hfld;
1606}
1607
1608/**
1609 * iavf_set_adv_rss_hash_opt - Enable/Disable flow types for RSS hash
1610 * @adapter: pointer to the VF adapter structure
1611 * @cmd: ethtool rxnfc command
1612 *
1613 * Returns Success if the flow input set is supported.
1614 */
1615static int
1616iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
1617			  struct ethtool_rxnfc *cmd)
1618{
1619	struct iavf_adv_rss *rss_old, *rss_new;
1620	bool rss_new_add = false;
1621	int count = 50, err = 0;
1622	u64 hash_flds;
1623	u32 hdrs;
1624
1625	if (!ADV_RSS_SUPPORT(adapter))
1626		return -EOPNOTSUPP;
1627
1628	hdrs = iavf_adv_rss_parse_hdrs(cmd);
1629	if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
1630		return -EINVAL;
1631
1632	hash_flds = iavf_adv_rss_parse_hash_flds(cmd);
1633	if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
1634		return -EINVAL;
1635
1636	rss_new = kzalloc(sizeof(*rss_new), GFP_KERNEL);
1637	if (!rss_new)
1638		return -ENOMEM;
1639
1640	if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds)) {
1641		kfree(rss_new);
1642		return -EINVAL;
1643	}
1644
1645	while (!mutex_trylock(&adapter->crit_lock)) {
1646		if (--count == 0) {
1647			kfree(rss_new);
1648			return -EINVAL;
1649		}
1650
1651		udelay(1);
1652	}
1653
1654	spin_lock_bh(&adapter->adv_rss_lock);
1655	rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
1656	if (rss_old) {
1657		if (rss_old->state != IAVF_ADV_RSS_ACTIVE) {
1658			err = -EBUSY;
1659		} else if (rss_old->hash_flds != hash_flds) {
1660			rss_old->state = IAVF_ADV_RSS_ADD_REQUEST;
1661			rss_old->hash_flds = hash_flds;
1662			memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg,
1663			       sizeof(rss_new->cfg_msg));
1664			adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
1665		} else {
1666			err = -EEXIST;
1667		}
1668	} else {
1669		rss_new_add = true;
1670		rss_new->state = IAVF_ADV_RSS_ADD_REQUEST;
1671		rss_new->packet_hdrs = hdrs;
1672		rss_new->hash_flds = hash_flds;
1673		list_add_tail(&rss_new->list, &adapter->adv_rss_list_head);
1674		adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
1675	}
1676	spin_unlock_bh(&adapter->adv_rss_lock);
1677
1678	if (!err)
1679		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1680
1681	mutex_unlock(&adapter->crit_lock);
1682
1683	if (!rss_new_add)
1684		kfree(rss_new);
1685
1686	return err;
1687}
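
/* Example, assuming the standard ethtool CLI:
 *
 *	ethtool -N <ifname> rx-flow-hash tcp4 sdfn
 *
 * requests hashing of TCP/IPv4 flows on the source/destination IP
 * addresses (s, d) and the source/destination TCP ports (f, n), which
 * maps to the IPV4_SA/DA and TCP_SRC/DST_PORT hash fields parsed above.
 */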
1688
1689/**
1690 * iavf_get_adv_rss_hash_opt - Retrieve hash fields for a given flow-type
1691 * @adapter: pointer to the VF adapter structure
1692 * @cmd: ethtool rxnfc command
1693 *
 * Returns 0 and fills @cmd->data on success, negative error code otherwise.
1695 */
1696static int
1697iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter,
1698			  struct ethtool_rxnfc *cmd)
1699{
1700	struct iavf_adv_rss *rss;
1701	u64 hash_flds;
1702	u32 hdrs;
1703
1704	if (!ADV_RSS_SUPPORT(adapter))
1705		return -EOPNOTSUPP;
1706
1707	cmd->data = 0;
1708
1709	hdrs = iavf_adv_rss_parse_hdrs(cmd);
1710	if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
1711		return -EINVAL;
1712
1713	spin_lock_bh(&adapter->adv_rss_lock);
1714	rss = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
1715	if (rss)
1716		hash_flds = rss->hash_flds;
1717	else
1718		hash_flds = IAVF_ADV_RSS_HASH_INVALID;
1719	spin_unlock_bh(&adapter->adv_rss_lock);
1720
1721	if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
1722		return -EINVAL;
1723
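	/* Translate the stored hash fields back into ethtool RXH_* flags. */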
1724	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
1725			 IAVF_ADV_RSS_HASH_FLD_IPV6_SA))
1726		cmd->data |= (u64)RXH_IP_SRC;
1727
1728	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
1729			 IAVF_ADV_RSS_HASH_FLD_IPV6_DA))
1730		cmd->data |= (u64)RXH_IP_DST;
1731
1732	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
1733			 IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT |
1734			 IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT))
1735		cmd->data |= (u64)RXH_L4_B_0_1;
1736
1737	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT |
1738			 IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
1739			 IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
1740		cmd->data |= (u64)RXH_L4_B_2_3;
1741
1742	return 0;
1743}
1744
1745/**
1746 * iavf_set_rxnfc - command to set Rx flow rules.
1747 * @netdev: network interface device structure
1748 * @cmd: ethtool rxnfc command
1749 *
1750 * Returns 0 for success and negative values for errors
1751 */
1752static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1753{
1754	struct iavf_adapter *adapter = netdev_priv(netdev);
1755	int ret = -EOPNOTSUPP;
1756
1757	switch (cmd->cmd) {
1758	case ETHTOOL_SRXCLSRLINS:
1759		ret = iavf_add_fdir_ethtool(adapter, cmd);
1760		break;
1761	case ETHTOOL_SRXCLSRLDEL:
1762		ret = iavf_del_fdir_ethtool(adapter, cmd);
1763		break;
1764	case ETHTOOL_SRXFH:
1765		ret = iavf_set_adv_rss_hash_opt(adapter, cmd);
1766		break;
1767	default:
1768		break;
1769	}
1770
1771	return ret;
1772}
1773
1774/**
1775 * iavf_get_rxnfc - command to get RX flow classification rules
1776 * @netdev: network interface device structure
1777 * @cmd: ethtool rxnfc command
1778 * @rule_locs: pointer to store rule locations
1779 *
 * Returns 0 if the command is supported, negative error code otherwise.
1781 **/
1782static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1783			  u32 *rule_locs)
1784{
1785	struct iavf_adapter *adapter = netdev_priv(netdev);
1786	int ret = -EOPNOTSUPP;
1787
1788	switch (cmd->cmd) {
1789	case ETHTOOL_GRXRINGS:
1790		cmd->data = adapter->num_active_queues;
1791		ret = 0;
1792		break;
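	/* Report the number of installed Flow Director filters and the
	 * maximum number of filters the VF supports.
	 */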
1793	case ETHTOOL_GRXCLSRLCNT:
1794		if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
1795			break;
1796		spin_lock_bh(&adapter->fdir_fltr_lock);
1797		cmd->rule_cnt = adapter->fdir_active_fltr;
1798		spin_unlock_bh(&adapter->fdir_fltr_lock);
1799		cmd->data = IAVF_MAX_FDIR_FILTERS;
1800		ret = 0;
1801		break;
1802	case ETHTOOL_GRXCLSRULE:
1803		ret = iavf_get_ethtool_fdir_entry(adapter, cmd);
1804		break;
1805	case ETHTOOL_GRXCLSRLALL:
1806		ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs);
1807		break;
1808	case ETHTOOL_GRXFH:
1809		ret = iavf_get_adv_rss_hash_opt(adapter, cmd);
1810		break;
1811	default:
1812		break;
1813	}
1814
1815	return ret;
1816}

/**
 * iavf_get_channels - get the number of channels supported by the device
1819 * @netdev: network interface device structure
1820 * @ch: channel information structure
1821 *
1822 * For the purposes of our device, we only use combined channels, i.e. a tx/rx
1823 * queue pair. Report one extra channel to match our "other" MSI-X vector.
1824 **/
1825static void iavf_get_channels(struct net_device *netdev,
1826			      struct ethtool_channels *ch)
1827{
1828	struct iavf_adapter *adapter = netdev_priv(netdev);
1829
1830	/* Report maximum channels */
1831	ch->max_combined = adapter->vsi_res->num_queue_pairs;
1832
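	/* Report the non-queue ("other") MSI-X vector as well */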
1833	ch->max_other = NONQ_VECS;
1834	ch->other_count = NONQ_VECS;
1835
1836	ch->combined_count = adapter->num_active_queues;
1837}
1838
1839/**
 * iavf_set_channels - set the new channel count
1841 * @netdev: network interface device structure
1842 * @ch: channel information structure
1843 *
1844 * Negotiate a new number of channels with the PF then do a reset.  During
1845 * reset we'll realloc queues and fix the RSS table.  Returns 0 on success,
1846 * negative on failure.
1847 **/
1848static int iavf_set_channels(struct net_device *netdev,
1849			     struct ethtool_channels *ch)
1850{
1851	struct iavf_adapter *adapter = netdev_priv(netdev);
1852	u32 num_req = ch->combined_count;
1853	int ret = 0;
1854
1855	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1856	    adapter->num_tc) {
1857		dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n");
1858		return -EINVAL;
1859	}
1860
1861	/* All of these should have already been checked by ethtool before this
1862	 * even gets to us, but just to be sure.
1863	 */
1864	if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs)
1865		return -EINVAL;
1866
1867	if (num_req == adapter->num_active_queues)
1868		return 0;
1869
1870	if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
1871		return -EINVAL;
1872
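	/* Record the requested count and trigger a VF reset to apply it */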
1873	adapter->num_req_queues = num_req;
1874	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1875	iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
1876
1877	ret = iavf_wait_for_reset(adapter);
1878	if (ret)
		netdev_warn(netdev, "Timed out or interrupted waiting for reset after changing channel count\n");
1880
1881	return ret;
1882}
1883
1884/**
1885 * iavf_get_rxfh_key_size - get the RSS hash key size
1886 * @netdev: network interface device structure
1887 *
 * Returns the RSS hash key size.
1889 **/
1890static u32 iavf_get_rxfh_key_size(struct net_device *netdev)
1891{
1892	struct iavf_adapter *adapter = netdev_priv(netdev);
1893
1894	return adapter->rss_key_size;
1895}
1896
1897/**
1898 * iavf_get_rxfh_indir_size - get the rx flow hash indirection table size
1899 * @netdev: network interface device structure
1900 *
1901 * Returns the table size.
1902 **/
1903static u32 iavf_get_rxfh_indir_size(struct net_device *netdev)
1904{
1905	struct iavf_adapter *adapter = netdev_priv(netdev);
1906
1907	return adapter->rss_lut_size;
1908}
1909
1910/**
1911 * iavf_get_rxfh - get the rx flow hash indirection table
1912 * @netdev: network interface device structure
1913 * @indir: indirection table
1914 * @key: hash key
1915 * @hfunc: hash function in use
1916 *
 * Copies the RSS hash key and indirection table from the adapter's cached
 * copies. Always returns 0.
1918 **/
1919static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
1920			 u8 *hfunc)
1921{
1922	struct iavf_adapter *adapter = netdev_priv(netdev);
1923	u16 i;
1924
1925	if (hfunc)
1926		*hfunc = ETH_RSS_HASH_TOP;
1927	if (key)
1928		memcpy(key, adapter->rss_key, adapter->rss_key_size);
1929
1930	if (indir)
		/* Each 32-bit element of 'indir' holds one LUT entry */
1932		for (i = 0; i < adapter->rss_lut_size; i++)
1933			indir[i] = (u32)adapter->rss_lut[i];
1934
1935	return 0;
1936}
1937
1938/**
1939 * iavf_set_rxfh - set the rx flow hash indirection table
1940 * @netdev: network interface device structure
1941 * @indir: indirection table
1942 * @key: hash key
1943 * @hfunc: hash function to use
1944 *
 * Only the Toeplitz hash function is supported. Returns 0 after updating the
 * key and/or table, or a negative error code on failure.
1947 **/
1948static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
1949			 const u8 *key, const u8 hfunc)
1950{
1951	struct iavf_adapter *adapter = netdev_priv(netdev);
1952	u16 i;
1953
1954	/* Only support toeplitz hash function */
1955	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
1956		return -EOPNOTSUPP;
1957
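	/* Nothing to do if neither the key nor the table is being changed */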
1958	if (!key && !indir)
1959		return 0;
1960
1961	if (key)
1962		memcpy(adapter->rss_key, key, adapter->rss_key_size);
1963
1964	if (indir) {
		/* Each 32-bit element of 'indir' holds one LUT entry */
1966		for (i = 0; i < adapter->rss_lut_size; i++)
1967			adapter->rss_lut[i] = (u8)(indir[i]);
1968	}
1969
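	/* Apply the updated key and/or table to the device */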
1970	return iavf_config_rss(adapter);
1971}
1972
1973static const struct ethtool_ops iavf_ethtool_ops = {
1974	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1975				     ETHTOOL_COALESCE_USE_ADAPTIVE,
1976	.get_drvinfo		= iavf_get_drvinfo,
1977	.get_link		= ethtool_op_get_link,
1978	.get_ringparam		= iavf_get_ringparam,
1979	.set_ringparam		= iavf_set_ringparam,
1980	.get_strings		= iavf_get_strings,
1981	.get_ethtool_stats	= iavf_get_ethtool_stats,
1982	.get_sset_count		= iavf_get_sset_count,
1983	.get_priv_flags		= iavf_get_priv_flags,
1984	.set_priv_flags		= iavf_set_priv_flags,
1985	.get_msglevel		= iavf_get_msglevel,
1986	.set_msglevel		= iavf_set_msglevel,
1987	.get_coalesce		= iavf_get_coalesce,
1988	.set_coalesce		= iavf_set_coalesce,
1989	.get_per_queue_coalesce = iavf_get_per_queue_coalesce,
1990	.set_per_queue_coalesce = iavf_set_per_queue_coalesce,
1991	.set_rxnfc		= iavf_set_rxnfc,
1992	.get_rxnfc		= iavf_get_rxnfc,
1993	.get_rxfh_indir_size	= iavf_get_rxfh_indir_size,
1994	.get_rxfh		= iavf_get_rxfh,
1995	.set_rxfh		= iavf_set_rxfh,
1996	.get_channels		= iavf_get_channels,
1997	.set_channels		= iavf_set_channels,
1998	.get_rxfh_key_size	= iavf_get_rxfh_key_size,
1999	.get_link_ksettings	= iavf_get_link_ksettings,
2000};
2001
2002/**
2003 * iavf_set_ethtool_ops - Initialize ethtool ops struct
2004 * @netdev: network interface device structure
2005 *
2006 * Sets ethtool ops struct in our netdev so that ethtool can call
2007 * our functions.
2008 **/
2009void iavf_set_ethtool_ops(struct net_device *netdev)
2010{
2011	netdev->ethtool_ops = &iavf_ethtool_ops;
2012}
2013