1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2015 Cavium, Inc.
4 */
5
6/* ETHTOOL Support for VNIC_VF Device*/
7
8#include <linux/pci.h>
9#include <linux/net_tstamp.h>
10
11#include "nic_reg.h"
12#include "nic.h"
13#include "nicvf_queues.h"
14#include "q_struct.h"
15#include "thunder_bgx.h"
16#include "../common/cavium_ptp.h"
17
18#define DRV_NAME	"nicvf"
19
/* Pairs an ethtool stat string with the index of the corresponding u64
 * slot inside the stats structure the table refers to (hw, drv or queue).
 */
struct nicvf_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};
24
/* Build a nicvf_stat entry for a field of struct nicvf_hw_stats:
 * the stat name is the stringified field name and the index is the
 * field's u64 slot within the structure.
 */
#define NICVF_HW_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \
}

/* Same, but for fields of struct nicvf_drv_stats */
#define NICVF_DRV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \
}
34
/* Hardware statistics (struct nicvf_hw_stats).  Order here defines the
 * order of both the strings and the values reported to ethtool.
 */
static const struct nicvf_stat nicvf_hw_stats[] = {
	NICVF_HW_STAT(rx_bytes),
	NICVF_HW_STAT(rx_frames),
	NICVF_HW_STAT(rx_ucast_frames),
	NICVF_HW_STAT(rx_bcast_frames),
	NICVF_HW_STAT(rx_mcast_frames),
	NICVF_HW_STAT(rx_drops),
	NICVF_HW_STAT(rx_drop_red),
	NICVF_HW_STAT(rx_drop_red_bytes),
	NICVF_HW_STAT(rx_drop_overrun),
	NICVF_HW_STAT(rx_drop_overrun_bytes),
	NICVF_HW_STAT(rx_drop_bcast),
	NICVF_HW_STAT(rx_drop_mcast),
	NICVF_HW_STAT(rx_drop_l3_bcast),
	NICVF_HW_STAT(rx_drop_l3_mcast),
	NICVF_HW_STAT(rx_fcs_errors),
	NICVF_HW_STAT(rx_l2_errors),
	NICVF_HW_STAT(tx_bytes),
	NICVF_HW_STAT(tx_frames),
	NICVF_HW_STAT(tx_ucast_frames),
	NICVF_HW_STAT(tx_bcast_frames),
	NICVF_HW_STAT(tx_mcast_frames),
	NICVF_HW_STAT(tx_drops),
};
59
/* Driver statistics (struct nicvf_drv_stats, kept per-cpu and summed
 * at dump time).  Order defines the reporting order to ethtool.
 */
static const struct nicvf_stat nicvf_drv_stats[] = {
	NICVF_DRV_STAT(rx_bgx_truncated_pkts),
	NICVF_DRV_STAT(rx_jabber_errs),
	NICVF_DRV_STAT(rx_fcs_errs),
	NICVF_DRV_STAT(rx_bgx_errs),
	NICVF_DRV_STAT(rx_prel2_errs),
	NICVF_DRV_STAT(rx_l2_hdr_malformed),
	NICVF_DRV_STAT(rx_oversize),
	NICVF_DRV_STAT(rx_undersize),
	NICVF_DRV_STAT(rx_l2_len_mismatch),
	NICVF_DRV_STAT(rx_l2_pclp),
	NICVF_DRV_STAT(rx_ip_ver_errs),
	NICVF_DRV_STAT(rx_ip_csum_errs),
	NICVF_DRV_STAT(rx_ip_hdr_malformed),
	NICVF_DRV_STAT(rx_ip_payload_malformed),
	NICVF_DRV_STAT(rx_ip_ttl_errs),
	NICVF_DRV_STAT(rx_l3_pclp),
	NICVF_DRV_STAT(rx_l4_malformed),
	NICVF_DRV_STAT(rx_l4_csum_errs),
	NICVF_DRV_STAT(rx_udp_len_errs),
	NICVF_DRV_STAT(rx_l4_port_errs),
	NICVF_DRV_STAT(rx_tcp_flag_errs),
	NICVF_DRV_STAT(rx_tcp_offset_errs),
	NICVF_DRV_STAT(rx_l4_pclp),
	NICVF_DRV_STAT(rx_truncated_pkts),

	NICVF_DRV_STAT(tx_desc_fault),
	NICVF_DRV_STAT(tx_hdr_cons_err),
	NICVF_DRV_STAT(tx_subdesc_err),
	NICVF_DRV_STAT(tx_max_size_exceeded),
	NICVF_DRV_STAT(tx_imm_size_oflow),
	NICVF_DRV_STAT(tx_data_seq_err),
	NICVF_DRV_STAT(tx_mem_seq_err),
	NICVF_DRV_STAT(tx_lock_viol),
	NICVF_DRV_STAT(tx_data_fault),
	NICVF_DRV_STAT(tx_tstmp_conflict),
	NICVF_DRV_STAT(tx_tstmp_timeout),
	NICVF_DRV_STAT(tx_mem_fault),
	NICVF_DRV_STAT(tx_csum_overlap),
	NICVF_DRV_STAT(tx_csum_overflow),

	NICVF_DRV_STAT(tx_tso),
	NICVF_DRV_STAT(tx_timeout),
	NICVF_DRV_STAT(txq_stop),
	NICVF_DRV_STAT(txq_wake),
	NICVF_DRV_STAT(rcv_buffer_alloc_failures),
	NICVF_DRV_STAT(page_alloc),
};
108
/* Per-queue statistics: both the RQ and SQ stats structures begin with
 * a bytes counter (slot 0) followed by a frames counter (slot 1).
 */
static const struct nicvf_stat nicvf_queue_stats[] = {
	{ "bytes", 0 },
	{ "frames", 1 },
};

/* Cached table sizes used by the string/stat dump loops below */
static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);
117
/* ethtool .get_link_ksettings: report link state, speed, duplex and the
 * supported/advertised mode masks derived from the current LMAC mode.
 */
static int nicvf_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *cmd)
{
	struct nicvf *nic = netdev_priv(netdev);
	u32 supported, advertising;

	supported = 0;
	advertising = 0;

	/* No link: nothing meaningful to report beyond "unknown" */
	if (!nic->link_up) {
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.speed = SPEED_UNKNOWN;
		return 0;
	}

	switch (nic->speed) {
	case SPEED_1000:
		/* NOTE(review): PORT_* values are an enum, not bit flags;
		 * PORT_MII | PORT_TP yields a combined value — confirm
		 * this is intentional.
		 */
		cmd->base.port = PORT_MII | PORT_TP;
		cmd->base.autoneg = AUTONEG_ENABLE;
		supported |= SUPPORTED_MII | SUPPORTED_TP;
		supported |= SUPPORTED_1000baseT_Full |
				  SUPPORTED_1000baseT_Half |
				  SUPPORTED_100baseT_Full  |
				  SUPPORTED_100baseT_Half  |
				  SUPPORTED_10baseT_Full   |
				  SUPPORTED_10baseT_Half;
		supported |= SUPPORTED_Autoneg;
		advertising |= ADVERTISED_1000baseT_Full |
				    ADVERTISED_1000baseT_Half |
				    ADVERTISED_100baseT_Full  |
				    ADVERTISED_100baseT_Half  |
				    ADVERTISED_10baseT_Full   |
				    ADVERTISED_10baseT_Half;
		break;
	case SPEED_10000:
		/* RXAUI is an on-board (copper) link; other 10G modes
		 * are treated as fibre.
		 */
		if (nic->mac_type == BGX_MODE_RXAUI) {
			cmd->base.port = PORT_TP;
			supported |= SUPPORTED_TP;
		} else {
			cmd->base.port = PORT_FIBRE;
			supported |= SUPPORTED_FIBRE;
		}
		cmd->base.autoneg = AUTONEG_DISABLE;
		supported |= SUPPORTED_10000baseT_Full;
		break;
	case SPEED_40000:
		cmd->base.port = PORT_FIBRE;
		cmd->base.autoneg = AUTONEG_DISABLE;
		supported |= SUPPORTED_FIBRE;
		supported |= SUPPORTED_40000baseCR4_Full;
		break;
	}
	cmd->base.duplex = nic->duplex;
	cmd->base.speed = nic->speed;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
180
181static u32 nicvf_get_link(struct net_device *netdev)
182{
183	struct nicvf *nic = netdev_priv(netdev);
184
185	return nic->link_up;
186}
187
188static void nicvf_get_drvinfo(struct net_device *netdev,
189			      struct ethtool_drvinfo *info)
190{
191	struct nicvf *nic = netdev_priv(netdev);
192
193	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
194	strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
195}
196
197static u32 nicvf_get_msglevel(struct net_device *netdev)
198{
199	struct nicvf *nic = netdev_priv(netdev);
200
201	return nic->msg_enable;
202}
203
204static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
205{
206	struct nicvf *nic = netdev_priv(netdev);
207
208	nic->msg_enable = lvl;
209}
210
211static void nicvf_get_qset_strings(struct nicvf *nic, u8 **data, int qset)
212{
213	int stats, qidx;
214	int start_qidx = qset * MAX_RCV_QUEUES_PER_QS;
215
216	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
217		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
218			sprintf(*data, "rxq%d: %s", qidx + start_qidx,
219				nicvf_queue_stats[stats].name);
220			*data += ETH_GSTRING_LEN;
221		}
222	}
223
224	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
225		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
226			sprintf(*data, "txq%d: %s", qidx + start_qidx,
227				nicvf_queue_stats[stats].name);
228			*data += ETH_GSTRING_LEN;
229		}
230	}
231}
232
233static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
234{
235	struct nicvf *nic = netdev_priv(netdev);
236	int stats;
237	int sqs;
238
239	if (sset != ETH_SS_STATS)
240		return;
241
242	for (stats = 0; stats < nicvf_n_hw_stats; stats++) {
243		memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN);
244		data += ETH_GSTRING_LEN;
245	}
246
247	for (stats = 0; stats < nicvf_n_drv_stats; stats++) {
248		memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN);
249		data += ETH_GSTRING_LEN;
250	}
251
252	nicvf_get_qset_strings(nic, &data, 0);
253
254	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
255		if (!nic->snicvf[sqs])
256			continue;
257		nicvf_get_qset_strings(nic->snicvf[sqs], &data, sqs + 1);
258	}
259
260	for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
261		sprintf(data, "bgx_rxstat%d: ", stats);
262		data += ETH_GSTRING_LEN;
263	}
264
265	for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) {
266		sprintf(data, "bgx_txstat%d: ", stats);
267		data += ETH_GSTRING_LEN;
268	}
269}
270
271static int nicvf_get_sset_count(struct net_device *netdev, int sset)
272{
273	struct nicvf *nic = netdev_priv(netdev);
274	int qstats_count;
275	int sqs;
276
277	if (sset != ETH_SS_STATS)
278		return -EINVAL;
279
280	qstats_count = nicvf_n_queue_stats *
281		       (nic->qs->rq_cnt + nic->qs->sq_cnt);
282	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
283		struct nicvf *snic;
284
285		snic = nic->snicvf[sqs];
286		if (!snic)
287			continue;
288		qstats_count += nicvf_n_queue_stats *
289				(snic->qs->rq_cnt + snic->qs->sq_cnt);
290	}
291
292	return nicvf_n_hw_stats + nicvf_n_drv_stats +
293		qstats_count +
294		BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
295}
296
297static void nicvf_get_qset_stats(struct nicvf *nic,
298				 struct ethtool_stats *stats, u64 **data)
299{
300	int stat, qidx;
301
302	if (!nic)
303		return;
304
305	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
306		nicvf_update_rq_stats(nic, qidx);
307		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
308			*((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)
309					[nicvf_queue_stats[stat].index];
310	}
311
312	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
313		nicvf_update_sq_stats(nic, qidx);
314		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
315			*((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats)
316					[nicvf_queue_stats[stat].index];
317	}
318}
319
320static void nicvf_get_ethtool_stats(struct net_device *netdev,
321				    struct ethtool_stats *stats, u64 *data)
322{
323	struct nicvf *nic = netdev_priv(netdev);
324	int stat, tmp_stats;
325	int sqs, cpu;
326
327	nicvf_update_stats(nic);
328
329	/* Update LMAC stats */
330	nicvf_update_lmac_stats(nic);
331
332	for (stat = 0; stat < nicvf_n_hw_stats; stat++)
333		*(data++) = ((u64 *)&nic->hw_stats)
334				[nicvf_hw_stats[stat].index];
335	for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
336		tmp_stats = 0;
337		for_each_possible_cpu(cpu)
338			tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
339				     [nicvf_drv_stats[stat].index];
340		*(data++) = tmp_stats;
341	}
342
343	nicvf_get_qset_stats(nic, stats, &data);
344
345	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
346		if (!nic->snicvf[sqs])
347			continue;
348		nicvf_get_qset_stats(nic->snicvf[sqs], stats, &data);
349	}
350
351	for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
352		*(data++) = nic->bgx_stats.rx_stats[stat];
353	for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
354		*(data++) = nic->bgx_stats.tx_stats[stat];
355}
356
357static int nicvf_get_regs_len(struct net_device *dev)
358{
359	return sizeof(u64) * NIC_VF_REG_COUNT;
360}
361
362static void nicvf_get_regs(struct net_device *dev,
363			   struct ethtool_regs *regs, void *reg)
364{
365	struct nicvf *nic = netdev_priv(dev);
366	u64 *p = (u64 *)reg;
367	u64 reg_offset;
368	int mbox, key, stat, q;
369	int i = 0;
370
371	regs->version = 0;
372	memset(p, 0, NIC_VF_REG_COUNT);
373
374	p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
375	/* Mailbox registers */
376	for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++)
377		p[i++] = nicvf_reg_read(nic,
378					NIC_VF_PF_MAILBOX_0_1 | (mbox << 3));
379
380	p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
381	p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
382	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
383	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
384	p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
385
386	for (key = 0; key < RSS_HASH_KEY_SIZE; key++)
387		p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));
388
389	/* Tx/Rx statistics */
390	for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++)
391		p[i++] = nicvf_reg_read(nic,
392					NIC_VNIC_TX_STAT_0_4 | (stat << 3));
393
394	for (i = 0; i < RX_STATS_ENUM_LAST; i++)
395		p[i++] = nicvf_reg_read(nic,
396					NIC_VNIC_RX_STAT_0_13 | (stat << 3));
397
398	p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);
399
400	/* All completion queue's registers */
401	for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
402		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
403		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
404		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
405		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
406		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
407		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
408		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
409		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
410		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
411		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
412	}
413
414	/* All receive queue's registers */
415	for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
416		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
417		p[i++] = nicvf_queue_reg_read(nic,
418						  NIC_QSET_RQ_0_7_STAT_0_1, q);
419		reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);
420		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
421	}
422
423	for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
424		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
425		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
426		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
427		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
428		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
429		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
430		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
431		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
432		/* Padding, was NIC_QSET_SQ_0_7_CNM_CHG, which
433		 * produces bus errors when read
434		 */
435		p[i++] = 0;
436		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
437		reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
438		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
439	}
440
441	for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
442		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
443		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
444		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
445		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
446		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
447		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
448		p[i++] = nicvf_queue_reg_read(nic,
449					      NIC_QSET_RBDR_0_1_STATUS0, q);
450		p[i++] = nicvf_queue_reg_read(nic,
451					      NIC_QSET_RBDR_0_1_STATUS1, q);
452		reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS;
453		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
454	}
455}
456
457static int nicvf_get_coalesce(struct net_device *netdev,
458			      struct ethtool_coalesce *cmd)
459{
460	struct nicvf *nic = netdev_priv(netdev);
461
462	cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs;
463	return 0;
464}
465
466static void nicvf_get_ringparam(struct net_device *netdev,
467				struct ethtool_ringparam *ring)
468{
469	struct nicvf *nic = netdev_priv(netdev);
470	struct queue_set *qs = nic->qs;
471
472	ring->rx_max_pending = MAX_CMP_QUEUE_LEN;
473	ring->rx_pending = qs->cq_len;
474	ring->tx_max_pending = MAX_SND_QUEUE_LEN;
475	ring->tx_pending = qs->sq_len;
476}
477
478static int nicvf_set_ringparam(struct net_device *netdev,
479			       struct ethtool_ringparam *ring)
480{
481	struct nicvf *nic = netdev_priv(netdev);
482	struct queue_set *qs = nic->qs;
483	u32 rx_count, tx_count;
484
485	/* Due to HW errata this is not supported on T88 pass 1.x silicon */
486	if (pass1_silicon(nic->pdev))
487		return -EINVAL;
488
489	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
490		return -EINVAL;
491
492	tx_count = clamp_t(u32, ring->tx_pending,
493			   MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN);
494	rx_count = clamp_t(u32, ring->rx_pending,
495			   MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN);
496
497	if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len))
498		return 0;
499
500	/* Permitted lengths are 1K, 2K, 4K, 8K, 16K, 32K, 64K */
501	qs->sq_len = rounddown_pow_of_two(tx_count);
502	qs->cq_len = rounddown_pow_of_two(rx_count);
503
504	if (netif_running(netdev)) {
505		nicvf_stop(netdev);
506		nicvf_open(netdev);
507	}
508
509	return 0;
510}
511
512static int nicvf_get_rss_hash_opts(struct nicvf *nic,
513				   struct ethtool_rxnfc *info)
514{
515	info->data = 0;
516
517	switch (info->flow_type) {
518	case TCP_V4_FLOW:
519	case TCP_V6_FLOW:
520	case UDP_V4_FLOW:
521	case UDP_V6_FLOW:
522	case SCTP_V4_FLOW:
523	case SCTP_V6_FLOW:
524		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
525		fallthrough;
526	case IPV4_FLOW:
527	case IPV6_FLOW:
528		info->data |= RXH_IP_SRC | RXH_IP_DST;
529		break;
530	default:
531		return -EINVAL;
532	}
533
534	return 0;
535}
536
537static int nicvf_get_rxnfc(struct net_device *dev,
538			   struct ethtool_rxnfc *info, u32 *rules)
539{
540	struct nicvf *nic = netdev_priv(dev);
541	int ret = -EOPNOTSUPP;
542
543	switch (info->cmd) {
544	case ETHTOOL_GRXRINGS:
545		info->data = nic->rx_queues;
546		ret = 0;
547		break;
548	case ETHTOOL_GRXFH:
549		return nicvf_get_rss_hash_opts(nic, info);
550	default:
551		break;
552	}
553	return ret;
554}
555
556static int nicvf_set_rss_hash_opts(struct nicvf *nic,
557				   struct ethtool_rxnfc *info)
558{
559	struct nicvf_rss_info *rss = &nic->rss_info;
560	u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
561
562	if (!rss->enable)
563		netdev_err(nic->netdev,
564			   "RSS is disabled, hash cannot be set\n");
565
566	netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
567		    info->flow_type, info->data);
568
569	if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
570		return -EINVAL;
571
572	switch (info->flow_type) {
573	case TCP_V4_FLOW:
574	case TCP_V6_FLOW:
575		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
576		case 0:
577			rss_cfg &= ~(1ULL << RSS_HASH_TCP);
578			break;
579		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
580			rss_cfg |= (1ULL << RSS_HASH_TCP);
581			break;
582		default:
583			return -EINVAL;
584		}
585		break;
586	case UDP_V4_FLOW:
587	case UDP_V6_FLOW:
588		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
589		case 0:
590			rss_cfg &= ~(1ULL << RSS_HASH_UDP);
591			break;
592		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
593			rss_cfg |= (1ULL << RSS_HASH_UDP);
594			break;
595		default:
596			return -EINVAL;
597		}
598		break;
599	case SCTP_V4_FLOW:
600	case SCTP_V6_FLOW:
601		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
602		case 0:
603			rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
604			break;
605		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
606			rss_cfg |= (1ULL << RSS_HASH_L4ETC);
607			break;
608		default:
609			return -EINVAL;
610		}
611		break;
612	case IPV4_FLOW:
613	case IPV6_FLOW:
614		rss_cfg = RSS_HASH_IP;
615		break;
616	default:
617		return -EINVAL;
618	}
619
620	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg);
621	return 0;
622}
623
624static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
625{
626	struct nicvf *nic = netdev_priv(dev);
627
628	switch (info->cmd) {
629	case ETHTOOL_SRXFH:
630		return nicvf_set_rss_hash_opts(nic, info);
631	default:
632		break;
633	}
634	return -EOPNOTSUPP;
635}
636
637static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
638{
639	return RSS_HASH_KEY_SIZE * sizeof(u64);
640}
641
642static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
643{
644	struct nicvf *nic = netdev_priv(dev);
645
646	return nic->rss_info.rss_size;
647}
648
649static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
650			  u8 *hfunc)
651{
652	struct nicvf *nic = netdev_priv(dev);
653	struct nicvf_rss_info *rss = &nic->rss_info;
654	int idx;
655
656	if (indir) {
657		for (idx = 0; idx < rss->rss_size; idx++)
658			indir[idx] = rss->ind_tbl[idx];
659	}
660
661	if (hkey)
662		memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
663
664	if (hfunc)
665		*hfunc = ETH_RSS_HASH_TOP;
666
667	return 0;
668}
669
670static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
671			  const u8 *hkey, const u8 hfunc)
672{
673	struct nicvf *nic = netdev_priv(dev);
674	struct nicvf_rss_info *rss = &nic->rss_info;
675	int idx;
676
677	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
678		return -EOPNOTSUPP;
679
680	if (!rss->enable) {
681		netdev_err(nic->netdev,
682			   "RSS is disabled, cannot change settings\n");
683		return -EIO;
684	}
685
686	if (indir) {
687		for (idx = 0; idx < rss->rss_size; idx++)
688			rss->ind_tbl[idx] = indir[idx];
689	}
690
691	if (hkey) {
692		memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
693		nicvf_set_rss_key(nic);
694	}
695
696	nicvf_config_rss(nic);
697	return 0;
698}
699
700/* Get no of queues device supports and current queue count */
701static void nicvf_get_channels(struct net_device *dev,
702			       struct ethtool_channels *channel)
703{
704	struct nicvf *nic = netdev_priv(dev);
705
706	memset(channel, 0, sizeof(*channel));
707
708	channel->max_rx = nic->max_queues;
709	channel->max_tx = nic->max_queues;
710
711	channel->rx_count = nic->rx_queues;
712	channel->tx_count = nic->tx_queues;
713}
714
/* Set no of Tx, Rx queues to be used */
static int nicvf_set_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct nicvf *nic = netdev_priv(dev);
	int err = 0;
	bool if_up = netif_running(dev);
	u8 cqcount, txq_count;

	/* Reject zero or over-limit queue counts */
	if (!channel->rx_count || !channel->tx_count)
		return -EINVAL;
	if (channel->rx_count > nic->max_queues)
		return -EINVAL;
	if (channel->tx_count > nic->max_queues)
		return -EINVAL;

	/* With XDP each RXQ gets its own XDP TXQ, so the combined count
	 * must still fit within the device limit.
	 */
	if (nic->xdp_prog &&
	    ((channel->tx_count + channel->rx_count) > nic->max_queues)) {
		netdev_err(nic->netdev,
			   "XDP mode, RXQs + TXQs > Max %d\n",
			   nic->max_queues);
		return -EINVAL;
	}

	if (if_up)
		nicvf_stop(dev);

	nic->rx_queues = channel->rx_count;
	nic->tx_queues = channel->tx_count;
	if (!nic->xdp_prog)
		nic->xdp_tx_queues = 0;
	else
		nic->xdp_tx_queues = channel->rx_count;

	/* A completion queue serves an Rx/Tx queue pair, so the number
	 * needed is the larger of the two counts.
	 */
	txq_count = nic->xdp_tx_queues + nic->tx_queues;
	cqcount = max(nic->rx_queues, txq_count);

	/* More CQs than one qset provides: compute how many secondary
	 * qsets must be borrowed from the PF.
	 */
	if (cqcount > MAX_CMP_QUEUES_PER_QS) {
		nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS);
		nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
	} else {
		nic->sqs_count = 0;
	}

	/* Primary qset carries at most one qset's worth of queues */
	nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
	nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
	nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);

	err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues);
	if (err)
		/* NOTE(review): on failure here a previously running
		 * interface is left down — confirm whether a reopen or
		 * rollback is intended.
		 */
		return err;

	if (if_up)
		nicvf_open(dev);

	netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
		    nic->tx_queues, nic->rx_queues);

	return err;
}
775
776static void nicvf_get_pauseparam(struct net_device *dev,
777				 struct ethtool_pauseparam *pause)
778{
779	struct nicvf *nic = netdev_priv(dev);
780	union nic_mbx mbx = {};
781
782	/* Supported only for 10G/40G interfaces */
783	if ((nic->mac_type == BGX_MODE_SGMII) ||
784	    (nic->mac_type == BGX_MODE_QSGMII) ||
785	    (nic->mac_type == BGX_MODE_RGMII))
786		return;
787
788	mbx.pfc.msg = NIC_MBOX_MSG_PFC;
789	mbx.pfc.get = 1;
790	if (!nicvf_send_msg_to_pf(nic, &mbx)) {
791		pause->autoneg = nic->pfc.autoneg;
792		pause->rx_pause = nic->pfc.fc_rx;
793		pause->tx_pause = nic->pfc.fc_tx;
794	}
795}
796
797static int nicvf_set_pauseparam(struct net_device *dev,
798				struct ethtool_pauseparam *pause)
799{
800	struct nicvf *nic = netdev_priv(dev);
801	union nic_mbx mbx = {};
802
803	/* Supported only for 10G/40G interfaces */
804	if ((nic->mac_type == BGX_MODE_SGMII) ||
805	    (nic->mac_type == BGX_MODE_QSGMII) ||
806	    (nic->mac_type == BGX_MODE_RGMII))
807		return -EOPNOTSUPP;
808
809	if (pause->autoneg)
810		return -EOPNOTSUPP;
811
812	mbx.pfc.msg = NIC_MBOX_MSG_PFC;
813	mbx.pfc.get = 0;
814	mbx.pfc.fc_rx = pause->rx_pause;
815	mbx.pfc.fc_tx = pause->tx_pause;
816	if (nicvf_send_msg_to_pf(nic, &mbx))
817		return -EAGAIN;
818
819	nic->pfc.fc_rx = pause->rx_pause;
820	nic->pfc.fc_tx = pause->tx_pause;
821
822	return 0;
823}
824
825static int nicvf_get_ts_info(struct net_device *netdev,
826			     struct ethtool_ts_info *info)
827{
828	struct nicvf *nic = netdev_priv(netdev);
829
830	if (!nic->ptp_clock)
831		return ethtool_op_get_ts_info(netdev, info);
832
833	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
834				SOF_TIMESTAMPING_RX_SOFTWARE |
835				SOF_TIMESTAMPING_SOFTWARE |
836				SOF_TIMESTAMPING_TX_HARDWARE |
837				SOF_TIMESTAMPING_RX_HARDWARE |
838				SOF_TIMESTAMPING_RAW_HARDWARE;
839
840	info->phc_index = cavium_ptp_clock_index(nic->ptp_clock);
841
842	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
843
844	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
845			   (1 << HWTSTAMP_FILTER_ALL);
846
847	return 0;
848}
849
/* ethtool operations exposed by the nicvf netdev */
static const struct ethtool_ops nicvf_ethtool_ops = {
	.get_link		= nicvf_get_link,
	.get_drvinfo		= nicvf_get_drvinfo,
	.get_msglevel		= nicvf_get_msglevel,
	.set_msglevel		= nicvf_set_msglevel,
	.get_strings		= nicvf_get_strings,
	.get_sset_count		= nicvf_get_sset_count,
	.get_ethtool_stats	= nicvf_get_ethtool_stats,
	.get_regs_len		= nicvf_get_regs_len,
	.get_regs		= nicvf_get_regs,
	.get_coalesce		= nicvf_get_coalesce,
	.get_ringparam		= nicvf_get_ringparam,
	.set_ringparam		= nicvf_set_ringparam,
	.get_rxnfc		= nicvf_get_rxnfc,
	.set_rxnfc		= nicvf_set_rxnfc,
	.get_rxfh_key_size	= nicvf_get_rxfh_key_size,
	.get_rxfh_indir_size	= nicvf_get_rxfh_indir_size,
	.get_rxfh		= nicvf_get_rxfh,
	.set_rxfh		= nicvf_set_rxfh,
	.get_channels		= nicvf_get_channels,
	.set_channels		= nicvf_set_channels,
	.get_pauseparam         = nicvf_get_pauseparam,
	.set_pauseparam         = nicvf_set_pauseparam,
	.get_ts_info		= nicvf_get_ts_info,
	.get_link_ksettings	= nicvf_get_link_ksettings,
};
876
/* Attach the nicvf ethtool operations to a newly created netdev */
void nicvf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &nicvf_ethtool_ops;
}
881