// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 */

#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>
#include <net/flow_offload.h>
#include <net/switchdev.h>
#include <uapi/linux/if_bridge.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"

struct cfp_rule {
	int port;
	struct ethtool_rx_flow_spec fs;
	struct list_head next;
};

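/* Layout of one UDF slice: @slices holds the anchor/offset word for each
 * UDF, @mask_value the framing mask programmed into CORE_CFP_MASK_PORT(6)
 * and @base_offset the first UDF configuration register of the slice.
 */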
struct cfp_udf_slice_layout {
	u8 slices[UDFS_PER_SLICE];
	u32 mask_value;
	u32 base_offset;
};

struct cfp_udf_layout {
	struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
};

static const u8 zero_slice[UDFS_PER_SLICE] = { };

/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
	.udfs = {
		[1] = {
			.slices = {
				/* End of L2, byte offset 12, src IP[0:15] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[16:31] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, dst IP[0:15] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, dst IP[16:31] */
				CFG_UDF_EOL2 | 9,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
				0, 0, 0
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
		},
	},
};
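
/* UDF offsets are expressed in 2-byte words from the chosen anchor, so e.g.
 * CFG_UDF_EOL2 | 6 above extracts the half-word starting 12 bytes past the
 * end of the L2 header.
 */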

/* UDF slices layout for a TCPv6/UDPv6 specification */
static const struct cfp_udf_layout udf_tcpip6_layout = {
	.udfs = {
		[0] = {
			.slices = {
				/* End of L2, byte offset 8, src IP[0:15] */
				CFG_UDF_EOL2 | 4,
				/* End of L2, byte offset 10, src IP[16:31] */
				CFG_UDF_EOL2 | 5,
				/* End of L2, byte offset 12, src IP[32:47] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[48:63] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, src IP[64:79] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, src IP[80:95] */
				CFG_UDF_EOL2 | 9,
				/* End of L2, byte offset 20, src IP[96:111] */
				CFG_UDF_EOL2 | 10,
				/* End of L2, byte offset 22, src IP[112:127] */
				CFG_UDF_EOL2 | 11,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_B_0_8_PORT_0,
		},
		[3] = {
			.slices = {
				/* End of L2, byte offset 24, dst IP[0:15] */
				CFG_UDF_EOL2 | 12,
				/* End of L2, byte offset 26, dst IP[16:31] */
				CFG_UDF_EOL2 | 13,
				/* End of L2, byte offset 28, dst IP[32:47] */
				CFG_UDF_EOL2 | 14,
				/* End of L2, byte offset 30, dst IP[48:63] */
				CFG_UDF_EOL2 | 15,
				/* End of L2, byte offset 32, dst IP[64:79] */
				CFG_UDF_EOL2 | 16,
				/* End of L2, byte offset 34, dst IP[80:95] */
				CFG_UDF_EOL2 | 17,
				/* End of L2, byte offset 36, dst IP[96:111] */
				CFG_UDF_EOL2 | 18,
				/* End of L2, byte offset 38, dst IP[112:127] */
				CFG_UDF_EOL2 | 19,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_D_0_11_PORT_0,
		},
	},
};

static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
	unsigned int i, count = 0;

	for (i = 0; i < UDFS_PER_SLICE; i++) {
		if (layout[i] != 0)
			count++;
	}

	return count;
}

static inline u32 udf_upper_bits(int num_udf)
{
	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
}

static inline u32 udf_lower_bits(int num_udf)
{
	return (u8)GENMASK(num_udf - 1, 0);
}
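
/* Worked example, assuming UDFS_PER_SLICE == 9: the 9 UDFs of the
 * TCPv6/UDPv6 layout give udf_upper_bits(9) == 0x1 (UDF_Valid[8]) and
 * udf_lower_bits(9) == 0xff (UDF_Valid[7:0]); the 6 UDFs of the
 * TCPv4/UDPv4 layout give 0x0 and 0x3f respectively.
 */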

static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
					     unsigned int start)
{
	const struct cfp_udf_slice_layout *slice_layout;
	unsigned int slice_idx;

	for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
		slice_layout = &l->udfs[slice_idx];
		if (memcmp(slice_layout->slices, zero_slice,
			   sizeof(zero_slice)))
			break;
	}

	return slice_idx;
}

static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
				const struct cfp_udf_layout *layout,
				unsigned int slice_num)
{
	u32 offset = layout->udfs[slice_num].base_offset;
	unsigned int i;

	for (i = 0; i < UDFS_PER_SLICE; i++)
		core_writel(priv, layout->udfs[slice_num].slices[i],
			    offset + i * 4);
}

static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
	reg |= OP_STR_DONE | op;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & OP_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
					     unsigned int addr)
{
	u32 reg;

	WARN_ON(addr >= priv->num_cfp_rules);

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= addr << XCESS_ADDR_SHIFT;
	core_writel(priv, reg, CORE_CFP_ACC);
}

static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
	/* Entry #0 is reserved */
	return priv->num_cfp_rules - 1;
}

static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
				   unsigned int rule_index,
				   int src_port,
				   unsigned int port_num,
				   unsigned int queue_num,
				   bool fwd_map_change)
{
	int ret;
	u32 reg;

	/* Replace ARL derived destination with DST_MAP derived, defining
	 * which port and queue this should be forwarded to.
	 */
	if (fwd_map_change)
		reg = CHANGE_FWRD_MAP_IB_REP_ARL |
		      BIT(port_num + DST_MAP_IB_SHIFT) |
		      CHANGE_TC | queue_num << NEW_TC_SHIFT;
	else
		reg = 0;

	/* Enable looping back to the original port */
	if (src_port == port_num)
		reg |= LOOP_BK_EN;

	core_writel(priv, reg, CORE_ACT_POL_DATA0);

	/* Set classification ID that needs to be put in Broadcom tag */
	core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);

	core_writel(priv, 0, CORE_ACT_POL_DATA2);

	/* Configure policer RAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
	if (ret) {
		pr_err("Policer entry at %d failed\n", rule_index);
		return ret;
	}

	/* Disable the policer */
	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

	/* Now the rate meter */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
	if (ret) {
		pr_err("Meter entry at %d failed\n", rule_index);
		return ret;
	}

	return 0;
}

static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
				   struct flow_dissector_key_ipv4_addrs *addrs,
				   struct flow_dissector_key_ports *ports,
				   const __be16 vlan_tci,
				   unsigned int slice_num, u8 num_udf,
				   bool mask)
{
	u32 reg, offset;

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	reg = udf_lower_bits(num_udf) << 24 | be16_to_cpu(vlan_tci) >> 8;
	if (mask)
		core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
	else
		core_writel(priv, reg, CORE_CFP_DATA_PORT(5));

	/* C-Tag		[31:24]
	 * UDF_n_A8		[23:8]
	 * UDF_n_A7		[7:0]
	 */
	reg = (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, reg, offset);

	/* UDF_n_A7		[31:24]
	 * UDF_n_A6		[23:8]
	 * UDF_n_A5		[7:0]
	 */
	reg = be16_to_cpu(ports->dst) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, reg, offset);

	/* UDF_n_A5		[31:24]
	 * UDF_n_A4		[23:8]
	 * UDF_n_A3		[7:0]
	 */
	reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
	      (u32)be16_to_cpu(ports->src) << 8 |
	      (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, reg, offset);
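
	/* Note how UDF_n_A5 (the destination port) straddles the two
	 * registers above: e.g. port 0x1234 contributes 0x12 to
	 * CORE_CFP_*_PORT(3)[7:0] and 0x34 to CORE_CFP_*_PORT(2)[31:24].
	 */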

	/* UDF_n_A3		[31:24]
	 * UDF_n_A2		[23:8]
	 * UDF_n_A1		[7:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
	      (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, reg, offset);

	/* UDF_n_A1		[31:24]
	 * UDF_n_A0		[23:8]
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}

static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	__be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
	struct ethtool_rx_flow_spec_input input = {};
	const struct cfp_udf_layout *layout;
	unsigned int slice_num, rule_index;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv4_addrs ipv4;
	struct flow_match_ports ports;
	struct flow_match_ip ip;
	u8 ip_proto, ip_frag;
	u8 num_udf;
	u32 reg;
	int ret;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V4_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

	/* Extract VLAN TCI */
	if (fs->flow_type & FLOW_EXT) {
		vlan_tci = fs->h_ext.vlan_tci;
		vlan_m_tci = fs->m_ext.vlan_tci;
	}

	/* Locate the first rule available */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index = find_first_zero_bit(priv->cfp.used,
						 priv->num_cfp_rules);
	else
		rule_index = fs->location;

	if (rule_index > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
	flow_rule_match_ports(flow->rule, &ports);
	flow_rule_match_ip(flow->rule, &ip);

	layout = &udf_tcpip4_layout;
	/* We only use one UDF slice for now */
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	core_writel(priv, ip.key->tos << IPTOS_SHIFT |
		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
		    udf_upper_bits(num_udf),
		    CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv4 packets */
	core_writel(priv, layout->udfs[slice_num].mask_value |
		    udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));

	/* Program the match and the mask */
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, vlan_tci,
			       slice_num, num_udf, false);
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, vlan_m_tci,
			       SLICE_NUM_MASK, num_udf, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the rule as being used and return it */
	set_bit(rule_index, priv->cfp.used);
	set_bit(rule_index, priv->cfp.unique);
	fs->location = rule_index;

	return 0;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
	return ret;
}

static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
				   const __be32 *ip6_addr, const __be16 port,
				   const __be16 vlan_tci,
				   unsigned int slice_num, u32 udf_bits,
				   bool mask)
{
	u32 reg, tmp, val, offset;

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	reg = udf_bits << 24 | be16_to_cpu(vlan_tci) >> 8;
	if (mask)
		core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
	else
		core_writel(priv, reg, CORE_CFP_DATA_PORT(5));

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	reg = be32_to_cpu(ip6_addr[3]);
	val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
	val |= (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, val, offset);

	/* UDF_n_B7 (lower)	[31:24]	(addr[7:0])
	 * UDF_n_B6		[23:8] (addr[31:16])
	 * UDF_n_B5 (upper)	[7:0] (addr[47:40])
	 */
	tmp = be32_to_cpu(ip6_addr[2]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, val, offset);

	/* UDF_n_B5 (lower)	[31:24] (addr[39:32])
	 * UDF_n_B4		[23:8] (addr[63:48])
	 * UDF_n_B3 (upper)	[7:0] (addr[79:72])
	 */
	reg = be32_to_cpu(ip6_addr[1]);
	val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, val, offset);

	/* UDF_n_B3 (lower)	[31:24] (addr[71:64])
	 * UDF_n_B2		[23:8] (addr[95:80])
	 * UDF_n_B1 (upper)	[7:0] (addr[111:104])
	 */
	tmp = be32_to_cpu(ip6_addr[0]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, val, offset);

	/* UDF_n_B1 (lower)	[31:24] (addr[103:96])
	 * UDF_n_B0		[23:8] (addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}

static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
					      int port, u32 location)
{
	struct cfp_rule *rule;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		if (rule->port == port && rule->fs.location == location)
			return rule;
	}

	return NULL;
}

static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct cfp_rule *rule = NULL;
	size_t fs_size = 0;
	int ret = 1;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = 1;
		if (rule->port != port)
			continue;

		if (rule->fs.flow_type != fs->flow_type ||
		    rule->fs.ring_cookie != fs->ring_cookie ||
		    rule->fs.h_ext.data[0] != fs->h_ext.data[0])
			continue;

		switch (fs->flow_type & ~FLOW_EXT) {
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
			fs_size = sizeof(struct ethtool_tcpip6_spec);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
			fs_size = sizeof(struct ethtool_tcpip4_spec);
			break;
		default:
			continue;
		}

		ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
		ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
		/* Compare VLAN TCI values as well */
		if (rule->fs.flow_type & FLOW_EXT) {
			ret |= rule->fs.h_ext.vlan_tci != fs->h_ext.vlan_tci;
			ret |= rule->fs.m_ext.vlan_tci != fs->m_ext.vlan_tci;
		}
		if (ret == 0)
			break;
	}

	return ret;
}

static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	__be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
	struct ethtool_rx_flow_spec_input input = {};
	unsigned int slice_num, rule_index[2];
	const struct cfp_udf_layout *layout;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv6_addrs ipv6;
	struct flow_match_ports ports;
	u8 ip_proto, ip_frag;
	int ret = 0;
	u8 num_udf;
	u32 reg;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V6_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V6_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

	/* Extract VLAN TCI */
	if (fs->flow_type & FLOW_EXT) {
		vlan_tci = fs->h_ext.vlan_tci;
		vlan_m_tci = fs->m_ext.vlan_tci;
	}

	layout = &udf_tcpip6_layout;
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Negotiate two indexes, one for the second half which we are chained
	 * from, which is what we will return to user-space, and a second one
	 * which is used to store its first half. That first half does not
	 * allow any choice of placement, so it just needs to find the next
	 * available bit. We return the second half as fs->location because
	 * that helps with the rule lookup later on: since the second half is
	 * chained from its first half, we can easily identify IPv6 CFP rules
	 * by checking whether they carry a CHAIN_ID.
	 *
	 * We also want the second half to have a lower rule_index than its
	 * first half because the HW search is by incrementing addresses.
	 */
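	/* Illustration: with entry 0 reserved and entries 1 and 2 free, the
	 * second half lands at rule_index[1] = 1 and the first half at
	 * rule_index[0] = 2; user-space is told location 1, whose TCAM data
	 * carries CHAIN_ID 2.
	 */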
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index[1] = find_first_zero_bit(priv->cfp.used,
						    priv->num_cfp_rules);
	else
		rule_index[1] = fs->location;
	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	/* Flag it as used (cleared on error path) such that we can immediately
	 * obtain a second one to chain from.
	 */
	set_bit(rule_index[1], priv->cfp.used);

	rule_index[0] = find_first_zero_bit(priv->cfp.used,
					    priv->num_cfp_rules);
	if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
		ret = -ENOSPC;
		goto out_err;
	}

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow)) {
		ret = PTR_ERR(flow);
		goto out_err;
	}
	flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
	flow_rule_match_ports(flow->rule, &ports);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
		ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv6 packets including
	 * UDF_Valid[8]
	 */
	reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* Slice the IPv6 source address and port */
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
			       ports.key->src, vlan_tci, slice_num,
			       udf_lower_bits(num_udf), false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
			       ports.mask->src, vlan_m_tci, SLICE_NUM_MASK,
			       udf_lower_bits(num_udf), true);

	/* Insert into TCAM now because we need to insert a second rule */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port, port_num,
				      queue_num, false);
	if (ret)
		goto out_err_flow_rule;

	/* Now deal with the second slice to chain this rule */
	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Chained rule, source port match is coming from the rule we are
	 * chained from.
	 */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
	core_writel(priv, 0, CORE_CFP_MASK_PORT(7));

	/* CHAIN ID		[31:24] chain to previous slice
	 * Reserved		[23:20]
	 * UDF_Valid[11:8]	[19:16]
	 * UDF_Valid[7:0]	[15:8]
	 * UDF_n_D11		[7:0]
	 */
	reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
		udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask all except chain ID, UDF Valid[8] and UDF Valid[7:0] */
	reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
		udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
			       ports.key->dst, 0, slice_num,
			       0, false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
			       ports.mask->dst, 0, SLICE_NUM_MASK,
			       0, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now, set chain ID to
	 * the one we are chained to
	 */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the first half as used now that both halves are programmed,
	 * return the second half as the location, and flag it as unique
	 * when dumping rules
	 */
	set_bit(rule_index[0], priv->cfp.used);
	set_bit(rule_index[1], priv->cfp.unique);
	fs->location = rule_index[1];

	return ret;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
out_err:
	clear_bit(rule_index[1], priv->cfp.used);
	return ret;
}

static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
				   struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	__u64 ring_cookie = fs->ring_cookie;
	struct switchdev_obj_port_vlan vlan;
	unsigned int queue_num, port_num;
	u16 vid;
	int ret;

	/* This rule is a Wake-on-LAN filter and we must specifically
	 * target the CPU port in order for it to be working.
	 */
	if (ring_cookie == RX_CLS_FLOW_WAKE)
		ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;

	/* We do not support discarding packets; check that the
	 * destination port is enabled and that we are within the
	 * number of ports supported by the switch.
	 */
	port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;

	if (ring_cookie == RX_CLS_FLOW_DISC ||
	    !(dsa_is_user_port(ds, port_num) ||
	      dsa_is_cpu_port(ds, port_num)) ||
	    port_num >= priv->hw_params.num_ports)
		return -EINVAL;

	/* If the rule is matching a particular VLAN, make sure that we honor
	 * the matching and have it tagged or untagged on the destination
	 * port; we do this on egress with a VLAN entry. The egress tagging
	 * attribute is expected to be provided in h_ext.data[1] bit 0: a 1
	 * means untagged, a 0 means tagged.
	 */
	if (fs->flow_type & FLOW_EXT) {
		/* We cannot support matching multiple VLAN IDs yet */
		if ((be16_to_cpu(fs->m_ext.vlan_tci) & VLAN_VID_MASK) !=
		    VLAN_VID_MASK)
			return -EINVAL;

		vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK;
		vlan.vid = vid;
		if (be32_to_cpu(fs->h_ext.data[1]) & 1)
			vlan.flags = BRIDGE_VLAN_INFO_UNTAGGED;
		else
			vlan.flags = 0;

		ret = ds->ops->port_vlan_add(ds, port_num, &vlan, NULL);
		if (ret)
			return ret;
	}

	/* We have a small oddity where Port 6 just does not have a
	 * valid bit here (so we subtract one).
	 */
	queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES;
	if (port_num >= 7)
		port_num -= 1;
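
	/* e.g. ring_cookie 18 selects port_num 2 and queue_num 2
	 * (18 / 8 and 18 % 8, assuming SF2_NUM_EGRESS_QUEUES == 8).
	 */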

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

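/* Called for ETHTOOL_SRXCLSRLINS. A hypothetical user-space invocation
 * (interface name made up) could look like:
 *
 *   ethtool -N sw0p0 flow-type tcp4 dst-ip 192.168.1.2 dst-port 80 action 17
 *
 * with action 17 meaning port 2, queue 1 (17 = 2 * 8 + 1).
 */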
static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule = NULL;
	int ret = -EINVAL;

	/* Check for unsupported extensions */
	if (fs->flow_type & FLOW_MAC_EXT)
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY &&
	    fs->location > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	if ((fs->flow_type & FLOW_EXT) &&
	    !(ds->ops->port_vlan_add || ds->ops->port_vlan_del))
		return -EOPNOTSUPP;

	if (fs->location != RX_CLS_LOC_ANY &&
	    test_bit(fs->location, priv->cfp.used))
		return -EBUSY;

	ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
	if (ret == 0)
		return -EEXIST;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->port = port;
	memcpy(&rule->fs, fs, sizeof(*fs));
	list_add_tail(&rule->next, &priv->cfp.rules_list);

	return ret;
}

static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
				    u32 loc, u32 *next_loc)
{
	int ret;
	u32 reg;

	/* Indicate which rule we want to read */
	bcm_sf2_cfp_rule_addr_set(priv, loc);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Check if this is possibly an IPv6 rule that would
	 * indicate we need to delete its companion rule
	 * as well
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
	if (next_loc)
		*next_loc = (reg >> 24) & CHAIN_ID_MASK;

	/* Clear its valid bits */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
	reg &= ~SLICE_VALID;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

	/* Write back this entry into the TCAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret)
		return ret;

	clear_bit(loc, priv->cfp.used);
	clear_bit(loc, priv->cfp.unique);

	return 0;
}

static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
				   u32 loc)
{
	u32 next_loc = 0;
	int ret;

	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
	if (ret)
		return ret;

	/* If this was an IPv6 rule, delete its companion rule too */
	if (next_loc)
		ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);

	return ret;
}

static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
{
	struct cfp_rule *rule;
	int ret;

	if (loc > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	/* Refuse deleting unused rules, and those that are not unique since
	 * that could leave IPv6 rules with one of their chained rules still
	 * in the table.
	 */
	if (!test_bit(loc, priv->cfp.unique) || loc == 0)
		return -EINVAL;

	rule = bcm_sf2_cfp_rule_find(priv, port, loc);
	if (!rule)
		return -EINVAL;

	ret = bcm_sf2_cfp_rule_remove(priv, port, loc);

	list_del(&rule->next);
	kfree(rule);

	return ret;
}

static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	unsigned int i;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xff;

	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
	struct cfp_rule *rule;

	rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
	if (!rule)
		return -EINVAL;

	memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));

	bcm_sf2_invert_masks(&nfc->fs);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);

	return 0;
}

/* List the installed rules by walking the bitmap of unique entries */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
				    int port, struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	unsigned int index = 1, rules_cnt = 0;

	for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
		rule_locs[rules_cnt] = index;
		rules_cnt++;
	}

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);
	nfc->rule_cnt = rules_cnt;

	return 0;
}

int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		/* Subtract the default, unusable rule */
		nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
					      priv->num_cfp_rules) - 1;
		/* We support specifying rule locations */
		nfc->data |= RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device */
	if (p->ethtool_ops->get_rxnfc) {
		ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
		if (ret == -EOPNOTSUPP)
			ret = 0;
	}

	return ret;
}

int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc)
{
	struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
		break;

	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device.
	 * This can fail, so rollback the operation if we need to.
	 */
	if (p->ethtool_ops->set_rxnfc) {
		ret = p->ethtool_ops->set_rxnfc(p, nfc);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_lock(&priv->cfp.lock);
			bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
			mutex_unlock(&priv->cfp.lock);
		} else {
			ret = 0;
		}
	}

	return ret;
}

int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg |= TCAM_RESET;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & TCAM_RESET))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

void bcm_sf2_cfp_exit(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule, *n;

	if (list_empty(&priv->cfp.rules_list))
		return;

	list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
		bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
}

int bcm_sf2_cfp_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule;
	int ret = 0;
	u32 reg;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg &= ~CFP_EN_MAP_MASK;
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	ret = bcm_sf2_cfp_rst(priv);
	if (ret)
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
					      rule->fs.location);
		if (ret) {
			dev_err(ds->dev, "failed to remove rule\n");
			return ret;
		}

		ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
		if (ret) {
			dev_err(ds->dev, "failed to restore rule\n");
			return ret;
		}
	}

	return ret;
}

static const struct bcm_sf2_cfp_stat {
	unsigned int offset;
	unsigned int ram_loc;
	const char *name;
} bcm_sf2_cfp_stats[] = {
	{
		.offset = CORE_STAT_GREEN_CNTR,
		.ram_loc = GREEN_STAT_RAM,
		.name = "Green"
	},
	{
		.offset = CORE_STAT_YELLOW_CNTR,
		.ram_loc = YELLOW_STAT_RAM,
		.name = "Yellow"
	},
	{
		.offset = CORE_STAT_RED_CNTR,
		.ram_loc = RED_STAT_RAM,
		.name = "Red"
	},
};
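
/* Each rule exposes one counter per policer color; with the
 * "CFP%03d_%sCntr" format below, rule #1 is reported as
 * "CFP001_GreenCntr", "CFP001_YellowCntr" and "CFP001_RedCntr".
 */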

void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
			     u32 stringset, uint8_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	char buf[ETH_GSTRING_LEN];
	unsigned int i, j, iter;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 1; i < priv->num_cfp_rules; i++) {
		for (j = 0; j < s; j++) {
			snprintf(buf, sizeof(buf),
				 "CFP%03d_%sCntr",
				 i, bcm_sf2_cfp_stats[j].name);
			iter = (i - 1) * s + j;
			strscpy(data + iter * ETH_GSTRING_LEN,
				buf, ETH_GSTRING_LEN);
		}
	}
}

void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
				   uint64_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	const struct bcm_sf2_cfp_stat *stat;
	unsigned int i, j, iter;
	struct cfp_rule *rule;
	int ret;

	mutex_lock(&priv->cfp.lock);
	for (i = 1; i < priv->num_cfp_rules; i++) {
		rule = bcm_sf2_cfp_rule_find(priv, port, i);
		if (!rule)
			continue;

		for (j = 0; j < s; j++) {
			stat = &bcm_sf2_cfp_stats[j];

			bcm_sf2_cfp_rule_addr_set(priv, i);
			ret = bcm_sf2_cfp_op(priv, stat->ram_loc | OP_SEL_READ);
			if (ret)
				continue;

			iter = (i - 1) * s + j;
			data[iter] = core_readl(priv, stat->offset);
		}
	}
	mutex_unlock(&priv->cfp.lock);
}

int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	if (sset != ETH_SS_STATS)
		return 0;

	/* 3 counters per CFP rule */
	return (priv->num_cfp_rules - 1) * ARRAY_SIZE(bcm_sf2_cfp_stats);
}