// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 */

#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>
#include <net/flow_offload.h>
#include <net/switchdev.h>
#include <uapi/linux/if_bridge.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"

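/* Software view of one CFP rule: the front-end port it applies to and the
 * ethtool flow specification it was created from. Rules are kept on
 * priv->cfp.rules_list for lookup, duplicate detection and re-programming
 * after a suspend/resume cycle.
 */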
struct cfp_rule {
	int port;
	struct ethtool_rx_flow_spec fs;
	struct list_head next;
};

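/* Describes how one UDF (User Defined Field) slice is programmed: which
 * 2-byte words to extract, relative to the end of the L2 or L3 header, the
 * mask applied to the frame classification word, and the base register
 * offset at which the slice is configured.
 */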
struct cfp_udf_slice_layout {
	u8 slices[UDFS_PER_SLICE];
	u32 mask_value;
	u32 base_offset;
};

struct cfp_udf_layout {
	struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
};

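/* An all-zero slice layout marks the slice as unused */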
static const u8 zero_slice[UDFS_PER_SLICE] = { };

/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
	.udfs = {
		[1] = {
			.slices = {
				/* End of L2, byte offset 12, src IP[0:15] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[16:31] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, dst IP[0:15] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, dst IP[16:31] */
				CFG_UDF_EOL2 | 9,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
				0, 0, 0
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
		},
	},
};

/* UDF slices layout for a TCPv6/UDPv6 specification */
static const struct cfp_udf_layout udf_tcpip6_layout = {
	.udfs = {
		[0] = {
			.slices = {
				/* End of L2, byte offset 8, src IP[0:15] */
				CFG_UDF_EOL2 | 4,
				/* End of L2, byte offset 10, src IP[16:31] */
				CFG_UDF_EOL2 | 5,
				/* End of L2, byte offset 12, src IP[32:47] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[48:63] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, src IP[64:79] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, src IP[80:95] */
				CFG_UDF_EOL2 | 9,
				/* End of L2, byte offset 20, src IP[96:111] */
				CFG_UDF_EOL2 | 10,
				/* End of L2, byte offset 22, src IP[112:127] */
				CFG_UDF_EOL2 | 11,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_B_0_8_PORT_0,
		},
		[3] = {
			.slices = {
				/* End of L2, byte offset 24, dst IP[0:15] */
				CFG_UDF_EOL2 | 12,
				/* End of L2, byte offset 26, dst IP[16:31] */
				CFG_UDF_EOL2 | 13,
				/* End of L2, byte offset 28, dst IP[32:47] */
				CFG_UDF_EOL2 | 14,
				/* End of L2, byte offset 30, dst IP[48:63] */
				CFG_UDF_EOL2 | 15,
				/* End of L2, byte offset 32, dst IP[64:79] */
				CFG_UDF_EOL2 | 16,
				/* End of L2, byte offset 34, dst IP[80:95] */
				CFG_UDF_EOL2 | 17,
				/* End of L2, byte offset 36, dst IP[96:111] */
				CFG_UDF_EOL2 | 18,
				/* End of L2, byte offset 38, dst IP[112:127] */
				CFG_UDF_EOL2 | 19,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_D_0_11_PORT_0,
		},
	},
};

static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
	unsigned int i, count = 0;

	for (i = 0; i < UDFS_PER_SLICE; i++) {
		if (layout[i] != 0)
			count++;
	}

	return count;
}

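/* Each slice contributes up to 9 UDF valid bits to the CFP key: the lower
 * eight live in one key word, the remaining upper bit in another, hence the
 * split between these two helpers.
 */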
static inline u32 udf_upper_bits(int num_udf)
{
	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
}

static inline u32 udf_lower_bits(int num_udf)
{
	return (u8)GENMASK(num_udf - 1, 0);
}

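/* Return the index of the first slice with a non-empty layout at or after
 * @start, or UDF_NUM_SLICES if no such slice exists.
 */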
static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
					     unsigned int start)
{
	const struct cfp_udf_slice_layout *slice_layout;
	unsigned int slice_idx;

	for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
		slice_layout = &l->udfs[slice_idx];
		if (memcmp(slice_layout->slices, zero_slice,
			   sizeof(zero_slice)))
			break;
	}

	return slice_idx;
}

static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
				const struct cfp_udf_layout *layout,
				unsigned int slice_num)
{
	u32 offset = layout->udfs[slice_num].base_offset;
	unsigned int i;

	for (i = 0; i < UDFS_PER_SLICE; i++)
		core_writel(priv, layout->udfs[slice_num].slices[i],
			    offset + i * 4);
}

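/* Start a CFP RAM operation (TCAM, action/policer or statistics RAM,
 * selected through @op) and busy-wait for the hardware to clear the
 * start/done bit.
 */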
static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
	reg |= OP_STR_DONE | op;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & OP_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
					     unsigned int addr)
{
	u32 reg;

	WARN_ON(addr >= priv->num_cfp_rules);

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= addr << XCESS_ADDR_SHIFT;
	core_writel(priv, reg, CORE_CFP_ACC);
}

static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
	/* Entry #0 is reserved */
	return priv->num_cfp_rules - 1;
}

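/* Program the action and policer RAMs for @rule_index: optionally replace
 * the forwarding map so matching packets are steered to @port_num and
 * @queue_num, and leave the policer disabled.
 */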
static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
				   unsigned int rule_index,
				   int src_port,
				   unsigned int port_num,
				   unsigned int queue_num,
				   bool fwd_map_change)
{
	int ret;
	u32 reg;

	/* Replace ARL derived destination with DST_MAP derived, define
	 * which port and queue this should be forwarded to.
	 */
	if (fwd_map_change)
		reg = CHANGE_FWRD_MAP_IB_REP_ARL |
		      BIT(port_num + DST_MAP_IB_SHIFT) |
		      CHANGE_TC | queue_num << NEW_TC_SHIFT;
	else
		reg = 0;

	/* Enable looping back to the original port */
	if (src_port == port_num)
		reg |= LOOP_BK_EN;

	core_writel(priv, reg, CORE_ACT_POL_DATA0);

	/* Set classification ID that needs to be put in Broadcom tag */
	core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);

	core_writel(priv, 0, CORE_ACT_POL_DATA2);

	/* Configure policer RAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
	if (ret) {
		pr_err("Policer entry at %d failed\n", rule_index);
		return ret;
	}

	/* Disable the policer */
	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

	/* Now the rate meter */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
	if (ret) {
		pr_err("Meter entry at %d failed\n", rule_index);
		return ret;
	}

	return 0;
}

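/* Pack an IPv4 source/destination address, L4 ports and VLAN TCI into the
 * six CFP key words, writing either the DATA or MASK TCAM ports depending
 * on @mask.
 */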
static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
				   struct flow_dissector_key_ipv4_addrs *addrs,
				   struct flow_dissector_key_ports *ports,
				   const __be16 vlan_tci,
				   unsigned int slice_num, u8 num_udf,
				   bool mask)
{
	u32 reg, offset;

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	reg = udf_lower_bits(num_udf) << 24 | be16_to_cpu(vlan_tci) >> 8;
	if (mask)
		core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
	else
		core_writel(priv, reg, CORE_CFP_DATA_PORT(5));

	/* C-Tag		[31:24]
	 * UDF_n_A8		[23:8]
	 * UDF_n_A7		[7:0]
	 */
	reg = (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, reg, offset);

	/* UDF_n_A7		[31:24]
	 * UDF_n_A6		[23:8]
	 * UDF_n_A5		[7:0]
	 */
	reg = be16_to_cpu(ports->dst) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, reg, offset);

	/* UDF_n_A5		[31:24]
	 * UDF_n_A4		[23:8]
	 * UDF_n_A3		[7:0]
	 */
	reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
	      (u32)be16_to_cpu(ports->src) << 8 |
	      (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, reg, offset);

	/* UDF_n_A3		[31:24]
	 * UDF_n_A2		[23:8]
	 * UDF_n_A1		[7:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
	      (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, reg, offset);

	/* UDF_n_A1		[31:24]
	 * UDF_n_A0		[23:8]
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}

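/* Program one IPv4 rule: pick a TCAM location, program the UDF layout,
 * write the TCAM entry, then the action/policer RAMs, and finally enable
 * CFP on the ingress port.
 */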
static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	__be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
	struct ethtool_rx_flow_spec_input input = {};
	const struct cfp_udf_layout *layout;
	unsigned int slice_num, rule_index;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv4_addrs ipv4;
	struct flow_match_ports ports;
	struct flow_match_ip ip;
	u8 ip_proto, ip_frag;
	u8 num_udf;
	u32 reg;
	int ret;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V4_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

	/* Extract VLAN TCI */
	if (fs->flow_type & FLOW_EXT) {
		vlan_tci = fs->h_ext.vlan_tci;
		vlan_m_tci = fs->m_ext.vlan_tci;
	}

	/* Locate the first rule available */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index = find_first_zero_bit(priv->cfp.used,
						 priv->num_cfp_rules);
	else
		rule_index = fs->location;

	if (rule_index > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
	flow_rule_match_ports(flow->rule, &ports);
	flow_rule_match_ip(flow->rule, &ip);

	layout = &udf_tcpip4_layout;
	/* We only use one UDF slice for now */
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	core_writel(priv, ip.key->tos << IPTOS_SHIFT |
		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
		    udf_upper_bits(num_udf),
		    CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv4 packets */
	core_writel(priv, layout->udfs[slice_num].mask_value |
		    udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));

	/* Program the match and the mask */
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, vlan_tci,
			       slice_num, num_udf, false);
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, vlan_m_tci,
			       SLICE_NUM_MASK, num_udf, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the rule as being used and return it */
	set_bit(rule_index, priv->cfp.used);
	set_bit(rule_index, priv->cfp.unique);
	fs->location = rule_index;

	return 0;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
	return ret;
}

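/* Pack one half of an IPv6 flow (a 128-bit address plus one L4 port) into
 * the CFP key words; a full IPv6 match spans two chained slices, one for
 * the source half and one for the destination half.
 */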
static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
				   const __be32 *ip6_addr, const __be16 port,
				   const __be16 vlan_tci,
				   unsigned int slice_num, u32 udf_bits,
				   bool mask)
{
	u32 reg, tmp, val, offset;

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	reg = udf_bits << 24 | be16_to_cpu(vlan_tci) >> 8;
	if (mask)
		core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
	else
		core_writel(priv, reg, CORE_CFP_DATA_PORT(5));

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	reg = be32_to_cpu(ip6_addr[3]);
	val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
	val |= (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, val, offset);

	/* UDF_n_B7 (lower)	[31:24]	(addr[7:0])
	 * UDF_n_B6		[23:8] (addr[31:16])
	 * UDF_n_B5 (upper)	[7:0] (addr[47:40])
	 */
	tmp = be32_to_cpu(ip6_addr[2]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, val, offset);

	/* UDF_n_B5 (lower)	[31:24] (addr[39:32])
	 * UDF_n_B4		[23:8] (addr[63:48])
	 * UDF_n_B3 (upper)	[7:0] (addr[79:72])
	 */
	reg = be32_to_cpu(ip6_addr[1]);
	val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, val, offset);

	/* UDF_n_B3 (lower)	[31:24] (addr[71:64])
	 * UDF_n_B2		[23:8] (addr[95:80])
	 * UDF_n_B1 (upper)	[7:0] (addr[111:104])
	 */
	tmp = be32_to_cpu(ip6_addr[0]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, val, offset);

	/* UDF_n_B1 (lower)	[31:24] (addr[103:96])
	 * UDF_n_B0		[23:8] (addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	       SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}

static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
					      int port, u32 location)
{
	struct cfp_rule *rule;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		if (rule->port == port && rule->fs.location == location)
			return rule;
	}

	return NULL;
}

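/* Return 0 if a rule identical to @fs is already programmed on @port,
 * non-zero otherwise.
 */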
static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct cfp_rule *rule = NULL;
	size_t fs_size = 0;
	int ret = 1;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = 1;
		if (rule->port != port)
			continue;

		if (rule->fs.flow_type != fs->flow_type ||
		    rule->fs.ring_cookie != fs->ring_cookie ||
		    rule->fs.h_ext.data[0] != fs->h_ext.data[0])
			continue;

		switch (fs->flow_type & ~FLOW_EXT) {
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
			fs_size = sizeof(struct ethtool_tcpip6_spec);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
			fs_size = sizeof(struct ethtool_tcpip4_spec);
			break;
		default:
			continue;
		}

		ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
		ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
		/* Compare VLAN TCI values as well */
		if (rule->fs.flow_type & FLOW_EXT) {
			ret |= rule->fs.h_ext.vlan_tci != fs->h_ext.vlan_tci;
			ret |= rule->fs.m_ext.vlan_tci != fs->m_ext.vlan_tci;
		}
		if (ret == 0)
			break;
	}

	return ret;
}

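/* IPv6 rules consume two chained TCAM entries: the first matches the
 * source address and port, the second matches the destination address and
 * port and carries a CHAIN_ID pointing back at the first.
 */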
static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	__be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
	struct ethtool_rx_flow_spec_input input = {};
	unsigned int slice_num, rule_index[2];
	const struct cfp_udf_layout *layout;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv6_addrs ipv6;
	struct flow_match_ports ports;
	u8 ip_proto, ip_frag;
	int ret = 0;
	u8 num_udf;
	u32 reg;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V6_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V6_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

	/* Extract VLAN TCI */
	if (fs->flow_type & FLOW_EXT) {
		vlan_tci = fs->h_ext.vlan_tci;
		vlan_m_tci = fs->m_ext.vlan_tci;
	}

	layout = &udf_tcpip6_layout;
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Negotiate two indexes: one for the second half, which is what we
	 * return to user-space, and one for the first half it is chained
	 * from. The first half does not allow any choice of placement, so
	 * it simply takes the next available bit. We return the second half
	 * as fs->location because that helps with the rule lookup later on:
	 * since the second half is chained from its first half, we can
	 * easily identify IPv6 CFP rules by checking whether they carry a
	 * CHAIN_ID.
	 *
	 * We also want the second half to have a lower rule_index than its
	 * first half because the HW search is by incrementing addresses.
	 */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index[1] = find_first_zero_bit(priv->cfp.used,
						    priv->num_cfp_rules);
	else
		rule_index[1] = fs->location;
	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	/* Flag it as used (cleared on error path) such that we can immediately
	 * obtain a second one to chain from.
	 */
	set_bit(rule_index[1], priv->cfp.used);

	rule_index[0] = find_first_zero_bit(priv->cfp.used,
					    priv->num_cfp_rules);
	if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
		ret = -ENOSPC;
		goto out_err;
	}

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow)) {
		ret = PTR_ERR(flow);
		goto out_err;
	}
	flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
	flow_rule_match_ports(flow->rule, &ports);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
		ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv6 packets including
	 * UDF_Valid[8]
	 */
	reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* Slice the IPv6 source address and port */
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
			       ports.key->src, vlan_tci, slice_num,
			       udf_lower_bits(num_udf), false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
			       ports.mask->src, vlan_m_tci, SLICE_NUM_MASK,
			       udf_lower_bits(num_udf), true);

	/* Insert into TCAM now because we need to insert a second rule */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port, port_num,
				      queue_num, false);
	if (ret)
		goto out_err_flow_rule;

	/* Now deal with the second slice to chain this rule */
	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Chained rule, source port match is coming from the rule we are
	 * chained from.
	 */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
	core_writel(priv, 0, CORE_CFP_MASK_PORT(7));

	/* CHAIN ID		[31:24] chain to previous slice
	 * Reserved		[23:20]
	 * UDF_Valid[11:8]	[19:16]
	 * UDF_Valid[7:0]	[15:8]
	 * UDF_n_D11		[7:0]
	 */
	reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
		udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask all except chain ID, UDF Valid[8] and UDF Valid[7:0] */
	reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
		udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
			       ports.key->dst, 0, slice_num,
			       0, false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
			       ports.mask->dst, 0, SLICE_NUM_MASK,
			       0, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now, set chain ID to
	 * the one we are chained to
	 */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the second half rule as being used now, return it as the
	 * location, and flag it as unique while dumping rules
	 */
	set_bit(rule_index[0], priv->cfp.used);
	set_bit(rule_index[1], priv->cfp.unique);
	fs->location = rule_index[1];

	return ret;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
out_err:
	clear_bit(rule_index[1], priv->cfp.used);
	return ret;
}

static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
				   struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	__u64 ring_cookie = fs->ring_cookie;
	struct switchdev_obj_port_vlan vlan;
	unsigned int queue_num, port_num;
	u16 vid;
	int ret;

	/* This rule is a Wake-on-LAN filter and we must specifically
	 * target the CPU port in order for it to work.
	 */
	if (ring_cookie == RX_CLS_FLOW_WAKE)
		ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;

	/* We do not support discarding packets; check that the
	 * destination port is enabled and that we are within the
	 * number of ports supported by the switch
	 */
	port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;

	if (ring_cookie == RX_CLS_FLOW_DISC ||
	    !(dsa_is_user_port(ds, port_num) ||
	      dsa_is_cpu_port(ds, port_num)) ||
	    port_num >= priv->hw_params.num_ports)
		return -EINVAL;

	/* If the rule matches a particular VLAN, make sure that we honor
	 * the matching and have it tagged or untagged on the destination
	 * port; we do this on egress with a VLAN entry. The egress tagging
	 * attribute is expected to be provided in h_ext.data[1] bit 0: a 1
	 * means untagged, a 0 means tagged.
	 */
	if (fs->flow_type & FLOW_EXT) {
		/* We cannot support matching multiple VLAN IDs yet */
		if ((be16_to_cpu(fs->m_ext.vlan_tci) & VLAN_VID_MASK) !=
		    VLAN_VID_MASK)
			return -EINVAL;

		vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK;
		vlan.vid_begin = vid;
		vlan.vid_end = vid;
		if (be32_to_cpu(fs->h_ext.data[1]) & 1)
			vlan.flags = BRIDGE_VLAN_INFO_UNTAGGED;
		else
			vlan.flags = 0;

		ret = ds->ops->port_vlan_prepare(ds, port_num, &vlan);
		if (ret)
			return ret;

		ds->ops->port_vlan_add(ds, port_num, &vlan);
	}

	/* We have a small oddity where Port 6 just does not have a
	 * valid bit here (so we subtract one).
	 */
	queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES;
	if (port_num >= 7)
		port_num -= 1;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule = NULL;
	int ret = -EINVAL;

	/* Check for unsupported extensions */
	if (fs->flow_type & FLOW_MAC_EXT)
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY &&
	    fs->location > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	if ((fs->flow_type & FLOW_EXT) &&
	    !(ds->ops->port_vlan_prepare || ds->ops->port_vlan_add ||
	      ds->ops->port_vlan_del))
		return -EOPNOTSUPP;

	if (fs->location != RX_CLS_LOC_ANY &&
	    test_bit(fs->location, priv->cfp.used))
		return -EBUSY;

	ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
	if (ret == 0)
		return -EEXIST;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->port = port;
	memcpy(&rule->fs, fs, sizeof(*fs));
	list_add_tail(&rule->next, &priv->cfp.rules_list);

	return ret;
}

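/* Invalidate the TCAM entry at @loc by clearing its SLICE_VALID bits and,
 * when @next_loc is provided, report the entry's CHAIN_ID so a chained
 * IPv6 companion rule can be deleted as well.
 */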
static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
				    u32 loc, u32 *next_loc)
{
	int ret;
	u32 reg;

	/* Indicate which rule we want to read */
	bcm_sf2_cfp_rule_addr_set(priv, loc);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Check if this is possibly an IPv6 rule that would
	 * indicate we need to delete its companion rule
	 * as well
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
	if (next_loc)
		*next_loc = (reg >> 24) & CHAIN_ID_MASK;

	/* Clear its valid bits */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
	reg &= ~SLICE_VALID;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

	/* Write back this entry into the TCAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret)
		return ret;

	clear_bit(loc, priv->cfp.used);
	clear_bit(loc, priv->cfp.unique);

	return 0;
}

static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
				   u32 loc)
{
	u32 next_loc = 0;
	int ret;

	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
	if (ret)
		return ret;

	/* If this was an IPv6 rule, delete its companion rule too */
	if (next_loc)
		ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);

	return ret;
}

static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
{
	struct cfp_rule *rule;
	int ret;

	if (loc > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	/* Refuse deleting unused rules, and those that are not unique since
	 * that could leave IPv6 rules with only one of the chained rules in
	 * the table.
	 */
	if (!test_bit(loc, priv->cfp.unique) || loc == 0)
		return -EINVAL;

	rule = bcm_sf2_cfp_rule_find(priv, port, loc);
	if (!rule)
		return -EINVAL;

	ret = bcm_sf2_cfp_rule_remove(priv, port, loc);

	list_del(&rule->next);
	kfree(rule);

	return ret;
}

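/* Flip every mask bit in the flow specification so that the masks reported
 * back through ETHTOOL_GRXCLSRULE follow the convention user-space expects.
 */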
static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	unsigned int i;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xff;

	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
	struct cfp_rule *rule;

	rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
	if (!rule)
		return -EINVAL;

	memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));

	bcm_sf2_invert_masks(&nfc->fs);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);

	return 0;
}

/* Report all active rule locations by walking the bitmap of unique rules */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
				    int port, struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	unsigned int index = 1, rules_cnt = 0;

	for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
		rule_locs[rules_cnt] = index;
		rules_cnt++;
	}

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);
	nfc->rule_cnt = rules_cnt;

	return 0;
}

int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		/* Subtract the default, unusable rule */
		nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
					      priv->num_cfp_rules) - 1;
		/* We support specifying rule locations */
		nfc->data |= RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device */
	if (p->ethtool_ops->get_rxnfc) {
		ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
		if (ret == -EOPNOTSUPP)
			ret = 0;
	}

	return ret;
}

int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc)
{
	struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device.
	 * This can fail, so rollback the operation if we need to.
	 */
	if (p->ethtool_ops->set_rxnfc) {
		ret = p->ethtool_ops->set_rxnfc(p, nfc);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_lock(&priv->cfp.lock);
			bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
			mutex_unlock(&priv->cfp.lock);
		} else {
			ret = 0;
		}
	}

	return ret;
}

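/* Reset the CFP TCAM and busy-wait for the self-clearing reset bit */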
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg |= TCAM_RESET;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & TCAM_RESET))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

void bcm_sf2_cfp_exit(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule, *n;

	if (list_empty(&priv->cfp.rules_list))
		return;

	list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
		bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
}

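/* Restore CFP state after a resume: with CFP disabled and the TCAM reset,
 * remove and re-insert every rule cached on the rules list.
 */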
int bcm_sf2_cfp_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule;
	int ret = 0;
	u32 reg;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg &= ~CFP_EN_MAP_MASK;
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	ret = bcm_sf2_cfp_rst(priv);
	if (ret)
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
					      rule->fs.location);
		if (ret) {
			dev_err(ds->dev, "failed to remove rule\n");
			return ret;
		}

		ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
		if (ret) {
			dev_err(ds->dev, "failed to restore rule\n");
			return ret;
		}
	}

	return ret;
}

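/* Per-rule policer counters exposed through ethtool -S, one
 * green/yellow/red triplet per CFP rule.
 */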
static const struct bcm_sf2_cfp_stat {
	unsigned int offset;
	unsigned int ram_loc;
	const char *name;
} bcm_sf2_cfp_stats[] = {
	{
		.offset = CORE_STAT_GREEN_CNTR,
		.ram_loc = GREEN_STAT_RAM,
		.name = "Green"
	},
	{
		.offset = CORE_STAT_YELLOW_CNTR,
		.ram_loc = YELLOW_STAT_RAM,
		.name = "Yellow"
	},
	{
		.offset = CORE_STAT_RED_CNTR,
		.ram_loc = RED_STAT_RAM,
		.name = "Red"
	},
};

void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
			     u32 stringset, uint8_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	char buf[ETH_GSTRING_LEN];
	unsigned int i, j, iter;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 1; i < priv->num_cfp_rules; i++) {
		for (j = 0; j < s; j++) {
			snprintf(buf, sizeof(buf),
				 "CFP%03d_%sCntr",
				 i, bcm_sf2_cfp_stats[j].name);
			iter = (i - 1) * s + j;
			strlcpy(data + iter * ETH_GSTRING_LEN,
				buf, ETH_GSTRING_LEN);
		}
	}
}

void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
				   uint64_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	const struct bcm_sf2_cfp_stat *stat;
	unsigned int i, j, iter;
	struct cfp_rule *rule;
	int ret;

	mutex_lock(&priv->cfp.lock);
	for (i = 1; i < priv->num_cfp_rules; i++) {
		rule = bcm_sf2_cfp_rule_find(priv, port, i);
		if (!rule)
			continue;

		for (j = 0; j < s; j++) {
			stat = &bcm_sf2_cfp_stats[j];

			bcm_sf2_cfp_rule_addr_set(priv, i);
			ret = bcm_sf2_cfp_op(priv, stat->ram_loc | OP_SEL_READ);
			if (ret)
				continue;

			iter = (i - 1) * s + j;
			data[iter] = core_readl(priv, stat->offset);
		}
	}
	mutex_unlock(&priv->cfp.lock);
}

int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	if (sset != ETH_SS_STATS)
		return 0;

	/* 3 counters per CFP rule */
	return (priv->num_cfp_rules - 1) * ARRAY_SIZE(bcm_sf2_cfp_stats);
}