1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Header Parser helpers for Marvell PPv2 Network Controller
4 *
5 * Copyright (C) 2014 Marvell
6 *
7 * Marcin Wojtas <mw@semihalf.com>
8 */
9
10#include <linux/kernel.h>
11#include <linux/netdevice.h>
12#include <linux/etherdevice.h>
13#include <linux/platform_device.h>
14#include <uapi/linux/ppp_defs.h>
15#include <net/ip.h>
16#include <net/ipv6.h>
17
18#include "mvpp2.h"
19#include "mvpp2_prs.h"
20
/* Write a sw parser entry (both tcam and sram halves) to the hardware.
 * Access is indirect: an index register selects the entry, then the
 * data registers are written word by word.
 * Returns 0 on success, -EINVAL if the entry index is out of range.
 */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit so the written entry becomes active */
	pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);

	return 0;
}
44
/* Initialize a sw entry @pe from the hw entry at index @tid.
 * Returns -EINVAL on a bad index, MVPP2_PRS_TCAM_ENTRY_INVALID if the
 * hw entry has its invalidation bit set (remaining words are not read),
 * 0 otherwise.
 */
int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
			   int tid)
{
	int i;

	if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	memset(pe, 0, sizeof(*pe));
	pe->index = tid;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	/* Read the invalidation word first to avoid fetching a dead entry */
	pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
75
/* Invalidate the tcam hw entry at @index by setting its invalidation
 * bit; the rest of the entry is left untouched in hardware.
 */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}
84
85/* Enable shadow table entry and set its lookup ID */
86static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
87{
88	priv->prs_shadow[index].valid = true;
89	priv->prs_shadow[index].lu = lu;
90}
91
92/* Update ri fields in shadow table entry */
93static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
94				    unsigned int ri, unsigned int ri_mask)
95{
96	priv->prs_shadow[index].ri_mask = ri_mask;
97	priv->prs_shadow[index].ri = ri;
98}
99
/* Update lookup field in tcam sw entry: store the lookup ID and enable
 * matching on every bit of the LU field.
 */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	/* Clear both the LU value and its enable bits before setting */
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
}
108
109/* Update mask for single port in tcam sw entry */
110static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
111				    unsigned int port, bool add)
112{
113	if (add)
114		pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
115	else
116		pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
117}
118
/* Update port map in tcam sw entry. The hw stores the map inverted:
 * a cleared enable bit means the corresponding port matches, so the
 * complement of @ports is written to the enable field.
 */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
}
127
128/* Obtain port map from tcam sw entry */
129unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
130{
131	return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
132}
133
/* Set byte of data and its enable bits in tcam sw entry.
 * Each tcam word packs two data bytes in its low half and their enable
 * bytes 16 bits above, so both fields shift by the same in-word offset.
 */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;

	/* Clear old data byte and its enable bits, then write the new ones */
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
}
146
147/* Get byte of data and its enable bits from tcam sw entry */
148void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
149				  unsigned int offs, unsigned char *byte,
150				  unsigned char *enable)
151{
152	int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
153
154	*byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
155	*enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
156}
157
158/* Compare tcam data bytes with a pattern */
159static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
160				    u16 data)
161{
162	u16 tcam_data;
163
164	tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
165	return tcam_data == data;
166}
167
/* Update ai bits in tcam sw entry. Only the bits selected by @enable
 * are changed to the corresponding value in @bits; the matching enable
 * bits are then turned on so the hw compares them.
 */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
		else
			pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
	}

	/* Enable comparison on all the bits just updated */
	pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
}
186
187/* Get ai bits from tcam sw entry */
188static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
189{
190	return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
191}
192
/* Match an ethertype in tcam sw entry: big-endian on the wire, so the
 * high byte goes first, with both bytes fully enabled.
 */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset, (ethertype >> 8) & 0xff, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
200
/* Match a 12-bit VLAN ID in tcam sw entry: the top nibble lands in the
 * first byte (only its low four bits enabled), the rest in the second.
 */
static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
				unsigned short vid)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset, (vid >> 8) & 0xf, 0xf);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
}
208
209/* Set bits in sram sw entry */
210static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
211				    u32 val)
212{
213	pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
214}
215
216/* Clear bits in sram sw entry */
217static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
218				      u32 val)
219{
220	pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
221}
222
/* Update ri bits in sram sw entry. For every bit selected by @mask the
 * value from @bits is written and the matching RI control bit is set so
 * the hw applies it.
 */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
						1);
		else
			mvpp2_prs_sram_bits_clear(pe,
						  MVPP2_PRS_SRAM_RI_OFFS + i,
						  1);

		/* Mark this ri bit as valid in the control field */
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}
244
/* Obtain ri bits from sram sw entry; the whole word holds result info */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
}
250
/* Update ai bits in sram sw entry. For every bit selected by @mask the
 * value from @bits is written and the matching AI control bit is set so
 * the hw applies it.
 */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
						1);
		else
			mvpp2_prs_sram_bits_clear(pe,
						  MVPP2_PRS_SRAM_AI_OFFS + i,
						  1);

		/* Mark this ai bit as valid in the control field */
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}
272
/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	/* ai is stored on bits 90->97; so it spreads across two u32.
	 * Combine the high bits of one word with the low bits of the next.
	 */
	int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);

	bits = (pe->sram[ai_off] >> ai_shift) |
	       (pe->sram[ai_off + 1] << (32 - ai_shift));

	return bits;
}
286
287/* In sram sw entry set lookup ID field of the tcam key to be used in the next
288 * lookup interation
289 */
290static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
291				       unsigned int lu)
292{
293	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
294
295	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
296				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
297	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
298}
299
/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier.
 * @shift may be negative; it is stored as sign bit + magnitude.
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value (shift field sits at the start of a word, so a plain
	 * OR into the word is sufficient)
	 */
	pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
		shift & MVPP2_PRS_SRAM_SHIFT_MASK;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
326
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier.
 * @type selects what the offset points at (e.g. an L4 header),
 * @offset may be negative (stored as sign bit + magnitude),
 * @op selects the offset operation.
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				offset & MVPP2_PRS_SRAM_UDF_MASK);

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
362
/* Find the parser entry for flow @flow.
 * Returns the tid of the matching flow entry, or -ENOENT if none exists.
 */
static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		bits = mvpp2_prs_sram_ai_get(&pe);

		/* Sram store classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return tid;
	}

	return -ENOENT;
}
387
388/* Return first free tcam index, seeking from start to end */
389static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
390				     unsigned char end)
391{
392	int tid;
393
394	if (start > end)
395		swap(start, end);
396
397	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
398		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
399
400	for (tid = start; tid <= end; tid++) {
401		if (!priv->prs_shadow[tid].valid)
402			return tid;
403	}
404
405	return -EINVAL;
406}
407
/* Drop flow control pause frames on all ports by matching the reserved
 * MAC control destination address 01:80:C2:00:00:01 and setting the
 * drop bit in the result info.
 */
static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
{
	unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
	struct mvpp2_prs_entry pe;
	unsigned int len;

	memset(&pe, 0, sizeof(pe));

	/* For all ports - drop flow control frames */
	pe.index = MVPP2_PE_FC_DROP;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Set match on DA, all six bytes fully enabled */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);

	/* Finished: go straight to flowid generation */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Match on all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);
}
439
/* Enable/disable dropping all mac da's for @port. The single shared
 * drop-all entry is created on first use with no ports enabled; later
 * calls only toggle @port in its port map.
 */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exist - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
473
/* Set port to unicast or multicast promiscuous mode.
 * One shared entry per cast type accepts unknown UC or MC packets; it is
 * created on first use with no ports enabled, and later calls only
 * toggle @port in its port map.
 */
void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
			       enum mvpp2_prs_l2_cast l2_cast, bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char cast_match;
	unsigned int ri;
	int tid;

	/* Pick match value, entry index and result info per cast type */
	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
		cast_match = MVPP2_PRS_UCAST_VAL;
		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_UCAST;
	} else {
		cast_match = MVPP2_PRS_MCAST_VAL;
		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_MCAST;
	}

	/* promiscuous mode - Accept unknown unicast or multicast packets */
	if (priv->prs_shadow[tid].valid) {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = tid;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);

		/* Match UC or MC addresses */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
					     MVPP2_PRS_CAST_MASK);

		/* Shift to ethertype, past both MAC addresses */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
527
/* Set entry for dsa packets. One shared entry exists per combination of
 * @tagged (DSA tag carries a vlan tag) and @extend (EDSA, 8-byte tag, vs
 * DSA, 4-byte tag); it is created on first use with no ports enabled and
 * later calls only toggle @port in its port map.
 */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exist - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
					     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
					     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);

			/* Set ai bits for next iteration: flag EDSA so the
			 * VID stage knows the tag size
			 */
			if (extend)
				mvpp2_prs_sram_ai_update(&pe, 1,
							MVPP2_PRS_SRAM_AI_MASK);
			else
				mvpp2_prs_sram_ai_update(&pe, 0,
							MVPP2_PRS_SRAM_AI_MASK);

			/* Set result info bits to 'single vlan' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
			/* If packet is tagged continue check vid filtering */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
		} else {
			/* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/
			mvpp2_prs_sram_shift_set(&pe, shift,
					MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
594
/* Set entry for dsa ethertype: matches packets where the (E)DSA tag is
 * preceded by the EDSA ethertype. One shared entry per @tagged/@extend
 * combination; created on first use, later calls only toggle @port.
 */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exist - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype: EDSA ethertype followed by a zero word */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag*/
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
662
/* Search for an existing single/triple vlan entry matching @tpid and
 * @ai. Returns its tid, or -ENOENT if none exists.
 */
static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(&pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(&pe);
		/* Clear double vlan bit so only the plain ai is compared */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		/* Only single/triple vlan entries qualify; double vlan
		 * entries are handled by mvpp2_prs_double_vlan_find()
		 */
		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return tid;
	}

	return -ENOENT;
}
703
/* Add/update single/triple vlan entry for @tpid/@ai, enabling it for
 * the ports in @port_map. New entries are allocated from the top of the
 * free range so that they stay above every double vlan entry (the hw
 * matches lower tids first). Returns 0 on success, a negative errno on
 * allocation or ordering failure.
 */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry pe;
	int tid_aux, tid;
	int ret = 0;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (tid < 0) {
		/* Create new tcam entry, seeking from the top down */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		/* The new entry must sit above all double vlan entries */
		if (tid <= tid_aux)
			return -EINVAL;

		memset(&pe, 0, sizeof(pe));
		pe.index = tid;
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);

		mvpp2_prs_match_etype(&pe, 0, tpid);

		/* VLAN tag detected, proceed with VID filtering */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			/* Triple vlan: also require the double vlan ai bit */
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);

	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}
775
776/* Get first free double vlan ai number */
777static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
778{
779	int i;
780
781	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
782		if (!priv->prs_double_vlans[i])
783			return i;
784	}
785
786	return -EINVAL;
787}
788
/* Search for an existing double vlan entry matching outer tpid @tpid1
 * and inner tpid @tpid2. Returns its tid, or -ENOENT if none exists.
 */
static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
				      unsigned short tpid2)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		/* Outer tpid at offset 0, inner at offset 4 (past the
		 * outer tag's TCI)
		 */
		match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
			mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return tid;
	}

	return -ENOENT;
}
821
/* Add or update a double vlan entry for @tpid1 (outer) / @tpid2 (inner),
 * enabling it for the ports in @port_map. New entries are allocated from
 * the bottom of the free range so they stay below every single/triple
 * vlan entry (the hw matches lower tids first). Returns 0 on success,
 * a negative errno on allocation or ordering failure.
 */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	int tid_aux, tid, ai, ret = 0;
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (tid < 0) {
		/* Create new tcam entry, seeking from the bottom up */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
				MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0)
			return ai;

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		/* The new entry must sit below all single/triple entries */
		if (tid >= tid_aux)
			return -ERANGE;

		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		pe.index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(&pe, 0, tpid1);
		mvpp2_prs_match_etype(&pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		/* Shift 4 bytes - skip outer vlan tag */
		mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		/* Pass the allocated ai to the next (inner vlan) lookup */
		mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);
	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}
895
/* IPv4 header parsing for fragmentation and L4 offset.
 * Creates two entries for @proto: one for non-fragmented packets
 * (fragment field bytes must be zero) and one catch-all for fragments,
 * which adds MVPP2_PRS_RI_IP_FRAG_TRUE to the result info.
 * Only TCP, UDP and IGMP are supported; returns -EINVAL otherwise,
 * or a negative errno when no free tcam entry is available.
 */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Not fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	/* Bytes 2-3 (fragment flags/offset field relative to the current
	 * lookup offset) must be zero for a non-fragmented packet
	 */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK);

	/* Byte 5 holds the IP protocol at the current lookup offset */
	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Fragmented packet: reuse the sw entry above, changing only the
	 * index, the result info and the fragment-field match
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	/* Stop matching on the fragment field: accept any value */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
966
/* Add an entry flagging IPv4 L3 multicast or broadcast destinations.
 * Multicast matches the first DIP byte against the multicast prefix;
 * broadcast matches all four DIP bytes as all-ones. Returns -EINVAL for
 * any other cast type, or a negative errno when no free tcam entry is
 * available.
 */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case  MVPP2_PRS_L3_BROAD_CAST:
		/* Broadcast DIP is 255.255.255.255: match every byte fully */
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1017
/* Set an entry for protocol @proto carried over IPv6 (no extension
 * headers, per the no-ext ai bit), updating result info with @ri/@ri_mask
 * and recording the L4 offset. Only TCP, UDP, ICMPv6 and IPIP are
 * supported; returns -EINVAL otherwise, or a negative errno when no free
 * tcam entry is available.
 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	/* Set L4 offset relative to the current lookup position */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Byte 0 at the current lookup offset is the next-header field */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Write HW */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1058
/* mvpp2_prs_ip6_cast - add a parser entry matching IPv6 L3 multicast
 * destination addresses (only MVPP2_PRS_L3_MULTI_CAST is supported;
 * IPv6 has no broadcast).
 * @priv: driver private data
 * @l3_cast: must be MVPP2_PRS_L3_MULTI_CAST
 *
 * Returns 0 on success, -EINVAL for any other cast type, or a negative
 * value if no free TCAM entry is available.
 */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Not finished yet: re-enter the IPv6 lookup for L4 parsing */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Match the first DIP byte (ff00::/8 prefix) */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1098
/* mvpp2_prs_hw_port_init - per-port parser configuration.
 * @priv: driver private data
 * @port: port number to configure
 * @lu_first: initial lookup ID used for the first iteration
 * @lu_max: maximum number of lookup iterations allowed for this port
 * @offset: initial header-extraction offset (bytes) for the first lookup
 *
 * Each setting lives in a shared register holding fields for several
 * ports, hence the read-modify-write sequences below.
 */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}
1125
/* mvpp2_prs_def_flow_init - install one default flow entry per port.
 * @priv: driver private data
 *
 * Entries are placed at fixed indices counting down from
 * MVPP2_PE_FIRST_DEFAULT_FLOW, with all ports masked out; they serve as
 * placeholders whose AI field carries the per-port flow ID.
 */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID*/
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}
1149
/* mvpp2_prs_mh_init - set the default entry for the Marvell Header field.
 * @priv: driver private data
 *
 * Skips the MH bytes at the start of every packet and hands parsing over
 * to the MAC (DA) lookup.
 */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	/* Skip over the Marvell Header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}
1170
/* Set default entries (place holders) for promiscuous, non-promiscuous and
 * multicast MAC addresses.
 *
 * The non-promiscuous default is a catch-all MAC entry that drops packets
 * not matched by any more specific MAC entry.
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Create dummy entries for drop all and promiscuous modes */
	mvpp2_prs_drop_fc(priv);
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
}
1202
/* mvpp2_prs_dsa_init - set default entries for the various DSA/EDSA
 * tag formats (tagged/untagged, with and without a DSA ethertype).
 * @priv: driver private data
 *
 * Most entries are placeholders (disabled, port 0) that are enabled
 * per-port later; the final entry is the fallback when no DSA/EDSA tag
 * is found.
 */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* None tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* None tagged EDSA ethertype entry - place holder*/
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - place holder*/
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* NOTE(review): shadow lu is recorded as MVPP2_PRS_LU_MAC although
	 * this entry's TCAM lu is MVPP2_PRS_LU_DSA — looks like a copy-paste;
	 * confirm against shadow-table consumers before changing.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}
1256
/* mvpp2_prs_vid_init - install the two default entries of the VID lookup.
 * @priv: driver private data
 *
 * One default entry handles the regular (DSA) case and skips 4 bytes of
 * VLAN tag; the second handles extended DSA (selected via the EDSA AI
 * bit) and skips 8 bytes. Both then continue with the L2 lookup.
 */
static void mvpp2_prs_vid_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Match only when the EDSA AI bit is clear */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 4 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vid entry for extended DSA*/
	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Match only when the EDSA AI bit is set */
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
				 MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
1312
/* mvpp2_prs_etype_init - install L2 lookup entries matching the basic
 * ethertypes: PPPoE, ARP, LBTD, IPv4 (with/without options), IPv6, plus
 * a catch-all for unknown ethertypes.
 * @priv: driver private data
 *
 * Returns 0 on success or a negative value if the dynamic TCAM range
 * runs out of free entries.
 */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	/* Skip the PPPoE header and continue with the PPPoE lookup */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	/* Additionally require version 4 and IHL == 5 (no options) */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/* Deliberately reuse the previous entry: keeps the ETH_P_IP match
	 * and SRAM setup, only the IHL constraint and result info change.
	 */
	pe.index = tid;

	/* Relax the IHL match: only the version nibble must be 4 */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1529
1530/* Configure vlan entries and detect up to 2 successive VLAN tags.
1531 * Possible options:
1532 * 0x8100, 0x88A8
1533 * 0x8100, 0x8100
1534 * 0x8100
1535 * 0x88A8
1536 */
1537static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
1538{
1539	struct mvpp2_prs_entry pe;
1540	int err;
1541
1542	priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
1543					      MVPP2_PRS_DBL_VLANS_MAX,
1544					      GFP_KERNEL);
1545	if (!priv->prs_double_vlans)
1546		return -ENOMEM;
1547
1548	/* Double VLAN: 0x8100, 0x88A8 */
1549	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
1550					MVPP2_PRS_PORT_MASK);
1551	if (err)
1552		return err;
1553
1554	/* Double VLAN: 0x8100, 0x8100 */
1555	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
1556					MVPP2_PRS_PORT_MASK);
1557	if (err)
1558		return err;
1559
1560	/* Single VLAN: 0x88a8 */
1561	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
1562				 MVPP2_PRS_PORT_MASK);
1563	if (err)
1564		return err;
1565
1566	/* Single VLAN: 0x8100 */
1567	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
1568				 MVPP2_PRS_PORT_MASK);
1569	if (err)
1570		return err;
1571
1572	/* Set default double vlan entry */
1573	memset(&pe, 0, sizeof(pe));
1574	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1575	pe.index = MVPP2_PE_VLAN_DBL;
1576
1577	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
1578
1579	/* Clear ai for next iterations */
1580	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1581	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
1582				 MVPP2_PRS_RI_VLAN_MASK);
1583
1584	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
1585				 MVPP2_PRS_DBL_VLAN_AI_BIT);
1586	/* Unmask all ports */
1587	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1588
1589	/* Update shadow table and hw entry */
1590	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
1591	mvpp2_prs_hw_write(priv, &pe);
1592
1593	/* Set default vlan none entry */
1594	memset(&pe, 0, sizeof(pe));
1595	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1596	pe.index = MVPP2_PE_VLAN_NONE;
1597
1598	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1599	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1600				 MVPP2_PRS_RI_VLAN_MASK);
1601
1602	/* Unmask all ports */
1603	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1604
1605	/* Update shadow table and hw entry */
1606	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
1607	mvpp2_prs_hw_write(priv, &pe);
1608
1609	return 0;
1610}
1611
/* mvpp2_prs_pppoe_init - install PPPoE lookup entries for IPv4 (with and
 * without options), IPv6 and non-IP payloads.
 * @priv: driver private data
 *
 * Returns 0 on success or a negative value if no free TCAM entry is
 * available.
 */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/* Deliberately reuse the previous entry (keeps the PPP_IP match
	 * and SRAM setup); only tighten the IHL match and the result info.
	 */
	pe.index = tid;

	/* Additionally require version 4 and IHL == 5 (no options) */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Jump to DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1723
/* mvpp2_prs_ip4_init - install the IPv4 lookup entries: TCP/UDP/IGMP,
 * broadcast/multicast addresses, and the default (unknown protocol and
 * unknown unicast) entries.
 * @priv: driver private data
 *
 * Returns 0 on success or a negative error from the entry helpers.
 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	/* IGMP is flagged as CPU-bound special traffic */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	/* Flag the next iteration as the DIP lookup */
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	/* Match only the first (protocol) iteration */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	/* Match only the DIP iteration */
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1806
/* mvpp2_prs_ip6_init - install the IPv6 lookup entries: TCP/UDP/ICMPv6,
 * DS-Lite (IPIP), multicast, hop-limit check, and the default (unknown
 * protocol / extension / unicast) entries.
 * @priv: driver private data
 *
 * Returns 0 on success or a negative error from the entry helpers.
 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	/* ICMPv6 is flagged as CPU-bound special traffic */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Hop limit 0: mark unknown L3 and drop */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	/* Match hop-limit byte == 0 */
	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	/* NOTE(review): shadow lu recorded as MVPP2_PRS_LU_IP4 although the
	 * TCAM lu is MVPP2_PRS_LU_IP6 — looks like a copy-paste; confirm
	 * against shadow-table consumers before changing.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relatively to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	/* NOTE(review): shadow lu MVPP2_PRS_LU_IP4 for an IP6 entry —
	 * see note above; confirm before changing.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	/* NOTE(review): shadow lu MVPP2_PRS_LU_IP4 for an IP6 entry —
	 * see note above; confirm before changing.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1941
1942/* Find tcam entry with matched pair <vid,port> */
1943static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
1944{
1945	unsigned char byte[2], enable[2];
1946	struct mvpp2_prs_entry pe;
1947	u16 rvid, rmask;
1948	int tid;
1949
1950	/* Go through the all entries with MVPP2_PRS_LU_VID */
1951	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
1952	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
1953		if (!port->priv->prs_shadow[tid].valid ||
1954		    port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
1955			continue;
1956
1957		mvpp2_prs_init_from_hw(port->priv, &pe, tid);
1958
1959		mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
1960		mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
1961
1962		rvid = ((byte[0] & 0xf) << 8) + byte[1];
1963		rmask = ((enable[0] & 0xf) << 8) + enable[1];
1964
1965		if (rvid != vid || rmask != mask)
1966			continue;
1967
1968		return tid;
1969	}
1970
1971	return -ENOENT;
1972}
1973
/* Write parser entry for VID filtering: accept VLAN id @vid on this port.
 * Reuses an existing <vid, port> entry when present, otherwise allocates a
 * new one in the port's private VID filter window.
 */
int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
{
	/* First TCAM index of this port's private VID filter window */
	unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
				 port->id * MVPP2_PRS_VLAN_FILT_MAX;
	unsigned int mask = 0xfff, reg_val, shift;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if entry with this <vid,port> already exist */
	tid = mvpp2_prs_vid_range_find(port, vid, mask);

	/* Skip 8 bytes past an extended DSA tag, 4 past a plain VLAN tag */
	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	/* No such entry */
	if (tid < 0) {

		/* Go through all entries from first to last in vlan range */
		tid = mvpp2_prs_tcam_first_free(priv, vid_start,
						vid_start +
						MVPP2_PRS_VLAN_FILT_MAX_ENTRY);

		/* There isn't room for a new VID filter */
		if (tid < 0)
			return tid;

		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		/* Entry already exists: re-read it and just add this port */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Enable the current port */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Set match on VID */
	mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
2037
/* Remove the parser entry that accepts VLAN id @vid on this port */
void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
{
	struct mvpp2 *priv = port->priv;
	int tid;

	/* Scan TCAM and see if entry with this <vid,port> already exist */
	tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);

	/* No such entry */
	if (tid < 0)
		return;

	/* Invalidate the hw entry and mark it free in the shadow table */
	mvpp2_prs_hw_inv(priv, tid);
	priv->prs_shadow[tid].valid = false;
}
2054
/* Remove all existing VID filters on this port */
void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	int tid;

	/* Invalidate every valid entry in this port's VID filter range */
	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
		if (priv->prs_shadow[tid].valid) {
			mvpp2_prs_hw_inv(priv, tid);
			priv->prs_shadow[tid].valid = false;
		}
	}
}
2069
/* Remove VID filtering entry for this port: invalidate the guard entry
 * installed by mvpp2_prs_vid_enable_filtering().
 */
void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;

	/* Invalidate the guard entry */
	mvpp2_prs_hw_inv(priv, tid);

	priv->prs_shadow[tid].valid = false;
}
2081
/* Add guard entry that drops packets when no VID is matched on this port.
 * The entry is written at the port's fixed "default" VID slot; it is a
 * no-op if that slot is already valid.
 */
void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;
	unsigned int reg_val, shift;
	struct mvpp2_prs_entry pe;

	/* The guard entry is already installed */
	if (priv->prs_shadow[tid].valid)
		return;

	memset(&pe, 0, sizeof(pe));

	pe.index = tid;

	/* Skip 8 bytes past an extended DSA tag, 4 past a plain VLAN tag */
	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, 0);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Drop VLAN packets that don't belong to any VIDs on this port */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
2128
/* Parser default initialization: enable the TCAM, wipe all entries,
 * allocate the shadow table and install the default lookup chains.
 * Returns 0 on success or a negative errno.
 */
int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	/* Shadow table mirrors TCAM occupancy; devm-managed, freed with dev */
	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(*priv->prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	mvpp2_prs_vid_init(priv);

	/* Protocol-specific chains can fail (they may allocate entries) */
	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}
2195
2196/* Compare MAC DA with tcam entry data */
2197static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2198				       const u8 *da, unsigned char *mask)
2199{
2200	unsigned char tcam_byte, tcam_mask;
2201	int index;
2202
2203	for (index = 0; index < ETH_ALEN; index++) {
2204		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2205		if (tcam_mask != mask[index])
2206			return false;
2207
2208		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2209			return false;
2210	}
2211
2212	return true;
2213}
2214
/* Find tcam entry with matched pair <MAC DA, port>.
 * Returns the TCAM index, or -ENOENT when no entry matches.
 */
static int
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all the entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_MAC_RANGE_START;
	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);

		/* Both the DA/mask and the full port map must match */
		if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
		    entry_pmap == pmap)
			return tid;
	}

	return -ENOENT;
}
2243
/* Update parser's mac da entry: add (@add == true) or remove this port
 * from the entry matching @da. The entry is created on first add and
 * invalidated once no ports reference it. Returns 0 or a negative errno.
 */
int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
{
	/* Exact match on all six DA bytes */
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mvpp2 *priv = port->priv;
	unsigned int pmap, len, ri;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
	tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
					  MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (tid < 0) {
		/* Nothing to remove */
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Go through the all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv,
						MVPP2_PE_MAC_RANGE_START,
						MVPP2_PE_MAC_RANGE_END);
		if (tid < 0)
			return tid;

		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		/* Entry exists: re-read it and update the port map below */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(&pe);
	if (pmap == 0) {
		/* A fresh add ending with an empty map should not happen */
		if (add)
			return -EINVAL;

		mvpp2_prs_hw_inv(priv, pe.index);
		priv->prs_shadow[pe.index].valid = false;
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_BCAST;
	} else if (is_multicast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_MCAST;
	} else {
		ri = MVPP2_PRS_RI_L2_UCAST;

		/* Flag the port's own address */
		if (ether_addr_equal(da, port->dev->dev_addr))
			ri |= MVPP2_PRS_RI_MAC_ME_MASK;
	}

	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
2332
2333int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
2334{
2335	struct mvpp2_port *port = netdev_priv(dev);
2336	int err;
2337
2338	/* Remove old parser entry */
2339	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
2340	if (err)
2341		return err;
2342
2343	/* Add new parser entry */
2344	err = mvpp2_prs_mac_da_accept(port, da, true);
2345	if (err)
2346		return err;
2347
2348	/* Set addr in the device */
2349	ether_addr_copy(dev->dev_addr, da);
2350
2351	return 0;
2352}
2353
/* Remove every default-UDF MAC DA filter active on this port, except the
 * broadcast address and the port's own address.
 */
void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	unsigned long pmap;
	int index, tid;

	for (tid = MVPP2_PE_MAC_RANGE_START;
	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		pmap = mvpp2_prs_tcam_port_map_get(&pe);

		/* We only want entries active on this port */
		if (!test_bit(port->id, &pmap))
			continue;

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		/* Special cases : Don't remove broadcast and port's own
		 * address
		 */
		if (is_broadcast_ether_addr(da) ||
		    ether_addr_equal(da, port->dev->dev_addr))
			continue;

		/* Remove entry from TCAM */
		mvpp2_prs_mac_da_accept(port, da, false);
	}
}
2394
/* Configure the DSA tagging mode of @port: enroll it in the EDSA or DSA
 * parser entries (tagged and untagged) and remove it from the other set.
 * Returns 0, or -EINVAL for an out-of-range @type.
 */
int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		/* Unknown but in-range types are silently accepted */
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}
2444
/* Add a parser flow entry: packets whose result-info matches @ri under
 * @ri_mask are assigned flow id @flow. Returns 0 or a negative errno if
 * no free TCAM entry is available.
 */
int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
{
	struct mvpp2_prs_entry pe;
	u8 *ri_byte, *ri_byte_mask;
	int tid, i;

	memset(&pe, 0, sizeof(pe));

	/* Flow entries live at the tail of the TCAM: search last to first */
	tid = mvpp2_prs_tcam_first_free(priv,
					MVPP2_PE_LAST_FREE_TID,
					MVPP2_PE_FIRST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* View ri/ri_mask as raw bytes so they can be loaded into the TCAM
	 * one byte at a time (byte order follows host endianness here).
	 */
	ri_byte = (u8 *)&ri;
	ri_byte_mask = (u8 *)&ri_mask;

	/* Set flow ID and mark the lookup as done */
	mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

	/* Match on the requested result-info bits */
	for (i = 0; i < 4; i++) {
		mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
					     ri_byte_mask[i]);
	}

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
2479
/* Set prs flow for the port: install (or update) the flow entry whose
 * flow id equals the port id, restricted to this port only.
 * Returns 0 or a negative errno if no free TCAM entry is available.
 */
int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_flow_find(port->priv, port->id);

	/* Such entry not exist */
	if (tid < 0) {
		/* Go through all the entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
					       MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe.index = tid;

		/* Set flow ID*/
		mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
	} else {
		/* Entry exists: re-read it and refresh the port map below */
		mvpp2_prs_init_from_hw(port->priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, &pe);

	return 0;
}
2517
2518int mvpp2_prs_hits(struct mvpp2 *priv, int index)
2519{
2520	u32 val;
2521
2522	if (index > MVPP2_PRS_TCAM_SRAM_SIZE)
2523		return -EINVAL;
2524
2525	mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
2526
2527	val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
2528
2529	val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
2530
2531	return val;
2532}
2533