1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like
4 *					        implementation
5 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
6 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
7 */
8
9#include <linux/kernel.h>
10#include <linux/types.h>
11#include <linux/spinlock.h>
12#include <linux/hashtable.h>
13#include <linux/crc32.h>
14#include <linux/netdevice.h>
15#include <linux/inetdevice.h>
16#include <linux/if_vlan.h>
17#include <linux/if_bridge.h>
18#include <net/neighbour.h>
19#include <net/switchdev.h>
20#include <net/ip_fib.h>
21#include <net/nexthop.h>
22#include <net/arp.h>
23
24#include "rocker.h"
25#include "rocker_tlv.h"
26
/* Key of a flow table entry: table id and priority plus the per-table
 * match/action fields kept in the union.  The raw bytes of this struct
 * are CRC-hashed (see key_crc32), so only the union arm selected by
 * tbl_id is meaningful for a given entry.
 */
struct ofdpa_flow_tbl_key {
	u32 priority;				/* precedence within one table */
	enum rocker_of_dpa_table_id tbl_id;	/* selects the union arm below */
	union {
		struct {	/* ingress port table */
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {	/* VLAN table */
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;		/* emit new_vlan_id rewrite */
			__be16 new_vlan_id;
		} vlan;
		struct {	/* termination MAC table */
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {	/* unicast (IPv4) routing table */
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {	/* bridging table */
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			/* has_* flags say whether the MAC/mask TLVs are sent */
			int has_eth_dst;
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {	/* ACL policy table */
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;	/* DSCP in low 6 bits, ECN in top 2 */
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};
91
/* One installed flow, hashed by key_crc32 into ofdpa->flow_tbl. */
struct ofdpa_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;	/* ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_{ADD,MOD,DEL} */
	u64 cookie;	/* id the device uses to refer to this flow */
	struct ofdpa_flow_tbl_key key;
	size_t key_len;	/* valid bytes of key; 0 means the whole struct */
	u32 key_crc32; /* key */
	struct fib_info *fi;	/* set only for ucast4 routing flows */
};
101
/* One installed group table entry, hashed by group_id. */
struct ofdpa_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;	/* ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_* */
	u32 group_id; /* key */
	u16 group_count;	/* number of ids in group_ids[] */
	u32 *group_ids;		/* member groups (flood/mcast group types) */
	union {	/* per-group-type parameters, chosen by group_id's type */
		struct {
			u8 pop_vlan;	/* sent as ROCKER_TLV_OF_DPA_POP_VLAN */
		} l2_interface;
		struct {
			/* zero MAC / zero VLAN fields are not sent */
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;	/* lower-level group to chain to */
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;	/* lower-level group to chain to */
		} l3_unicast;
	};
};
127
/* FDB entry keyed by (port, MAC, VLAN), hashed by key_crc32. */
struct ofdpa_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;		/* learned from traffic vs. statically added */
	unsigned long touched;	/* last-activity stamp, presumably jiffies,
				 * compared against ageing_time - confirm
				 */
	struct ofdpa_fdb_tbl_key {
		struct ofdpa_port *ofdpa_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
139
/* Maps a netdev ifindex to a driver-allocated internal VLAN id. */
struct ofdpa_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;	/* users sharing this mapping */
	__be16 vlan_id;	/* from the internal VLAN range (see defines below) */
};
146
/* IPv4 neighbour entry, keyed by ip_addr. */
struct ofdpa_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;	/* allocated from ofdpa->neigh_tbl_next_index */
	u8 eth_dst[ETH_ALEN];	/* neighbour's MAC address */
	bool ttl_check;		/* mirrors l3_unicast group's ttl_check */
};
156
/* Classes of control traffic the port can enable; indexes ctrls[]
 * in struct ofdpa_port.  Presumably each maps to ACL/term-MAC trap
 * rules built from the MAC constants below - confirm against the
 * ctrl table definition elsewhere in this file.
 */
enum {
	OFDPA_CTRL_LINK_LOCAL_MCAST,	/* 01:80:c2:00:00:0x (ll_mac/ll_mask) */
	OFDPA_CTRL_LOCAL_ARP,
	OFDPA_CTRL_IPV4_MCAST,		/* 01:00:5e:... (ipv4_mcast) */
	OFDPA_CTRL_IPV6_MCAST,		/* 33:33:... (ipv6_mcast) */
	OFDPA_CTRL_DFLT_BRIDGING,
	OFDPA_CTRL_DFLT_OVS,
	OFDPA_CTRL_MAX,			/* array-size sentinel */
};
166
/* Internal VLAN ids represent untagged traffic; they are handed out
 * from a private range of OFDPA_N_INTERNAL_VLANS ids starting at
 * OFDPA_INTERNAL_VLAN_ID_BASE, tracked in internal_vlan_bitmap.
 */
#define OFDPA_INTERNAL_VLAN_ID_BASE	0x0f00
#define OFDPA_N_INTERNAL_VLANS		255
#define OFDPA_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define OFDPA_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
#define OFDPA_UNTAGGED_VID 0
172
/* Per-switch private state: shadow copies of the device's flow,
 * group, FDB, internal-VLAN and neighbour tables, each guarded by
 * its own irq-safe spinlock.
 */
struct ofdpa {
	struct rocker *rocker;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;	/* next flow cookie to hand out */
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;	/* ages out learned FDB entries */
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;	/* next neighbour index to hand out */
	unsigned long ageing_time;
	bool fib_aborted;	/* FIB offload gave up; see fib handling */
};
192
/* Per-port private state. */
struct ofdpa_port {
	struct ofdpa *ofdpa;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u32 pport;	/* physical port number used in flow matches */
	struct net_device *bridge_dev;	/* bridge/OVS master, NULL if none */
	__be16 internal_vlan_id;	/* stands in for untagged (VID 0) */
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[OFDPA_CTRL_MAX];	/* which OFDPA_CTRL_* are enabled */
	unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];
};
206
/* Well-known MAC addresses and masks used to build control-traffic
 * and ACL matches (link-local, IPv4/IPv6 multicast prefixes).
 */
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
216
217/* Rocker priority levels for flow table entries.  Higher
218 * priority match takes precedence over lower priority match.
219 */
220
enum {
	/* Priorities are compared only between entries of the same
	 * table, so equal values across different tables are fine.
	 */
	OFDPA_PRIORITY_UNKNOWN = 0,
	OFDPA_PRIORITY_IG_PORT = 1,
	OFDPA_PRIORITY_VLAN = 1,
	OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
	OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
	/* bridging: specific MAC > wildcarded default > exact default */
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_VLAN = 3,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_TENANT = 3,
	/* ACL: control traffic > normal matches > default (mcast) */
	OFDPA_PRIORITY_ACL_CTRL = 3,
	OFDPA_PRIORITY_ACL_NORMAL = 2,
	OFDPA_PRIORITY_ACL_DFLT = 1,
};
237
238static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
239{
240	u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
241	u16 end = 0xffe;
242	u16 _vlan_id = ntohs(vlan_id);
243
244	return (_vlan_id >= start && _vlan_id <= end);
245}
246
247static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
248				     u16 vid, bool *pop_vlan)
249{
250	__be16 vlan_id;
251
252	if (pop_vlan)
253		*pop_vlan = false;
254	vlan_id = htons(vid);
255	if (!vlan_id) {
256		vlan_id = ofdpa_port->internal_vlan_id;
257		if (pop_vlan)
258			*pop_vlan = true;
259	}
260
261	return vlan_id;
262}
263
264static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
265				  __be16 vlan_id)
266{
267	if (ofdpa_vlan_id_is_internal(vlan_id))
268		return 0;
269
270	return ntohs(vlan_id);
271}
272
273static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
274				const char *kind)
275{
276	return ofdpa_port->bridge_dev &&
277		!strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
278}
279
/* Is the port enslaved to a Linux bridge? */
static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "bridge");
}
284
/* Is the port enslaved to an Open vSwitch datapath? */
static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
}
289
/* Modifier flags threaded through flow/group/FDB table operations. */
#define OFDPA_OP_FLAG_REMOVE		BIT(0)	/* delete instead of add */
#define OFDPA_OP_FLAG_NOWAIT		BIT(1)	/* don't wait for cmd completion */
#define OFDPA_OP_FLAG_LEARNED		BIT(2)	/* presumably: entry came from HW learning - confirm at use sites */
#define OFDPA_OP_FLAG_REFRESH		BIT(3)	/* presumably: refresh existing entry - confirm at use sites */
294
295static bool ofdpa_flags_nowait(int flags)
296{
297	return flags & OFDPA_OP_FLAG_NOWAIT;
298}
299
300/*************************************************************
301 * Flow, group, FDB, internal VLAN and neigh command prepares
302 *************************************************************/
303
304static int
305ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
306			       const struct ofdpa_flow_tbl_entry *entry)
307{
308	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
309			       entry->key.ig_port.in_pport))
310		return -EMSGSIZE;
311	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
312			       entry->key.ig_port.in_pport_mask))
313		return -EMSGSIZE;
314	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
315			       entry->key.ig_port.goto_tbl))
316		return -EMSGSIZE;
317
318	return 0;
319}
320
321static int
322ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
323			    const struct ofdpa_flow_tbl_entry *entry)
324{
325	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
326			       entry->key.vlan.in_pport))
327		return -EMSGSIZE;
328	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
329				entry->key.vlan.vlan_id))
330		return -EMSGSIZE;
331	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
332				entry->key.vlan.vlan_id_mask))
333		return -EMSGSIZE;
334	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
335			       entry->key.vlan.goto_tbl))
336		return -EMSGSIZE;
337	if (entry->key.vlan.untagged &&
338	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
339				entry->key.vlan.new_vlan_id))
340		return -EMSGSIZE;
341
342	return 0;
343}
344
345static int
346ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
347				const struct ofdpa_flow_tbl_entry *entry)
348{
349	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
350			       entry->key.term_mac.in_pport))
351		return -EMSGSIZE;
352	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
353			       entry->key.term_mac.in_pport_mask))
354		return -EMSGSIZE;
355	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
356				entry->key.term_mac.eth_type))
357		return -EMSGSIZE;
358	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
359			   ETH_ALEN, entry->key.term_mac.eth_dst))
360		return -EMSGSIZE;
361	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
362			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
363		return -EMSGSIZE;
364	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
365				entry->key.term_mac.vlan_id))
366		return -EMSGSIZE;
367	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
368				entry->key.term_mac.vlan_id_mask))
369		return -EMSGSIZE;
370	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
371			       entry->key.term_mac.goto_tbl))
372		return -EMSGSIZE;
373	if (entry->key.term_mac.copy_to_cpu &&
374	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
375			      entry->key.term_mac.copy_to_cpu))
376		return -EMSGSIZE;
377
378	return 0;
379}
380
381static int
382ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
383				     const struct ofdpa_flow_tbl_entry *entry)
384{
385	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
386				entry->key.ucast_routing.eth_type))
387		return -EMSGSIZE;
388	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
389				entry->key.ucast_routing.dst4))
390		return -EMSGSIZE;
391	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
392				entry->key.ucast_routing.dst4_mask))
393		return -EMSGSIZE;
394	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
395			       entry->key.ucast_routing.goto_tbl))
396		return -EMSGSIZE;
397	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
398			       entry->key.ucast_routing.group_id))
399		return -EMSGSIZE;
400
401	return 0;
402}
403
404static int
405ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
406			      const struct ofdpa_flow_tbl_entry *entry)
407{
408	if (entry->key.bridge.has_eth_dst &&
409	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
410			   ETH_ALEN, entry->key.bridge.eth_dst))
411		return -EMSGSIZE;
412	if (entry->key.bridge.has_eth_dst_mask &&
413	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
414			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
415		return -EMSGSIZE;
416	if (entry->key.bridge.vlan_id &&
417	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
418				entry->key.bridge.vlan_id))
419		return -EMSGSIZE;
420	if (entry->key.bridge.tunnel_id &&
421	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
422			       entry->key.bridge.tunnel_id))
423		return -EMSGSIZE;
424	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
425			       entry->key.bridge.goto_tbl))
426		return -EMSGSIZE;
427	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
428			       entry->key.bridge.group_id))
429		return -EMSGSIZE;
430	if (entry->key.bridge.copy_to_cpu &&
431	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
432			      entry->key.bridge.copy_to_cpu))
433		return -EMSGSIZE;
434
435	return 0;
436}
437
/* Append the ACL policy table match/action TLVs for @entry.  For IP
 * traffic the ToS byte is split into its DSCP (low 6 bits) and ECN
 * (top 2 bits) TLVs.  Returns -EMSGSIZE on descriptor overflow.
 */
static int
ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			   const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	/* IP-header matches only make sense for IPv4/IPv6 ethertypes */
	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		/* DSCP: low 6 bits of the ToS byte */
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		/* ECN: top 2 bits of the ToS byte */
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	/* ROCKER_GROUP_NONE means "no forward action" (e.g. drop rules) -
	 * NOTE(review): inferred from the name; confirm against rocker.h
	 */
	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
504
/* rocker_cmd_exec() prepare callback: serialize a flow ADD/MOD request
 * (@priv is the ofdpa_flow_tbl_entry) into @desc_info as a nested
 * CMD_INFO TLV, dispatching on the target table for the per-table
 * match/action fields.  Returns -EMSGSIZE on descriptor overflow,
 * -ENOTSUPP for an unknown table id.
 */
static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	/* hard timeout 0: flow never expires on its own */
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	/* per-table match/action TLVs */
	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	/* NOTE(review): error paths return with the nest still open;
	 * presumably the descriptor is discarded on error - confirm.
	 */
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
561
562static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
563				  struct rocker_desc_info *desc_info,
564				  void *priv)
565{
566	const struct ofdpa_flow_tbl_entry *entry = priv;
567	struct rocker_tlv *cmd_info;
568
569	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
570		return -EMSGSIZE;
571	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
572	if (!cmd_info)
573		return -EMSGSIZE;
574	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
575			       entry->cookie))
576		return -EMSGSIZE;
577	rocker_tlv_nest_end(desc_info, cmd_info);
578
579	return 0;
580}
581
582static int
583ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
584				     struct ofdpa_group_tbl_entry *entry)
585{
586	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
587			       ROCKER_GROUP_PORT_GET(entry->group_id)))
588		return -EMSGSIZE;
589	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
590			      entry->l2_interface.pop_vlan))
591		return -EMSGSIZE;
592
593	return 0;
594}
595
596static int
597ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
598				   const struct ofdpa_group_tbl_entry *entry)
599{
600	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
601			       entry->l2_rewrite.group_id))
602		return -EMSGSIZE;
603	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
604	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
605			   ETH_ALEN, entry->l2_rewrite.eth_src))
606		return -EMSGSIZE;
607	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
608	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
609			   ETH_ALEN, entry->l2_rewrite.eth_dst))
610		return -EMSGSIZE;
611	if (entry->l2_rewrite.vlan_id &&
612	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
613				entry->l2_rewrite.vlan_id))
614		return -EMSGSIZE;
615
616	return 0;
617}
618
619static int
620ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
621				  const struct ofdpa_group_tbl_entry *entry)
622{
623	int i;
624	struct rocker_tlv *group_ids;
625
626	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
627			       entry->group_count))
628		return -EMSGSIZE;
629
630	group_ids = rocker_tlv_nest_start(desc_info,
631					  ROCKER_TLV_OF_DPA_GROUP_IDS);
632	if (!group_ids)
633		return -EMSGSIZE;
634
635	for (i = 0; i < entry->group_count; i++)
636		/* Note TLV array is 1-based */
637		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
638			return -EMSGSIZE;
639
640	rocker_tlv_nest_end(desc_info, group_ids);
641
642	return 0;
643}
644
645static int
646ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
647				   const struct ofdpa_group_tbl_entry *entry)
648{
649	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
650	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
651			   ETH_ALEN, entry->l3_unicast.eth_src))
652		return -EMSGSIZE;
653	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
654	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
655			   ETH_ALEN, entry->l3_unicast.eth_dst))
656		return -EMSGSIZE;
657	if (entry->l3_unicast.vlan_id &&
658	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
659				entry->l3_unicast.vlan_id))
660		return -EMSGSIZE;
661	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
662			      entry->l3_unicast.ttl_check))
663		return -EMSGSIZE;
664	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
665			       entry->l3_unicast.group_id))
666		return -EMSGSIZE;
667
668	return 0;
669}
670
/* rocker_cmd_exec() prepare callback: serialize a group ADD/MOD
 * request (@priv is the ofdpa_group_tbl_entry), dispatching on the
 * group type encoded in group_id.  Returns -EMSGSIZE on descriptor
 * overflow, -ENOTSUPP for an unknown group type.
 */
static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	/* per-type parameter TLVs */
	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
715
716static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
717				   struct rocker_desc_info *desc_info,
718				   void *priv)
719{
720	const struct ofdpa_group_tbl_entry *entry = priv;
721	struct rocker_tlv *cmd_info;
722
723	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
724		return -EMSGSIZE;
725	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
726	if (!cmd_info)
727		return -EMSGSIZE;
728	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
729			       entry->group_id))
730		return -EMSGSIZE;
731	rocker_tlv_nest_end(desc_info, cmd_info);
732
733	return 0;
734}
735
736/***************************************************
737 * Flow, group, FDB, internal VLAN and neigh tables
738 ***************************************************/
739
740static struct ofdpa_flow_tbl_entry *
741ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
742		    const struct ofdpa_flow_tbl_entry *match)
743{
744	struct ofdpa_flow_tbl_entry *found;
745	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
746
747	hash_for_each_possible(ofdpa->flow_tbl, found,
748			       entry, match->key_crc32) {
749		if (memcmp(&found->key, &match->key, key_len) == 0)
750			return found;
751	}
752
753	return NULL;
754}
755
/* Install or update a flow table entry.  Ownership of @match passes
 * to the table: it either replaces (and frees) an existing entry with
 * the same key (device sees a MOD) or is inserted fresh with a new
 * cookie (device sees an ADD).
 */
static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		/* same key already installed: keep its cookie and
		 * modify the existing flow on the device
		 */
		match->cookie = found->cookie;
		hash_del(&found->entry);
		kfree(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = ofdpa->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	/* NOTE(review): found is dereferenced by the cmd path after the
	 * lock is dropped; presumably table ops are serialized per
	 * switch so it cannot be freed underneath us - confirm.
	 */
	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_flow_tbl_add,
			       found, NULL, NULL);
}
790
/* Remove a flow table entry.  @match is only a lookup key and is
 * always freed here.  If a matching entry exists it is unhashed,
 * deleted from the device by cookie, then freed too.  Deleting a
 * non-existent flow is not an error (returns 0).
 */
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	/* the lookup key is never kept */
	kfree(match);

	if (found) {
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_flow_tbl_del,
				      found, NULL, NULL);
		kfree(found);
	}

	return err;
}
825
826static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
827			     struct ofdpa_flow_tbl_entry *entry)
828{
829	if (flags & OFDPA_OP_FLAG_REMOVE)
830		return ofdpa_flow_tbl_del(ofdpa_port, flags, entry);
831	else
832		return ofdpa_flow_tbl_add(ofdpa_port, flags, entry);
833}
834
835static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port, int flags,
836				  u32 in_pport, u32 in_pport_mask,
837				  enum rocker_of_dpa_table_id goto_tbl)
838{
839	struct ofdpa_flow_tbl_entry *entry;
840
841	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
842	if (!entry)
843		return -ENOMEM;
844
845	entry->key.priority = OFDPA_PRIORITY_IG_PORT;
846	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
847	entry->key.ig_port.in_pport = in_pport;
848	entry->key.ig_port.in_pport_mask = in_pport_mask;
849	entry->key.ig_port.goto_tbl = goto_tbl;
850
851	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
852}
853
854static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
855			       int flags,
856			       u32 in_pport, __be16 vlan_id,
857			       __be16 vlan_id_mask,
858			       enum rocker_of_dpa_table_id goto_tbl,
859			       bool untagged, __be16 new_vlan_id)
860{
861	struct ofdpa_flow_tbl_entry *entry;
862
863	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
864	if (!entry)
865		return -ENOMEM;
866
867	entry->key.priority = OFDPA_PRIORITY_VLAN;
868	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
869	entry->key.vlan.in_pport = in_pport;
870	entry->key.vlan.vlan_id = vlan_id;
871	entry->key.vlan.vlan_id_mask = vlan_id_mask;
872	entry->key.vlan.goto_tbl = goto_tbl;
873
874	entry->key.vlan.untagged = untagged;
875	entry->key.vlan.new_vlan_id = new_vlan_id;
876
877	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
878}
879
880static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
881				   u32 in_pport, u32 in_pport_mask,
882				   __be16 eth_type, const u8 *eth_dst,
883				   const u8 *eth_dst_mask, __be16 vlan_id,
884				   __be16 vlan_id_mask, bool copy_to_cpu,
885				   int flags)
886{
887	struct ofdpa_flow_tbl_entry *entry;
888
889	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
890	if (!entry)
891		return -ENOMEM;
892
893	if (is_multicast_ether_addr(eth_dst)) {
894		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
895		entry->key.term_mac.goto_tbl =
896			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
897	} else {
898		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
899		entry->key.term_mac.goto_tbl =
900			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
901	}
902
903	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
904	entry->key.term_mac.in_pport = in_pport;
905	entry->key.term_mac.in_pport_mask = in_pport_mask;
906	entry->key.term_mac.eth_type = eth_type;
907	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
908	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
909	entry->key.term_mac.vlan_id = vlan_id;
910	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
911	entry->key.term_mac.copy_to_cpu = copy_to_cpu;
912
913	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
914}
915
/* Build and add/remove a bridging table flow.  Priority is derived
 * from how specific the match is: VLAN bridging vs tunnel (tenant)
 * bridging, exact-MAC entries vs "default" (catch-all) entries, and
 * wildcarded vs exact masks.
 */
static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
				 int flags, const u8 *eth_dst,
				 const u8 *eth_dst_mask,  __be16 vlan_id,
				 u32 tunnel_id,
				 enum rocker_of_dpa_table_id goto_tbl,
				 u32 group_id, bool copy_to_cpu)
{
	struct ofdpa_flow_tbl_entry *entry;
	u32 priority;
	bool vlan_bridging = !!vlan_id;
	/* "default" = no dst MAC at all, or a masked (non-exact) one */
	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
	bool wild = false;

	/* GFP_ATOMIC: presumably reachable from atomic context
	 * (e.g. FDB learning) - confirm against callers
	 */
	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

	if (eth_dst) {
		entry->key.bridge.has_eth_dst = 1;
		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	}
	if (eth_dst_mask) {
		entry->key.bridge.has_eth_dst_mask = 1;
		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
		/* anything but an all-ones mask is a wildcard match */
		if (!ether_addr_equal(eth_dst_mask, ff_mac))
			wild = true;
	}

	priority = OFDPA_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
969
970static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
971					 __be16 eth_type, __be32 dst,
972					 __be32 dst_mask, u32 priority,
973					 enum rocker_of_dpa_table_id goto_tbl,
974					 u32 group_id, struct fib_info *fi,
975					 int flags)
976{
977	struct ofdpa_flow_tbl_entry *entry;
978
979	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
980	if (!entry)
981		return -ENOMEM;
982
983	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
984	entry->key.priority = priority;
985	entry->key.ucast_routing.eth_type = eth_type;
986	entry->key.ucast_routing.dst4 = dst;
987	entry->key.ucast_routing.dst4_mask = dst_mask;
988	entry->key.ucast_routing.goto_tbl = goto_tbl;
989	entry->key.ucast_routing.group_id = group_id;
990	entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
991				  ucast_routing.group_id);
992	entry->fi = fi;
993
994	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
995}
996
/* Build and add/remove an ACL policy table flow.  NULL MAC/mask
 * pointers leave the corresponding match zeroed (kzalloc).  Priority:
 * link-local dst MACs get CTRL, multicast-only dst masks get DFLT,
 * everything else NORMAL.
 */
static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port, int flags,
			      u32 in_pport, u32 in_pport_mask,
			      const u8 *eth_src, const u8 *eth_src_mask,
			      const u8 *eth_dst, const u8 *eth_dst_mask,
			      __be16 eth_type, __be16 vlan_id,
			      __be16 vlan_id_mask, u8 ip_proto,
			      u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			      u32 group_id)
{
	u32 priority;
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	priority = OFDPA_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = OFDPA_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = OFDPA_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
1046
1047static struct ofdpa_group_tbl_entry *
1048ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
1049		     const struct ofdpa_group_tbl_entry *match)
1050{
1051	struct ofdpa_group_tbl_entry *found;
1052
1053	hash_for_each_possible(ofdpa->group_tbl, found,
1054			       entry, match->group_id) {
1055		if (found->group_id == match->group_id)
1056			return found;
1057	}
1058
1059	return NULL;
1060}
1061
1062static void ofdpa_group_tbl_entry_free(struct ofdpa_group_tbl_entry *entry)
1063{
1064	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
1065	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
1066	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
1067		kfree(entry->group_ids);
1068		break;
1069	default:
1070		break;
1071	}
1072	kfree(entry);
1073}
1074
/* Insert @match into the software group table and push it to the
 * device.  If an entry with the same group_id already exists it is
 * replaced (and the hardware command becomes a MOD rather than an
 * ADD).  The table takes ownership of @match.
 */
static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		/* Replace existing entry; device sees a MOD. */
		hash_del(&found->entry);
		ofdpa_group_tbl_entry_free(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	hash_add(ofdpa->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	/* NOTE(review): @found is passed to the cmd ring after the lock
	 * is dropped; presumably safe because the entry stays owned by
	 * the table until a later del — confirm against cmd lifetime.
	 */
	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_group_tbl_add,
			       found, NULL, NULL);
}
1105
/* Remove the group matching @match->group_id from the software table
 * and, if it was present, issue a hardware GROUP_DEL for it.  @match
 * itself only serves as a lookup key and is always freed here; the
 * entry actually found in the table is freed after the command is
 * issued.
 */
static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	/* @match was only the lookup key. */
	ofdpa_group_tbl_entry_free(match);

	if (found) {
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_group_tbl_del,
				      found, NULL, NULL);
		ofdpa_group_tbl_entry_free(found);
	}

	return err;
}
1137
1138static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
1139			      struct ofdpa_group_tbl_entry *entry)
1140{
1141	if (flags & OFDPA_OP_FLAG_REMOVE)
1142		return ofdpa_group_tbl_del(ofdpa_port, flags, entry);
1143	else
1144		return ofdpa_group_tbl_add(ofdpa_port, flags, entry);
1145}
1146
/* Add or remove (per OFDPA_OP_FLAG_REMOVE in @flags) the L2 interface
 * group for @out_pport in @vlan_id.  @pop_vlan asks the device to
 * untag frames on egress.  The entry is handed to ofdpa_group_tbl_do(),
 * which takes ownership.
 */
static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
				    int flags, __be16 vlan_id,
				    u32 out_pport, int pop_vlan)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	entry->l2_interface.pop_vlan = pop_vlan;

	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}
1162
1163static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
1164				  int flags, u8 group_count,
1165				  const u32 *group_ids, u32 group_id)
1166{
1167	struct ofdpa_group_tbl_entry *entry;
1168
1169	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1170	if (!entry)
1171		return -ENOMEM;
1172
1173	entry->group_id = group_id;
1174	entry->group_count = group_count;
1175
1176	entry->group_ids = kcalloc(group_count, sizeof(u32), GFP_KERNEL);
1177	if (!entry->group_ids) {
1178		kfree(entry);
1179		return -ENOMEM;
1180	}
1181	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
1182
1183	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1184}
1185
1186static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
1187				int flags, __be16 vlan_id,
1188				u8 group_count,	const u32 *group_ids,
1189				u32 group_id)
1190{
1191	return ofdpa_group_l2_fan_out(ofdpa_port, flags,
1192				      group_count, group_ids,
1193				      group_id);
1194}
1195
/* Add or remove the L3 unicast group with the given @index.  The group
 * rewrites src/dst MACs (when supplied), optionally performs a TTL
 * check, and chains to the L2 interface group for (@vlan_id, @pport).
 */
static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port, int flags,
				  u32 index, const u8 *src_mac, const u8 *dst_mac,
				  __be16 vlan_id, bool ttl_check, u32 pport)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
	/* NULL MAC means "leave zeroed" (no rewrite field set). */
	if (src_mac)
		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
	if (dst_mac)
		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
	entry->l3_unicast.vlan_id = vlan_id;
	entry->l3_unicast.ttl_check = ttl_check;
	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);

	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}
1217
1218static struct ofdpa_neigh_tbl_entry *
1219ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
1220{
1221	struct ofdpa_neigh_tbl_entry *found;
1222
1223	hash_for_each_possible(ofdpa->neigh_tbl, found,
1224			       entry, be32_to_cpu(ip_addr))
1225		if (found->ip_addr == ip_addr)
1226			return found;
1227
1228	return NULL;
1229}
1230
/* Link @entry into the neigh table, assign it the next free L3 unicast
 * group index and take the initial reference.  Caller must hold
 * ofdpa->neigh_tbl_lock.
 */
static void ofdpa_neigh_add(struct ofdpa *ofdpa,
			    struct ofdpa_neigh_tbl_entry *entry)
{
	entry->index = ofdpa->neigh_tbl_next_index++;
	entry->ref_count++;
	hash_add(ofdpa->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}
1239
1240static void ofdpa_neigh_del(struct ofdpa_neigh_tbl_entry *entry)
1241{
1242	if (--entry->ref_count == 0) {
1243		hash_del(&entry->entry);
1244		kfree(entry);
1245	}
1246}
1247
1248static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
1249			       const u8 *eth_dst, bool ttl_check)
1250{
1251	if (eth_dst) {
1252		ether_addr_copy(entry->eth_dst, eth_dst);
1253		entry->ttl_check = ttl_check;
1254	} else {
1255		entry->ref_count++;
1256	}
1257}
1258
/* Add, update or remove the offload state for IPv4 neighbor @ip_addr:
 * an L3 unicast group (MAC rewrite) plus a /32 unicast route pointing
 * at that group.  Uses GFP_ATOMIC — presumably because it can be
 * reached from the neighbour resolution path; confirm callers' context.
 */
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
				 int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* Classify the requested op against the current table state. */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		/* The table takes ownership of @entry. */
		ofdpa_neigh_add(ofdpa, entry);
	} else if (removing) {
		/* Snapshot the entry so the device can be programmed
		 * after the lock is dropped; the table's reference (and
		 * possibly the entry itself) is released here.
		 */
		memcpy(entry, found, sizeof(*entry));
		ofdpa_neigh_del(found);
	} else if (updating) {
		ofdpa_neigh_update(found, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = ofdpa_group_l3_unicast(ofdpa_port, flags,
				     entry->index,
				     ofdpa_port->dev->dev_addr,
				     entry->eth_dst,
				     ofdpa_port->internal_vlan_id,
				     entry->ttl_check,
				     ofdpa_port->pport);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port,
						    eth_type, ip_addr,
						    inet_make_mask(32),
						    priority, goto_tbl,
						    group_id, NULL, flags);

		if (err)
			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	/* Only the adding path handed @entry to the neigh table. */
	if (!adding)
		kfree(entry);

	return err;
}
1347
/* Resolve @ip_addr to a MAC via the kernel neighbour table.  If the
 * neigh is already valid, install the offload entry now; otherwise
 * start ARP resolution — presumably a later neigh event (handled
 * elsewhere) completes the install once the address resolves.
 */
static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
				   __be32 ip_addr)
{
	struct net_device *dev = ofdpa_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = ofdpa_port_ipv4_neigh(ofdpa_port, 0,
					    ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	/* Both lookup and create took a reference. */
	neigh_release(n);
	return err;
}
1375
/* Take (or drop, with OFDPA_OP_FLAG_REMOVE) a nexthop reference on the
 * neigh entry for @ip_addr, returning the entry's L3 unicast group
 * index in *index.  A freshly created entry has no MAC yet, so neigh
 * resolution is kicked off for it.
 */
static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
			      int flags, __be32 ip_addr, u32 *index)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* Classify the requested op against the current table state. */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		/* Table takes ownership of @entry; no MAC known yet. */
		ofdpa_neigh_add(ofdpa, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		*index = found->index;
		ofdpa_neigh_del(found);
	} else if (updating) {
		/* NULL MAC: just take another reference. */
		ofdpa_neigh_update(found, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
		*index = found->index;
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	/* Only the adding path handed @entry to the neigh table. */
	if (!adding)
		kfree(entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = ofdpa_port_ipv4_resolve(ofdpa_port, ip_addr);

	return err;
}
1433
1434static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
1435					 int port_index)
1436{
1437	struct rocker_port *rocker_port;
1438
1439	rocker_port = ofdpa->rocker->ports[port_index];
1440	return rocker_port ? rocker_port->wpriv : NULL;
1441}
1442
/* Rebuild the L2 flood group for @vlan_id so it fans out to the L2
 * interface group of every bridged port that is a member of the VLAN.
 * When no bridged port is left in the VLAN nothing is programmed.
 */
static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
				       int flags, __be16 vlan_id)
{
	struct ofdpa_port *p;
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 *group_ids;
	/* NOTE(review): group_count is u8 — assumes port_count <= 255;
	 * confirm the device cannot expose more ports than that.
	 */
	u8 group_count = 0;
	int err = 0;
	int i;

	group_ids = kcalloc(port_count, sizeof(u32), GFP_KERNEL);
	if (!group_ids)
		return -ENOMEM;

	/* Adjust the flood group for this VLAN.  The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */

	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (!p)
			continue;
		if (!ofdpa_port_is_bridged(p))
			continue;
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		goto no_ports_in_vlan;

	err = ofdpa_group_l2_flood(ofdpa_port, flags, vlan_id,
				   group_count, group_ids, group_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);

no_ports_in_vlan:
	kfree(group_ids);
	return err;
}
1489
/* Maintain the L2 interface groups tied to @vlan_id: one for this
 * port (only while its STP state allows forwarding) and one for the
 * CPU port (pport 0), shared by all member ports of the VLAN.
 */
static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, int flags,
				     __be16 vlan_id, bool pop_vlan)
{
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	struct ofdpa_port *p;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = ofdpa_port->pport;
		err = ofdpa_group_l2_interface(ofdpa_port, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	/* Count current members; the caller already flipped this port's
	 * bit in vlan_bitmap, so this port is included in the count.
	 */
	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	/* Touch the CPU group only on the first join (adding, ref == 1)
	 * or the last leave (removing, ref == 0).
	 */
	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = ofdpa_group_l2_interface(ofdpa_port, flags,
				       vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}
1542
/* Control-plane packet classes and how each is steered to the CPU.
 * Exactly one of .acl/.bridge/.term selects the OF-DPA table used to
 * install the trap for that class.
 */
static struct ofdpa_ctrl {
	const u8 *eth_dst;		/* dest MAC to match (NULL = unused) */
	const u8 *eth_dst_mask;		/* mask applied to eth_dst match */
	__be16 eth_type;		/* Ethertype to match (0 = any) */
	bool acl;			/* install via ACL policy table */
	bool bridge;			/* install via bridging table */
	bool term;			/* install via termination-MAC table */
	bool copy_to_cpu;		/* also copy matched pkts to CPU */
} ofdpa_ctrls[] = {
	[OFDPA_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[OFDPA_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[OFDPA_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term  = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term  = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
1593
/* Install/remove an ACL entry trapping @ctrl's packet class on this
 * port and @vlan_id, directing matches to the CPU-facing L2 interface
 * group (out_pport 0).
 */
static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, int flags,
				    const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport = ofdpa_port->pport;
	u32 in_pport_mask = 0xffffffff;	/* exact in_pport match */
	u32 out_pport = 0;		/* pport 0 == CPU port */
	const u8 *eth_src = NULL;
	const u8 *eth_src_mask = NULL;
	__be16 vlan_id_mask = htons(0xffff);
	u8 ip_proto = 0;
	u8 ip_proto_mask = 0;
	u8 ip_tos = 0;
	u8 ip_tos_mask = 0;
	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	int err;

	err = ofdpa_flow_tbl_acl(ofdpa_port, flags,
				 in_pport, in_pport_mask,
				 eth_src, eth_src_mask,
				 ctrl->eth_dst, ctrl->eth_dst_mask,
				 ctrl->eth_type,
				 vlan_id, vlan_id_mask,
				 ip_proto, ip_proto_mask,
				 ip_tos, ip_tos_mask,
				 group_id);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);

	return err;
}
1625
/* Install/remove a bridging-table entry flooding @ctrl's packet class
 * on the VLAN's flood group (copying to CPU when requested).  A no-op
 * for ports that are not bridged.
 */
static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
				       int flags, const struct ofdpa_ctrl *ctrl,
				       __be16 vlan_id)
{
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 tunnel_id = 0;
	int err;

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	err = ofdpa_flow_tbl_bridge(ofdpa_port, flags,
				    ctrl->eth_dst, ctrl->eth_dst_mask,
				    vlan_id, tunnel_id,
				    goto_tbl, group_id, ctrl->copy_to_cpu);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);

	return err;
}
1649
/* Install/remove a termination-MAC entry for @ctrl's packet class on
 * this port.  VLAN 0 is translated to the port's internal VLAN.
 */
static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, int flags,
				     const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;	/* exact in_pport match */
	__be16 vlan_id_mask = htons(0xffff);
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = ofdpa_port->internal_vlan_id;

	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport, in_pport_mask,
				      ctrl->eth_type, ctrl->eth_dst,
				      ctrl->eth_dst_mask, vlan_id,
				      vlan_id_mask, ctrl->copy_to_cpu,
				      flags);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);

	return err;
}
1671
1672static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port, int flags,
1673				const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1674{
1675	if (ctrl->acl)
1676		return ofdpa_port_ctrl_vlan_acl(ofdpa_port, flags,
1677						ctrl, vlan_id);
1678	if (ctrl->bridge)
1679		return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, flags,
1680						   ctrl, vlan_id);
1681
1682	if (ctrl->term)
1683		return ofdpa_port_ctrl_vlan_term(ofdpa_port, flags,
1684						 ctrl, vlan_id);
1685
1686	return -EOPNOTSUPP;
1687}
1688
1689static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port, int flags,
1690				    __be16 vlan_id)
1691{
1692	int err = 0;
1693	int i;
1694
1695	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
1696		if (ofdpa_port->ctrls[i]) {
1697			err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
1698						   &ofdpa_ctrls[i], vlan_id);
1699			if (err)
1700				return err;
1701		}
1702	}
1703
1704	return err;
1705}
1706
/* Apply (install or remove, per @flags) @ctrl on every VLAN currently
 * enabled on this port, stopping at the first failure.
 */
static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port, int flags,
			   const struct ofdpa_ctrl *ctrl)
{
	u16 vid;
	int err = 0;

	/* vid 0 is skipped; only set bits in vlan_bitmap are members. */
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
			continue;
		err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
					   ctrl, htons(vid));
		if (err)
			break;
	}

	return err;
}
1724
/* Add or remove port membership in @vid: ctrl policies, L2 interface
 * groups, the VLAN flood group, and finally the VLAN table entry that
 * maps the wire VLAN to the internal VLAN id.  The membership bitmap
 * is flipped first and rolled back on failure of the earlier steps.
 */
static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port, int flags,
			   u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = ofdpa_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);

	if (adding &&
	    test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding &&
		 !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already removed */

	/* Flip membership first; later helpers count members from it. */
	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	if (adding) {
		err = ofdpa_port_ctrl_vlan_add(ofdpa_port, flags,
					       internal_vlan_id);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
			goto err_vlan_add;
		}
	}

	err = ofdpa_port_vlan_l2_groups(ofdpa_port, flags,
					internal_vlan_id, untagged);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
		goto err_vlan_l2_groups;
	}

	err = ofdpa_port_vlan_flood_group(ofdpa_port, flags,
					  internal_vlan_id);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
		goto err_flood_group;
	}

	err = ofdpa_flow_tbl_vlan(ofdpa_port, flags,
				  in_pport, vlan_id, vlan_id_mask,
				  goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);

	/* NOTE(review): a VLAN-table failure is logged but reported as
	 * success, and the bitmap is not rolled back here — confirm
	 * this asymmetry with the earlier error paths is intended.
	 */
	return 0;

err_vlan_add:
err_vlan_l2_groups:
err_flood_group:
	/* Undo the membership change made above. */
	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
	return err;
}
1786
1787static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, int flags)
1788{
1789	enum rocker_of_dpa_table_id goto_tbl;
1790	u32 in_pport;
1791	u32 in_pport_mask;
1792	int err;
1793
1794	/* Normal Ethernet Frames.  Matches pkts from any local physical
1795	 * ports.  Goto VLAN tbl.
1796	 */
1797
1798	in_pport = 0;
1799	in_pport_mask = 0xffff0000;
1800	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
1801
1802	err = ofdpa_flow_tbl_ig_port(ofdpa_port, flags,
1803				     in_pport, in_pport_mask,
1804				     goto_tbl);
1805	if (err)
1806		netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);
1807
1808	return err;
1809}
1810
/* Deferred-work context used to notify the bridge layer of a learned
 * or removed FDB entry outside of the FDB table lock.
 */
struct ofdpa_fdb_learn_work {
	struct work_struct work;
	struct ofdpa_port *ofdpa_port;	/* port the FDB entry belongs to */
	int flags;			/* OFDPA_OP_FLAG_* of the op */
	u8 addr[ETH_ALEN];		/* MAC of the FDB entry */
	u16 vid;			/* VLAN id as seen by the bridge */
};
1818
/* Worker: tell the bridge about a learned FDB entry (add) or one that
 * went away (remove).  Runs under rtnl_lock; only LEARNED entries
 * generate notifications.
 */
static void ofdpa_port_fdb_learn_work(struct work_struct *work)
{
	const struct ofdpa_fdb_learn_work *lw =
		container_of(work, struct ofdpa_fdb_learn_work, work);
	bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
	bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info;

	info.addr = lw->addr;
	info.vid = lw->vid;

	rtnl_lock();
	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
					 lw->ofdpa_port->dev, &info.info, NULL);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
					 lw->ofdpa_port->dev, &info.info, NULL);
	rtnl_unlock();

	/* lw was allocated by ofdpa_port_fdb_learn(); free it here. */
	kfree(work);
}
1841
/* Program (or remove) the bridging-table entry for @addr/@vlan_id and,
 * for bridged ports, schedule deferred notification of the bridge.
 * With OFDPA_OP_FLAG_REFRESH only the notification is scheduled.
 * GFP_ATOMIC because callers may hold the FDB table spinlock.
 */
static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
				int flags, const u8 *addr, __be16 vlan_id)
{
	struct ofdpa_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = ofdpa_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool copy_to_cpu = false;
	int err;

	if (ofdpa_port_is_bridged(ofdpa_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
		err = ofdpa_flow_tbl_bridge(ofdpa_port, flags, addr,
					    NULL, vlan_id, tunnel_id, goto_tbl,
					    group_id, copy_to_cpu);
		if (err)
			return err;
	}

	/* Only bridged ports notify the bridge layer. */
	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	lw = kzalloc(sizeof(*lw), GFP_ATOMIC);
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);

	lw->ofdpa_port = ofdpa_port;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);

	/* The worker frees lw. */
	schedule_work(&lw->work);
	return 0;
}
1882
1883static struct ofdpa_fdb_tbl_entry *
1884ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
1885		   const struct ofdpa_fdb_tbl_entry *match)
1886{
1887	struct ofdpa_fdb_tbl_entry *found;
1888
1889	hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
1890		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
1891			return found;
1892
1893	return NULL;
1894}
1895
1896static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
1897			  const unsigned char *addr,
1898			  __be16 vlan_id, int flags)
1899{
1900	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
1901	struct ofdpa_fdb_tbl_entry *fdb;
1902	struct ofdpa_fdb_tbl_entry *found;
1903	bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
1904	unsigned long lock_flags;
1905
1906	fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
1907	if (!fdb)
1908		return -ENOMEM;
1909
1910	fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
1911	fdb->touched = jiffies;
1912	fdb->key.ofdpa_port = ofdpa_port;
1913	ether_addr_copy(fdb->key.addr, addr);
1914	fdb->key.vlan_id = vlan_id;
1915	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
1916
1917	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
1918
1919	found = ofdpa_fdb_tbl_find(ofdpa, fdb);
1920
1921	if (found) {
1922		found->touched = jiffies;
1923		if (removing) {
1924			kfree(fdb);
1925			hash_del(&found->entry);
1926		}
1927	} else if (!removing) {
1928		hash_add(ofdpa->fdb_tbl, &fdb->entry,
1929			 fdb->key_crc32);
1930	}
1931
1932	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
1933
1934	/* Check if adding and already exists, or removing and can't find */
1935	if (!found != !removing) {
1936		kfree(fdb);
1937		if (!found && removing)
1938			return 0;
1939		/* Refreshing existing to update aging timers */
1940		flags |= OFDPA_OP_FLAG_REFRESH;
1941	}
1942
1943	return ofdpa_port_fdb_learn(ofdpa_port, flags, addr, vlan_id);
1944}
1945
1946static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, int flags)
1947{
1948	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
1949	struct ofdpa_fdb_tbl_entry *found;
1950	unsigned long lock_flags;
1951	struct hlist_node *tmp;
1952	int bkt;
1953	int err = 0;
1954
1955	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
1956	    ofdpa_port->stp_state == BR_STATE_FORWARDING)
1957		return 0;
1958
1959	flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;
1960
1961	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
1962
1963	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
1964		if (found->key.ofdpa_port != ofdpa_port)
1965			continue;
1966		if (!found->learned)
1967			continue;
1968		err = ofdpa_port_fdb_learn(ofdpa_port, flags,
1969					   found->key.addr,
1970					   found->key.vlan_id);
1971		if (err)
1972			goto err_out;
1973		hash_del(&found->entry);
1974	}
1975
1976err_out:
1977	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
1978
1979	return err;
1980}
1981
1982static void ofdpa_fdb_cleanup(struct timer_list *t)
1983{
1984	struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
1985	struct ofdpa_port *ofdpa_port;
1986	struct ofdpa_fdb_tbl_entry *entry;
1987	struct hlist_node *tmp;
1988	unsigned long next_timer = jiffies + ofdpa->ageing_time;
1989	unsigned long expires;
1990	unsigned long lock_flags;
1991	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
1992		    OFDPA_OP_FLAG_LEARNED;
1993	int bkt;
1994
1995	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
1996
1997	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
1998		if (!entry->learned)
1999			continue;
2000		ofdpa_port = entry->key.ofdpa_port;
2001		expires = entry->touched + ofdpa_port->ageing_time;
2002		if (time_before_eq(expires, jiffies)) {
2003			ofdpa_port_fdb_learn(ofdpa_port, flags,
2004					     entry->key.addr,
2005					     entry->key.vlan_id);
2006			hash_del(&entry->entry);
2007		} else if (time_before(expires, next_timer)) {
2008			next_timer = expires;
2009		}
2010	}
2011
2012	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
2013
2014	mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
2015}
2016
2017static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
2018				 int flags, __be16 vlan_id)
2019{
2020	u32 in_pport_mask = 0xffffffff;
2021	__be16 eth_type;
2022	const u8 *dst_mac_mask = ff_mac;
2023	__be16 vlan_id_mask = htons(0xffff);
2024	bool copy_to_cpu = false;
2025	int err;
2026
2027	if (ntohs(vlan_id) == 0)
2028		vlan_id = ofdpa_port->internal_vlan_id;
2029
2030	eth_type = htons(ETH_P_IP);
2031	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
2032				      in_pport_mask, eth_type,
2033				      ofdpa_port->dev->dev_addr,
2034				      dst_mac_mask, vlan_id, vlan_id_mask,
2035				      copy_to_cpu, flags);
2036	if (err)
2037		return err;
2038
2039	eth_type = htons(ETH_P_IPV6);
2040	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
2041				      in_pport_mask, eth_type,
2042				      ofdpa_port->dev->dev_addr,
2043				      dst_mac_mask, vlan_id, vlan_id_mask,
2044				      copy_to_cpu, flags);
2045
2046	return err;
2047}
2048
/* Enable or disable forwarding on the port by creating or removing the
 * L2 interface group of every VLAN the port belongs to, based on the
 * port's current STP state.
 */
static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port, int flags)
{
	bool pop_vlan;
	u32 out_pport;
	__be16 vlan_id;
	u16 vid;
	int err;

	/* Port will be forwarding-enabled if its STP state is LEARNING
	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
	 * port STP state.  Use L2 interface group on port VLANs as a way
	 * to toggle port forwarding: if forwarding is disabled, L2
	 * interface group will not exist.
	 */

	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
		flags |= OFDPA_OP_FLAG_REMOVE;

	out_pport = ofdpa_port->pport;
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
			continue;
		vlan_id = htons(vid);
		/* Internal VLANs are stripped on egress. */
		pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
		err = ofdpa_group_l2_interface(ofdpa_port, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	return 0;
}
2085
/* Transition the port to a new bridge STP state.
 *
 * Builds the set of control (ctrl) ACL entries the new state wants,
 * programs the delta against what is currently installed, then
 * flushes learned FDB entries and re-evaluates port forwarding.
 *
 * Returns 0 on success.  On error, the cached ctrl flags and STP
 * state are rolled back to their previous values.  NOTE(review):
 * hardware entries programmed before the failure point are not
 * unwound - confirm this is intentional.
 */
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
				 int flags, u8 state)
{
	bool want[OFDPA_CTRL_MAX] = { 0, };
	bool prev_ctrls[OFDPA_CTRL_MAX];
	u8 prev_state;
	int err;
	int i;

	/* Snapshot current bookkeeping for rollback on error */
	memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
	prev_state = ofdpa_port->stp_state;

	if (ofdpa_port->stp_state == state)
		return 0;

	ofdpa_port->stp_state = state;

	/* Decide which ctrl entry classes the new state requires */
	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		want[OFDPA_CTRL_IPV4_MCAST] = true;
		want[OFDPA_CTRL_IPV6_MCAST] = true;
		if (ofdpa_port_is_bridged(ofdpa_port))
			want[OFDPA_CTRL_DFLT_BRIDGING] = true;
		else if (ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_DFLT_OVS] = true;
		else
			want[OFDPA_CTRL_LOCAL_ARP] = true;
		break;
	}

	/* Program only the ctrl entries whose wanted state changed */
	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (want[i] != ofdpa_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
			err = ofdpa_port_ctrl(ofdpa_port, ctrl_flags,
					      &ofdpa_ctrls[i]);
			if (err)
				goto err_port_ctrl;
			ofdpa_port->ctrls[i] = want[i];
		}
	}

	/* Learned FDB entries are stale across an STP transition */
	err = ofdpa_port_fdb_flush(ofdpa_port, flags);
	if (err)
		goto err_fdb_flush;

	err = ofdpa_port_fwding(ofdpa_port, flags);
	if (err)
		goto err_port_fwding;

	return 0;

err_port_ctrl:
err_fdb_flush:
err_port_fwding:
	/* Restore cached state so a retry starts from a known point */
	memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
	ofdpa_port->stp_state = prev_state;
	return err;
}
2155
2156static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
2157{
2158	if (ofdpa_port_is_bridged(ofdpa_port))
2159		/* bridge STP will enable port */
2160		return 0;
2161
2162	/* port is not bridged, so simulate going to FORWARDING state */
2163	return ofdpa_port_stp_update(ofdpa_port, flags,
2164				     BR_STATE_FORWARDING);
2165}
2166
2167static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
2168{
2169	if (ofdpa_port_is_bridged(ofdpa_port))
2170		/* bridge STP will disable port */
2171		return 0;
2172
2173	/* port is not bridged, so simulate going to DISABLED state */
2174	return ofdpa_port_stp_update(ofdpa_port, flags,
2175				     BR_STATE_DISABLED);
2176}
2177
2178static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
2179			       u16 vid, u16 flags)
2180{
2181	int err;
2182
2183	/* XXX deal with flags for PVID and untagged */
2184
2185	err = ofdpa_port_vlan(ofdpa_port, 0, vid);
2186	if (err)
2187		return err;
2188
2189	err = ofdpa_port_router_mac(ofdpa_port, 0, htons(vid));
2190	if (err)
2191		ofdpa_port_vlan(ofdpa_port,
2192				OFDPA_OP_FLAG_REMOVE, vid);
2193
2194	return err;
2195}
2196
2197static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
2198			       u16 vid, u16 flags)
2199{
2200	int err;
2201
2202	err = ofdpa_port_router_mac(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
2203				    htons(vid));
2204	if (err)
2205		return err;
2206
2207	return ofdpa_port_vlan(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
2208			       vid);
2209}
2210
2211static struct ofdpa_internal_vlan_tbl_entry *
2212ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
2213{
2214	struct ofdpa_internal_vlan_tbl_entry *found;
2215
2216	hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
2217			       entry, ifindex) {
2218		if (found->ifindex == ifindex)
2219			return found;
2220	}
2221
2222	return NULL;
2223}
2224
/* Get the internal VLAN ID for an ifindex, creating the table entry
 * and allocating an ID on first use; each call takes a reference.
 *
 * Returns the VLAN ID in network byte order, or 0 when entry
 * allocation failed.  NOTE(review): if the internal VLAN space is
 * exhausted, the new entry keeps vlan_id == 0 (from kzalloc) and its
 * refcount is still bumped - confirm callers treat 0 as invalid.
 */
static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
					      int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *entry;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	/* Preallocate so nothing is allocated under the spinlock */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (found) {
		/* already present - drop the preallocated entry */
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);

	/* Claim the first free internal VLAN ID from the bitmap */
	for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}
2266
/* Program (or remove, per flags) an IPv4 route in the OF-DPA
 * unicast-routing flow table.  Only the first nexthop of the
 * fib_info is used (no ECMP).  A gateway route whose nexthop device
 * is this port resolves to an L3 unicast group via the neighbour
 * table; anything else is trapped to the CPU through the L2
 * interface group of the port's internal VLAN.
 */
static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,  __be32 dst,
			       int dst_len, struct fib_info *fi, u32 tb_id,
			       int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	nh = fib_info_nh(fi, 0);
	nh_on_port = (nh->fib_nh_dev == ofdpa_port->dev);
	has_gw = !!nh->fib_nh_gw4;

	if (has_gw && nh_on_port) {
		/* resolve (or create) the neighbour entry to obtain
		 * the L3 unicast group index
		 */
		err = ofdpa_port_ipv4_nh(ofdpa_port, flags,
					 nh->fib_nh_gw4, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, eth_type, dst,
					    dst_mask, priority, goto_tbl,
					    group_id, fi, flags);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}
2311
/* Drop one reference on the internal VLAN entry for ifindex.  On the
 * last put the VLAN ID is returned to the allocation bitmap and the
 * table entry is freed.  Missing entries are logged and ignored.
 */
static void
ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
				int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (!found) {
		netdev_err(ofdpa_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		/* recover the bitmap index from the stored VLAN ID */
		bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, ofdpa->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
}
2341
2342/**********************************
2343 * Rocker world ops implementation
2344 **********************************/
2345
2346static int ofdpa_init(struct rocker *rocker)
2347{
2348	struct ofdpa *ofdpa = rocker->wpriv;
2349
2350	ofdpa->rocker = rocker;
2351
2352	hash_init(ofdpa->flow_tbl);
2353	spin_lock_init(&ofdpa->flow_tbl_lock);
2354
2355	hash_init(ofdpa->group_tbl);
2356	spin_lock_init(&ofdpa->group_tbl_lock);
2357
2358	hash_init(ofdpa->fdb_tbl);
2359	spin_lock_init(&ofdpa->fdb_tbl_lock);
2360
2361	hash_init(ofdpa->internal_vlan_tbl);
2362	spin_lock_init(&ofdpa->internal_vlan_tbl_lock);
2363
2364	hash_init(ofdpa->neigh_tbl);
2365	spin_lock_init(&ofdpa->neigh_tbl_lock);
2366
2367	timer_setup(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, 0);
2368	mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);
2369
2370	ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;
2371
2372	return 0;
2373}
2374
/* Rocker world op: tear down the OF-DPA world.  Stops the FDB
 * cleanup timer, drains pending ordered work, then empties every
 * lookup table under its lock.  NOTE(review): entries are only
 * unlinked here, not freed - confirm this is not a leak on
 * teardown.
 */
static void ofdpa_fini(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	unsigned long flags;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct ofdpa_group_tbl_entry *group_entry;
	struct ofdpa_fdb_tbl_entry *fdb_entry;
	struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
	struct ofdpa_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	/* no more timer callbacks or queued work after this point */
	del_timer_sync(&ofdpa->fdb_cleanup_timer);
	flush_workqueue(rocker->rocker_owq);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
	hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
	hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
}
2417
2418static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
2419{
2420	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2421
2422	ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
2423	ofdpa_port->rocker_port = rocker_port;
2424	ofdpa_port->dev = rocker_port->dev;
2425	ofdpa_port->pport = rocker_port->pport;
2426	ofdpa_port->brport_flags = BR_LEARNING;
2427	ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
2428	return 0;
2429}
2430
/* Rocker world op: per-port initialization.  Programs hardware
 * learning, installs the ingress port table entry, assigns the
 * port's internal VLAN (keyed by its own ifindex) and adds the
 * untagged VLAN.  On untagged-VLAN failure the ingress table entry
 * is removed again.  NOTE(review): the return value of
 * rocker_port_set_learning and a zero internal_vlan_id are not
 * checked here - confirm intentional.
 */
static int ofdpa_port_init(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err;

	rocker_port_set_learning(rocker_port,
				 !!(ofdpa_port->brport_flags & BR_LEARNING));

	err = ofdpa_port_ig_tbl(ofdpa_port, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install ig port table failed\n");
		return err;
	}

	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}
	return 0;

err_untagged_vlan:
	ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
	return err;
}
2460
2461static void ofdpa_port_fini(struct rocker_port *rocker_port)
2462{
2463	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2464
2465	ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
2466}
2467
2468static int ofdpa_port_open(struct rocker_port *rocker_port)
2469{
2470	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2471
2472	return ofdpa_port_fwd_enable(ofdpa_port, 0);
2473}
2474
2475static void ofdpa_port_stop(struct rocker_port *rocker_port)
2476{
2477	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2478
2479	ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
2480}
2481
2482static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
2483					 u8 state)
2484{
2485	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2486
2487	return ofdpa_port_stp_update(ofdpa_port, 0, state);
2488}
2489
/* Rocker world op: switchdev bridge port flags attribute.  Only a
 * change of BR_LEARNING touches hardware, and only in the commit
 * phase; during the prepare phase the new flags are applied to the
 * cache and then restored, leaving state unchanged.
 */
static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
					    unsigned long brport_flags,
					    struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	unsigned long orig_flags;
	int err = 0;

	orig_flags = ofdpa_port->brport_flags;
	ofdpa_port->brport_flags = brport_flags;
	/* program hardware only when learning changed and committing */
	if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
	    !switchdev_trans_ph_prepare(trans))
		err = rocker_port_set_learning(ofdpa_port->rocker_port,
					       !!(ofdpa_port->brport_flags & BR_LEARNING));

	/* prepare phase must leave the cached flags untouched */
	if (switchdev_trans_ph_prepare(trans))
		ofdpa_port->brport_flags = orig_flags;

	return err;
}
2510
2511static int
2512ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *
2513					 rocker_port,
2514					 unsigned long *
2515					 p_brport_flags_support)
2516{
2517	*p_brport_flags_support = BR_LEARNING;
2518	return 0;
2519}
2520
2521static int
2522ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
2523				       u32 ageing_time,
2524				       struct switchdev_trans *trans)
2525{
2526	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2527	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2528
2529	if (!switchdev_trans_ph_prepare(trans)) {
2530		ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
2531		if (ofdpa_port->ageing_time < ofdpa->ageing_time)
2532			ofdpa->ageing_time = ofdpa_port->ageing_time;
2533		mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
2534	}
2535
2536	return 0;
2537}
2538
2539static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
2540				   const struct switchdev_obj_port_vlan *vlan)
2541{
2542	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2543	u16 vid;
2544	int err;
2545
2546	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2547		err = ofdpa_port_vlan_add(ofdpa_port, vid, vlan->flags);
2548		if (err)
2549			return err;
2550	}
2551
2552	return 0;
2553}
2554
2555static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
2556				   const struct switchdev_obj_port_vlan *vlan)
2557{
2558	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2559	u16 vid;
2560	int err;
2561
2562	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2563		err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
2564		if (err)
2565			return err;
2566	}
2567
2568	return 0;
2569}
2570
2571static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
2572				  u16 vid, const unsigned char *addr)
2573{
2574	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2575	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
2576
2577	if (!ofdpa_port_is_bridged(ofdpa_port))
2578		return -EINVAL;
2579
2580	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, 0);
2581}
2582
2583static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
2584				  u16 vid, const unsigned char *addr)
2585{
2586	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2587	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
2588	int flags = OFDPA_OP_FLAG_REMOVE;
2589
2590	if (!ofdpa_port_is_bridged(ofdpa_port))
2591		return -EINVAL;
2592
2593	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
2594}
2595
/* Move the port into a bridge.  The port's internal VLAN switches
 * from its own ifindex-keyed ID to the bridge's ID, so the untagged
 * VLAN (vid 0) must be removed before the swap and re-added after.
 */
static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
				  struct net_device *bridge)
{
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* drop the per-port ID, take a ref on the bridge's ID */
	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);

	ofdpa_port->bridge_dev = bridge;

	return ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
}
2620
/* Take the port out of its bridge: mirror of ofdpa_port_bridge_join.
 * Swap back to the port's own ifindex-keyed internal VLAN, then, if
 * the device is administratively up, re-enable forwarding since
 * bridge STP no longer drives this port.
 */
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
{
	int err;

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* drop the bridge's ID, take a ref on the per-port ID */
	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->bridge_dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	ofdpa_port->bridge_dev = NULL;

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	if (ofdpa_port->dev->flags & IFF_UP)
		err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}
2646
2647static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
2648				  struct net_device *master)
2649{
2650	int err;
2651
2652	ofdpa_port->bridge_dev = master;
2653
2654	err = ofdpa_port_fwd_disable(ofdpa_port, 0);
2655	if (err)
2656		return err;
2657	err = ofdpa_port_fwd_enable(ofdpa_port, 0);
2658
2659	return err;
2660}
2661
2662static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
2663				    struct net_device *master)
2664{
2665	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2666	int err = 0;
2667
2668	if (netif_is_bridge_master(master))
2669		err = ofdpa_port_bridge_join(ofdpa_port, master);
2670	else if (netif_is_ovs_master(master))
2671		err = ofdpa_port_ovs_changed(ofdpa_port, master);
2672	return err;
2673}
2674
2675static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
2676				      struct net_device *master)
2677{
2678	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2679	int err = 0;
2680
2681	if (ofdpa_port_is_bridged(ofdpa_port))
2682		err = ofdpa_port_bridge_leave(ofdpa_port);
2683	else if (ofdpa_port_is_ovsed(ofdpa_port))
2684		err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
2685	return err;
2686}
2687
2688static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
2689				   struct neighbour *n)
2690{
2691	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2692	int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
2693						    OFDPA_OP_FLAG_NOWAIT;
2694	__be32 ip_addr = *(__be32 *) n->primary_key;
2695
2696	return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
2697}
2698
2699static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
2700				    struct neighbour *n)
2701{
2702	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2703	int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
2704	__be32 ip_addr = *(__be32 *) n->primary_key;
2705
2706	return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
2707}
2708
2709static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
2710				       const unsigned char *addr,
2711				       __be16 vlan_id)
2712{
2713	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2714	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;
2715
2716	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2717	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
2718		return 0;
2719
2720	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
2721}
2722
2723static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
2724						    struct rocker *rocker)
2725{
2726	struct rocker_port *rocker_port;
2727
2728	rocker_port = rocker_port_dev_lower_find(dev, rocker);
2729	return rocker_port ? rocker_port->wpriv : NULL;
2730}
2731
/* Rocker world op: offload an IPv4 FIB entry.  No-op once the FIB
 * has been aborted, or when the route's first nexthop is not on a
 * rocker port.  On success the nexthop is marked RTNH_F_OFFLOAD.
 */
static int ofdpa_fib4_add(struct rocker *rocker,
			  const struct fib_entry_notifier_info *fen_info)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct fib_nh *nh;
	int err;

	if (ofdpa->fib_aborted)
		return 0;
	nh = fib_info_nh(fen_info->fi, 0);
	ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
	if (!ofdpa_port)
		return 0;
	err = ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
				  fen_info->dst_len, fen_info->fi,
				  fen_info->tb_id, 0);
	if (err)
		return err;
	nh->fib_nh_flags |= RTNH_F_OFFLOAD;
	return 0;
}
2754
/* Rocker world op: withdraw an offloaded IPv4 FIB entry.  No-op
 * after a FIB abort, or when the route's first nexthop is not on a
 * rocker port.  The nexthop's offload flag is cleared before the
 * hardware entry is removed.
 */
static int ofdpa_fib4_del(struct rocker *rocker,
			  const struct fib_entry_notifier_info *fen_info)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct fib_nh *nh;

	if (ofdpa->fib_aborted)
		return 0;
	nh = fib_info_nh(fen_info->fi, 0);
	ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
	if (!ofdpa_port)
		return 0;
	nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
				   fen_info->dst_len, fen_info->fi,
				   fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
}
2773
/* Rocker world op: FIB offload failed - flush every unicast-routing
 * flow entry, clear the offload flag on the affected nexthops, and
 * latch fib_aborted so no further routes are offloaded.
 */
static void ofdpa_fib4_abort(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct hlist_node *tmp;
	unsigned long flags;
	int bkt;

	/* abort is a one-shot latch */
	if (ofdpa->fib_aborted)
		return;

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
		struct fib_nh *nh;

		/* only unicast-routing entries carry routes */
		if (flow_entry->key.tbl_id !=
		    ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
			continue;
		nh = fib_info_nh(flow_entry->fi, 0);
		ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
		if (!ofdpa_port)
			continue;
		nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
		ofdpa_flow_tbl_del(ofdpa_port,
				   OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT,
				   flow_entry);
	}
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
	ofdpa->fib_aborted = true;
}
2805
/* Rocker "world" ops implementing the OF-DPA pipeline; registered
 * with the core rocker driver for ROCKER_PORT_MODE_OF_DPA ports.
 */
struct rocker_world_ops rocker_ofdpa_ops = {
	.kind = "ofdpa",
	.priv_size = sizeof(struct ofdpa),
	.port_priv_size = sizeof(struct ofdpa_port),
	.mode = ROCKER_PORT_MODE_OF_DPA,
	.init = ofdpa_init,
	.fini = ofdpa_fini,
	.port_pre_init = ofdpa_port_pre_init,
	.port_init = ofdpa_port_init,
	.port_fini = ofdpa_port_fini,
	.port_open = ofdpa_port_open,
	.port_stop = ofdpa_port_stop,
	.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
	.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
	.port_attr_bridge_flags_support_get = ofdpa_port_attr_bridge_flags_support_get,
	.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
	.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
	.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
	.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
	.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
	.port_master_linked = ofdpa_port_master_linked,
	.port_master_unlinked = ofdpa_port_master_unlinked,
	.port_neigh_update = ofdpa_port_neigh_update,
	.port_neigh_destroy = ofdpa_port_neigh_destroy,
	.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
	.fib4_add = ofdpa_fib4_add,
	.fib4_del = ofdpa_fib4_del,
	.fib4_abort = ofdpa_fib4_abort,
};
2835