// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_tcam.h"

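/* Central ACL state for a Spectrum instance: the flexible-key
 * infrastructure, the dummy FID used by ACL actions, a hashtable of
 * rulesets keyed by (block, chain, profile), the global rule list that is
 * polled for activity and the TCAM state.
 */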
struct mlxsw_sp_acl {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_afk *afk;
	struct mlxsw_sp_fid *dummy_fid;
	struct rhashtable ruleset_ht;
	struct list_head rules;
	struct mutex rules_lock; /* Protects rules list */
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
	} rule_activity_update;
	struct mlxsw_sp_acl_tcam tcam;
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}

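/* Rulesets are hashed by the flow block, chain index and profile ops that
 * created them. Each ruleset keeps its own hashtable of rules keyed by the
 * caller-provided cookie; ops-specific state lives in the trailing priv[]
 * area of both structures.
 */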
struct mlxsw_sp_acl_ruleset_ht_key {
	struct mlxsw_sp_flow_block *block;
	u32 chain_index;
	const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
	struct rhash_head ht_node; /* Member of acl HT */
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct rhashtable rule_ht;
	unsigned int ref_count;
	unsigned int min_prio;
	unsigned int max_prio;
	unsigned long priv[];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_rule {
	struct rhash_head ht_node; /* Member of rule HT */
	struct list_head list;
	unsigned long cookie; /* HT key */
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 last_used;
	u64 last_packets;
	u64 last_bytes;
	u64 last_drops;
	unsigned long priv[];
	/* priv has to be always the last item */
};

static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};

struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->acl->dummy_fid;
}

static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
	/* We hold a reference on ruleset ourselves */
	return ruleset->ref_count == 2;
}

int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_flow_block *block,
			      struct mlxsw_sp_flow_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
				 binding->mlxsw_sp_port, binding->ingress);
}

void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct mlxsw_sp_flow_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
			    binding->mlxsw_sp_port, binding->ingress);
}

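/* Bind the chain-0 ruleset to every port binding of the flow block; on
 * failure, unwind the bindings made so far and clear ruleset_zero.
 */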
static int
mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_ruleset *ruleset,
				struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_flow_block_binding *binding;
	int err;

	block->ruleset_zero = ruleset;
	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;

	return err;
}

static void
mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_ruleset *ruleset,
				  struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_flow_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;
}

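/* Allocate a ruleset together with its ops-private area, add it to the
 * TCAM through the profile ops and make it discoverable in the ruleset
 * hashtable. Lifetime is reference counted; the last put destroys it.
 */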
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block, u32 chain_index,
			    const struct mlxsw_sp_acl_profile_ops *ops,
			    struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.block = block;
	ruleset->ht_key.chain_index = chain_index;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
			       tmplt_elusage, &ruleset->min_prio,
			       &ruleset->max_prio);
	if (err)
		goto err_ops_ruleset_add;

	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_ht_insert;

	return ruleset;

err_ht_insert:
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}

static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}

static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
			      struct mlxsw_sp_flow_block *block, u32 chain_index,
			      const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;

	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.block = block;
	ht_key.chain_index = chain_index;
	ht_key.ops = ops;
	return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
				      mlxsw_sp_acl_ruleset_ht_params);
}

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block, u32 chain_index,
			    enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);
	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (!ruleset)
		return ERR_PTR(-ENOENT);
	return ruleset;
}

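/* Get a ruleset for the given block/chain/profile: reuse an existing one
 * (taking a reference) or create a new one if none exists yet.
 */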
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_flow_block *block, u32 chain_index,
			 enum mlxsw_sp_acl_profile profile,
			 struct mlxsw_afk_element_usage *tmplt_elusage)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops,
					   tmplt_elusage);
}

void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_group_id(ruleset->priv);
}

void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
				   unsigned int *p_min_prio,
				   unsigned int *p_max_prio)
{
	*p_min_prio = ruleset->min_prio;
	*p_max_prio = ruleset->max_prio;
}

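/* Allocate rule info. If the caller passed a pre-built flexible action
 * block (afa_block) it is used as-is; otherwise a new block is created
 * here and action_created marks it for destruction in rulei_destroy().
 */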
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
			  struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		return ERR_PTR(-ENOMEM);

	if (afa_block) {
		rulei->act_block = afa_block;
		return rulei;
	}

	rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	rulei->action_created = 1;
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	if (rulei->action_created)
		mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}

int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}

void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}

int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_continue(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				u16 group_id)
{
	return mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_terminate(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
				bool ingress,
				const struct flow_action_cookie *fa_cookie,
				struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_drop(rulei->act_block, ingress,
					   fa_cookie, extack);
}

int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_trap(rulei->act_block,
					   MLXSW_TRAP_ID_ACL0);
}

int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller wants to
		 * set forward to ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port, extack);
}

int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct mlxsw_sp_flow_block *block,
				  struct net_device *out_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_port *in_port;

	if (!list_is_singular(&block->binding_list)) {
		NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed");
		return -EOPNOTSUPP;
	}
	binding = list_first_entry(&block->binding_list,
				   struct mlxsw_sp_flow_block_binding, list);
	in_port = binding->mlxsw_sp_port;

	return mlxsw_afa_block_append_mirror(rulei->act_block,
					     in_port->local_port,
					     out_dev,
					     binding->ingress,
					     extack);
}

int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio,
				struct netlink_ext_ack *extack)
{
	u8 ethertype;

	if (action == FLOW_ACTION_VLAN_MANGLE) {
		switch (proto) {
		case ETH_P_8021Q:
			ethertype = 0;
			break;
		case ETH_P_8021AD:
			ethertype = 1;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
				proto);
			return -EINVAL;
		}

		return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
							  vid, prio, ethertype,
							  extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action");
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
		return -EINVAL;
	}
}

int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    u32 prio, struct netlink_ext_ack *extack)
{
	/* Even though both Linux and Spectrum switches support 16 priorities,
	 * spectrum_qdisc only processes the first eight priomap elements, and
	 * the DCB and PFC features are tied to 8 priorities as well. Therefore
	 * bounce attempts to prioritize packets to higher priorities.
	 */
	if (prio >= IEEE_8021QAZ_MAX_TCS) {
		NL_SET_ERR_MSG_MOD(extack, "Only priorities 0..7 are supported");
		return -EINVAL;
	}
	return mlxsw_afa_block_append_qos_switch_prio(rulei->act_block, prio,
						      extack);
}

enum mlxsw_sp_acl_mangle_field {
	MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD,
	MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP,
	MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN,
	MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT,
	MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT,
};

struct mlxsw_sp_acl_mangle_action {
	enum flow_action_mangle_base htype;
	/* Offset is u32-aligned. */
	u32 offset;
	/* Mask bits are unset for the modified field. */
	u32 mask;
	/* Shift required to extract the set value. */
	u32 shift;
	enum mlxsw_sp_acl_mangle_field field;
};

#define MLXSW_SP_ACL_MANGLE_ACTION(_htype, _offset, _mask, _shift, _field) \
	{								\
		.htype = _htype,					\
		.offset = _offset,					\
		.mask = _mask,						\
		.shift = _shift,					\
		.field = MLXSW_SP_ACL_MANGLE_FIELD_##_field,		\
	}

#define MLXSW_SP_ACL_MANGLE_ACTION_IP4(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP4,       \
				   _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_IP6(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP6,       \
				   _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_TCP(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_TCP, _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_UDP(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_UDP, _offset, _mask, _shift, _field)

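/* Mapping from the pedit (htype, offset, mask) triplet to the field being
 * rewritten. The mask has the bits of the modified field cleared; the
 * shift moves the new value down to bit 0 before it is handed to the
 * per-ASIC act_mangle_field callback.
 */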
static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = {
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff00ffff, 16, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff03ffff, 18, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xfffcffff, 16, IP_ECN),

	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf00fffff, 20, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf03fffff, 22, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xffcfffff, 20, IP_ECN),

	MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0x0000ffff, 16, IP_SPORT),
	MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0xffff0000, 0,  IP_DPORT),

	MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0x0000ffff, 16, IP_SPORT),
	MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0xffff0000, 0,  IP_DPORT),
};

static int
mlxsw_sp_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct mlxsw_sp_acl_mangle_action *mact,
				    u32 val, struct netlink_ext_ack *extack)
{
	switch (mact->field) {
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD:
		return mlxsw_afa_block_append_qos_dsfield(rulei->act_block,
							  val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP:
		return mlxsw_afa_block_append_qos_dscp(rulei->act_block,
						       val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN:
		return mlxsw_afa_block_append_qos_ecn(rulei->act_block,
						      val, extack);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp1_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_rule_info *rulei,
						struct mlxsw_sp_acl_mangle_action *mact,
						u32 val, struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack);
	if (err != -EOPNOTSUPP)
		return err;

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
	return err;
}

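/* The sp2 variant extends the common handler with rewrite of the L4
 * source and destination ports via mlxsw_afa_block_append_l4port();
 * everything else falls back to the common fields above.
 */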
static int mlxsw_sp2_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_rule_info *rulei,
						struct mlxsw_sp_acl_mangle_action *mact,
						u32 val, struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack);
	if (err != -EOPNOTSUPP)
		return err;

	switch (mact->field) {
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT:
		return mlxsw_afa_block_append_l4port(rulei->act_block, false, val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT:
		return mlxsw_afa_block_append_l4port(rulei->act_block, true, val, extack);
	default:
		break;
	}

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
	return err;
}

int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  enum flow_action_mangle_base htype,
				  u32 offset, u32 mask, u32 val,
				  struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops = mlxsw_sp->acl_rulei_ops;
	struct mlxsw_sp_acl_mangle_action *mact;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_acl_mangle_actions); ++i) {
		mact = &mlxsw_sp_acl_mangle_actions[i];
		if (mact->htype == htype &&
		    mact->offset == offset &&
		    mact->mask == mask) {
			val >>= mact->shift;
			return acl_rulei_ops->act_mangle_field(mlxsw_sp,
							       rulei, mact,
							       val, extack);
		}
	}

	NL_SET_ERR_MSG_MOD(extack, "Unknown mangle field");
	return -EINVAL;
}

int mlxsw_sp_acl_rulei_act_police(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  u32 index, u64 rate_bytes_ps,
				  u32 burst, struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_afa_block_append_police(rulei->act_block, index,
					    rate_bytes_ps, burst,
					    &rulei->policer_index, extack);
	if (err)
		return err;

	rulei->policer_index_valid = true;

	return 0;
}

int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_afa_block_append_counter(rulei->act_block,
					     &rulei->counter_index, extack);
	if (err)
		return err;
	rulei->counter_valid = true;
	return 0;
}

int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid, struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}

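/* A rule holds a reference on its ruleset for its whole lifetime and, like
 * the ruleset, carries an ops-private area at the end of the allocation.
 */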
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie,
			 struct mlxsw_afa_block *afa_block,
			 struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size,
		       GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, afa_block);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	return rule;

err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

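/* Program the rule into hardware via the profile ops, make it visible for
 * cookie lookups and, for a singular chain-0 ruleset, bind the ruleset
 * directly to the block's ports. Finally add the rule to the list polled
 * by the activity work.
 */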
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset)) {
		/* We only need ruleset with chain index 0, the implicit
		 * one, to be directly bound to device. The rest of the
		 * rulesets are bound by "Goto action set".
		 */
		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
		if (err)
			goto err_ruleset_block_bind;
	}

	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	block->rule_count++;
	block->ingress_blocker_rule_count += rule->rulei->ingress_bind_blocker;
	block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
	return 0;

err_ruleset_block_bind:
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}

void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;

	block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
	block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker;
	block->rule_count--;
	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_del(&rule->list);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset))
		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}

int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule *rule,
				     struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule_info *rulei;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	rulei->act_block = afa_block;

	return ops->rule_action_replace(mlxsw_sp, rule->priv, rule->rulei);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}

static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}

static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mutex_lock(&acl->rules_lock);
	list_for_each_entry(rule, &acl->rules, list) {
		err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
							rule);
		if (err)
			goto err_rule_update;
	}
	mutex_unlock(&acl->rules_lock);
	return 0;

err_rule_update:
	mutex_unlock(&acl->rules_lock);
	return err;
}

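/* Rule activity is polled periodically (every
 * MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS) from a delayed work that
 * reschedules itself.
 */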
static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
	struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
						rule_activity_update.dw.work);
	int err;

	err = mlxsw_sp_acl_rules_activity_update(acl);
	if (err)
		dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity\n");

	mlxsw_sp_acl_rule_activity_work_schedule(acl);
}

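/* Return the packet/byte/drop deltas accumulated since the previous call
 * and the last-used timestamp; the current hardware counters are cached
 * so the next call reports only new traffic.
 */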
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *drops,
				u64 *last_use,
				enum flow_action_hw_stats *used_hw_stats)
{
	enum mlxsw_sp_policer_type type = MLXSW_SP_POLICER_TYPE_SINGLE_RATE;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 current_packets = 0;
	u64 current_bytes = 0;
	u64 current_drops = 0;
	int err;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	if (rulei->counter_valid) {
		err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
						&current_packets,
						&current_bytes);
		if (err)
			return err;
		*used_hw_stats = FLOW_ACTION_HW_STATS_IMMEDIATE;
	}
	if (rulei->policer_index_valid) {
		err = mlxsw_sp_policer_drops_counter_get(mlxsw_sp, type,
							 rulei->policer_index,
							 &current_drops);
		if (err)
			return err;
	}
	*packets = current_packets - rule->last_packets;
	*bytes = current_bytes - rule->last_bytes;
	*drops = current_drops - rule->last_drops;
	*last_use = rule->last_used;

	rule->last_bytes = current_bytes;
	rule->last_packets = current_packets;
	rule->last_drops = current_drops;

	return 0;
}

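/* Set up the ACL subsystem: flexible-key infrastructure, the ruleset
 * hashtable, the dummy FID used by ACL actions, the TCAM and the periodic
 * rule activity work.
 */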
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_acl *acl;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
	acl = kzalloc(alloc_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp->afk_ops);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	acl->dummy_fid = fid;

	INIT_LIST_HEAD(&acl->rules);
	mutex_init(&acl->rules_lock);
	err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
	if (err)
		goto err_acl_ops_init;

	/* Create the delayed work for the rule activity_update */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rule_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	mutex_destroy(&acl->rules_lock);
	mlxsw_sp_fid_put(fid);
err_fid_get:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}

void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
	mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
	mutex_destroy(&acl->rules_lock);
	WARN_ON(!list_empty(&acl->rules));
	mlxsw_sp_fid_put(acl->dummy_fid);
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}

u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(mlxsw_sp,
							   &acl->tcam);
}

int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp,
							   &acl->tcam, val);
}

struct mlxsw_sp_acl_rulei_ops mlxsw_sp1_acl_rulei_ops = {
	.act_mangle_field = mlxsw_sp1_acl_rulei_act_mangle_field,
};

struct mlxsw_sp_acl_rulei_ops mlxsw_sp2_acl_rulei_ops = {
	.act_mangle_field = mlxsw_sp2_acl_rulei_act_mangle_field,
};