// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_app.h"

struct nfp_mask_id_table {
	struct hlist_node link;
	u32 hash_key;
	u32 ref_cnt;
	u8 mask_id;
};

struct nfp_fl_flow_table_cmp_arg {
	struct net_device *netdev;
	unsigned long cookie;
};

struct nfp_fl_stats_ctx_to_flow {
	struct rhash_head ht_node;
	u32 stats_cxt;
	struct nfp_fl_payload *flow;
};

static const struct rhashtable_params stats_ctx_table_params = {
	.key_offset	= offsetof(struct nfp_fl_stats_ctx_to_flow, stats_cxt),
	.head_offset	= offsetof(struct nfp_fl_stats_ctx_to_flow, ht_node),
	.key_len	= sizeof(u32),
};

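/* Return a stats context ID to the free-list ring buffer so it can be
 * handed out again by nfp_get_stats_entry(). Returns -ENOBUFS if the
 * ring is already full.
 */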
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	/* Check if buffer is full. */
	if (!CIRC_SPACE(ring->head, ring->tail,
			priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
			NFP_FL_STATS_ELEM_RS + 1))
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
	ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}

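/* Allocate a stats context ID. IDs that have never been handed out are
 * consumed first, cycling through the available memory units before the
 * unallocated count is decremented; once those are exhausted, previously
 * released IDs are reused from the free-list ring buffer. Returns
 * -ENOENT when no ID is available.
 */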
static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	u32 freed_stats_id, temp_stats_id;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	freed_stats_id = priv->stats_ring_size;
	/* Check for unallocated entries first. */
	if (priv->stats_ids.init_unalloc > 0) {
		*stats_context_id =
			FIELD_PREP(NFP_FL_STAT_ID_STAT,
				   priv->stats_ids.init_unalloc - 1) |
			FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
				   priv->active_mem_unit);

		if (++priv->active_mem_unit == priv->total_mem_units) {
			priv->stats_ids.init_unalloc--;
			priv->active_mem_unit = 0;
		}

		return 0;
	}

	/* Check if buffer is empty. */
	if (ring->head == ring->tail) {
		*stats_context_id = freed_stats_id;
		return -ENOENT;
	}

	memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
	*stats_context_id = temp_stats_id;
	memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
	ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}

/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
			   struct net_device *netdev)
{
	struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
	struct nfp_flower_priv *priv = app->priv;

	flower_cmp_arg.netdev = netdev;
	flower_cmp_arg.cookie = tc_flower_cookie;

	return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
				      nfp_flower_table_params);
}

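/* Process a flow stats control message from the firmware. The message
 * carries an array of per-context frames; each frame's packet and byte
 * counts are accumulated into the matching priv->stats entry under the
 * stats lock, and the last-used timestamp is refreshed.
 */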
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_stats_frame *stats;
	unsigned char *msg;
	u32 ctx_id;
	int i;

	msg = nfp_flower_cmsg_get_data(skb);

	spin_lock(&priv->stats_lock);
	for (i = 0; i < msg_len / sizeof(*stats); i++) {
		stats = (struct nfp_fl_stats_frame *)msg + i;
		ctx_id = be32_to_cpu(stats->stats_con_id);
		priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
		priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
		priv->stats[ctx_id].used = jiffies;
	}
	spin_unlock(&priv->stats_lock);
}

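/* Return a mask ID to its free-list ring buffer and record the release
 * time, so the ID is not handed out again before the reuse timeout
 * (NFP_FL_MASK_REUSE_TIME_NS) has elapsed.
 */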
static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->mask_ids.mask_id_free_list;
	/* Checking if buffer is full. */
	if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	priv->mask_ids.last_used[mask_id] = ktime_get();

	return 0;
}

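/* Allocate a mask ID. Never-used IDs are handed out first; after that
 * the oldest entry in the free-list ring is reused, but only once
 * NFP_FL_MASK_REUSE_TIME_NS has passed since it was released. Returns
 * -ENOENT if nothing suitable is available.
 */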
static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	ktime_t reuse_timeout;
	struct circ_buf *ring;
	u8 temp_id, freed_id;

	ring = &priv->mask_ids.mask_id_free_list;
	freed_id = NFP_FLOWER_MASK_ENTRY_RS - 1;
	/* Checking for unallocated entries first. */
	if (priv->mask_ids.init_unallocated > 0) {
		*mask_id = priv->mask_ids.init_unallocated;
		priv->mask_ids.init_unallocated--;
		return 0;
	}

	/* Checking if buffer is empty. */
	if (ring->head == ring->tail)
		goto err_not_found;

	memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
	*mask_id = temp_id;

	reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
				     NFP_FL_MASK_REUSE_TIME_NS);

	if (ktime_before(ktime_get(), reuse_timeout))
		goto err_not_found;

	memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->tail = (ring->tail + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	return 0;

err_not_found:
	*mask_id = freed_id;
	return -ENOENT;
}

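/* Allocate a mask ID for a new mask and insert it into the mask hash
 * table with a reference count of one. Returns the mask ID on success
 * or a negative errno on failure.
 */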
static int
nfp_add_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;
	u8 mask_id;

	if (nfp_mask_alloc(app, &mask_id))
		return -ENOENT;

	mask_entry = kmalloc(sizeof(*mask_entry), GFP_KERNEL);
	if (!mask_entry) {
		nfp_release_mask_id(app, mask_id);
		return -ENOMEM;
	}

	INIT_HLIST_NODE(&mask_entry->link);
	mask_entry->mask_id = mask_id;
	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);
	mask_entry->hash_key = hash_key;
	mask_entry->ref_cnt = 1;
	hash_add(priv->mask_table, &mask_entry->link, hash_key);

	return mask_id;
}

static struct nfp_mask_id_table *
nfp_search_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;

	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);

	hash_for_each_possible(priv->mask_table, mask_entry, link, hash_key)
		if (mask_entry->hash_key == hash_key)
			return mask_entry;

	return NULL;
}

static int
nfp_find_in_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return -ENOENT;

	mask_entry->ref_cnt++;

	/* Casting u8 to int for later use. */
	return mask_entry->mask_id;
}

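/* Look up the mask in the mask table, adding it (and allocating a mask
 * ID) if it is not already present. When a new entry is created,
 * NFP_FL_META_FLAG_MANAGE_MASK is set in *meta_flags. Returns false if
 * no mask ID could be obtained.
 */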
static bool
nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
		   u8 *meta_flags, u8 *mask_id)
{
	int id;

	id = nfp_find_in_mask_table(app, mask_data, mask_len);
	if (id < 0) {
		id = nfp_add_mask_table(app, mask_data, mask_len);
		if (id < 0)
			return false;
		*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}
	*mask_id = id;

	return true;
}

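/* Drop a reference on the given mask. When the reference count hits
 * zero the table entry is removed, the mask ID is released and, if
 * meta_flags is provided, NFP_FL_META_FLAG_MANAGE_MASK is set in it.
 * Returns false if the mask was not found in the table.
 */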
static bool
nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
		      u8 *meta_flags, u8 *mask_id)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return false;

	*mask_id = mask_entry->mask_id;
	mask_entry->ref_cnt--;
	if (!mask_entry->ref_cnt) {
		hash_del(&mask_entry->link);
		nfp_release_mask_id(app, *mask_id);
		kfree(mask_entry);
		if (meta_flags)
			*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}

	return true;
}

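/* Prepare the metadata for a new flow: allocate a stats context, map it
 * back to the flow in the stats context table, obtain a mask ID (skipped
 * for pre_tun rules) and bump the flow version. A duplicate flow or an
 * allocation failure unwinds the state built so far and returns an
 * error.
 */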
int nfp_compile_flow_metadata(struct nfp_app *app,
			      struct flow_cls_offload *flow,
			      struct nfp_fl_payload *nfp_flow,
			      struct net_device *netdev,
			      struct netlink_ext_ack *extack)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *check_entry;
	u8 new_mask_id;
	u32 stats_cxt;
	int err;

	err = nfp_get_stats_entry(app, &stats_cxt);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate new stats context");
		return err;
	}

	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
	nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
	nfp_flow->ingress_dev = netdev;

	ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
	if (!ctx_entry) {
		err = -ENOMEM;
		goto err_release_stats;
	}

	ctx_entry->stats_cxt = stats_cxt;
	ctx_entry->flow = nfp_flow;

	if (rhashtable_insert_fast(&priv->stats_ctx_table, &ctx_entry->ht_node,
				   stats_ctx_table_params)) {
		err = -ENOMEM;
		goto err_free_ctx_entry;
	}

	/* Do not allocate a mask-id for pre_tun_rules. These flows are used
	 * to configure the pre_tun table and are never actually sent to the
	 * firmware as an add-flow message, so allocating a mask-id here
	 * would cause the firmware's mask-id allocation to get out of sync.
	 */
	new_mask_id = 0;
	if (!nfp_flow->pre_tun_rule.dev &&
	    !nfp_check_mask_add(app, nfp_flow->mask_data,
				nfp_flow->meta.mask_len,
				&nfp_flow->meta.flags, &new_mask_id)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
		if (nfp_release_stats_entry(app, stats_cxt)) {
			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
			err = -EINVAL;
			goto err_remove_rhash;
		}
		err = -ENOENT;
		goto err_remove_rhash;
	}

	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;

	/* Update flow payload with mask ids. */
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
	priv->stats[stats_cxt].pkts = 0;
	priv->stats[stats_cxt].bytes = 0;
	priv->stats[stats_cxt].used = jiffies;

	check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (check_entry) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
		if (nfp_release_stats_entry(app, stats_cxt)) {
			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
			err = -EINVAL;
			goto err_remove_mask;
		}

		if (!nfp_flow->pre_tun_rule.dev &&
		    !nfp_check_mask_remove(app, nfp_flow->mask_data,
					   nfp_flow->meta.mask_len,
					   NULL, &new_mask_id)) {
			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
			err = -EINVAL;
			goto err_remove_mask;
		}

		err = -EEXIST;
		goto err_remove_mask;
	}

	return 0;

err_remove_mask:
	if (!nfp_flow->pre_tun_rule.dev)
		nfp_check_mask_remove(app, nfp_flow->mask_data,
				      nfp_flow->meta.mask_len,
				      NULL, &new_mask_id);
err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
					    &ctx_entry->ht_node,
					    stats_ctx_table_params));
err_free_ctx_entry:
	kfree(ctx_entry);
err_release_stats:
	nfp_release_stats_entry(app, stats_cxt);

	return err;
}

void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
				struct nfp_fl_payload *nfp_flow)
{
	nfp_flow->meta.flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;
}

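/* Release the metadata held by a flow: clear the mask-managed flag and
 * bump the flow version via __nfp_modify_flow_metadata(), drop the mask
 * reference (except for pre_tun rules), remove the stats-context-to-flow
 * mapping and return the stats context ID to the free list.
 */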
int nfp_modify_flow_metadata(struct nfp_app *app,
			     struct nfp_fl_payload *nfp_flow)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;
	u8 new_mask_id = 0;
	u32 temp_ctx_id;

	__nfp_modify_flow_metadata(priv, nfp_flow);

	if (!nfp_flow->pre_tun_rule.dev)
		nfp_check_mask_remove(app, nfp_flow->mask_data,
				      nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
				      &new_mask_id);

	/* Update flow payload with mask ids. */
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;

	/* Release the stats ctx id and ctx to flow table entry. */
	temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &temp_ctx_id,
					   stats_ctx_table_params);
	if (!ctx_entry)
		return -ENOENT;

	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
					    &ctx_entry->ht_node,
					    stats_ctx_table_params));
	kfree(ctx_entry);

	return nfp_release_stats_entry(app, temp_ctx_id);
}

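/* Look up the flow payload associated with a stats context ID, or return
 * NULL if no flow is currently mapped to that context.
 */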
struct nfp_fl_payload *
nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;

	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id,
					   stats_ctx_table_params);
	if (!ctx_entry)
		return NULL;

	return ctx_entry->flow;
}

static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
			    const void *obj)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
	const struct nfp_fl_payload *flow_entry = obj;

	if (flow_entry->ingress_dev == cmp_arg->netdev)
		return flow_entry->tc_flower_cookie != cmp_arg->cookie;

	return 1;
}

static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_payload *flower_entry = data;

	return jhash2((u32 *)&flower_entry->tc_flower_cookie,
		      sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
		      seed);
}

static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;

	return jhash2((u32 *)&cmp_arg->cookie,
		      sizeof(cmp_arg->cookie) / sizeof(u32), seed);
}

const struct rhashtable_params nfp_flower_table_params = {
	.head_offset		= offsetof(struct nfp_fl_payload, fl_node),
	.hashfn			= nfp_fl_key_hashfn,
	.obj_cmpfn		= nfp_fl_obj_cmpfn,
	.obj_hashfn		= nfp_fl_obj_hashfn,
	.automatic_shrinking	= true,
};

const struct rhashtable_params merge_table_params = {
	.key_offset	= offsetof(struct nfp_merge_info, parent_ctx),
	.head_offset	= offsetof(struct nfp_merge_info, ht_node),
	.key_len	= sizeof(u64),
};

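/* Set up the flower metadata state: the flow, stats context and merge
 * rhashtables, the mask ID and stats ID free-list ring buffers, the mask
 * reuse timestamps, and the per-context stats array sized for
 * host_ctx_count contexts spread across host_num_mems memory units.
 */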
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
			     unsigned int host_num_mems)
{
	struct nfp_flower_priv *priv = app->priv;
	int err, stats_size;

	hash_init(priv->mask_table);

	err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
	if (err)
		return err;

	err = rhashtable_init(&priv->stats_ctx_table, &stats_ctx_table_params);
	if (err)
		goto err_free_flow_table;

	err = rhashtable_init(&priv->merge_table, &merge_table_params);
	if (err)
		goto err_free_stats_ctx_table;

	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));

	/* Init ring buffer and unallocated mask_ids. */
	priv->mask_ids.mask_id_free_list.buf =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
	if (!priv->mask_ids.mask_id_free_list.buf)
		goto err_free_merge_table;

	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;

	/* Init timestamps for mask id. */
	priv->mask_ids.last_used =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
	if (!priv->mask_ids.last_used)
		goto err_free_mask_id;

	/* Init ring buffer and unallocated stats_ids. */
	priv->stats_ids.free_list.buf =
		vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
				   priv->stats_ring_size));
	if (!priv->stats_ids.free_list.buf)
		goto err_free_last_used;

	priv->stats_ids.init_unalloc = div_u64(host_ctx_count, host_num_mems);

	stats_size = FIELD_PREP(NFP_FL_STAT_ID_STAT, host_ctx_count) |
		     FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, host_num_mems - 1);
	priv->stats = kvmalloc_array(stats_size, sizeof(struct nfp_fl_stats),
				     GFP_KERNEL);
	if (!priv->stats)
		goto err_free_ring_buf;

	spin_lock_init(&priv->stats_lock);

	return 0;

err_free_ring_buf:
	vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
	kfree(priv->mask_ids.last_used);
err_free_mask_id:
	kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_merge_table:
	rhashtable_destroy(&priv->merge_table);
err_free_stats_ctx_table:
	rhashtable_destroy(&priv->stats_ctx_table);
err_free_flow_table:
	rhashtable_destroy(&priv->flow_table);
	return -ENOMEM;
}

void nfp_flower_metadata_cleanup(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv)
		return;

	rhashtable_free_and_destroy(&priv->flow_table,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&priv->stats_ctx_table,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&priv->merge_table,
				    nfp_check_rhashtable_empty, NULL);
	kvfree(priv->stats);
	kfree(priv->mask_ids.mask_id_free_list.buf);
	kfree(priv->mask_ids.last_used);
	vfree(priv->stats_ids.free_list.buf);
}