1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
3
4#include <linux/kernel.h>
5#include <linux/errno.h>
6#include <linux/netdevice.h>
7#include <net/pkt_cls.h>
8#include <net/red.h>
9
10#include "spectrum.h"
11#include "spectrum_span.h"
12#include "reg.h"
13
/* PRIO/ETS band 0 maps to the numerically highest hardware traffic class
 * and vice versa: band b <-> tclass (IEEE_8021QAZ_MAX_TCS - 1 - b).
 */
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
/* Class references ("child") are the 1-based band index. */
#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
	MLXSW_SP_PRIO_BAND_TO_TCLASS((child - 1))
17
/* Qdisc kinds that this offload code distinguishes between. */
enum mlxsw_sp_qdisc_type {
	MLXSW_SP_QDISC_NO_QDISC,
	MLXSW_SP_QDISC_RED,
	MLXSW_SP_QDISC_PRIO,
	MLXSW_SP_QDISC_ETS,
	MLXSW_SP_QDISC_TBF,
	MLXSW_SP_QDISC_FIFO,
};
26
27struct mlxsw_sp_qdisc;
28
/* Callbacks and metadata describing how one qdisc kind (RED, TBF, PRIO/ETS,
 * FIFO) is offloaded. All callbacks receive the port and the qdisc node.
 */
struct mlxsw_sp_qdisc_ops {
	enum mlxsw_sp_qdisc_type type;
	/* Validate parameters before any hardware state is touched. */
	int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
			    void *params);
	/* Program the configuration into hardware; used both for initial
	 * offload and for reconfiguration of an existing qdisc.
	 */
	int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
	int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr);
	int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  void *xstats_ptr);
	/* Re-baseline the stats counters so future readouts report deltas
	 * starting from the current hardware values.
	 */
	void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	/* unoffload - to be used for a qdisc that stops being offloaded without
	 * being destroyed.
	 */
	void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
	/* Map a class reference (TC_H_MIN of @parent) to the child slot it
	 * denotes.
	 */
	struct mlxsw_sp_qdisc *(*find_class)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
					     u32 parent);
	/* Number of child slots to allocate when this qdisc is created. */
	unsigned int num_classes;

	/* Optional: resolve the priority bitmap / traffic class for @child.
	 * When a callback is absent, the resolution continues at this
	 * qdisc's own parent (see mlxsw_sp_qdisc_get_prio_bitmap()).
	 */
	u8 (*get_prio_bitmap)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct mlxsw_sp_qdisc *child);
	int (*get_tclass_num)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct mlxsw_sp_qdisc *child);
};
59
/* Resolved per-band configuration of an offloaded PRIO/ETS qdisc. */
struct mlxsw_sp_qdisc_ets_band {
	u8 prio_bitmap;		/* Bitmap of priorities feeding this band. */
	int tclass_num;		/* HW traffic class backing this band. */
};

struct mlxsw_sp_qdisc_ets_data {
	struct mlxsw_sp_qdisc_ets_band bands[IEEE_8021QAZ_MAX_TCS];
};
68
/* One node of the offloaded qdisc tree. A node whose ops is NULL is an
 * empty slot with nothing offloaded at that location.
 */
struct mlxsw_sp_qdisc {
	u32 handle;		/* TC handle; TC_H_UNSPEC when unused. */
	union {
		struct red_stats red;
	} xstats_base;		/* Baselines for extended-stats deltas. */
	struct mlxsw_sp_qdisc_stats {
		u64 tx_bytes;
		u64 tx_packets;
		u64 drops;
		u64 overlimits;
		u64 backlog;	/* Kept in cells, not bytes. */
	} stats_base;		/* Baselines for regular-stats deltas. */

	union {
		struct mlxsw_sp_qdisc_ets_data *ets_data;
	};

	struct mlxsw_sp_qdisc_ops *ops;
	struct mlxsw_sp_qdisc *parent;	/* NULL only for the root qdisc. */
	struct mlxsw_sp_qdisc *qdiscs;	/* Array of num_classes children. */
	unsigned int num_classes;
};
91
/* Per-port qdisc offload state, hanging off mlxsw_sp_port->qdisc. */
struct mlxsw_sp_qdisc_state {
	struct mlxsw_sp_qdisc root_qdisc;	/* Root of the offload tree. */

	/* When a PRIO or ETS are added, the invisible FIFOs in their bands are
	 * created first. When notifications for these FIFOs arrive, it is not
	 * known what qdisc their parent handle refers to. It could be a
	 * newly-created PRIO that will replace the currently-offloaded one, or
	 * it could be e.g. a RED that will be attached below it.
	 *
	 * As the notifications start to arrive, use them to note what the
	 * future parent handle is, and keep track of which child FIFOs were
	 * seen. Then when the parent is known, retroactively offload those
	 * FIFOs.
	 */
	u32 future_handle;
	bool future_fifos[IEEE_8021QAZ_MAX_TCS];
	struct mutex lock; /* Protects qdisc state. */
};
110
111static bool
112mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle)
113{
114	return mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->handle == handle;
115}
116
117static struct mlxsw_sp_qdisc *
118mlxsw_sp_qdisc_walk(struct mlxsw_sp_qdisc *qdisc,
119		    struct mlxsw_sp_qdisc *(*pre)(struct mlxsw_sp_qdisc *,
120						  void *),
121		    void *data)
122{
123	struct mlxsw_sp_qdisc *tmp;
124	unsigned int i;
125
126	if (pre) {
127		tmp = pre(qdisc, data);
128		if (tmp)
129			return tmp;
130	}
131
132	if (qdisc->ops) {
133		for (i = 0; i < qdisc->num_classes; i++) {
134			tmp = &qdisc->qdiscs[i];
135			if (qdisc->ops) {
136				tmp = mlxsw_sp_qdisc_walk(tmp, pre, data);
137				if (tmp)
138					return tmp;
139			}
140		}
141	}
142
143	return NULL;
144}
145
146static struct mlxsw_sp_qdisc *
147mlxsw_sp_qdisc_walk_cb_find(struct mlxsw_sp_qdisc *qdisc, void *data)
148{
149	u32 parent = *(u32 *)data;
150
151	if (qdisc->ops && TC_H_MAJ(qdisc->handle) == TC_H_MAJ(parent)) {
152		if (qdisc->ops->find_class)
153			return qdisc->ops->find_class(qdisc, parent);
154	}
155
156	return NULL;
157}
158
159static struct mlxsw_sp_qdisc *
160mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent)
161{
162	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
163
164	if (!qdisc_state)
165		return NULL;
166	if (parent == TC_H_ROOT)
167		return &qdisc_state->root_qdisc;
168	return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
169				   mlxsw_sp_qdisc_walk_cb_find, &parent);
170}
171
172static struct mlxsw_sp_qdisc *
173mlxsw_sp_qdisc_walk_cb_find_by_handle(struct mlxsw_sp_qdisc *qdisc, void *data)
174{
175	u32 handle = *(u32 *)data;
176
177	if (qdisc->ops && qdisc->handle == handle)
178		return qdisc;
179	return NULL;
180}
181
182static struct mlxsw_sp_qdisc *
183mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
184{
185	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
186
187	if (!qdisc_state)
188		return NULL;
189	return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
190				   mlxsw_sp_qdisc_walk_cb_find_by_handle,
191				   &handle);
192}
193
194static void
195mlxsw_sp_qdisc_reduce_parent_backlog(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
196{
197	struct mlxsw_sp_qdisc *tmp;
198
199	for (tmp = mlxsw_sp_qdisc->parent; tmp; tmp = tmp->parent)
200		tmp->stats_base.backlog -= mlxsw_sp_qdisc->stats_base.backlog;
201}
202
203static u8 mlxsw_sp_qdisc_get_prio_bitmap(struct mlxsw_sp_port *mlxsw_sp_port,
204					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
205{
206	struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
207
208	if (!parent)
209		return 0xff;
210	if (!parent->ops->get_prio_bitmap)
211		return mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port, parent);
212	return parent->ops->get_prio_bitmap(parent, mlxsw_sp_qdisc);
213}
214
215#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
216
217static int mlxsw_sp_qdisc_get_tclass_num(struct mlxsw_sp_port *mlxsw_sp_port,
218					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
219{
220	struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
221
222	if (!parent)
223		return MLXSW_SP_PORT_DEFAULT_TCLASS;
224	if (!parent->ops->get_tclass_num)
225		return mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, parent);
226	return parent->ops->get_tclass_num(parent, mlxsw_sp_qdisc);
227}
228
/* Unoffload and tear down a qdisc subtree. Safe to call on NULL and on
 * empty slots. Returns the headroom-restore error if one occurred,
 * otherwise the ops->destroy error.
 */
static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
	int err_hdroom = 0;
	int err = 0;
	int i;

	if (!mlxsw_sp_qdisc)
		return 0;

	/* Removing the root hands buffer headroom management back to DCB
	 * mode. This happens before the ops check, i.e. even for an empty
	 * root slot.
	 */
	if (root_qdisc == mlxsw_sp_qdisc) {
		struct mlxsw_sp_hdroom hdroom = *mlxsw_sp_port->hdroom;

		hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
		err_hdroom = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	}

	/* Nothing offloaded at this location. */
	if (!mlxsw_sp_qdisc->ops)
		return 0;

	/* Tear down depth-first: children before this qdisc itself. */
	for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++)
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
				       &mlxsw_sp_qdisc->qdiscs[i]);
	mlxsw_sp_qdisc_reduce_parent_backlog(mlxsw_sp_qdisc);
	if (mlxsw_sp_qdisc->ops->destroy)
		err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
						   mlxsw_sp_qdisc);
	if (mlxsw_sp_qdisc->ops->clean_stats)
		mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);

	/* Mark the slot empty and release the child array. */
	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
	mlxsw_sp_qdisc->ops = NULL;
	mlxsw_sp_qdisc->num_classes = 0;
	kfree(mlxsw_sp_qdisc->qdiscs);
	mlxsw_sp_qdisc->qdiscs = NULL;
	return err_hdroom ?: err;
}
271
/* Restrictions accumulated while walking down the qdisc tree. Each flag
 * forbids offloading the corresponding qdisc type deeper in the subtree.
 */
struct mlxsw_sp_qdisc_tree_validate {
	bool forbid_ets;
	bool forbid_root_tbf;
	bool forbid_tbf;
	bool forbid_red;
};
278
279static int
280__mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
281			       struct mlxsw_sp_qdisc_tree_validate validate);
282
283static int
284mlxsw_sp_qdisc_tree_validate_children(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
285				      struct mlxsw_sp_qdisc_tree_validate validate)
286{
287	unsigned int i;
288	int err;
289
290	for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
291		err = __mlxsw_sp_qdisc_tree_validate(&mlxsw_sp_qdisc->qdiscs[i],
292						     validate);
293		if (err)
294			return err;
295	}
296
297	return 0;
298}
299
/* Check that the tree rooted at this qdisc keeps a shape the hardware can
 * offload. @validate is passed by value, so the restrictions a qdisc adds
 * apply only to its own subtree, not to siblings.
 */
static int
__mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			       struct mlxsw_sp_qdisc_tree_validate validate)
{
	if (!mlxsw_sp_qdisc->ops)
		return 0;

	switch (mlxsw_sp_qdisc->ops->type) {
	case MLXSW_SP_QDISC_FIFO:
		/* FIFOs impose no restrictions of their own. */
		break;
	case MLXSW_SP_QDISC_RED:
		/* At most one RED on any root-to-leaf path. */
		if (validate.forbid_red)
			return -EINVAL;
		validate.forbid_red = true;
		validate.forbid_root_tbf = true;
		validate.forbid_ets = true;
		break;
	case MLXSW_SP_QDISC_TBF:
		if (validate.forbid_root_tbf) {
			if (validate.forbid_tbf)
				return -EINVAL;
			/* This is a TC TBF. */
			validate.forbid_tbf = true;
			validate.forbid_ets = true;
		} else {
			/* This is root TBF. */
			validate.forbid_root_tbf = true;
		}
		break;
	case MLXSW_SP_QDISC_PRIO:
	case MLXSW_SP_QDISC_ETS:
		/* PRIO/ETS may not appear again deeper in this subtree. */
		if (validate.forbid_ets)
			return -EINVAL;
		validate.forbid_root_tbf = true;
		validate.forbid_ets = true;
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return mlxsw_sp_qdisc_tree_validate_children(mlxsw_sp_qdisc, validate);
}
343
344static int mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_port *mlxsw_sp_port)
345{
346	struct mlxsw_sp_qdisc_tree_validate validate = {};
347	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
348
349	mlxsw_sp_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
350	return __mlxsw_sp_qdisc_tree_validate(mlxsw_sp_qdisc, validate);
351}
352
/* Offload a brand-new qdisc into an empty slot.
 *
 * Validates parameters, allocates child slots, switches the port headroom
 * to TC mode when the new qdisc is the root, validates the resulting tree
 * shape, and finally programs the hardware through ops->replace. Every
 * step is rolled back on failure.
 */
static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port,
				 u32 handle,
				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				 struct mlxsw_sp_qdisc_ops *ops, void *params)
{
	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
	struct mlxsw_sp_hdroom orig_hdroom;
	unsigned int i;
	int err;

	err = ops->check_params(mlxsw_sp_port, params);
	if (err)
		return err;

	/* Allocate the child slot array and link each child back to us. */
	if (ops->num_classes) {
		mlxsw_sp_qdisc->qdiscs = kcalloc(ops->num_classes,
						 sizeof(*mlxsw_sp_qdisc->qdiscs),
						 GFP_KERNEL);
		if (!mlxsw_sp_qdisc->qdiscs)
			return -ENOMEM;

		for (i = 0; i < ops->num_classes; i++)
			mlxsw_sp_qdisc->qdiscs[i].parent = mlxsw_sp_qdisc;
	}

	/* A root qdisc takes over headroom management from DCB. Keep the
	 * original configuration around for rollback.
	 */
	orig_hdroom = *mlxsw_sp_port->hdroom;
	if (root_qdisc == mlxsw_sp_qdisc) {
		struct mlxsw_sp_hdroom hdroom = orig_hdroom;

		hdroom.mode = MLXSW_SP_HDROOM_MODE_TC;
		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

		err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
		if (err)
			goto err_hdroom_configure;
	}

	/* Publish ops/handle before validation, as the tree walk only
	 * descends into nodes that have ops set.
	 */
	mlxsw_sp_qdisc->num_classes = ops->num_classes;
	mlxsw_sp_qdisc->ops = ops;
	mlxsw_sp_qdisc->handle = handle;
	err = mlxsw_sp_qdisc_tree_validate(mlxsw_sp_port);
	if (err)
		goto err_replace;

	err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
	if (err)
		goto err_replace;

	return 0;

err_replace:
	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
	mlxsw_sp_qdisc->ops = NULL;
	mlxsw_sp_qdisc->num_classes = 0;
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
err_hdroom_configure:
	kfree(mlxsw_sp_qdisc->qdiscs);
	mlxsw_sp_qdisc->qdiscs = NULL;
	return err;
}
415
/* Reconfigure an already-offloaded qdisc in place. On any failure the
 * qdisc is unoffloaded and destroyed rather than left half-configured.
 */
static int
mlxsw_sp_qdisc_change(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params)
{
	struct mlxsw_sp_qdisc_ops *ops = mlxsw_sp_qdisc->ops;
	int err;

	err = ops->check_params(mlxsw_sp_port, params);
	if (err)
		goto unoffload;

	err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
	if (err)
		goto unoffload;

	/* Check if the Qdisc changed. That includes a situation where an
	 * invisible Qdisc replaces another one, or is being added for the
	 * first time.
	 */
	if (mlxsw_sp_qdisc->handle != handle) {
		if (ops->clean_stats)
			ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
	}

	mlxsw_sp_qdisc->handle = handle;
	return 0;

unoffload:
	/* Give the backlog back to the software qdisc before tearing down. */
	if (ops->unoffload)
		ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);

	mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	return err;
}
450
451static int
452mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
453		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
454		       struct mlxsw_sp_qdisc_ops *ops, void *params)
455{
456	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
457		/* In case this location contained a different qdisc of the
458		 * same type we can override the old qdisc configuration.
459		 * Otherwise, we need to remove the old qdisc before setting the
460		 * new one.
461		 */
462		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
463
464	if (!mlxsw_sp_qdisc->ops)
465		return mlxsw_sp_qdisc_create(mlxsw_sp_port, handle,
466					     mlxsw_sp_qdisc, ops, params);
467	else
468		return mlxsw_sp_qdisc_change(mlxsw_sp_port, handle,
469					     mlxsw_sp_qdisc, params);
470}
471
472static int
473mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
474			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
475			 struct tc_qopt_offload_stats *stats_ptr)
476{
477	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
478	    mlxsw_sp_qdisc->ops->get_stats)
479		return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
480						      mlxsw_sp_qdisc,
481						      stats_ptr);
482
483	return -EOPNOTSUPP;
484}
485
486static int
487mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
488			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
489			  void *xstats_ptr)
490{
491	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
492	    mlxsw_sp_qdisc->ops->get_xstats)
493		return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
494						      mlxsw_sp_qdisc,
495						      xstats_ptr);
496
497	return -EOPNOTSUPP;
498}
499
500static u64
501mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
502{
503	return xstats->backlog[tclass_num] +
504	       xstats->backlog[tclass_num + 8];
505}
506
507static u64
508mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
509{
510	return xstats->tail_drop[tclass_num] +
511	       xstats->tail_drop[tclass_num + 8];
512}
513
514static void
515mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
516				       u8 prio_bitmap, u64 *tx_packets,
517				       u64 *tx_bytes)
518{
519	int i;
520
521	*tx_packets = 0;
522	*tx_bytes = 0;
523	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
524		if (prio_bitmap & BIT(i)) {
525			*tx_packets += xstats->tx_packets[i];
526			*tx_bytes += xstats->tx_bytes[i];
527		}
528	}
529}
530
531static void
532mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
533				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
534				u64 *p_tx_bytes, u64 *p_tx_packets,
535				u64 *p_drops, u64 *p_backlog)
536{
537	struct mlxsw_sp_port_xstats *xstats;
538	u64 tx_bytes, tx_packets;
539	u8 prio_bitmap;
540	int tclass_num;
541
542	prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
543						     mlxsw_sp_qdisc);
544	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
545						   mlxsw_sp_qdisc);
546	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
547	mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
548					       &tx_packets, &tx_bytes);
549
550	*p_tx_packets += tx_packets;
551	*p_tx_bytes += tx_bytes;
552	*p_drops += xstats->wred_drop[tclass_num] +
553		    mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
554	*p_backlog += mlxsw_sp_xstats_backlog(xstats, tclass_num);
555}
556
557static void
558mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp,
559			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
560			    u64 tx_bytes, u64 tx_packets,
561			    u64 drops, u64 backlog,
562			    struct tc_qopt_offload_stats *stats_ptr)
563{
564	struct mlxsw_sp_qdisc_stats *stats_base = &mlxsw_sp_qdisc->stats_base;
565
566	tx_bytes -= stats_base->tx_bytes;
567	tx_packets -= stats_base->tx_packets;
568	drops -= stats_base->drops;
569	backlog -= stats_base->backlog;
570
571	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
572	stats_ptr->qstats->drops += drops;
573	stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);
574
575	stats_base->backlog += backlog;
576	stats_base->drops += drops;
577	stats_base->tx_bytes += tx_bytes;
578	stats_base->tx_packets += tx_packets;
579}
580
581static void
582mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
583			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
584			    struct tc_qopt_offload_stats *stats_ptr)
585{
586	u64 tx_packets = 0;
587	u64 tx_bytes = 0;
588	u64 backlog = 0;
589	u64 drops = 0;
590
591	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
592					&tx_bytes, &tx_packets,
593					&drops, &backlog);
594	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
595				    tx_bytes, tx_packets, drops, backlog,
596				    stats_ptr);
597}
598
/* Enable WRED/ECN congestion handling on one traffic class: first program
 * the profile thresholds (CWTP register), then bind the profile to the
 * class and turn the mechanisms on (CWTPM register). @min and @max are in
 * cells (converted by the caller, see mlxsw_sp_qdisc_red_replace()) and
 * are rounded up to the minimum value the register supports.
 */
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				  int tclass_num, u32 min, u32 max,
				  u32 probability, bool is_wred, bool is_ecn)
{
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
	char cwtp_cmd[MLXSW_REG_CWTP_LEN];
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
	mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
				    roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
				    roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
				    probability);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
	if (err)
		return err;

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTP_DEFAULT_PROFILE, is_wred, is_ecn);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
624
625static int
626mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
627				   int tclass_num)
628{
629	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
630	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
631
632	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
633			     MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
634	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
635}
636
/* Reset the RED stats and xstats baselines to the current HW counter
 * values, so that subsequent readouts report deltas starting from zero.
 */
static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *red_base;
	u8 prio_bitmap;
	int tclass_num;

	prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
						     mlxsw_sp_qdisc);
	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);
	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;
	red_base = &mlxsw_sp_qdisc->xstats_base.red;

	/* Snapshot current counters as the new baselines. */
	mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
					       &stats_base->tx_packets,
					       &stats_base->tx_bytes);
	red_base->prob_mark = xstats->tc_ecn[tclass_num];
	red_base->prob_drop = xstats->wred_drop[tclass_num];
	red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);

	/* Overlimits are WRED drops plus ECN marks; drops are WRED drops
	 * plus tail drops (matching mlxsw_sp_qdisc_get_red_stats()).
	 */
	stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
	stats_base->drops = red_base->prob_drop + red_base->pdrop;

	stats_base->backlog = 0;
}
667
/* Tear down RED offload: disable congestion handling on the backing
 * traffic class.
 */
static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	int tclass_num;

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);
	return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
}
677
678static int
679mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
680				void *params)
681{
682	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
683	struct tc_red_qopt_offload_params *p = params;
684
685	if (p->min > p->max) {
686		dev_err(mlxsw_sp->bus_info->dev,
687			"spectrum: RED: min %u is bigger then max %u\n", p->min,
688			p->max);
689		return -EINVAL;
690	}
691	if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
692					GUARANTEED_SHARED_BUFFER)) {
693		dev_err(mlxsw_sp->bus_info->dev,
694			"spectrum: RED: max value %u is too big\n", p->max);
695		return -EINVAL;
696	}
697	if (p->min == 0 || p->max == 0) {
698		dev_err(mlxsw_sp->bus_info->dev,
699			"spectrum: RED: 0 value is illegal for min and max\n");
700		return -EINVAL;
701	}
702	return 0;
703}
704
705static int
706mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
707				   u32 handle, unsigned int band,
708				   struct mlxsw_sp_qdisc *child_qdisc);
709static void
710mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
711				 u32 handle);
712
/* Program a RED configuration into hardware. Also adopts any invisible
 * child FIFO whose notification arrived before this RED was known.
 */
static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct tc_red_qopt_offload_params *p = params;
	int tclass_num;
	u32 min, max;
	u64 prob;
	int err;

	err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0,
						 &mlxsw_sp_qdisc->qdiscs[0]);
	if (err)
		return err;
	mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);

	/* calculate probability in percentage */
	/* p->probability is a fixed-point fraction scaled by 2^32; the two
	 * DIV_ROUND_UP(..., 1 << 16) steps together divide by 2^32 (a single
	 * division would overflow the 1 << 32 divisor on 32-bit), leaving a
	 * percentage after the multiplication by 100.
	 */
	prob = p->probability;
	prob *= 100;
	prob = DIV_ROUND_UP(prob, 1 << 16);
	prob = DIV_ROUND_UP(prob, 1 << 16);
	/* Thresholds are configured in cells, not bytes. */
	min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
	max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
	return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num,
						 min, max, prob,
						 !p->is_nodrop, p->is_ecn);
}
745
746static void
747mlxsw_sp_qdisc_leaf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
748			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
749			      struct gnet_stats_queue *qstats)
750{
751	u64 backlog;
752
753	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
754				       mlxsw_sp_qdisc->stats_base.backlog);
755	qstats->backlog -= backlog;
756	mlxsw_sp_qdisc->stats_base.backlog = 0;
757}
758
759static void
760mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
761			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
762			     void *params)
763{
764	struct tc_red_qopt_offload_params *p = params;
765
766	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
767}
768
769static int
770mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
771			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
772			      void *xstats_ptr)
773{
774	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
775	struct mlxsw_sp_port_xstats *xstats;
776	struct red_stats *res = xstats_ptr;
777	int early_drops, marks, pdrops;
778	int tclass_num;
779
780	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
781						   mlxsw_sp_qdisc);
782	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
783
784	early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
785	marks = xstats->tc_ecn[tclass_num] - xstats_base->prob_mark;
786	pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
787		 xstats_base->pdrop;
788
789	res->pdrop += pdrops;
790	res->prob_drop += early_drops;
791	res->prob_mark += marks;
792
793	xstats_base->pdrop += pdrops;
794	xstats_base->prob_drop += early_drops;
795	xstats_base->prob_mark += marks;
796	return 0;
797}
798
799static int
800mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
801			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
802			     struct tc_qopt_offload_stats *stats_ptr)
803{
804	struct mlxsw_sp_qdisc_stats *stats_base;
805	struct mlxsw_sp_port_xstats *xstats;
806	u64 overlimits;
807	int tclass_num;
808
809	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
810						   mlxsw_sp_qdisc);
811	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
812	stats_base = &mlxsw_sp_qdisc->stats_base;
813
814	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
815	overlimits = xstats->wred_drop[tclass_num] +
816		     xstats->tc_ecn[tclass_num] - stats_base->overlimits;
817
818	stats_ptr->qstats->overlimits += overlimits;
819	stats_base->overlimits += overlimits;
820
821	return 0;
822}
823
/* find_class callback shared by the single-child leaf qdiscs (RED, TBF). */
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_leaf_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			       u32 parent)
{
	/* RED and TBF are formally classful qdiscs, but all class references,
	 * including X:0, just refer to the same one class.
	 */
	return &mlxsw_sp_qdisc->qdiscs[0];
}
833
/* RED offload callbacks. RED is handled as a classful qdisc with a single
 * child slot (see mlxsw_sp_qdisc_leaf_find_class()).
 */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
	.type = MLXSW_SP_QDISC_RED,
	.check_params = mlxsw_sp_qdisc_red_check_params,
	.replace = mlxsw_sp_qdisc_red_replace,
	.unoffload = mlxsw_sp_qdisc_red_unoffload,
	.destroy = mlxsw_sp_qdisc_red_destroy,
	.get_stats = mlxsw_sp_qdisc_get_red_stats,
	.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
	.find_class = mlxsw_sp_qdisc_leaf_find_class,
	.num_classes = 1,
};
846
847static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
848				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
849				u8 band, u32 child_handle);
850
/* Dispatch one RED offload command. Called with the per-port qdisc lock
 * held (see mlxsw_sp_setup_tc_red()).
 */
static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_red_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	/* REPLACE may legitimately target a location whose current handle
	 * differs, so it is dispatched before the handle check below.
	 */
	if (p->command == TC_RED_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_red,
					      &p->set);

	/* All other commands must address the currently-offloaded qdisc. */
	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_RED_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_RED_XSTATS:
		return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
						 p->xstats);
	case TC_RED_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_RED_GRAFT:
		return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
					    p->child_handle);
	default:
		return -EOPNOTSUPP;
	}
}
885
886int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
887			  struct tc_red_qopt_offload *p)
888{
889	int err;
890
891	mutex_lock(&mlxsw_sp_port->qdisc->lock);
892	err = __mlxsw_sp_setup_tc_red(mlxsw_sp_port, p);
893	mutex_unlock(&mlxsw_sp_port->qdisc->lock);
894
895	return err;
896}
897
898static void
899mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
900					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
901{
902	u64 backlog_cells = 0;
903	u64 tx_packets = 0;
904	u64 tx_bytes = 0;
905	u64 drops = 0;
906
907	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
908					&tx_bytes, &tx_packets,
909					&drops, &backlog_cells);
910
911	mlxsw_sp_qdisc->stats_base.tx_packets = tx_packets;
912	mlxsw_sp_qdisc->stats_base.tx_bytes = tx_bytes;
913	mlxsw_sp_qdisc->stats_base.drops = drops;
914	mlxsw_sp_qdisc->stats_base.backlog = 0;
915}
916
917static enum mlxsw_reg_qeec_hr
918mlxsw_sp_qdisc_tbf_hr(struct mlxsw_sp_port *mlxsw_sp_port,
919		      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
920{
921	if (mlxsw_sp_qdisc == &mlxsw_sp_port->qdisc->root_qdisc)
922		return MLXSW_REG_QEEC_HR_PORT;
923
924	/* Configure subgroup shaper, so that both UC and MC traffic is subject
925	 * to shaping. That is unlike RED, however UC queue lengths are going to
926	 * be different than MC ones due to different pool and quota
927	 * configurations, so the configuration is not applicable. For shaper on
928	 * the other hand, subjecting the overall stream to the configured
929	 * shaper makes sense. Also note that that is what we do for
930	 * ieee_setmaxrate().
931	 */
932	return MLXSW_REG_QEEC_HR_SUBGROUP;
933}
934
935static int
936mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
937			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
938{
939	enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port,
940							  mlxsw_sp_qdisc);
941	int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
942						       mlxsw_sp_qdisc);
943
944	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0,
945					     MLXSW_REG_QEEC_MAS_DIS, 0);
946}
947
948static int
949mlxsw_sp_qdisc_tbf_bs(struct mlxsw_sp_port *mlxsw_sp_port,
950		      u32 max_size, u8 *p_burst_size)
951{
952	/* TBF burst size is configured in bytes. The ASIC burst size value is
953	 * ((2 ^ bs) * 512 bits. Convert the TBF bytes to 512-bit units.
954	 */
955	u32 bs512 = max_size / 64;
956	u8 bs = fls(bs512);
957
958	if (!bs)
959		return -EINVAL;
960	--bs;
961
962	/* Demand a power of two. */
963	if ((1 << bs) != bs512)
964		return -EINVAL;
965
966	if (bs < mlxsw_sp_port->mlxsw_sp->lowest_shaper_bs ||
967	    bs > MLXSW_REG_QEEC_HIGHEST_SHAPER_BS)
968		return -EINVAL;
969
970	*p_burst_size = bs;
971	return 0;
972}
973
974static u32
975mlxsw_sp_qdisc_tbf_max_size(u8 bs)
976{
977	return (1U << bs) * 64;
978}
979
980static u64
981mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
982{
983	/* TBF interface is in bytes/s, whereas Spectrum ASIC is configured in
984	 * Kbits/s.
985	 */
986	return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
987}
988
989static int
990mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
991				void *params)
992{
993	struct tc_tbf_qopt_offload_replace_params *p = params;
994	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
995	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
996	u8 burst_size;
997	int err;
998
999	if (rate_kbps >= MLXSW_REG_QEEC_MAS_DIS) {
1000		dev_err(mlxsw_sp_port->mlxsw_sp->bus_info->dev,
1001			"spectrum: TBF: rate of %lluKbps must be below %u\n",
1002			rate_kbps, MLXSW_REG_QEEC_MAS_DIS);
1003		return -EINVAL;
1004	}
1005
1006	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
1007	if (err) {
1008		u8 highest_shaper_bs = MLXSW_REG_QEEC_HIGHEST_SHAPER_BS;
1009
1010		dev_err(mlxsw_sp->bus_info->dev,
1011			"spectrum: TBF: invalid burst size of %u, must be a power of two between %u and %u",
1012			p->max_size,
1013			mlxsw_sp_qdisc_tbf_max_size(mlxsw_sp->lowest_shaper_bs),
1014			mlxsw_sp_qdisc_tbf_max_size(highest_shaper_bs));
1015		return -EINVAL;
1016	}
1017
1018	return 0;
1019}
1020
/* Program a TBF shaper into hardware. Also adopts any invisible child
 * FIFO whose notification arrived before this TBF was known.
 */
static int
mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port,
							  mlxsw_sp_qdisc);
	struct tc_tbf_qopt_offload_replace_params *p = params;
	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
	int tclass_num;
	u8 burst_size;
	int err;

	err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0,
						 &mlxsw_sp_qdisc->qdiscs[0]);
	if (err)
		return err;
	mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);

	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
	if (WARN_ON_ONCE(err))
		/* check_params above was supposed to reject this value. */
		return -EINVAL;

	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0,
					     rate_kbps, burst_size);
}
1051
1052static void
1053mlxsw_sp_qdisc_tbf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1054			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1055			     void *params)
1056{
1057	struct tc_tbf_qopt_offload_replace_params *p = params;
1058
1059	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
1060}
1061
/* Report TBF statistics from the per-traffic-class counters. */
static int
mlxsw_sp_qdisc_get_tbf_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
{
	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
	return 0;
}
1071
/* TBF is offloaded as a maxrate shaper on a single traffic class; it has
 * exactly one child class (for an optional FIFO).
 */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
	.type = MLXSW_SP_QDISC_TBF,
	.check_params = mlxsw_sp_qdisc_tbf_check_params,
	.replace = mlxsw_sp_qdisc_tbf_replace,
	.unoffload = mlxsw_sp_qdisc_tbf_unoffload,
	.destroy = mlxsw_sp_qdisc_tbf_destroy,
	.get_stats = mlxsw_sp_qdisc_get_tbf_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
	.find_class = mlxsw_sp_qdisc_leaf_find_class,
	.num_classes = 1,
};
1083
/* Dispatch a TC_SETUP_QDISC_TBF command. Caller holds the qdisc lock.
 * REPLACE is handled before the handle comparison because it may create
 * the offloaded qdisc; the remaining commands require an already-offloaded
 * qdisc with a matching handle.
 */
static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_tbf_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_TBF_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_tbf,
					      &p->replace_params);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_TBF_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_TBF_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_TBF_GRAFT:
		return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
					    p->child_handle);
	default:
		return -EOPNOTSUPP;
	}
}
1115
1116int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
1117			  struct tc_tbf_qopt_offload *p)
1118{
1119	int err;
1120
1121	mutex_lock(&mlxsw_sp_port->qdisc->lock);
1122	err = __mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, p);
1123	mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1124
1125	return err;
1126}
1127
/* FIFO has no offloadable parameters; any configuration is acceptable. */
static int
mlxsw_sp_qdisc_fifo_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				 void *params)
{
	return 0;
}
1134
/* FIFO needs no hardware programming of its own; the parent's shaper and
 * scheduling configuration already cover it.
 */
static int
mlxsw_sp_qdisc_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params)
{
	return 0;
}
1142
/* Report FIFO statistics from the per-traffic-class counters. */
static int
mlxsw_sp_qdisc_get_fifo_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
{
	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
	return 0;
}
1152
/* FIFO offload is stateless: no destroy, unoffload or class lookup needed. */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
	.type = MLXSW_SP_QDISC_FIFO,
	.check_params = mlxsw_sp_qdisc_fifo_check_params,
	.replace = mlxsw_sp_qdisc_fifo_replace,
	.get_stats = mlxsw_sp_qdisc_get_fifo_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
};
1160
/* If a FIFO child was announced (via an early TC_FIFO_REPLACE) for this
 * parent handle and band before the parent itself was offloaded, offload
 * that invisible FIFO into child_qdisc now. No-op otherwise.
 */
static int
mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
				   u32 handle, unsigned int band,
				   struct mlxsw_sp_qdisc *child_qdisc)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;

	if (handle == qdisc_state->future_handle &&
	    qdisc_state->future_fifos[band])
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
					      child_qdisc,
					      &mlxsw_sp_qdisc_ops_fifo,
					      NULL);
	return 0;
}
1176
1177static void
1178mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
1179				 u32 handle)
1180{
1181	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
1182
1183	qdisc_state->future_handle = handle;
1184	memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
1185}
1186
/* Dispatch a TC_SETUP_QDISC_FIFO command. Caller holds the qdisc lock.
 * A FIFO notification with an unspecified handle whose parent is not yet
 * offloaded is cached in future_fifos[], so it can be offloaded once the
 * parent's replace arrives (see mlxsw_sp_qdisc_future_fifo_replace()).
 */
static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct tc_fifo_qopt_offload *p)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
	unsigned int band;
	u32 parent_handle;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
	if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
		parent_handle = TC_H_MAJ(p->parent);
		if (parent_handle != qdisc_state->future_handle) {
			/* This notification is for a different Qdisc than
			 * previously. Wipe the future cache.
			 */
			mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port,
							 parent_handle);
		}

		/* Band 0 corresponds to class minor 1; an out-of-range minor
		 * (including 0, which wraps around) is simply not cached.
		 */
		band = TC_H_MIN(p->parent) - 1;
		if (band < IEEE_8021QAZ_MAX_TCS) {
			if (p->command == TC_FIFO_REPLACE)
				qdisc_state->future_fifos[band] = true;
			else if (p->command == TC_FIFO_DESTROY)
				qdisc_state->future_fifos[band] = false;
		}
	}
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_FIFO_REPLACE) {
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_fifo, NULL);
	}

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_FIFO_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_FIFO_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_FIFO_REPLACE: /* Handled above. */
		break;
	}

	return -EOPNOTSUPP;
}
1238
1239int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
1240			   struct tc_fifo_qopt_offload *p)
1241{
1242	int err;
1243
1244	mutex_lock(&mlxsw_sp_port->qdisc->lock);
1245	err = __mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, p);
1246	mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1247
1248	return err;
1249}
1250
1251static int __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
1252					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
1253{
1254	int i;
1255
1256	for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
1257		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
1258					  MLXSW_SP_PORT_DEFAULT_TCLASS);
1259		mlxsw_sp_port_ets_set(mlxsw_sp_port,
1260				      MLXSW_REG_QEEC_HR_SUBGROUP,
1261				      i, 0, false, 0);
1262	}
1263
1264	kfree(mlxsw_sp_qdisc->ets_data);
1265	mlxsw_sp_qdisc->ets_data = NULL;
1266	return 0;
1267}
1268
/* PRIO destroy is identical to ETS destroy. */
static int
mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
}
1275
1276static int
1277__mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
1278{
1279	if (nbands > IEEE_8021QAZ_MAX_TCS)
1280		return -EOPNOTSUPP;
1281
1282	return 0;
1283}
1284
/* Validate PRIO replace parameters (band count only). */
static int
mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				 void *params)
{
	struct tc_prio_qopt_offload_params *p = params;

	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
}
1293
1294static struct mlxsw_sp_qdisc *
1295mlxsw_sp_qdisc_walk_cb_clean_stats(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1296				   void *mlxsw_sp_port)
1297{
1298	u64 backlog;
1299
1300	if (mlxsw_sp_qdisc->ops) {
1301		backlog = mlxsw_sp_qdisc->stats_base.backlog;
1302		if (mlxsw_sp_qdisc->ops->clean_stats)
1303			mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port,
1304							 mlxsw_sp_qdisc);
1305		mlxsw_sp_qdisc->stats_base.backlog = backlog;
1306	}
1307
1308	return NULL;
1309}
1310
/* Reset the stats bases of a qdisc and all of its descendants. */
static void
mlxsw_sp_qdisc_tree_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	mlxsw_sp_qdisc_walk(mlxsw_sp_qdisc, mlxsw_sp_qdisc_walk_cb_clean_stats,
			    mlxsw_sp_port);
}
1318
/* Common PRIO/ETS replace: allocate per-band bookkeeping on first offload,
 * program per-band DWRR settings and the priority-to-tclass mapping for
 * active bands, and tear down any bands beyond nbands. For PRIO, quanta
 * and weights are all-zero (strict priority). Returns 0 or a negative
 * errno; on error, hardware may be partially reprogrammed.
 */
static int
__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     u32 handle, unsigned int nbands,
			     const unsigned int *quanta,
			     const unsigned int *weights,
			     const u8 *priomap)
{
	struct mlxsw_sp_qdisc_ets_data *ets_data = mlxsw_sp_qdisc->ets_data;
	struct mlxsw_sp_qdisc_ets_band *ets_band;
	struct mlxsw_sp_qdisc *child_qdisc;
	u8 old_priomap, new_priomap;
	int i, band;
	int err;

	if (!ets_data) {
		/* First offload of this qdisc: set up the band-to-tclass
		 * assignment, which stays fixed for the qdisc's lifetime.
		 */
		ets_data = kzalloc(sizeof(*ets_data), GFP_KERNEL);
		if (!ets_data)
			return -ENOMEM;
		mlxsw_sp_qdisc->ets_data = ets_data;

		for (band = 0; band < mlxsw_sp_qdisc->num_classes; band++) {
			int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);

			ets_band = &ets_data->bands[band];
			ets_band->tclass_num = tclass_num;
		}
	}

	for (band = 0; band < nbands; band++) {
		int tclass_num;

		child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
		ets_band = &ets_data->bands[band];

		tclass_num = ets_band->tclass_num;
		old_priomap = ets_band->prio_bitmap;
		new_priomap = 0;

		/* A zero quantum means strict priority for this band. */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP,
					    tclass_num, 0, !!quanta[band],
					    weights[band]);
		if (err)
			return err;

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
			if (priomap[i] == band) {
				new_priomap |= BIT(i);
				/* Only reprogram priorities that moved. */
				if (BIT(i) & old_priomap)
					continue;
				err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
								i, tclass_num);
				if (err)
					return err;
			}
		}

		ets_band->prio_bitmap = new_priomap;

		/* Counters reflect a different set of priorities now, so the
		 * accumulated stats no longer apply to this subtree.
		 */
		if (old_priomap != new_priomap)
			mlxsw_sp_qdisc_tree_clean_stats(mlxsw_sp_port,
							child_qdisc);

		err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle,
							 band, child_qdisc);
		if (err)
			return err;
	}
	/* Deactivate bands past the configured count. */
	for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
		ets_band = &ets_data->bands[band];
		ets_band->prio_bitmap = 0;

		child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);

		mlxsw_sp_port_ets_set(mlxsw_sp_port,
				      MLXSW_REG_QEEC_HR_SUBGROUP,
				      ets_band->tclass_num, 0, false, 0);
	}

	mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
	return 0;
}
1403
/* PRIO replace: an ETS replace with all-zero quanta/weights, i.e. pure
 * strict-priority scheduling across the bands.
 */
static int
mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params)
{
	struct tc_prio_qopt_offload_params *p = params;
	unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};

	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
					    handle, p->bands, zeroes,
					    zeroes, p->priomap);
}
1416
1417static void
1418__mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1419			       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1420			       struct gnet_stats_queue *qstats)
1421{
1422	u64 backlog;
1423
1424	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
1425				       mlxsw_sp_qdisc->stats_base.backlog);
1426	qstats->backlog -= backlog;
1427}
1428
/* PRIO unoffload: adjust software qstats for the hardware backlog. */
static void
mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      void *params)
{
	struct tc_prio_qopt_offload_params *p = params;

	__mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
				       p->qstats);
}
1439
/* PRIO/ETS statistics: sum the per-band traffic-class counters and fold
 * them into the qdisc's reported stats.
 */
static int
mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
{
	struct mlxsw_sp_qdisc *tc_qdisc;
	u64 tx_packets = 0;
	u64 tx_bytes = 0;
	u64 backlog = 0;
	u64 drops = 0;
	int i;

	for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
		tc_qdisc = &mlxsw_sp_qdisc->qdiscs[i];
		mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
						&tx_bytes, &tx_packets,
						&drops, &backlog);
	}

	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
				    tx_bytes, tx_packets, drops, backlog,
				    stats_ptr);
	return 0;
}
1464
1465static void
1466mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
1467					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
1468{
1469	struct mlxsw_sp_qdisc_stats *stats_base;
1470	struct mlxsw_sp_port_xstats *xstats;
1471	struct rtnl_link_stats64 *stats;
1472	int i;
1473
1474	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
1475	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
1476	stats_base = &mlxsw_sp_qdisc->stats_base;
1477
1478	stats_base->tx_packets = stats->tx_packets;
1479	stats_base->tx_bytes = stats->tx_bytes;
1480
1481	stats_base->drops = 0;
1482	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1483		stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
1484		stats_base->drops += xstats->wred_drop[i];
1485	}
1486
1487	mlxsw_sp_qdisc->stats_base.backlog = 0;
1488}
1489
1490static struct mlxsw_sp_qdisc *
1491mlxsw_sp_qdisc_prio_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1492			       u32 parent)
1493{
1494	int child_index = TC_H_MIN(parent);
1495	int band = child_index - 1;
1496
1497	if (band < 0 || band >= mlxsw_sp_qdisc->num_classes)
1498		return NULL;
1499	return &mlxsw_sp_qdisc->qdiscs[band];
1500}
1501
/* Locate the ETS band record for a given child qdisc. The child is one of
 * the parent's qdiscs[] elements, so its index is recovered by pointer
 * arithmetic; an out-of-range result indicates a caller bug.
 */
static struct mlxsw_sp_qdisc_ets_band *
mlxsw_sp_qdisc_ets_get_band(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    struct mlxsw_sp_qdisc *child)
{
	unsigned int band = child - mlxsw_sp_qdisc->qdiscs;

	if (WARN_ON(band >= IEEE_8021QAZ_MAX_TCS))
		band = 0;
	return &mlxsw_sp_qdisc->ets_data->bands[band];
}
1512
/* Return the bitmap of priorities currently mapped to the child's band. */
static u8
mlxsw_sp_qdisc_ets_get_prio_bitmap(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				   struct mlxsw_sp_qdisc *child)
{
	return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->prio_bitmap;
}
1519
/* Return the traffic class assigned to the child's band. */
static int
mlxsw_sp_qdisc_ets_get_tclass_num(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				  struct mlxsw_sp_qdisc *child)
{
	return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->tclass_num;
}
1526
/* PRIO offload: strict-priority scheduling across up to eight bands. */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
	.type = MLXSW_SP_QDISC_PRIO,
	.check_params = mlxsw_sp_qdisc_prio_check_params,
	.replace = mlxsw_sp_qdisc_prio_replace,
	.unoffload = mlxsw_sp_qdisc_prio_unoffload,
	.destroy = mlxsw_sp_qdisc_prio_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
	.find_class = mlxsw_sp_qdisc_prio_find_class,
	.num_classes = IEEE_8021QAZ_MAX_TCS,
	.get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
	.get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
};
1540
/* Validate ETS replace parameters (band count only). */
static int
mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				void *params)
{
	struct tc_ets_qopt_offload_replace_params *p = params;

	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
}
1549
/* ETS replace: program bands with the user's quanta and weights. */
static int
mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct tc_ets_qopt_offload_replace_params *p = params;

	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
					    handle, p->bands, p->quanta,
					    p->weights, p->priomap);
}
1561
/* ETS unoffload: adjust software qstats for the hardware backlog. */
static void
mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     void *params)
{
	struct tc_ets_qopt_offload_replace_params *p = params;

	__mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
				       p->qstats);
}
1572
/* ETS destroy is identical to PRIO destroy. */
static int
mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
}
1579
/* ETS offload: shares most handlers with PRIO, differing only in the
 * replace path, which carries per-band quanta and weights.
 */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
	.type = MLXSW_SP_QDISC_ETS,
	.check_params = mlxsw_sp_qdisc_ets_check_params,
	.replace = mlxsw_sp_qdisc_ets_replace,
	.unoffload = mlxsw_sp_qdisc_ets_unoffload,
	.destroy = mlxsw_sp_qdisc_ets_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
	.find_class = mlxsw_sp_qdisc_prio_find_class,
	.num_classes = IEEE_8021QAZ_MAX_TCS,
	.get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
	.get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
};
1593
1594/* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
1595 * graph is free of cycles). These operations do not change the parent handle
1596 * though, which means it can be incomplete (if there is more than one class
1597 * where the Qdisc in question is grafted) or outright wrong (if the Qdisc was
1598 * linked to a different class and then removed from the original class).
1599 *
1600 * E.g. consider this sequence of operations:
1601 *
1602 *  # tc qdisc add dev swp1 root handle 1: prio
1603 *  # tc qdisc add dev swp1 parent 1:3 handle 13: red limit 1000000 avpkt 10000
1604 *  RED: set bandwidth to 10Mbit
1605 *  # tc qdisc link dev swp1 handle 13: parent 1:2
1606 *
1607 * At this point, both 1:2 and 1:3 have the same RED Qdisc instance as their
1608 * child. But RED will still only claim that 1:3 is its parent. If it's removed
1609 * from that band, its only parent will be 1:2, but it will continue to claim
1610 * that it is in fact 1:3.
1611 *
1612 * The notification for child Qdisc replace (e.g. TC_RED_REPLACE) comes before
1613 * the notification for parent graft (e.g. TC_PRIO_GRAFT). We take the replace
1614 * notification to offload the child Qdisc, based on its parent handle, and use
1615 * the graft operation to validate that the class where the child is actually
1616 * grafted corresponds to the parent handle. If the two don't match, we
1617 * unoffload the child.
1618 */
/* Validate a graft notification against the offloaded state (see the
 * comment above for the full rationale). Returns 0 if the graft matches
 * what is already offloaded or is the ignorable invisible-FIFO case;
 * otherwise unoffloads the mismatched child and returns -EOPNOTSUPP.
 */
static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				u8 band, u32 child_handle)
{
	struct mlxsw_sp_qdisc *old_qdisc;
	u32 parent;

	/* The expected child is already offloaded at this band: nothing to
	 * do.
	 */
	if (band < mlxsw_sp_qdisc->num_classes &&
	    mlxsw_sp_qdisc->qdiscs[band].handle == child_handle)
		return 0;

	if (!child_handle) {
		/* This is an invisible FIFO replacing the original Qdisc.
		 * Ignore it--the original Qdisc's destroy will follow.
		 */
		return 0;
	}

	/* See if the grafted qdisc is already offloaded on any tclass. If so,
	 * unoffload it.
	 */
	old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
						  child_handle);
	if (old_qdisc)
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);

	/* Also unoffload whatever is currently occupying the target class. */
	parent = TC_H_MAKE(mlxsw_sp_qdisc->handle, band + 1);
	mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc,
							 parent);
	if (!WARN_ON(!mlxsw_sp_qdisc))
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);

	return -EOPNOTSUPP;
}
1653
/* Dispatch a TC_SETUP_QDISC_PRIO command. Caller holds the qdisc lock.
 * REPLACE may create the offloaded qdisc; the remaining commands require
 * an already-offloaded qdisc with a matching handle.
 */
static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct tc_prio_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_PRIO_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_prio,
					      &p->replace_params);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_PRIO_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_PRIO_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_PRIO_GRAFT:
		return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
					    p->graft_params.band,
					    p->graft_params.child_handle);
	default:
		return -EOPNOTSUPP;
	}
}
1686
1687int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
1688			   struct tc_prio_qopt_offload *p)
1689{
1690	int err;
1691
1692	mutex_lock(&mlxsw_sp_port->qdisc->lock);
1693	err = __mlxsw_sp_setup_tc_prio(mlxsw_sp_port, p);
1694	mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1695
1696	return err;
1697}
1698
/* Dispatch a TC_SETUP_QDISC_ETS command. Caller holds the qdisc lock.
 * Mirrors __mlxsw_sp_setup_tc_prio() with the ETS ops and offload struct.
 */
static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_ets_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_ETS_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_ets,
					      &p->replace_params);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_ETS_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_ETS_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_ETS_GRAFT:
		return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
					    p->graft_params.band,
					    p->graft_params.child_handle);
	default:
		return -EOPNOTSUPP;
	}
}
1731
1732int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
1733			  struct tc_ets_qopt_offload *p)
1734{
1735	int err;
1736
1737	mutex_lock(&mlxsw_sp_port->qdisc->lock);
1738	err = __mlxsw_sp_setup_tc_ets(mlxsw_sp_port, p);
1739	mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1740
1741	return err;
1742}
1743
/* A qevent block: the set of matchall rules (mall_entry_list) and the set
 * of (port, trigger) bindings (binding_list) they are applied to.
 */
struct mlxsw_sp_qevent_block {
	struct list_head binding_list;
	struct list_head mall_entry_list;
	struct mlxsw_sp *mlxsw_sp;
};
1749
/* One binding of a qevent block to a port qdisc event. */
struct mlxsw_sp_qevent_binding {
	struct list_head list;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 handle;			/* handle of the bound qdisc */
	int tclass_num;			/* traffic class the trigger fires on */
	enum mlxsw_sp_span_trigger span_trigger;
	unsigned int action_mask;	/* BIT(type) of allowed mall actions */
};
1758
/* flow_block_cb list shared by all qevent block bindings of this driver. */
static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
1760
/* Set up a SPAN session for a qevent action: acquire a mirroring agent,
 * mark the port as analyzed, bind the agent to the qevent trigger and
 * enable the trigger on the bound traffic class. On success the acquired
 * SPAN agent ID is returned via p_span_id; on failure all acquired
 * resources are released in reverse order.
 */
static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_mall_entry *mall_entry,
					  struct mlxsw_sp_qevent_binding *qevent_binding,
					  const struct mlxsw_sp_span_agent_parms *agent_parms,
					  int *p_span_id)
{
	enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
	struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
	struct mlxsw_sp_span_trigger_parms trigger_parms = {};
	bool ingress;
	int span_id;
	int err;

	err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, agent_parms);
	if (err)
		return err;

	ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);
	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
	if (err)
		goto err_analyzed_port_get;

	trigger_parms.span_id = span_id;
	/* Mirror every triggering packet (rate 1 = no sampling). */
	trigger_parms.probability_rate = 1;
	err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port,
				       &trigger_parms);
	if (err)
		goto err_agent_bind;

	err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, span_trigger,
					   qevent_binding->tclass_num);
	if (err)
		goto err_trigger_enable;

	*p_span_id = span_id;
	return 0;

err_trigger_enable:
	mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
				   &trigger_parms);
err_agent_bind:
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
err_analyzed_port_get:
	mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
	return err;
}
1807
/* Tear down a SPAN session previously set up by
 * mlxsw_sp_qevent_span_configure(), releasing resources in reverse order
 * of acquisition.
 */
static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_qevent_binding *qevent_binding,
					     int span_id)
{
	enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
	struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
	struct mlxsw_sp_span_trigger_parms trigger_parms = {
		.span_id = span_id,
	};
	bool ingress;

	ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);

	mlxsw_sp_span_trigger_disable(mlxsw_sp_port, span_trigger,
				      qevent_binding->tclass_num);
	mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
				   &trigger_parms);
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
	mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
}
1828
/* Offload a mirror action: SPAN to the rule's destination netdevice. */
static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_mall_entry *mall_entry,
					    struct mlxsw_sp_qevent_binding *qevent_binding)
{
	struct mlxsw_sp_span_agent_parms agent_parms = {
		.to_dev = mall_entry->mirror.to_dev,
	};

	return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
					      &agent_parms, &mall_entry->mirror.span_id);
}
1840
/* Undo mlxsw_sp_qevent_mirror_configure(). */
static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
					       struct mlxsw_sp_mall_entry *mall_entry,
					       struct mlxsw_sp_qevent_binding *qevent_binding)
{
	mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->mirror.span_id);
}
1847
/* Offload a trap action: SPAN to the CPU through the buffer-drops trap
 * group, reusing that group's policer if one is configured.
 */
static int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_mall_entry *mall_entry,
					  struct mlxsw_sp_qevent_binding *qevent_binding)
{
	struct mlxsw_sp_span_agent_parms agent_parms = {
		.session_id = MLXSW_SP_SPAN_SESSION_ID_BUFFER,
	};
	int err;

	err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp,
						    DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
						    &agent_parms.policer_enable,
						    &agent_parms.policer_id);
	if (err)
		return err;

	return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
					      &agent_parms, &mall_entry->trap.span_id);
}
1867
/* Undo mlxsw_sp_qevent_trap_configure(). */
static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_mall_entry *mall_entry,
					     struct mlxsw_sp_qevent_binding *qevent_binding)
{
	mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id);
}
1874
/* Offload a single matchall rule onto one binding, after checking that the
 * binding's qevent supports the rule's action type.
 */
static int
mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_mall_entry *mall_entry,
				struct mlxsw_sp_qevent_binding *qevent_binding,
				struct netlink_ext_ack *extack)
{
	if (!(BIT(mall_entry->type) & qevent_binding->action_mask)) {
		NL_SET_ERR_MSG(extack, "Action not supported at this qevent");
		return -EOPNOTSUPP;
	}

	switch (mall_entry->type) {
	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
		return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
	case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
		return mlxsw_sp_qevent_trap_configure(mlxsw_sp, mall_entry, qevent_binding);
	default:
		/* This should have been validated away. */
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
}
1897
/* Undo mlxsw_sp_qevent_entry_configure() for one rule on one binding. */
static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
					      struct mlxsw_sp_mall_entry *mall_entry,
					      struct mlxsw_sp_qevent_binding *qevent_binding)
{
	switch (mall_entry->type) {
	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
		return mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
	case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
		return mlxsw_sp_qevent_trap_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
	default:
		WARN_ON(1);
		return;
	}
}
1912
/* Offload all of a block's rules onto one binding; on failure, roll back
 * the rules already configured, in reverse order.
 */
static int
mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
				  struct mlxsw_sp_qevent_binding *qevent_binding,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	int err;

	list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) {
		err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry,
						      qevent_binding, extack);
		if (err)
			goto err_entry_configure;
	}

	return 0;

err_entry_configure:
	list_for_each_entry_continue_reverse(mall_entry, &qevent_block->mall_entry_list, list)
		mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
						  qevent_binding);
	return err;
}
1936
/* Unoffload all of a block's rules from one binding. */
static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qevent_block,
						struct mlxsw_sp_qevent_binding *qevent_binding)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list)
		mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
						  qevent_binding);
}
1946
/* Offload the block's rules onto every binding; on failure, roll back the
 * bindings already configured, in reverse order.
 */
static int
mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_qevent_binding *qevent_binding;
	int err;

	list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) {
		err = mlxsw_sp_qevent_binding_configure(qevent_block,
							qevent_binding,
							extack);
		if (err)
			goto err_binding_configure;
	}

	return 0;

err_binding_configure:
	list_for_each_entry_continue_reverse(qevent_binding, &qevent_block->binding_list, list)
		mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
	return err;
}
1969
/* Unoffload the block's rules from every binding. */
static void mlxsw_sp_qevent_block_deconfigure(struct mlxsw_sp_qevent_block *qevent_block)
{
	struct mlxsw_sp_qevent_binding *qevent_binding;

	list_for_each_entry(qevent_binding, &qevent_block->binding_list, list)
		mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
}
1977
/* Look up a matchall rule in the block by its TC cookie; NULL if absent. */
static struct mlxsw_sp_mall_entry *
mlxsw_sp_qevent_mall_entry_find(struct mlxsw_sp_qevent_block *block, unsigned long cookie)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	list_for_each_entry(mall_entry, &block->mall_entry_list, list)
		if (mall_entry->cookie == cookie)
			return mall_entry;

	return NULL;
}
1989
/* Add a matchall rule to a qevent block and offload it to all bindings.
 * Only one rule per block, with exactly one action (mirred or trap), on
 * chain 0, matching all protocols, and with hardware stats disabled, is
 * supported.
 */
static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_qevent_block *qevent_block,
					struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	struct flow_action_entry *act;
	int err;

	/* It should not currently be possible to replace a matchall rule. So
	 * this must be a new rule.
	 */
	if (!list_empty(&qevent_block->mall_entry_list)) {
		NL_SET_ERR_MSG(f->common.extack, "At most one filter supported");
		return -EOPNOTSUPP;
	}
	if (f->rule->action.num_entries != 1) {
		NL_SET_ERR_MSG(f->common.extack, "Only singular actions supported");
		return -EOPNOTSUPP;
	}
	if (f->common.chain_index) {
		NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
		return -EOPNOTSUPP;
	}
	if (f->common.protocol != htons(ETH_P_ALL)) {
		NL_SET_ERR_MSG(f->common.extack, "Protocol matching not supported");
		return -EOPNOTSUPP;
	}

	act = &f->rule->action.entries[0];
	if (!(act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED)) {
		NL_SET_ERR_MSG(f->common.extack, "HW counters not supported on qevents");
		return -EOPNOTSUPP;
	}

	mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
	if (!mall_entry)
		return -ENOMEM;
	mall_entry->cookie = f->cookie;

	if (act->id == FLOW_ACTION_MIRRED) {
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
		mall_entry->mirror.to_dev = act->dev;
	} else if (act->id == FLOW_ACTION_TRAP) {
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_TRAP;
	} else {
		NL_SET_ERR_MSG(f->common.extack, "Unsupported action");
		err = -EOPNOTSUPP;
		goto err_unsupported_action;
	}

	/* Link the rule in before configuring, as the configure path walks
	 * the list; unlink on failure.
	 */
	list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list);

	err = mlxsw_sp_qevent_block_configure(qevent_block, f->common.extack);
	if (err)
		goto err_block_configure;

	return 0;

err_block_configure:
	list_del(&mall_entry->list);
err_unsupported_action:
	kfree(mall_entry);
	return err;
}
2054
2055static void mlxsw_sp_qevent_mall_destroy(struct mlxsw_sp_qevent_block *qevent_block,
2056					 struct tc_cls_matchall_offload *f)
2057{
2058	struct mlxsw_sp_mall_entry *mall_entry;
2059
2060	mall_entry = mlxsw_sp_qevent_mall_entry_find(qevent_block, f->cookie);
2061	if (!mall_entry)
2062		return;
2063
2064	mlxsw_sp_qevent_block_deconfigure(qevent_block);
2065
2066	list_del(&mall_entry->list);
2067	kfree(mall_entry);
2068}
2069
2070static int mlxsw_sp_qevent_block_mall_cb(struct mlxsw_sp_qevent_block *qevent_block,
2071					 struct tc_cls_matchall_offload *f)
2072{
2073	struct mlxsw_sp *mlxsw_sp = qevent_block->mlxsw_sp;
2074
2075	switch (f->command) {
2076	case TC_CLSMATCHALL_REPLACE:
2077		return mlxsw_sp_qevent_mall_replace(mlxsw_sp, qevent_block, f);
2078	case TC_CLSMATCHALL_DESTROY:
2079		mlxsw_sp_qevent_mall_destroy(qevent_block, f);
2080		return 0;
2081	default:
2082		return -EOPNOTSUPP;
2083	}
2084}
2085
2086static int mlxsw_sp_qevent_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
2087{
2088	struct mlxsw_sp_qevent_block *qevent_block = cb_priv;
2089
2090	switch (type) {
2091	case TC_SETUP_CLSMATCHALL:
2092		return mlxsw_sp_qevent_block_mall_cb(qevent_block, type_data);
2093	default:
2094		return -EOPNOTSUPP;
2095	}
2096}
2097
2098static struct mlxsw_sp_qevent_block *mlxsw_sp_qevent_block_create(struct mlxsw_sp *mlxsw_sp,
2099								  struct net *net)
2100{
2101	struct mlxsw_sp_qevent_block *qevent_block;
2102
2103	qevent_block = kzalloc(sizeof(*qevent_block), GFP_KERNEL);
2104	if (!qevent_block)
2105		return NULL;
2106
2107	INIT_LIST_HEAD(&qevent_block->binding_list);
2108	INIT_LIST_HEAD(&qevent_block->mall_entry_list);
2109	qevent_block->mlxsw_sp = mlxsw_sp;
2110	return qevent_block;
2111}
2112
/* Free a qevent block. Both lists must already be empty — warn loudly if a
 * binding or matchall entry is being leaked.
 */
static void
mlxsw_sp_qevent_block_destroy(struct mlxsw_sp_qevent_block *qevent_block)
{
	WARN_ON(!list_empty(&qevent_block->binding_list));
	WARN_ON(!list_empty(&qevent_block->mall_entry_list));
	kfree(qevent_block);
}
2120
/* flow_block_cb release callback: cb_priv is the qevent block allocated in
 * mlxsw_sp_setup_tc_block_qevent_bind(); destroy it.
 */
static void mlxsw_sp_qevent_block_release(void *cb_priv)
{
	struct mlxsw_sp_qevent_block *qevent_block = cb_priv;

	mlxsw_sp_qevent_block_destroy(qevent_block);
}
2127
2128static struct mlxsw_sp_qevent_binding *
2129mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num,
2130			       enum mlxsw_sp_span_trigger span_trigger,
2131			       unsigned int action_mask)
2132{
2133	struct mlxsw_sp_qevent_binding *binding;
2134
2135	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
2136	if (!binding)
2137		return ERR_PTR(-ENOMEM);
2138
2139	binding->mlxsw_sp_port = mlxsw_sp_port;
2140	binding->handle = handle;
2141	binding->tclass_num = tclass_num;
2142	binding->span_trigger = span_trigger;
2143	binding->action_mask = action_mask;
2144	return binding;
2145}
2146
/* Free a binding created by mlxsw_sp_qevent_binding_create(). The caller is
 * responsible for deconfiguring and unlinking it first.
 */
static void
mlxsw_sp_qevent_binding_destroy(struct mlxsw_sp_qevent_binding *binding)
{
	kfree(binding);
}
2152
2153static struct mlxsw_sp_qevent_binding *
2154mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block,
2155			       struct mlxsw_sp_port *mlxsw_sp_port,
2156			       u32 handle,
2157			       enum mlxsw_sp_span_trigger span_trigger)
2158{
2159	struct mlxsw_sp_qevent_binding *qevent_binding;
2160
2161	list_for_each_entry(qevent_binding, &block->binding_list, list)
2162		if (qevent_binding->mlxsw_sp_port == mlxsw_sp_port &&
2163		    qevent_binding->handle == handle &&
2164		    qevent_binding->span_trigger == span_trigger)
2165			return qevent_binding;
2166	return NULL;
2167}
2168
/* Bind a qevent flow block to a port's offloaded qdisc.
 *
 * If this is the first binding of the shared block, a new qevent block and
 * flow_block_cb are created; otherwise the existing callback's refcount is
 * bumped. The binding is created for the traffic class backing the qdisc
 * identified by f->sch->handle, configured against all installed mall
 * entries, and added to the block's binding list. Registration of the
 * callback with the core (flow_block_cb_add) is deferred until everything
 * else has succeeded, so no error path needs to unregister it.
 *
 * Returns 0 on success or a negative errno; on failure the flow_block_cb
 * refcount taken here is dropped, freeing the callback (and the block via
 * mlxsw_sp_qevent_block_release) when this was the last reference.
 */
static int
mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct flow_block_offload *f,
				    enum mlxsw_sp_span_trigger span_trigger,
				    unsigned int action_mask)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_qevent_binding *qevent_binding;
	struct mlxsw_sp_qevent_block *qevent_block;
	struct flow_block_cb *block_cb;
	struct mlxsw_sp_qdisc *qdisc;
	bool register_block = false;
	int tclass_num;
	int err;

	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
	if (!block_cb) {
		/* First binding of this shared block: create the block and
		 * its callback; actual registration happens below.
		 */
		qevent_block = mlxsw_sp_qevent_block_create(mlxsw_sp, f->net);
		if (!qevent_block)
			return -ENOMEM;
		block_cb = flow_block_cb_alloc(mlxsw_sp_qevent_block_cb, mlxsw_sp, qevent_block,
					       mlxsw_sp_qevent_block_release);
		if (IS_ERR(block_cb)) {
			mlxsw_sp_qevent_block_destroy(qevent_block);
			return PTR_ERR(block_cb);
		}
		register_block = true;
	} else {
		qevent_block = flow_block_cb_priv(block_cb);
	}
	/* One reference per binding; dropped in unbind or on error below. */
	flow_block_cb_incref(block_cb);

	qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, f->sch->handle);
	if (!qdisc) {
		NL_SET_ERR_MSG(f->extack, "Qdisc not offloaded");
		err = -ENOENT;
		goto err_find_qdisc;
	}

	/* A duplicate binding for the same port/handle/trigger indicates a
	 * driver bug, hence the WARN_ON.
	 */
	if (WARN_ON(mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
						   span_trigger))) {
		err = -EEXIST;
		goto err_binding_exists;
	}

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, qdisc);
	qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port,
							f->sch->handle,
							tclass_num,
							span_trigger,
							action_mask);
	if (IS_ERR(qevent_binding)) {
		err = PTR_ERR(qevent_binding);
		goto err_binding_create;
	}

	err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding,
						f->extack);
	if (err)
		goto err_binding_configure;

	list_add(&qevent_binding->list, &qevent_block->binding_list);

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_qevent_block_cb_list);
	}

	return 0;

err_binding_configure:
	mlxsw_sp_qevent_binding_destroy(qevent_binding);
err_binding_create:
err_binding_exists:
err_find_qdisc:
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
	return err;
}
2248
/* Unbind a qevent flow block from a port's qdisc: deconfigure and free the
 * matching binding, drop the flow_block_cb reference taken at bind time, and
 * tear the callback down when the last reference goes away. Quietly returns
 * if the block or binding cannot be found.
 */
static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
						  struct flow_block_offload *f,
						  enum mlxsw_sp_span_trigger span_trigger)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_qevent_binding *qevent_binding;
	struct mlxsw_sp_qevent_block *qevent_block;
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
	if (!block_cb)
		return;
	qevent_block = flow_block_cb_priv(block_cb);

	qevent_binding = mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
							span_trigger);
	if (!qevent_binding)
		return;

	list_del(&qevent_binding->list);
	mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
	mlxsw_sp_qevent_binding_destroy(qevent_binding);

	/* Last binding gone: unregister and remove the callback. */
	if (!flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}
2277
2278static int
2279mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
2280			       struct flow_block_offload *f,
2281			       enum mlxsw_sp_span_trigger span_trigger,
2282			       unsigned int action_mask)
2283{
2284	f->driver_block_list = &mlxsw_sp_qevent_block_cb_list;
2285
2286	switch (f->command) {
2287	case FLOW_BLOCK_BIND:
2288		return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f,
2289							   span_trigger,
2290							   action_mask);
2291	case FLOW_BLOCK_UNBIND:
2292		mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger);
2293		return 0;
2294	default:
2295		return -EOPNOTSUPP;
2296	}
2297}
2298
2299int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
2300					      struct flow_block_offload *f)
2301{
2302	unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR) |
2303				   BIT(MLXSW_SP_MALL_ACTION_TYPE_TRAP);
2304
2305	return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
2306					      MLXSW_SP_SPAN_TRIGGER_EARLY_DROP,
2307					      action_mask);
2308}
2309
2310int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
2311					struct flow_block_offload *f)
2312{
2313	unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR);
2314
2315	return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
2316					      MLXSW_SP_SPAN_TRIGGER_ECN,
2317					      action_mask);
2318}
2319
2320int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
2321{
2322	struct mlxsw_sp_qdisc_state *qdisc_state;
2323
2324	qdisc_state = kzalloc(sizeof(*qdisc_state), GFP_KERNEL);
2325	if (!qdisc_state)
2326		return -ENOMEM;
2327
2328	mutex_init(&qdisc_state->lock);
2329	mlxsw_sp_port->qdisc = qdisc_state;
2330	return 0;
2331}
2332
2333void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
2334{
2335	mutex_destroy(&mlxsw_sp_port->qdisc->lock);
2336	kfree(mlxsw_sp_port->qdisc);
2337}
2338