// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2023 Marvell.
 *
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/bitfield.h>

#include "otx2_common.h"
#include "cn10k.h"
#include "qos.h"

#define OTX2_QOS_QID_INNER		0xFFFFU
#define OTX2_QOS_QID_NONE		0xFFFEU
#define OTX2_QOS_ROOT_CLASSID		0xFFFFFFFF
#define OTX2_QOS_CLASS_NONE		0
#define OTX2_QOS_DEFAULT_PRIO		0xF
#define OTX2_QOS_INVALID_SQ		0xFFFF
#define OTX2_QOS_INVALID_TXSCHQ_IDX	0xFFFF
#define CN10K_MAX_RR_WEIGHT		GENMASK_ULL(13, 0)
#define OTX2_MAX_RR_QUANTUM		GENMASK_ULL(23, 0)
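
/* Note on the qid sentinels above (as derived from their usage in this
 * file): OTX2_QOS_QID_INNER marks a class that has children and hence no
 * send queue of its own, OTX2_QOS_QID_NONE marks the internal txschq-only
 * nodes created beneath a leaf, and any other qid indexes qos_sq_bmap and
 * qid_to_sqmap directly.
 */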

static void otx2_qos_update_tx_netdev_queues(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int tx_queues, qos_txqs, err;

	qos_txqs = bitmap_weight(pfvf->qos.qos_sq_bmap,
				 OTX2_QOS_MAX_LEAF_NODES);

	tx_queues = hw->tx_queues + qos_txqs;

	err = netif_set_real_num_tx_queues(pfvf->netdev, tx_queues);
	if (err) {
		netdev_err(pfvf->netdev,
			   "Failed to set number of Tx queues: %d\n", tx_queues);
		return;
	}
}

static void otx2_qos_get_regaddr(struct otx2_qos_node *node,
				 struct nix_txschq_config *cfg,
				 int index)
{
	if (node->level == NIX_TXSCH_LVL_SMQ) {
		cfg->reg[index++] = NIX_AF_MDQX_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_MDQX_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_MDQX_PIR(node->schq);
		cfg->reg[index]   = NIX_AF_MDQX_CIR(node->schq);
	} else if (node->level == NIX_TXSCH_LVL_TL4) {
		cfg->reg[index++] = NIX_AF_TL4X_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_TL4X_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_TL4X_PIR(node->schq);
		cfg->reg[index]   = NIX_AF_TL4X_CIR(node->schq);
	} else if (node->level == NIX_TXSCH_LVL_TL3) {
		cfg->reg[index++] = NIX_AF_TL3X_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_TL3X_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_TL3X_PIR(node->schq);
		cfg->reg[index]   = NIX_AF_TL3X_CIR(node->schq);
	} else if (node->level == NIX_TXSCH_LVL_TL2) {
		cfg->reg[index++] = NIX_AF_TL2X_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_TL2X_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_TL2X_PIR(node->schq);
		cfg->reg[index]   = NIX_AF_TL2X_CIR(node->schq);
	}
}

static int otx2_qos_quantum_to_dwrr_weight(struct otx2_nic *pfvf, u32 quantum)
{
	u32 weight;

	weight = quantum / pfvf->hw.dwrr_mtu;
	if (quantum % pfvf->hw.dwrr_mtu)
		weight += 1;

	return weight;
}
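
/* Illustrative example (values assumed, not read from hardware): with
 * hw.dwrr_mtu = 1500, quantum 3000 converts to weight 2 while quantum
 * 3001 converts to weight 3; the conversion is a ceiling division, so
 * any remainder costs one extra weight unit.
 */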

static void otx2_config_sched_shaping(struct otx2_nic *pfvf,
				      struct otx2_qos_node *node,
				      struct nix_txschq_config *cfg,
				      int *num_regs)
{
	u32 rr_weight;
	u32 quantum;
	u64 maxrate;

	otx2_qos_get_regaddr(node, cfg, *num_regs);

	/* configure parent txschq */
	cfg->regval[*num_regs] = node->parent->schq << 16;
	(*num_regs)++;

	/* configure prio/quantum */
	if (node->qid == OTX2_QOS_QID_NONE) {
		cfg->regval[*num_regs] = node->prio << 24 |
					 mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
		(*num_regs)++;
		return;
	}

	/* configure priority/quantum */
	if (node->is_static) {
		cfg->regval[*num_regs] =
			(node->schq - node->parent->prio_anchor) << 24;
	} else {
		quantum = node->quantum ?
			  node->quantum : pfvf->tx_max_pktlen;
		rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
		cfg->regval[*num_regs] = node->parent->child_dwrr_prio << 24 |
					 rr_weight;
	}
	(*num_regs)++;

	/* configure PIR */
	maxrate = (node->rate > node->ceil) ? node->rate : node->ceil;

	cfg->regval[*num_regs] =
		otx2_get_txschq_rate_regval(pfvf, maxrate, 65536);
	(*num_regs)++;

	/* Don't configure CIR when both CIR and PIR are not supported;
	 * on 96xx, CIR + PIR + RED_ALGO=STALL causes a deadlock.
	 */
	if (!test_bit(QOS_CIR_PIR_SUPPORT, &pfvf->hw.cap_flag))
		return;

	cfg->regval[*num_regs] =
		otx2_get_txschq_rate_regval(pfvf, node->rate, 65536);
	(*num_regs)++;
}
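
/* Sketch of the regvals programmed above, as read off this code (field
 * names are descriptive guesses, not taken from the hardware manual):
 * the PARENT register carries the parent schq from bit 16, the SCHEDULE
 * register carries the priority from bit 24 with the DWRR weight or
 * quantum in the low bits, and PIR/CIR carry rate values encoded by
 * otx2_get_txschq_rate_regval(). Static children encode their offset
 * from the parent's prio_anchor; DWRR children all share the parent's
 * child_dwrr_prio.
 */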

static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node,
				  struct nix_txschq_config *cfg)
{
	struct otx2_hw *hw = &pfvf->hw;
	int num_regs = 0;
	u8 level;

	level = node->level;

	/* program txschq registers */
	if (level == NIX_TXSCH_LVL_SMQ) {
		cfg->reg[num_regs] = NIX_AF_SMQX_CFG(node->schq);
		cfg->regval[num_regs] = ((u64)pfvf->tx_max_pktlen << 8) |
					OTX2_MIN_MTU;
		cfg->regval[num_regs] |= (0x20ULL << 51) | (0x80ULL << 39) |
					 (0x2ULL << 36);
		num_regs++;

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);

	} else if (level == NIX_TXSCH_LVL_TL4) {
		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL3) {
		/* configure link cfg */
		if (level == pfvf->qos.link_cfg_lvl) {
			cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
			cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12);
			num_regs++;
		}

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL2) {
		/* configure link cfg */
		if (level == pfvf->qos.link_cfg_lvl) {
			cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
			cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12);
			num_regs++;
		}

		/* check if node is root */
		if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
			cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq);
			cfg->regval[num_regs] = TXSCH_TL1_DFLT_RR_PRIO << 24 |
						mtu_to_dwrr_weight(pfvf,
								   pfvf->tx_max_pktlen);
			num_regs++;
			goto txschq_cfg_out;
		}

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	}

txschq_cfg_out:
	cfg->num_regs = num_regs;
}

static int otx2_qos_txschq_set_parent_topology(struct otx2_nic *pfvf,
					       struct otx2_qos_node *parent)
{
	struct mbox *mbox = &pfvf->mbox;
	struct nix_txschq_config *cfg;
	int rc;

	if (parent->level == NIX_TXSCH_LVL_MDQ)
		return 0;

	mutex_lock(&mbox->lock);

	cfg = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!cfg) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	cfg->lvl = parent->level;

	if (parent->level == NIX_TXSCH_LVL_TL4)
		cfg->reg[0] = NIX_AF_TL4X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL3)
		cfg->reg[0] = NIX_AF_TL3X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL2)
		cfg->reg[0] = NIX_AF_TL2X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL1)
		cfg->reg[0] = NIX_AF_TL1X_TOPOLOGY(parent->schq);

	cfg->regval[0] = (u64)parent->prio_anchor << 32;
	cfg->regval[0] |= ((parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) ?
			    parent->child_dwrr_prio : 0) << 1;
	cfg->num_regs++;

	rc = otx2_sync_mbox_msg(&pfvf->mbox);

	mutex_unlock(&mbox->lock);

	return rc;
}

static void otx2_qos_free_hw_node_schq(struct otx2_nic *pfvf,
				       struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node;

	list_for_each_entry_reverse(node, &parent->child_schq_list, list)
		otx2_txschq_free_one(pfvf, node->level, node->schq);
}

static void otx2_qos_free_hw_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
		otx2_qos_free_hw_node(pfvf, node);
		otx2_qos_free_hw_node_schq(pfvf, node);
		otx2_txschq_free_one(pfvf, node->level, node->schq);
	}
}

static void otx2_qos_free_hw_cfg(struct otx2_nic *pfvf,
				 struct otx2_qos_node *node)
{
	mutex_lock(&pfvf->qos.qos_lock);

	/* free child node hw mappings */
	otx2_qos_free_hw_node(pfvf, node);
	otx2_qos_free_hw_node_schq(pfvf, node);

	/* free node hw mappings */
	otx2_txschq_free_one(pfvf, node->level, node->schq);

	mutex_unlock(&pfvf->qos.qos_lock);
}

static void otx2_qos_sw_node_delete(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node)
{
	hash_del_rcu(&node->hlist);

	if (node->qid != OTX2_QOS_QID_INNER && node->qid != OTX2_QOS_QID_NONE) {
		__clear_bit(node->qid, pfvf->qos.qos_sq_bmap);
		otx2_qos_update_tx_netdev_queues(pfvf);
	}

	list_del(&node->list);
	kfree(node);
}

static void otx2_qos_free_sw_node_schq(struct otx2_nic *pfvf,
				       struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_schq_list, list) {
		list_del(&node->list);
		kfree(node);
	}
}

static void __otx2_qos_free_sw_node(struct otx2_nic *pfvf,
				    struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
		__otx2_qos_free_sw_node(pfvf, node);
		otx2_qos_free_sw_node_schq(pfvf, node);
		otx2_qos_sw_node_delete(pfvf, node);
	}
}

static void otx2_qos_free_sw_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node)
{
	mutex_lock(&pfvf->qos.qos_lock);

	__otx2_qos_free_sw_node(pfvf, node);
	otx2_qos_free_sw_node_schq(pfvf, node);
	otx2_qos_sw_node_delete(pfvf, node);

	mutex_unlock(&pfvf->qos.qos_lock);
}

static void otx2_qos_destroy_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node)
{
	otx2_qos_free_hw_cfg(pfvf, node);
	otx2_qos_free_sw_node(pfvf, node);
}

static void otx2_qos_fill_cfg_schq(struct otx2_qos_node *parent,
				   struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;

	list_for_each_entry(node, &parent->child_schq_list, list)
		cfg->schq[node->level]++;
}

static void otx2_qos_fill_cfg_tl(struct otx2_qos_node *parent,
				 struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;

	list_for_each_entry(node, &parent->child_list, list) {
		otx2_qos_fill_cfg_tl(node, cfg);
		otx2_qos_fill_cfg_schq(node, cfg);
	}

	/* Assign the required number of transmit scheduler queues under the
	 * given class
	 */
	cfg->schq_contig[parent->level - 1] += parent->child_dwrr_cnt +
					       parent->max_static_prio + 1;
}
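
/* Illustrative example (numbers assumed): a parent with static children
 * at prios 0 and 3 plus one DWRR child has child_dwrr_cnt = 1 and
 * max_static_prio = 3, so it requests 1 + 3 + 1 = 5 contiguous schqs;
 * holes are left for the unused static priorities 1 and 2 and are
 * released later by otx2_qos_free_unused_txschq().
 */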

static void otx2_qos_prepare_txschq_cfg(struct otx2_nic *pfvf,
					struct otx2_qos_node *parent,
					struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_fill_cfg_tl(parent, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}

static void otx2_qos_read_txschq_cfg_schq(struct otx2_qos_node *parent,
					  struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;
	int cnt;

	list_for_each_entry(node, &parent->child_schq_list, list) {
		cnt = cfg->dwrr_node_pos[node->level];
		cfg->schq_list[node->level][cnt] = node->schq;
		cfg->schq[node->level]++;
		cfg->dwrr_node_pos[node->level]++;
	}
}

static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent,
					struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;
	int cnt;

	list_for_each_entry(node, &parent->child_list, list) {
		otx2_qos_read_txschq_cfg_tl(node, cfg);
		cnt = cfg->static_node_pos[node->level];
		cfg->schq_contig_list[node->level][cnt] = node->schq;
		cfg->schq_contig[node->level]++;
		cfg->static_node_pos[node->level]++;
		otx2_qos_read_txschq_cfg_schq(node, cfg);
	}
}

static void otx2_qos_read_txschq_cfg(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_read_txschq_cfg_tl(node, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}

static struct otx2_qos_node *
otx2_qos_alloc_root(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->parent = NULL;
	if (!is_otx2_vf(pfvf->pcifunc)) {
		node->level = NIX_TXSCH_LVL_TL1;
	} else {
		node->level = NIX_TXSCH_LVL_TL2;
		node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
	}

	WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
	node->classid = OTX2_QOS_ROOT_CLASSID;

	hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, node->classid);
	list_add_tail(&node->list, &pfvf->qos.qos_tree);
	INIT_LIST_HEAD(&node->child_list);
	INIT_LIST_HEAD(&node->child_schq_list);

	return node;
}

static int otx2_qos_add_child_node(struct otx2_qos_node *parent,
				   struct otx2_qos_node *node)
{
	struct list_head *head = &parent->child_list;
	struct otx2_qos_node *tmp_node;
	struct list_head *tmp;

	if (node->prio > parent->max_static_prio)
		parent->max_static_prio = node->prio;

	for (tmp = head->next; tmp != head; tmp = tmp->next) {
		tmp_node = list_entry(tmp, struct otx2_qos_node, list);
		if (tmp_node->prio == node->prio &&
		    tmp_node->is_static)
			return -EEXIST;
		if (tmp_node->prio > node->prio) {
			list_add_tail(&node->list, tmp);
			return 0;
		}
	}

	list_add_tail(&node->list, head);
	return 0;
}
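
/* The child list is kept sorted by ascending prio: the loop above
 * inserts before the first sibling with a higher prio and otherwise
 * appends at the tail. Two static siblings may not share a prio
 * (-EEXIST), whereas a DWRR sibling with the same prio is permitted.
 */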

static int otx2_qos_alloc_txschq_node(struct otx2_nic *pfvf,
				      struct otx2_qos_node *node)
{
	struct otx2_qos_node *txschq_node, *parent, *tmp;
	int lvl;

	parent = node;
	for (lvl = node->level - 1; lvl >= NIX_TXSCH_LVL_MDQ; lvl--) {
		txschq_node = kzalloc(sizeof(*txschq_node), GFP_KERNEL);
		if (!txschq_node)
			goto err_out;

		txschq_node->parent = parent;
		txschq_node->level = lvl;
		txschq_node->classid = OTX2_QOS_CLASS_NONE;
		WRITE_ONCE(txschq_node->qid, OTX2_QOS_QID_NONE);
		txschq_node->rate = 0;
		txschq_node->ceil = 0;
		txschq_node->prio = 0;
		txschq_node->quantum = 0;
		txschq_node->is_static = true;
		txschq_node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
		txschq_node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

		mutex_lock(&pfvf->qos.qos_lock);
		list_add_tail(&txschq_node->list, &node->child_schq_list);
		mutex_unlock(&pfvf->qos.qos_lock);

		INIT_LIST_HEAD(&txschq_node->child_list);
		INIT_LIST_HEAD(&txschq_node->child_schq_list);
		parent = txschq_node;
	}

	return 0;

err_out:
	list_for_each_entry_safe(txschq_node, tmp, &node->child_schq_list,
				 list) {
		list_del(&txschq_node->list);
		kfree(txschq_node);
	}
	return -ENOMEM;
}

static struct otx2_qos_node *
otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf,
			     struct otx2_qos_node *parent,
			     u16 classid, u32 prio, u64 rate, u64 ceil,
			     u32 quantum, u16 qid, bool static_cfg)
{
	struct otx2_qos_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->parent = parent;
	node->level = parent->level - 1;
	node->classid = classid;
	WRITE_ONCE(node->qid, qid);

	node->rate = otx2_convert_rate(rate);
	node->ceil = otx2_convert_rate(ceil);
	node->prio = prio;
	node->quantum = quantum;
	node->is_static = static_cfg;
	node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
	node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

	__set_bit(qid, pfvf->qos.qos_sq_bmap);

	hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, classid);

	mutex_lock(&pfvf->qos.qos_lock);
	err = otx2_qos_add_child_node(parent, node);
	if (err) {
		mutex_unlock(&pfvf->qos.qos_lock);
		return ERR_PTR(err);
	}
	mutex_unlock(&pfvf->qos.qos_lock);

	INIT_LIST_HEAD(&node->child_list);
	INIT_LIST_HEAD(&node->child_schq_list);

	err = otx2_qos_alloc_txschq_node(pfvf, node);
	if (err) {
		otx2_qos_sw_node_delete(pfvf, node);
		return ERR_PTR(-ENOMEM);
	}

	return node;
}

static struct otx2_qos_node *
otx2_sw_node_find(struct otx2_nic *pfvf, u32 classid)
{
	struct otx2_qos_node *node = NULL;

	hash_for_each_possible(pfvf->qos.qos_hlist, node, hlist, classid) {
		if (node->classid == classid)
			break;
	}

	return node;
}

static struct otx2_qos_node *
otx2_sw_node_find_rcu(struct otx2_nic *pfvf, u32 classid)
{
	struct otx2_qos_node *node = NULL;

	hash_for_each_possible_rcu(pfvf->qos.qos_hlist, node, hlist, classid) {
		if (node->classid == classid)
			break;
	}

	return node;
}

int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid)
{
	struct otx2_qos_node *node;
	u16 qid;
	int res;

	node = otx2_sw_node_find_rcu(pfvf, classid);
	if (!node) {
		res = -ENOENT;
		goto out;
	}
	qid = READ_ONCE(node->qid);
	if (qid == OTX2_QOS_QID_INNER) {
		res = -EINVAL;
		goto out;
	}
	res = pfvf->hw.tx_queues + qid;
out:
	return res;
}
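
/* Minimal usage sketch (illustrative, not code from this driver): a
 * caller such as the driver's ndo_select_queue() can map an HTB classid
 * to a Tx queue index with
 *
 *	txq = otx2_get_txq_by_classid(pfvf, classid);
 *	if (txq < 0)
 *		txq = 0;	// hypothetical fallback queue
 *
 * The returned index is offset by hw.tx_queues, so QoS send queues sit
 * after the regular Tx queues.
 */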

static int
otx2_qos_txschq_config(struct otx2_nic *pfvf, struct otx2_qos_node *node)
{
	struct mbox *mbox = &pfvf->mbox;
	struct nix_txschq_config *req;
	int rc;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	req->lvl = node->level;
	__otx2_qos_txschq_cfg(pfvf, node, req);

	rc = otx2_sync_mbox_msg(&pfvf->mbox);

	mutex_unlock(&mbox->lock);

	return rc;
}

static int otx2_qos_txschq_alloc(struct otx2_nic *pfvf,
				 struct otx2_qos_cfg *cfg)
{
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	struct mbox *mbox = &pfvf->mbox;
	int lvl, rc, schq;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		req->schq[lvl] = cfg->schq[lvl];
		req->schq_contig[lvl] = cfg->schq_contig[lvl];
	}

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (rc) {
		mutex_unlock(&mbox->lock);
		return rc;
	}

	rsp = (struct nix_txsch_alloc_rsp *)
	      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);

	if (IS_ERR(rsp)) {
		rc = PTR_ERR(rsp);
		goto out;
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < rsp->schq_contig[lvl]; schq++) {
			cfg->schq_contig_list[lvl][schq] =
				rsp->schq_contig_list[lvl][schq];
		}
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < rsp->schq[lvl]; schq++) {
			cfg->schq_list[lvl][schq] =
				rsp->schq_list[lvl][schq];
		}
	}

	pfvf->qos.link_cfg_lvl = rsp->link_cfg_lvl;
	pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;

out:
	mutex_unlock(&mbox->lock);
	return rc;
}

static void otx2_qos_free_unused_txschq(struct otx2_nic *pfvf,
					struct otx2_qos_cfg *cfg)
{
	int lvl, idx, schq;

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
			if (!cfg->schq_index_used[lvl][idx]) {
				schq = cfg->schq_contig_list[lvl][idx];
				otx2_txschq_free_one(pfvf, lvl, schq);
			}
		}
	}
}

static void otx2_qos_txschq_fill_cfg_schq(struct otx2_nic *pfvf,
					  struct otx2_qos_node *node,
					  struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int cnt;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		cnt = cfg->dwrr_node_pos[tmp->level];
		tmp->schq = cfg->schq_list[tmp->level][cnt];
		cfg->dwrr_node_pos[tmp->level]++;
	}
}

static void otx2_qos_txschq_fill_cfg_tl(struct otx2_nic *pfvf,
					struct otx2_qos_node *node,
					struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int cnt;

	list_for_each_entry(tmp, &node->child_list, list) {
		otx2_qos_txschq_fill_cfg_tl(pfvf, tmp, cfg);
		cnt = cfg->static_node_pos[tmp->level];
		tmp->schq = cfg->schq_contig_list[tmp->level][tmp->txschq_idx];
		cfg->schq_index_used[tmp->level][tmp->txschq_idx] = true;
		if (cnt == 0)
			node->prio_anchor =
				cfg->schq_contig_list[tmp->level][0];
		cfg->static_node_pos[tmp->level]++;
		otx2_qos_txschq_fill_cfg_schq(pfvf, tmp, cfg);
	}
}

static void otx2_qos_txschq_fill_cfg(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_txschq_fill_cfg_tl(pfvf, node, cfg);
	otx2_qos_txschq_fill_cfg_schq(pfvf, node, cfg);
	otx2_qos_free_unused_txschq(pfvf, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}

static void __otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
					  struct otx2_qos_node *tmp,
					  unsigned long *child_idx_bmap,
					  int child_cnt)
{
	int idx;

	if (tmp->txschq_idx != OTX2_QOS_INVALID_TXSCHQ_IDX)
		return;

	/* assign static nodes 1:1 prio mapping first, then remaining nodes */
	for (idx = 0; idx < child_cnt; idx++) {
		if (tmp->is_static && tmp->prio == idx &&
		    !test_bit(idx, child_idx_bmap)) {
			tmp->txschq_idx = idx;
			set_bit(idx, child_idx_bmap);
			return;
		} else if (!tmp->is_static && idx >= tmp->prio &&
			   !test_bit(idx, child_idx_bmap)) {
			tmp->txschq_idx = idx;
			set_bit(idx, child_idx_bmap);
			return;
		}
	}
}
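
/* Worked example (values assumed): child_cnt = 5, static children at
 * prios 0 and 3, one DWRR child whose prio is the parent's
 * child_dwrr_prio = 1. The static pass pins prio 0 -> idx 0 and
 * prio 3 -> idx 3 (1:1 mapping); the DWRR pass then takes the first
 * free index >= its prio, idx 1. Indices 2 and 4 remain unused and are
 * freed by otx2_qos_free_unused_txschq().
 */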

static int otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
				       struct otx2_qos_node *node)
{
	unsigned long *child_idx_bmap;
	struct otx2_qos_node *tmp;
	int child_cnt;

	list_for_each_entry(tmp, &node->child_list, list)
		tmp->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

	/* allocate child index array */
	child_cnt = node->child_dwrr_cnt + node->max_static_prio + 1;
	child_idx_bmap = kcalloc(BITS_TO_LONGS(child_cnt),
				 sizeof(unsigned long),
				 GFP_KERNEL);
	if (!child_idx_bmap)
		return -ENOMEM;

	list_for_each_entry(tmp, &node->child_list, list)
		otx2_qos_assign_base_idx_tl(pfvf, tmp);

	/* assign base index of static priority children first */
	list_for_each_entry(tmp, &node->child_list, list) {
		if (!tmp->is_static)
			continue;
		__otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
					      child_cnt);
	}

	/* assign base index of dwrr priority children */
	list_for_each_entry(tmp, &node->child_list, list)
		__otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
					      child_cnt);

	kfree(child_idx_bmap);

	return 0;
}

static int otx2_qos_assign_base_idx(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node)
{
	int ret = 0;

	mutex_lock(&pfvf->qos.qos_lock);
	ret = otx2_qos_assign_base_idx_tl(pfvf, node);
	mutex_unlock(&pfvf->qos.qos_lock);

	return ret;
}

static int otx2_qos_txschq_push_cfg_schq(struct otx2_nic *pfvf,
					 struct otx2_qos_node *node,
					 struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int ret;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		ret = otx2_qos_txschq_config(pfvf, tmp);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_set_parent_topology(pfvf, tmp->parent);
		if (ret)
			return -EIO;
	}

	return 0;
}

static int otx2_qos_txschq_push_cfg_tl(struct otx2_nic *pfvf,
				       struct otx2_qos_node *node,
				       struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int ret;

	list_for_each_entry(tmp, &node->child_list, list) {
		ret = otx2_qos_txschq_push_cfg_tl(pfvf, tmp, cfg);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_config(pfvf, tmp);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_push_cfg_schq(pfvf, tmp, cfg);
		if (ret)
			return -EIO;
	}

	ret = otx2_qos_txschq_set_parent_topology(pfvf, node);
	if (ret)
		return -EIO;

	return 0;
}

static int otx2_qos_txschq_push_cfg(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node,
				    struct otx2_qos_cfg *cfg)
{
	int ret;

	mutex_lock(&pfvf->qos.qos_lock);
	ret = otx2_qos_txschq_push_cfg_tl(pfvf, node, cfg);
	if (ret)
		goto out;
	ret = otx2_qos_txschq_push_cfg_schq(pfvf, node, cfg);
out:
	mutex_unlock(&pfvf->qos.qos_lock);
	return ret;
}

static int otx2_qos_txschq_update_config(struct otx2_nic *pfvf,
					 struct otx2_qos_node *node,
					 struct otx2_qos_cfg *cfg)
{
	otx2_qos_txschq_fill_cfg(pfvf, node, cfg);

	return otx2_qos_txschq_push_cfg(pfvf, node, cfg);
}

static int otx2_qos_txschq_update_root_cfg(struct otx2_nic *pfvf,
					   struct otx2_qos_node *root,
					   struct otx2_qos_cfg *cfg)
{
	root->schq = cfg->schq_list[root->level][0];
	return otx2_qos_txschq_config(pfvf, root);
}

static void otx2_qos_free_cfg(struct otx2_nic *pfvf, struct otx2_qos_cfg *cfg)
{
	int lvl, idx, schq;

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (idx = 0; idx < cfg->schq[lvl]; idx++) {
			schq = cfg->schq_list[lvl][idx];
			otx2_txschq_free_one(pfvf, lvl, schq);
		}
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
			if (cfg->schq_index_used[lvl][idx]) {
				schq = cfg->schq_contig_list[lvl][idx];
				otx2_txschq_free_one(pfvf, lvl, schq);
			}
		}
	}
}

static void otx2_qos_enadis_sq(struct otx2_nic *pfvf,
			       struct otx2_qos_node *node,
			       u16 qid)
{
	if (pfvf->qos.qid_to_sqmap[qid] != OTX2_QOS_INVALID_SQ)
		otx2_qos_disable_sq(pfvf, qid);

	pfvf->qos.qid_to_sqmap[qid] = node->schq;
	otx2_qos_enable_sq(pfvf, qid);
}

static void otx2_qos_update_smq_schq(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     bool action)
{
	struct otx2_qos_node *tmp;

	if (node->qid == OTX2_QOS_QID_INNER)
		return;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		if (tmp->level == NIX_TXSCH_LVL_MDQ) {
			if (action == QOS_SMQ_FLUSH)
				otx2_smq_flush(pfvf, tmp->schq);
			else
				otx2_qos_enadis_sq(pfvf, tmp, node->qid);
		}
	}
}

static void __otx2_qos_update_smq(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node,
				  bool action)
{
	struct otx2_qos_node *tmp;

	list_for_each_entry(tmp, &node->child_list, list) {
		__otx2_qos_update_smq(pfvf, tmp, action);
		if (tmp->qid == OTX2_QOS_QID_INNER)
			continue;
		if (tmp->level == NIX_TXSCH_LVL_MDQ) {
			if (action == QOS_SMQ_FLUSH)
				otx2_smq_flush(pfvf, tmp->schq);
			else
				otx2_qos_enadis_sq(pfvf, tmp, tmp->qid);
		} else {
			otx2_qos_update_smq_schq(pfvf, tmp, action);
		}
	}
}

static void otx2_qos_update_smq(struct otx2_nic *pfvf,
				struct otx2_qos_node *node,
				bool action)
{
	mutex_lock(&pfvf->qos.qos_lock);
	__otx2_qos_update_smq(pfvf, node, action);
	otx2_qos_update_smq_schq(pfvf, node, action);
	mutex_unlock(&pfvf->qos.qos_lock);
}

static int otx2_qos_push_txschq_cfg(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node,
				    struct otx2_qos_cfg *cfg)
{
	int ret;

	ret = otx2_qos_txschq_alloc(pfvf, cfg);
	if (ret)
		return -ENOSPC;

	ret = otx2_qos_assign_base_idx(pfvf, node);
	if (ret)
		return -ENOMEM;

	if (!(pfvf->netdev->flags & IFF_UP)) {
		otx2_qos_txschq_fill_cfg(pfvf, node, cfg);
		return 0;
	}

	ret = otx2_qos_txschq_update_config(pfvf, node, cfg);
	if (ret) {
		otx2_qos_free_cfg(pfvf, cfg);
		return -EIO;
	}

	otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ);

	return 0;
}

static int otx2_qos_update_tree(struct otx2_nic *pfvf,
				struct otx2_qos_node *node,
				struct otx2_qos_cfg *cfg)
{
	otx2_qos_prepare_txschq_cfg(pfvf, node->parent, cfg);
	return otx2_qos_push_txschq_cfg(pfvf, node->parent, cfg);
}

static int otx2_qos_root_add(struct otx2_nic *pfvf, u16 htb_maj_id, u16 htb_defcls,
			     struct netlink_ext_ack *extack)
{
	struct otx2_qos_cfg *new_cfg;
	struct otx2_qos_node *root;
	int err;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_CREATE: handle=0x%x defcls=0x%x\n",
		   htb_maj_id, htb_defcls);

	root = otx2_qos_alloc_root(pfvf);
	if (IS_ERR(root))
		return PTR_ERR(root);

	/* allocate txschq queue */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		err = -ENOMEM;
		goto free_root_node;
	}
	/* allocate htb root node */
	new_cfg->schq[root->level] = 1;
	err = otx2_qos_txschq_alloc(pfvf, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Error allocating txschq");
		goto free_root_node;
	}

	/* Update TL1 RR PRIO */
	if (root->level == NIX_TXSCH_LVL_TL1) {
		root->child_dwrr_prio = pfvf->hw.txschq_aggr_lvl_rr_prio;
		netdev_dbg(pfvf->netdev,
			   "TL1 DWRR Priority %d\n", root->child_dwrr_prio);
	}

	if (!(pfvf->netdev->flags & IFF_UP) ||
	    root->level == NIX_TXSCH_LVL_TL1) {
		root->schq = new_cfg->schq_list[root->level][0];
		goto out;
	}

	/* update the txschq configuration in hw */
	err = otx2_qos_txschq_update_root_cfg(pfvf, root, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Error updating txschq configuration");
		goto txschq_free;
	}

out:
	WRITE_ONCE(pfvf->qos.defcls, htb_defcls);
	/* Pairs with smp_load_acquire() in ndo_select_queue */
	smp_store_release(&pfvf->qos.maj_id, htb_maj_id);
	kfree(new_cfg);
	return 0;

txschq_free:
	otx2_qos_free_cfg(pfvf, new_cfg);
free_root_node:
	kfree(new_cfg);
	otx2_qos_sw_node_delete(pfvf, root);
	return err;
}

static int otx2_qos_root_destroy(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *root;

	netdev_dbg(pfvf->netdev, "TC_HTB_DESTROY\n");

	/* find root node */
	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
	if (!root)
		return -ENOENT;

	/* free the hw mappings */
	otx2_qos_destroy_node(pfvf, root);

	return 0;
}

static int otx2_qos_validate_quantum(struct otx2_nic *pfvf, u32 quantum)
{
	u32 rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
	int err = 0;

	/* The maximum round-robin weight supported by OcteonTx2 and CN10K
	 * differs, so validate against the matching limit.
	 */
	if (is_dev_otx2(pfvf->pdev))
		err = (rr_weight > OTX2_MAX_RR_QUANTUM) ? -EINVAL : 0;
	else if (rr_weight > CN10K_MAX_RR_WEIGHT)
		err = -EINVAL;

	return err;
}
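
/* Back-of-the-envelope bound (illustrative, dwrr_mtu value assumed):
 * CN10K_MAX_RR_WEIGHT is GENMASK_ULL(13, 0) = 16383, so with a dwrr_mtu
 * of 1500 the largest quantum that passes validation is roughly
 * 16383 * 1500 bytes. On OcteonTx2 the converted weight is checked
 * against the 24-bit OTX2_MAX_RR_QUANTUM instead.
 */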

static int otx2_qos_validate_dwrr_cfg(struct otx2_qos_node *parent,
				      struct netlink_ext_ack *extack,
				      struct otx2_nic *pfvf,
				      u64 prio, u64 quantum)
{
	int err;

	err = otx2_qos_validate_quantum(pfvf, quantum);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported quantum value");
		return err;
	}

	if (parent->child_dwrr_prio == OTX2_QOS_DEFAULT_PRIO) {
		parent->child_dwrr_prio = prio;
	} else if (prio != parent->child_dwrr_prio) {
		NL_SET_ERR_MSG_MOD(extack, "Only one DWRR group is allowed");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_qos_validate_configuration(struct otx2_qos_node *parent,
					   struct netlink_ext_ack *extack,
					   struct otx2_nic *pfvf,
					   u64 prio, bool static_cfg)
{
	if (prio == parent->child_dwrr_prio && static_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "DWRR child group with same priority exists");
		return -EEXIST;
	}

	if (static_cfg && test_bit(prio, parent->prio_bmap)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Static priority child with same priority exists");
		return -EEXIST;
	}

	return 0;
}

static void otx2_reset_dwrr_prio(struct otx2_qos_node *parent, u64 prio)
{
	/* For PF, root node dwrr priority is static */
	if (parent->level == NIX_TXSCH_LVL_TL1)
		return;

	if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) {
		parent->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
		clear_bit(prio, parent->prio_bmap);
	}
}

static bool is_qos_node_dwrr(struct otx2_qos_node *parent,
			     struct otx2_nic *pfvf,
			     u64 prio)
{
	struct otx2_qos_node *node;
	bool ret = false;

	if (parent->child_dwrr_prio == prio)
		return true;

	mutex_lock(&pfvf->qos.qos_lock);
	list_for_each_entry(node, &parent->child_list, list) {
		if (prio == node->prio) {
			if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO &&
			    parent->child_dwrr_prio != prio)
				continue;

			if (otx2_qos_validate_quantum(pfvf, node->quantum)) {
				netdev_err(pfvf->netdev,
					   "Unsupported quantum value for existing classid=0x%x quantum=%d prio=%d",
					    node->classid, node->quantum,
					    node->prio);
				break;
			}
			/* mark old node as dwrr */
			node->is_static = false;
			parent->child_dwrr_cnt++;
			parent->child_static_cnt--;
			ret = true;
			break;
		}
	}
	mutex_unlock(&pfvf->qos.qos_lock);

	return ret;
}
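
/* Side effect worth noting (as implemented above): when a new child
 * arrives at a prio already used by a static sibling, that sibling is
 * converted in place to DWRR (is_static = false) and the parent's
 * static/dwrr counters are rebalanced, provided the sibling's quantum
 * passes otx2_qos_validate_quantum().
 */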

static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid,
				     u32 parent_classid, u64 rate, u64 ceil,
				     u64 prio, u32 quantum,
				     struct netlink_ext_ack *extack)
{
	struct otx2_qos_cfg *old_cfg, *new_cfg;
	struct otx2_qos_node *node, *parent;
	int qid, ret, err;
	bool static_cfg;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld quantum=%d\n",
		   classid, parent_classid, rate, ceil, prio, quantum);

	if (prio > OTX2_QOS_MAX_PRIO) {
		NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7");
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!quantum || quantum > INT_MAX) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* get parent node */
	parent = otx2_sw_node_find(pfvf, parent_classid);
	if (!parent) {
		NL_SET_ERR_MSG_MOD(extack, "parent node not found");
		ret = -ENOENT;
		goto out;
	}
	if (parent->level == NIX_TXSCH_LVL_MDQ) {
		NL_SET_ERR_MSG_MOD(extack, "HTB qos max levels reached");
		ret = -EOPNOTSUPP;
		goto out;
	}

	static_cfg = !is_qos_node_dwrr(parent, pfvf, prio);
	ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio,
					      static_cfg);
	if (ret)
		goto out;

	if (!static_cfg) {
		ret = otx2_qos_validate_dwrr_cfg(parent, extack, pfvf, prio,
						 quantum);
		if (ret)
			goto out;
	}

	if (static_cfg)
		parent->child_static_cnt++;
	else
		parent->child_dwrr_cnt++;

	set_bit(prio, parent->prio_bmap);

	/* read current txschq configuration */
	old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL);
	if (!old_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto reset_prio;
	}
	otx2_qos_read_txschq_cfg(pfvf, parent, old_cfg);

	/* allocate a new sq */
	qid = otx2_qos_get_qid(pfvf);
	if (qid < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Reached max supported QOS SQ's");
		ret = -ENOMEM;
		goto free_old_cfg;
	}

	/* Actual SQ mapping will be updated after SMQ alloc */
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	/* allocate and initialize a new child node */
	node = otx2_qos_sw_create_leaf_node(pfvf, parent, classid, prio, rate,
					    ceil, quantum, qid, static_cfg);
	if (IS_ERR(node)) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
		ret = PTR_ERR(node);
		goto free_old_cfg;
	}

	/* push new txschq config to hw */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto free_node;
	}
	ret = otx2_qos_update_tree(pfvf, node, new_cfg);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
		kfree(new_cfg);
		otx2_qos_sw_node_delete(pfvf, node);
		/* restore the old qos tree */
		err = otx2_qos_txschq_update_config(pfvf, parent, old_cfg);
		if (err) {
			netdev_err(pfvf->netdev,
				   "Failed to restore txschq configuration");
			goto free_old_cfg;
		}

		otx2_qos_update_smq(pfvf, parent, QOS_CFG_SQ);
		goto free_old_cfg;
	}

	/* update tx_real_queues */
	otx2_qos_update_tx_netdev_queues(pfvf);

	/* free new txschq config */
	kfree(new_cfg);

	/* free old txschq config */
	otx2_qos_free_cfg(pfvf, old_cfg);
	kfree(old_cfg);

	return pfvf->hw.tx_queues + qid;

free_node:
	otx2_qos_sw_node_delete(pfvf, node);
free_old_cfg:
	kfree(old_cfg);
reset_prio:
	if (static_cfg)
		parent->child_static_cnt--;
	else
		parent->child_dwrr_cnt--;

	clear_bit(prio, parent->prio_bmap);
out:
	return ret;
}

static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
				  u16 child_classid, u64 rate, u64 ceil, u64 prio,
				  u32 quantum, struct netlink_ext_ack *extack)
{
	struct otx2_qos_cfg *old_cfg, *new_cfg;
	struct otx2_qos_node *node, *child;
	bool static_cfg;
	int ret, err;
	u16 qid;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_LEAF_TO_INNER classid %04x, child %04x, rate %llu, ceil %llu\n",
		   classid, child_classid, rate, ceil);

	if (prio > OTX2_QOS_MAX_PRIO) {
		NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7");
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!quantum || quantum > INT_MAX) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* find node related to classid */
	node = otx2_sw_node_find(pfvf, classid);
	if (!node) {
		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
		ret = -ENOENT;
		goto out;
	}
	/* check max qos txschq level */
	if (node->level == NIX_TXSCH_LVL_MDQ) {
		NL_SET_ERR_MSG_MOD(extack, "HTB qos level not supported");
		ret = -EOPNOTSUPP;
		goto out;
	}

	static_cfg = !is_qos_node_dwrr(node, pfvf, prio);
	if (!static_cfg) {
		ret = otx2_qos_validate_dwrr_cfg(node, extack, pfvf, prio,
						 quantum);
		if (ret)
			goto out;
	}

	if (static_cfg)
		node->child_static_cnt++;
	else
		node->child_dwrr_cnt++;

	set_bit(prio, node->prio_bmap);

	/* store the qid to assign to leaf node */
	qid = node->qid;

	/* read current txschq configuration */
	old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL);
	if (!old_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto reset_prio;
	}
	otx2_qos_read_txschq_cfg(pfvf, node, old_cfg);

	/* delete the txschq nodes allocated for this node */
	otx2_qos_free_sw_node_schq(pfvf, node);

	/* mark this node as htb inner node */
	WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);

	/* allocate and initialize a new child node */
	child = otx2_qos_sw_create_leaf_node(pfvf, node, child_classid,
					     prio, rate, ceil, quantum,
					     qid, static_cfg);
	if (IS_ERR(child)) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
		ret = PTR_ERR(child);
		goto free_old_cfg;
	}

	/* push new txschq config to hw */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto free_node;
	}
	ret = otx2_qos_update_tree(pfvf, child, new_cfg);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
		kfree(new_cfg);
		otx2_qos_sw_node_delete(pfvf, child);
		/* restore the old qos tree */
		WRITE_ONCE(node->qid, qid);
		err = otx2_qos_alloc_txschq_node(pfvf, node);
		if (err) {
			netdev_err(pfvf->netdev,
				   "Failed to restore old leaf node");
			goto free_old_cfg;
		}
		err = otx2_qos_txschq_update_config(pfvf, node, old_cfg);
		if (err) {
			netdev_err(pfvf->netdev,
				   "Failed to restore txschq configuration");
			goto free_old_cfg;
		}
		otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ);
		goto free_old_cfg;
	}

	/* free new txschq config */
	kfree(new_cfg);

	/* free old txschq config */
	otx2_qos_free_cfg(pfvf, old_cfg);
	kfree(old_cfg);

	return 0;

free_node:
	otx2_qos_sw_node_delete(pfvf, child);
free_old_cfg:
	kfree(old_cfg);
reset_prio:
	if (static_cfg)
		node->child_static_cnt--;
	else
		node->child_dwrr_cnt--;
	clear_bit(prio, node->prio_bmap);
out:
	return ret;
}

static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid,
			     struct netlink_ext_ack *extack)
{
	struct otx2_qos_node *node, *parent;
	bool dwrr_del_node = false;
	u64 prio;
	u16 qid;

	netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);

	/* find node related to classid */
	node = otx2_sw_node_find(pfvf, *classid);
	if (!node) {
		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
		return -ENOENT;
	}
	parent = node->parent;
	prio   = node->prio;
	qid    = node->qid;

	if (!node->is_static)
		dwrr_del_node = true;

	otx2_qos_disable_sq(pfvf, node->qid);

	otx2_qos_destroy_node(pfvf, node);
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	if (dwrr_del_node) {
		parent->child_dwrr_cnt--;
	} else {
		parent->child_static_cnt--;
		clear_bit(prio, parent->prio_bmap);
	}

	/* Reset DWRR priority if all dwrr nodes are deleted */
	if (!parent->child_dwrr_cnt)
		otx2_reset_dwrr_prio(parent, prio);

	if (!parent->child_static_cnt)
		parent->max_static_prio = 0;

	return 0;
}

static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force,
				  struct netlink_ext_ack *extack)
{
	struct otx2_qos_node *node, *parent;
	struct otx2_qos_cfg *new_cfg;
	bool dwrr_del_node = false;
	u64 prio;
	int err;
	u16 qid;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_LEAF_DEL_LAST classid %04x\n", classid);

	/* find node related to classid */
	node = otx2_sw_node_find(pfvf, classid);
	if (!node) {
		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
		return -ENOENT;
	}

	/* save qid for use by parent */
	qid = node->qid;
	prio = node->prio;

	parent = otx2_sw_node_find(pfvf, node->parent->classid);
	if (!parent) {
		NL_SET_ERR_MSG_MOD(extack, "parent node not found");
		return -ENOENT;
	}

	if (!node->is_static)
		dwrr_del_node = true;

	/* destroy the leaf node */
	otx2_qos_destroy_node(pfvf, node);
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	if (dwrr_del_node) {
		parent->child_dwrr_cnt--;
	} else {
		parent->child_static_cnt--;
		clear_bit(prio, parent->prio_bmap);
	}

	/* Reset DWRR priority if all dwrr nodes are deleted */
	if (!parent->child_dwrr_cnt)
		otx2_reset_dwrr_prio(parent, prio);

	if (!parent->child_static_cnt)
		parent->max_static_prio = 0;

	/* create downstream txschq entries to parent */
	err = otx2_qos_alloc_txschq_node(pfvf, parent);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "HTB failed to create txsch configuration");
		return err;
	}
	WRITE_ONCE(parent->qid, qid);
	__set_bit(qid, pfvf->qos.qos_sq_bmap);

	/* push new txschq config to hw */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		return -ENOMEM;
	}
	/* fill txschq cfg and push txschq cfg to hw */
	otx2_qos_fill_cfg_schq(parent, new_cfg);
	err = otx2_qos_push_txschq_cfg(pfvf, parent, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
		kfree(new_cfg);
		return err;
	}
	kfree(new_cfg);

	/* update tx_real_queues */
	otx2_qos_update_tx_netdev_queues(pfvf);

	return 0;
}

void otx2_clean_qos_queues(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *root;

	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
	if (!root)
		return;

	otx2_qos_update_smq(pfvf, root, QOS_SMQ_FLUSH);
}

void otx2_qos_config_txschq(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *root;
	int err;

	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
	if (!root)
		return;

	if (root->level != NIX_TXSCH_LVL_TL1) {
		err = otx2_qos_txschq_config(pfvf, root);
		if (err) {
			netdev_err(pfvf->netdev, "Error updating txschq configuration\n");
			goto root_destroy;
		}
	}

	err = otx2_qos_txschq_push_cfg_tl(pfvf, root, NULL);
	if (err) {
		netdev_err(pfvf->netdev, "Error updating txschq configuration\n");
		goto root_destroy;
	}

	otx2_qos_update_smq(pfvf, root, QOS_CFG_SQ);
	return;

root_destroy:
	netdev_err(pfvf->netdev, "Failed to update Scheduler/Shaping config in Hardware\n");
	/* Free resources allocated */
	otx2_qos_root_destroy(pfvf);
}

int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb)
{
	struct otx2_nic *pfvf = netdev_priv(ndev);
	int res;

	switch (htb->command) {
	case TC_HTB_CREATE:
		return otx2_qos_root_add(pfvf, htb->parent_classid,
					 htb->classid, htb->extack);
	case TC_HTB_DESTROY:
		return otx2_qos_root_destroy(pfvf);
	case TC_HTB_LEAF_ALLOC_QUEUE:
		res = otx2_qos_leaf_alloc_queue(pfvf, htb->classid,
						htb->parent_classid,
						htb->rate, htb->ceil,
						htb->prio, htb->quantum,
						htb->extack);
		if (res < 0)
			return res;
		htb->qid = res;
		return 0;
	case TC_HTB_LEAF_TO_INNER:
		return otx2_qos_leaf_to_inner(pfvf, htb->parent_classid,
					      htb->classid, htb->rate,
					      htb->ceil, htb->prio,
					      htb->quantum, htb->extack);
	case TC_HTB_LEAF_DEL:
		return otx2_qos_leaf_del(pfvf, &htb->classid, htb->extack);
	case TC_HTB_LEAF_DEL_LAST:
	case TC_HTB_LEAF_DEL_LAST_FORCE:
		return otx2_qos_leaf_del_last(pfvf, htb->classid,
				htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
					      htb->extack);
	case TC_HTB_LEAF_QUERY_QUEUE:
		res = otx2_get_txq_by_classid(pfvf, htb->classid);
		/* don't store a negative error code into htb->qid */
		if (res < 0)
			return res;
		htb->qid = res;
		return 0;
	case TC_HTB_NODE_MODIFY:
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}
}
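
/* Usage sketch (illustrative tc command lines, not part of this file):
 * the offload hooks above are driven by the HTB qdisc in offload mode,
 * e.g.
 *
 *	tc qdisc replace dev eth0 root handle 1: htb offload
 *	tc class add dev eth0 parent 1: classid 1:1 htb \
 *		rate 100mbit ceil 200mbit prio 1 quantum 1460
 *
 * TC_HTB_CREATE fires on the qdisc replace; each class add reaches
 * otx2_qos_leaf_alloc_queue(), or otx2_qos_leaf_to_inner() when an
 * existing leaf gains a child.
 */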