// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_drr.c         Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

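/*
 * Per-class state. Each class has a configured quantum (bytes of credit
 * added per round) and a running deficit counter, an inner qdisc holding
 * the class's packets, and an entry (alist) on the scheduler's list of
 * active classes. filter_cnt counts tc filters bound to the class and
 * blocks deletion while nonzero.
 */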
struct drr_class {
	struct Qdisc_class_common	common;
	unsigned int			filter_cnt;

	struct gnet_stats_basic_packed		bstats;
	struct gnet_stats_queue		qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct list_head		alist;
	struct Qdisc			*qdisc;

	u32				quantum;
	u32				deficit;
};

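/*
 * Qdisc private data: the list of classes that currently have packets
 * queued (served round-robin by dequeue), the tc filter chain used for
 * classification, and a hash table of all configured classes.
 */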
struct drr_sched {
	struct list_head		active;
	struct tcf_proto __rcu		*filter_list;
	struct tcf_block		*block;
	struct Qdisc_class_hash		clhash;
};

static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct drr_class, common);
}

static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};

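/*
 * Create a new class or change an existing one. The quantum defaults to
 * the device MTU when TCA_DRR_QUANTUM is not given; a zero quantum is
 * rejected, since a class whose deficit never grows would make the
 * dequeue loop spin without ever sending a packet.
 */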
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "DRR options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_DRR_MAX, opt, drr_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0) {
			NL_SET_ERR_MSG(extack, "Specified DRR quantum cannot be zero");
			return -EINVAL;
		}
	} else
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err) {
				NL_SET_ERR_MSG(extack, "Failed to replace estimator");
				return err;
			}
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->common.classid = classid;
	cl->quantum	   = quantum;
	cl->qdisc	   = qdisc_create_dflt(sch->dev_queue,
					       &pfifo_qdisc_ops, classid,
					       NULL);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	else
		qdisc_hash_add(cl->qdisc, true);

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
					    NULL,
					    qdisc_root_sleeping_running(sch),
					    tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Failed to replace estimator");
			qdisc_put(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->rate_est);
	qdisc_put(cl->qdisc);
	kfree(cl);
}

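/*
 * Delete a class. Fails with -EBUSY while tc filters still reference the
 * class (filter_cnt > 0); otherwise the class's queue is purged and the
 * class unhashed under the tree lock before it is freed.
 */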
static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	qdisc_purge_queue(cl->qdisc);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	drr_destroy_class(sch, cl);
	return 0;
}

static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)drr_find_class(sch, classid);
}

static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);

	if (cl) {
		NL_SET_ERR_MSG(extack, "DRR classid must be zero");
		return NULL;
	}

	return q->block;
}

static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	cl->filter_cnt--;
}

static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	return cl->qdisc;
}

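/*
 * Called when the class's inner qdisc has been drained to zero length:
 * take the class off the active list so dequeue no longer visits it.
 */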
static void drr_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	list_del(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	__u32 qlen = qdisc_qlen_sum(cl->qdisc);
	struct Qdisc *cl_q = cl->qdisc;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (qlen)
		xstats.deficit = cl->deficit;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

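/*
 * Map an skb to a class: first try a direct match of skb->priority
 * against a classid on this qdisc, then fall back to the attached tc
 * filters. Returns NULL (with *qerr set accordingly) when no class
 * matches or an action consumed or dropped the packet.
 */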
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}

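/*
 * Enqueue into the class's inner qdisc. A class whose queue was empty is
 * appended to the tail of the active list with a fresh deficit of one
 * quantum, so it joins the round-robin rotation.
 */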
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err = 0;
	bool first;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (first) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return err;
}

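/*
 * Deficit Round Robin dequeue (Shreedhar & Varghese): look at the class
 * at the head of the active list. If its head packet fits within the
 * class's deficit, charge the deficit and transmit the packet, unlinking
 * the class once its queue empties. Otherwise add one quantum to the
 * deficit and rotate the class to the tail of the list. With quanta of
 * at least one MTU this gives O(1) scheduling with per-class throughput
 * proportional to the configured quantum.
 */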
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(skb == NULL))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);

			bstats_update(&cl->bstats, skb);
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}

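/*
 * Reset: unlink every class that still had packets queued from the
 * active list, then reset each class's inner qdisc, dropping all
 * queued packets.
 */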
static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
}

static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.find		= drr_search_class,
	.tcf_block	= drr_tcf_block,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};

static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");