1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * net/sched/sch_generic.c	Generic packet scheduler routines.
4 *
5 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
7 *              - Ingress support
8 */
9
10#include <linux/bitops.h>
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/string.h>
16#include <linux/errno.h>
17#include <linux/netdevice.h>
18#include <linux/skbuff.h>
19#include <linux/rtnetlink.h>
20#include <linux/init.h>
21#include <linux/rcupdate.h>
22#include <linux/list.h>
23#include <linux/slab.h>
24#include <linux/if_vlan.h>
25#include <linux/skb_array.h>
26#include <linux/if_macvlan.h>
27#include <net/sch_generic.h>
28#include <net/pkt_sched.h>
29#include <net/dst.h>
30#include <trace/events/qdisc.h>
31#include <trace/events/net.h>
32#include <net/xfrm.h>
33
34/* Qdisc to use by default */
35const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
36EXPORT_SYMBOL(default_qdisc_ops);
37
38static void qdisc_maybe_clear_missed(struct Qdisc *q,
39				     const struct netdev_queue *txq)
40{
41	clear_bit(__QDISC_STATE_MISSED, &q->state);
42
43	/* Make sure the netif_xmit_frozen_or_stopped() check
44	 * below happens after clearing STATE_MISSED.
45	 */
46	smp_mb__after_atomic();
47
48	/* Check netif_xmit_frozen_or_stopped() again to make sure
49	 * STATE_MISSED is set again if the STATE_MISSED set by
50	 * netif_tx_wake_queue()'s rescheduling of net_tx_action()
51	 * was cleared by the clear_bit() above.
52	 */
53	if (!netif_xmit_frozen_or_stopped(txq))
54		set_bit(__QDISC_STATE_MISSED, &q->state);
55	else
56		set_bit(__QDISC_STATE_DRAINING, &q->state);
57}
58
59/* Main transmission queue. */
60
61/* Modifications to data participating in scheduling must be protected with
62 * qdisc_lock(qdisc) spinlock.
63 *
64 * The idea is the following:
65 * - enqueue, dequeue are serialized via qdisc root lock
66 * - ingress filtering is also serialized via qdisc root lock
67 * - updates to tree and tree walking are only done under the rtnl mutex.
68 */
69
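/* Sentinel returned by __skb_dequeue_bad_txq() when the queued skb cannot be
 * sent because its tx queue is frozen or stopped.  It is distinct from both
 * NULL (nothing queued) and any real skb pointer, so dequeue_skb() can tell
 * "queue empty" apart from "blocked by the hardware queue".
 */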
70#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
71
72static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
73{
74	const struct netdev_queue *txq = q->dev_queue;
75	spinlock_t *lock = NULL;
76	struct sk_buff *skb;
77
78	if (q->flags & TCQ_F_NOLOCK) {
79		lock = qdisc_lock(q);
80		spin_lock(lock);
81	}
82
83	skb = skb_peek(&q->skb_bad_txq);
84	if (skb) {
85		/* check the reason for requeuing without the tx lock first */
86		txq = skb_get_tx_queue(txq->dev, skb);
87		if (!netif_xmit_frozen_or_stopped(txq)) {
88			skb = __skb_dequeue(&q->skb_bad_txq);
89			if (qdisc_is_percpu_stats(q)) {
90				qdisc_qstats_cpu_backlog_dec(q, skb);
91				qdisc_qstats_cpu_qlen_dec(q);
92			} else {
93				qdisc_qstats_backlog_dec(q, skb);
94				q->q.qlen--;
95			}
96		} else {
97			skb = SKB_XOFF_MAGIC;
98			qdisc_maybe_clear_missed(q, txq);
99		}
100	}
101
102	if (lock)
103		spin_unlock(lock);
104
105	return skb;
106}
107
108static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
109{
110	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);
111
112	if (unlikely(skb))
113		skb = __skb_dequeue_bad_txq(q);
114
115	return skb;
116}
117
118static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
119					     struct sk_buff *skb)
120{
121	spinlock_t *lock = NULL;
122
123	if (q->flags & TCQ_F_NOLOCK) {
124		lock = qdisc_lock(q);
125		spin_lock(lock);
126	}
127
128	__skb_queue_tail(&q->skb_bad_txq, skb);
129
130	if (qdisc_is_percpu_stats(q)) {
131		qdisc_qstats_cpu_backlog_inc(q, skb);
132		qdisc_qstats_cpu_qlen_inc(q);
133	} else {
134		qdisc_qstats_backlog_inc(q, skb);
135		q->q.qlen++;
136	}
137
138	if (lock)
139		spin_unlock(lock);
140}
141
142static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
143{
144	spinlock_t *lock = NULL;
145
146	if (q->flags & TCQ_F_NOLOCK) {
147		lock = qdisc_lock(q);
148		spin_lock(lock);
149	}
150
151	while (skb) {
152		struct sk_buff *next = skb->next;
153
154		__skb_queue_tail(&q->gso_skb, skb);
155
156		/* it's still part of the queue */
157		if (qdisc_is_percpu_stats(q)) {
158			qdisc_qstats_cpu_requeues_inc(q);
159			qdisc_qstats_cpu_backlog_inc(q, skb);
160			qdisc_qstats_cpu_qlen_inc(q);
161		} else {
162			q->qstats.requeues++;
163			qdisc_qstats_backlog_inc(q, skb);
164			q->q.qlen++;
165		}
166
167		skb = next;
168	}
169
170	if (lock) {
171		spin_unlock(lock);
172		set_bit(__QDISC_STATE_MISSED, &q->state);
173	} else {
174		__netif_schedule(q);
175	}
176}
177
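/* Dequeue additional packets and chain them to @skb via skb->next for as long
 * as the tx queue's bulk byte budget (qdisc_avail_bulklimit()) allows, so that
 * the whole chain can be handed to the driver in a single xmit call.
 */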
178static void try_bulk_dequeue_skb(struct Qdisc *q,
179				 struct sk_buff *skb,
180				 const struct netdev_queue *txq,
181				 int *packets)
182{
183	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
184
185	while (bytelimit > 0) {
186		struct sk_buff *nskb = q->dequeue(q);
187
188		if (!nskb)
189			break;
190
191		bytelimit -= nskb->len; /* covers GSO len */
192		skb->next = nskb;
193		skb = nskb;
194		(*packets)++; /* GSO counts as one pkt */
195	}
196	skb_mark_not_on_list(skb);
197}
198
199/* This variant of try_bulk_dequeue_skb() makes sure
200 * all skbs in the chain are for the same txq
201 */
202static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
203				      struct sk_buff *skb,
204				      int *packets)
205{
206	int mapping = skb_get_queue_mapping(skb);
207	struct sk_buff *nskb;
208	int cnt = 0;
209
210	do {
211		nskb = q->dequeue(q);
212		if (!nskb)
213			break;
214		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
215			qdisc_enqueue_skb_bad_txq(q, nskb);
216			break;
217		}
218		skb->next = nskb;
219		skb = nskb;
220	} while (++cnt < 8);
221	(*packets) += cnt;
222	skb_mark_not_on_list(skb);
223}
224
225/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
226 * A requeued skb (via q->gso_skb) can also be a SKB list.
227 */
228static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
229				   int *packets)
230{
231	const struct netdev_queue *txq = q->dev_queue;
232	struct sk_buff *skb = NULL;
233
234	*packets = 1;
235	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
236		spinlock_t *lock = NULL;
237
238		if (q->flags & TCQ_F_NOLOCK) {
239			lock = qdisc_lock(q);
240			spin_lock(lock);
241		}
242
243		skb = skb_peek(&q->gso_skb);
244
245		/* skb may be NULL if another cpu pulled gso_skb off between the
246		 * empty check and taking the lock.
247		 */
248		if (!skb) {
249			if (lock)
250				spin_unlock(lock);
251			goto validate;
252		}
253
254		/* skbs in gso_skb were already validated */
255		*validate = false;
256		if (xfrm_offload(skb))
257			*validate = true;
258		/* check the reason for requeuing without the tx lock first */
259		txq = skb_get_tx_queue(txq->dev, skb);
260		if (!netif_xmit_frozen_or_stopped(txq)) {
261			skb = __skb_dequeue(&q->gso_skb);
262			if (qdisc_is_percpu_stats(q)) {
263				qdisc_qstats_cpu_backlog_dec(q, skb);
264				qdisc_qstats_cpu_qlen_dec(q);
265			} else {
266				qdisc_qstats_backlog_dec(q, skb);
267				q->q.qlen--;
268			}
269		} else {
270			skb = NULL;
271			qdisc_maybe_clear_missed(q, txq);
272		}
273		if (lock)
274			spin_unlock(lock);
275		goto trace;
276	}
277validate:
278	*validate = true;
279
280	if ((q->flags & TCQ_F_ONETXQUEUE) &&
281	    netif_xmit_frozen_or_stopped(txq)) {
282		qdisc_maybe_clear_missed(q, txq);
283		return skb;
284	}
285
286	skb = qdisc_dequeue_skb_bad_txq(q);
287	if (unlikely(skb)) {
288		if (skb == SKB_XOFF_MAGIC)
289			return NULL;
290		goto bulk;
291	}
292	skb = q->dequeue(q);
293	if (skb) {
294bulk:
295		if (qdisc_may_bulk(q))
296			try_bulk_dequeue_skb(q, skb, txq, packets);
297		else
298			try_bulk_dequeue_skb_slow(q, skb, packets);
299	}
300trace:
301	trace_qdisc_dequeue(q, txq, *packets, skb);
302	return skb;
303}
304
305/*
306 * Transmit possibly several skbs, and handle the return status as
307 * required. Owning the qdisc running bit guarantees that only one CPU
308 * can execute this function.
309 *
310 * Returns to the caller:
311 *				false  - hardware queue frozen or driver busy; back off
312 *				true   - feel free to send more pkts
313 */
314bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
315		     struct net_device *dev, struct netdev_queue *txq,
316		     spinlock_t *root_lock, bool validate)
317{
318	int ret = NETDEV_TX_BUSY;
319	bool again = false;
320
321	/* And release qdisc */
322	if (root_lock)
323		spin_unlock(root_lock);
324
325	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
326	if (validate)
327		skb = validate_xmit_skb_list(skb, dev, &again);
328
329#ifdef CONFIG_XFRM_OFFLOAD
330	if (unlikely(again)) {
331		if (root_lock)
332			spin_lock(root_lock);
333
334		dev_requeue_skb(skb, q);
335		return false;
336	}
337#endif
338
339	if (likely(skb)) {
340		HARD_TX_LOCK(dev, txq, smp_processor_id());
341		if (!netif_xmit_frozen_or_stopped(txq))
342			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
343		else
344			qdisc_maybe_clear_missed(q, txq);
345
346		HARD_TX_UNLOCK(dev, txq);
347	} else {
348		if (root_lock)
349			spin_lock(root_lock);
350		return true;
351	}
352
353	if (root_lock)
354		spin_lock(root_lock);
355
356	if (!dev_xmit_complete(ret)) {
357		/* Driver returned NETDEV_TX_BUSY - requeue skb */
358		if (unlikely(ret != NETDEV_TX_BUSY))
359			net_warn_ratelimited("BUG %s code %d qlen %d\n",
360					     dev->name, ret, q->q.qlen);
361
362		dev_requeue_skb(skb, q);
363		return false;
364	}
365
366	return true;
367}
368
369/*
370 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
371 *
372 * The qdisc running state guarantees that only one CPU can process
373 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
374 * this queue.
375 *
376 *  netif_tx_lock serializes accesses to the device driver.
377 *
378 *  qdisc_lock(q) and netif_tx_lock are mutually exclusive;
379 *  if one is grabbed, the other must be free.
380 *
381 * Note that this procedure can be called by a watchdog timer.
382 *
383 * Returns to the caller:
384 *				false - queue is empty or throttled.
385 *				true  - queue is not empty.
386 *
387 */
388static inline bool qdisc_restart(struct Qdisc *q, int *packets)
389{
390	spinlock_t *root_lock = NULL;
391	struct netdev_queue *txq;
392	struct net_device *dev;
393	struct sk_buff *skb;
394	bool validate;
395
396	/* Dequeue packet */
397	skb = dequeue_skb(q, &validate, packets);
398	if (unlikely(!skb))
399		return false;
400
401	if (!(q->flags & TCQ_F_NOLOCK))
402		root_lock = qdisc_lock(q);
403
404	dev = qdisc_dev(q);
405	txq = skb_get_tx_queue(dev, skb);
406
407	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
408}
409
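/* Keep restarting the qdisc until it runs out of packets or the dev_tx_weight
 * quota is exhausted.  When the quota runs out, transmission is deferred: a
 * lockless qdisc is flagged with STATE_MISSED, others are rescheduled via
 * __netif_schedule(), so that other softirq work gets a chance to run.
 */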
410void __qdisc_run(struct Qdisc *q)
411{
412	int quota = READ_ONCE(dev_tx_weight);
413	int packets;
414
415	while (qdisc_restart(q, &packets)) {
416		quota -= packets;
417		if (quota <= 0) {
418			if (q->flags & TCQ_F_NOLOCK)
419				set_bit(__QDISC_STATE_MISSED, &q->state);
420			else
421				__netif_schedule(q);
422
423			break;
424		}
425	}
426}
427
428unsigned long dev_trans_start(struct net_device *dev)
429{
430	unsigned long res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start);
431	unsigned long val;
432	unsigned int i;
433
434	for (i = 1; i < dev->num_tx_queues; i++) {
435		val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start);
436		if (val && time_after(val, res))
437			res = val;
438	}
439
440	return res;
441}
442EXPORT_SYMBOL(dev_trans_start);
443
444static void netif_freeze_queues(struct net_device *dev)
445{
446	unsigned int i;
447	int cpu;
448
449	cpu = smp_processor_id();
450	for (i = 0; i < dev->num_tx_queues; i++) {
451		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
452
453		/* We are the only thread of execution doing a
454		 * freeze, but we have to grab the _xmit_lock in
455		 * order to synchronize with threads which are in
456		 * the ->hard_start_xmit() handler and already
457		 * checked the frozen bit.
458		 */
459		__netif_tx_lock(txq, cpu);
460		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
461		__netif_tx_unlock(txq);
462	}
463}
464
465void netif_tx_lock(struct net_device *dev)
466{
467	spin_lock(&dev->tx_global_lock);
468	netif_freeze_queues(dev);
469}
470EXPORT_SYMBOL(netif_tx_lock);
471
472static void netif_unfreeze_queues(struct net_device *dev)
473{
474	unsigned int i;
475
476	for (i = 0; i < dev->num_tx_queues; i++) {
477		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
478
479		/* No need to grab the _xmit_lock here.  If the
480		 * queue is not stopped for another reason, we
481		 * force a schedule.
482		 */
483		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
484		netif_schedule_queue(txq);
485	}
486}
487
488void netif_tx_unlock(struct net_device *dev)
489{
490	netif_unfreeze_queues(dev);
491	spin_unlock(&dev->tx_global_lock);
492}
493EXPORT_SYMBOL(netif_tx_unlock);
494
495static void dev_watchdog(struct timer_list *t)
496{
497	struct net_device *dev = from_timer(dev, t, watchdog_timer);
498	bool release = true;
499
500	spin_lock(&dev->tx_global_lock);
501	if (!qdisc_tx_is_noop(dev)) {
502		if (netif_device_present(dev) &&
503		    netif_running(dev) &&
504		    netif_carrier_ok(dev)) {
505			unsigned int timedout_ms = 0;
506			unsigned int i;
507			unsigned long trans_start;
508
509			for (i = 0; i < dev->num_tx_queues; i++) {
510				struct netdev_queue *txq;
511
512				txq = netdev_get_tx_queue(dev, i);
513				trans_start = READ_ONCE(txq->trans_start);
514				if (netif_xmit_stopped(txq) &&
515				    time_after(jiffies, (trans_start +
516							 dev->watchdog_timeo))) {
517					timedout_ms = jiffies_to_msecs(jiffies - trans_start);
518					atomic_long_inc(&txq->trans_timeout);
519					break;
520				}
521			}
522
523			if (unlikely(timedout_ms)) {
524				trace_net_dev_xmit_timeout(dev, i);
525				WARN_ONCE(1, "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out %u ms\n",
526					  dev->name, netdev_drivername(dev), i, timedout_ms);
527				netif_freeze_queues(dev);
528				dev->netdev_ops->ndo_tx_timeout(dev, i);
529				netif_unfreeze_queues(dev);
530			}
531			if (!mod_timer(&dev->watchdog_timer,
532				       round_jiffies(jiffies +
533						     dev->watchdog_timeo)))
534				release = false;
535		}
536	}
537	spin_unlock(&dev->tx_global_lock);
538
539	if (release)
540		netdev_put(dev, &dev->watchdog_dev_tracker);
541}
542
543void __netdev_watchdog_up(struct net_device *dev)
544{
545	if (dev->netdev_ops->ndo_tx_timeout) {
546		if (dev->watchdog_timeo <= 0)
547			dev->watchdog_timeo = 5*HZ;
548		if (!mod_timer(&dev->watchdog_timer,
549			       round_jiffies(jiffies + dev->watchdog_timeo)))
550			netdev_hold(dev, &dev->watchdog_dev_tracker,
551				    GFP_ATOMIC);
552	}
553}
554EXPORT_SYMBOL_GPL(__netdev_watchdog_up);
555
556static void dev_watchdog_up(struct net_device *dev)
557{
558	__netdev_watchdog_up(dev);
559}
560
561static void dev_watchdog_down(struct net_device *dev)
562{
563	netif_tx_lock_bh(dev);
564	if (del_timer(&dev->watchdog_timer))
565		netdev_put(dev, &dev->watchdog_dev_tracker);
566	netif_tx_unlock_bh(dev);
567}
568
569/**
570 *	netif_carrier_on - set carrier
571 *	@dev: network device
572 *
573 * Device has detected acquisition of carrier.
574 */
575void netif_carrier_on(struct net_device *dev)
576{
577	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
578		if (dev->reg_state == NETREG_UNINITIALIZED)
579			return;
580		atomic_inc(&dev->carrier_up_count);
581		linkwatch_fire_event(dev);
582		if (netif_running(dev))
583			__netdev_watchdog_up(dev);
584	}
585}
586EXPORT_SYMBOL(netif_carrier_on);
587
588/**
589 *	netif_carrier_off - clear carrier
590 *	@dev: network device
591 *
592 * Device has detected loss of carrier.
593 */
594void netif_carrier_off(struct net_device *dev)
595{
596	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
597		if (dev->reg_state == NETREG_UNINITIALIZED)
598			return;
599		atomic_inc(&dev->carrier_down_count);
600		linkwatch_fire_event(dev);
601	}
602}
603EXPORT_SYMBOL(netif_carrier_off);
604
605/**
606 *	netif_carrier_event - report carrier state event
607 *	@dev: network device
608 *
609 * Device has detected a carrier event but the carrier state wasn't changed.
610 * Use in drivers when querying carrier state asynchronously, to avoid missing
611 * events (link flaps) if link recovers before it's queried.
612 */
613void netif_carrier_event(struct net_device *dev)
614{
615	if (dev->reg_state == NETREG_UNINITIALIZED)
616		return;
617	atomic_inc(&dev->carrier_up_count);
618	atomic_inc(&dev->carrier_down_count);
619	linkwatch_fire_event(dev);
620}
621EXPORT_SYMBOL_GPL(netif_carrier_event);
622
623/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
624   under all circumstances. It is difficult to invent anything faster or
625   cheaper.
626 */
627
628static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
629			struct sk_buff **to_free)
630{
631	__qdisc_drop(skb, to_free);
632	return NET_XMIT_CN;
633}
634
635static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
636{
637	return NULL;
638}
639
640struct Qdisc_ops noop_qdisc_ops __read_mostly = {
641	.id		=	"noop",
642	.priv_size	=	0,
643	.enqueue	=	noop_enqueue,
644	.dequeue	=	noop_dequeue,
645	.peek		=	noop_dequeue,
646	.owner		=	THIS_MODULE,
647};
648
649static struct netdev_queue noop_netdev_queue = {
650	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
651	RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc),
652};
653
654struct Qdisc noop_qdisc = {
655	.enqueue	=	noop_enqueue,
656	.dequeue	=	noop_dequeue,
657	.flags		=	TCQ_F_BUILTIN,
658	.ops		=	&noop_qdisc_ops,
659	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
660	.dev_queue	=	&noop_netdev_queue,
661	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
662	.gso_skb = {
663		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
664		.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
665		.qlen = 0,
666		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
667	},
668	.skb_bad_txq = {
669		.next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
670		.prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
671		.qlen = 0,
672		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
673	},
674};
675EXPORT_SYMBOL(noop_qdisc);
676
677static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
678			struct netlink_ext_ack *extack)
679{
680	/* register_qdisc() assigns a default of noop_enqueue if unset,
681	 * but __dev_queue_xmit() treats noqueue only as such
682	 * if this is NULL - so clear it here. */
683	qdisc->enqueue = NULL;
684	return 0;
685}
686
687struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
688	.id		=	"noqueue",
689	.priv_size	=	0,
690	.init		=	noqueue_init,
691	.enqueue	=	noop_enqueue,
692	.dequeue	=	noop_dequeue,
693	.peek		=	noop_dequeue,
694	.owner		=	THIS_MODULE,
695};
696
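/* Map skb->priority (masked with TC_PRIO_MAX) to one of the three pfifo_fast
 * bands; band 0 is dequeued first, band 2 last.  For example TC_PRIO_BESTEFFORT
 * (0) maps to band 1, TC_PRIO_INTERACTIVE (6) to band 0 and TC_PRIO_BULK (2)
 * to band 2.
 */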
697static const u8 prio2band[TC_PRIO_MAX + 1] = {
698	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
699};
700
701/* 3-band FIFO queue: old style, but should be a bit faster than
702   generic prio+fifo combination.
703 */
704
705#define PFIFO_FAST_BANDS 3
706
707/*
708 * Private data for a pfifo_fast scheduler containing:
709 *	- rings for priority bands
710 */
711struct pfifo_fast_priv {
712	struct skb_array q[PFIFO_FAST_BANDS];
713};
714
715static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
716					  int band)
717{
718	return &priv->q[band];
719}
720
721static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
722			      struct sk_buff **to_free)
723{
724	int band = prio2band[skb->priority & TC_PRIO_MAX];
725	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
726	struct skb_array *q = band2list(priv, band);
727	unsigned int pkt_len = qdisc_pkt_len(skb);
728	int err;
729
730	err = skb_array_produce(q, skb);
731
732	if (unlikely(err)) {
733		if (qdisc_is_percpu_stats(qdisc))
734			return qdisc_drop_cpu(skb, qdisc, to_free);
735		else
736			return qdisc_drop(skb, qdisc, to_free);
737	}
738
739	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
740	return NET_XMIT_SUCCESS;
741}
742
743static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
744{
745	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
746	struct sk_buff *skb = NULL;
747	bool need_retry = true;
748	int band;
749
750retry:
751	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
752		struct skb_array *q = band2list(priv, band);
753
754		if (__skb_array_empty(q))
755			continue;
756
757		skb = __skb_array_consume(q);
758	}
759	if (likely(skb)) {
760		qdisc_update_stats_at_dequeue(qdisc, skb);
761	} else if (need_retry &&
762		   READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) {
763		/* Delay clearing the STATE_MISSED here to reduce
764		 * the overhead of the second spin_trylock() in
765		 * qdisc_run_begin() and of the __netif_schedule()
766		 * call in qdisc_run_end().
767		 */
768		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
769		clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
770
771		/* Make sure dequeuing happens after clearing
772		 * STATE_MISSED.
773		 */
774		smp_mb__after_atomic();
775
776		need_retry = false;
777
778		goto retry;
779	}
780
781	return skb;
782}
783
784static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
785{
786	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
787	struct sk_buff *skb = NULL;
788	int band;
789
790	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
791		struct skb_array *q = band2list(priv, band);
792
793		skb = __skb_array_peek(q);
794	}
795
796	return skb;
797}
798
799static void pfifo_fast_reset(struct Qdisc *qdisc)
800{
801	int i, band;
802	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
803
804	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
805		struct skb_array *q = band2list(priv, band);
806		struct sk_buff *skb;
807
808		/* A NULL ring is possible if the destroy path is taken due to
809		 * a failed skb_array_init() in pfifo_fast_init().
810		 */
811		if (!q->ring.queue)
812			continue;
813
814		while ((skb = __skb_array_consume(q)) != NULL)
815			kfree_skb(skb);
816	}
817
818	if (qdisc_is_percpu_stats(qdisc)) {
819		for_each_possible_cpu(i) {
820			struct gnet_stats_queue *q;
821
822			q = per_cpu_ptr(qdisc->cpu_qstats, i);
823			q->backlog = 0;
824			q->qlen = 0;
825		}
826	}
827}
828
829static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
830{
831	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
832
833	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
834	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
835		goto nla_put_failure;
836	return skb->len;
837
838nla_put_failure:
839	return -1;
840}
841
842static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
843			   struct netlink_ext_ack *extack)
844{
845	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
846	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
847	int prio;
848
849	/* guard against zero length rings */
850	if (!qlen)
851		return -EINVAL;
852
853	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
854		struct skb_array *q = band2list(priv, prio);
855		int err;
856
857		err = skb_array_init(q, qlen, GFP_KERNEL);
858		if (err)
859			return -ENOMEM;
860	}
861
862	/* Can by-pass the queue discipline */
863	qdisc->flags |= TCQ_F_CAN_BYPASS;
864	return 0;
865}
866
867static void pfifo_fast_destroy(struct Qdisc *sch)
868{
869	struct pfifo_fast_priv *priv = qdisc_priv(sch);
870	int prio;
871
872	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
873		struct skb_array *q = band2list(priv, prio);
874
875		/* A NULL ring is possible if the destroy path is taken due to
876		 * a failed skb_array_init() in pfifo_fast_init().
877		 */
878		if (!q->ring.queue)
879			continue;
880		/* Destroy ring but no need to kfree_skb because a call to
881		 * pfifo_fast_reset() has already done that work.
882		 */
883		ptr_ring_cleanup(&q->ring, NULL);
884	}
885}
886
887static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
888					  unsigned int new_len)
889{
890	struct pfifo_fast_priv *priv = qdisc_priv(sch);
891	struct skb_array *bands[PFIFO_FAST_BANDS];
892	int prio;
893
894	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
895		struct skb_array *q = band2list(priv, prio);
896
897		bands[prio] = q;
898	}
899
900	return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
901					 GFP_KERNEL);
902}
903
904struct Qdisc_ops pfifo_fast_ops __read_mostly = {
905	.id		=	"pfifo_fast",
906	.priv_size	=	sizeof(struct pfifo_fast_priv),
907	.enqueue	=	pfifo_fast_enqueue,
908	.dequeue	=	pfifo_fast_dequeue,
909	.peek		=	pfifo_fast_peek,
910	.init		=	pfifo_fast_init,
911	.destroy	=	pfifo_fast_destroy,
912	.reset		=	pfifo_fast_reset,
913	.dump		=	pfifo_fast_dump,
914	.change_tx_queue_len =  pfifo_fast_change_tx_queue_len,
915	.owner		=	THIS_MODULE,
916	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
917};
918EXPORT_SYMBOL(pfifo_fast_ops);
919
920static struct lock_class_key qdisc_tx_busylock;
921
922struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
923			  const struct Qdisc_ops *ops,
924			  struct netlink_ext_ack *extack)
925{
926	struct Qdisc *sch;
927	unsigned int size = sizeof(*sch) + ops->priv_size;
928	int err = -ENOBUFS;
929	struct net_device *dev;
930
931	if (!dev_queue) {
932		NL_SET_ERR_MSG(extack, "No device queue given");
933		err = -EINVAL;
934		goto errout;
935	}
936
937	dev = dev_queue->dev;
938	sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue));
939
940	if (!sch)
941		goto errout;
942	__skb_queue_head_init(&sch->gso_skb);
943	__skb_queue_head_init(&sch->skb_bad_txq);
944	gnet_stats_basic_sync_init(&sch->bstats);
945	spin_lock_init(&sch->q.lock);
946
947	if (ops->static_flags & TCQ_F_CPUSTATS) {
948		sch->cpu_bstats =
949			netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
950		if (!sch->cpu_bstats)
951			goto errout1;
952
953		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
954		if (!sch->cpu_qstats) {
955			free_percpu(sch->cpu_bstats);
956			goto errout1;
957		}
958	}
959
960	spin_lock_init(&sch->busylock);
961	lockdep_set_class(&sch->busylock,
962			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
963
964	/* seqlock has the same scope as busylock, for NOLOCK qdisc */
965	spin_lock_init(&sch->seqlock);
966	lockdep_set_class(&sch->seqlock,
967			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
968
969	sch->ops = ops;
970	sch->flags = ops->static_flags;
971	sch->enqueue = ops->enqueue;
972	sch->dequeue = ops->dequeue;
973	sch->dev_queue = dev_queue;
974	netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL);
975	refcount_set(&sch->refcnt, 1);
976
977	return sch;
978errout1:
979	kfree(sch);
980errout:
981	return ERR_PTR(err);
982}
983
984struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
985				const struct Qdisc_ops *ops,
986				unsigned int parentid,
987				struct netlink_ext_ack *extack)
988{
989	struct Qdisc *sch;
990
991	if (!try_module_get(ops->owner)) {
992		NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
993		return NULL;
994	}
995
996	sch = qdisc_alloc(dev_queue, ops, extack);
997	if (IS_ERR(sch)) {
998		module_put(ops->owner);
999		return NULL;
1000	}
1001	sch->parent = parentid;
1002
1003	if (!ops->init || ops->init(sch, NULL, extack) == 0) {
1004		trace_qdisc_create(ops, dev_queue->dev, parentid);
1005		return sch;
1006	}
1007
1008	qdisc_put(sch);
1009	return NULL;
1010}
1011EXPORT_SYMBOL(qdisc_create_dflt);
1012
1013/* Under qdisc_lock(qdisc) and BH! */
1014
1015void qdisc_reset(struct Qdisc *qdisc)
1016{
1017	const struct Qdisc_ops *ops = qdisc->ops;
1018
1019	trace_qdisc_reset(qdisc);
1020
1021	if (ops->reset)
1022		ops->reset(qdisc);
1023
1024	__skb_queue_purge(&qdisc->gso_skb);
1025	__skb_queue_purge(&qdisc->skb_bad_txq);
1026
1027	qdisc->q.qlen = 0;
1028	qdisc->qstats.backlog = 0;
1029}
1030EXPORT_SYMBOL(qdisc_reset);
1031
1032void qdisc_free(struct Qdisc *qdisc)
1033{
1034	if (qdisc_is_percpu_stats(qdisc)) {
1035		free_percpu(qdisc->cpu_bstats);
1036		free_percpu(qdisc->cpu_qstats);
1037	}
1038
1039	kfree(qdisc);
1040}
1041
1042static void qdisc_free_cb(struct rcu_head *head)
1043{
1044	struct Qdisc *q = container_of(head, struct Qdisc, rcu);
1045
1046	qdisc_free(q);
1047}
1048
1049static void __qdisc_destroy(struct Qdisc *qdisc)
1050{
1051	const struct Qdisc_ops  *ops = qdisc->ops;
1052
1053#ifdef CONFIG_NET_SCHED
1054	qdisc_hash_del(qdisc);
1055
1056	qdisc_put_stab(rtnl_dereference(qdisc->stab));
1057#endif
1058	gen_kill_estimator(&qdisc->rate_est);
1059
1060	qdisc_reset(qdisc);
1061
1062	if (ops->destroy)
1063		ops->destroy(qdisc);
1064
1065	module_put(ops->owner);
1066	netdev_put(qdisc_dev(qdisc), &qdisc->dev_tracker);
1067
1068	trace_qdisc_destroy(qdisc);
1069
1070	call_rcu(&qdisc->rcu, qdisc_free_cb);
1071}
1072
1073void qdisc_destroy(struct Qdisc *qdisc)
1074{
1075	if (qdisc->flags & TCQ_F_BUILTIN)
1076		return;
1077
1078	__qdisc_destroy(qdisc);
1079}
1080
1081void qdisc_put(struct Qdisc *qdisc)
1082{
1083	if (!qdisc)
1084		return;
1085
1086	if (qdisc->flags & TCQ_F_BUILTIN ||
1087	    !refcount_dec_and_test(&qdisc->refcnt))
1088		return;
1089
1090	__qdisc_destroy(qdisc);
1091}
1092EXPORT_SYMBOL(qdisc_put);
1093
1094/* Version of qdisc_put() that is called with the rtnl mutex unlocked.
1095 * Intended to be used as an optimization, this function only takes the rtnl
1096 * lock if the qdisc reference counter reaches zero.
1097 */
1098
1099void qdisc_put_unlocked(struct Qdisc *qdisc)
1100{
1101	if (qdisc->flags & TCQ_F_BUILTIN ||
1102	    !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
1103		return;
1104
1105	__qdisc_destroy(qdisc);
1106	rtnl_unlock();
1107}
1108EXPORT_SYMBOL(qdisc_put_unlocked);
1109
1110/* Attach toplevel qdisc to device queue. */
1111struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
1112			      struct Qdisc *qdisc)
1113{
1114	struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1115	spinlock_t *root_lock;
1116
1117	root_lock = qdisc_lock(oqdisc);
1118	spin_lock_bh(root_lock);
1119
1120	/* ... and graft new one */
1121	if (qdisc == NULL)
1122		qdisc = &noop_qdisc;
1123	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
1124	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
1125
1126	spin_unlock_bh(root_lock);
1127
1128	return oqdisc;
1129}
1130EXPORT_SYMBOL(dev_graft_qdisc);
1131
1132static void shutdown_scheduler_queue(struct net_device *dev,
1133				     struct netdev_queue *dev_queue,
1134				     void *_qdisc_default)
1135{
1136	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1137	struct Qdisc *qdisc_default = _qdisc_default;
1138
1139	if (qdisc) {
1140		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1141		rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default);
1142
1143		qdisc_put(qdisc);
1144	}
1145}
1146
1147static void attach_one_default_qdisc(struct net_device *dev,
1148				     struct netdev_queue *dev_queue,
1149				     void *_unused)
1150{
1151	struct Qdisc *qdisc;
1152	const struct Qdisc_ops *ops = default_qdisc_ops;
1153
1154	if (dev->priv_flags & IFF_NO_QUEUE)
1155		ops = &noqueue_qdisc_ops;
1156	else if (dev->type == ARPHRD_CAN)
1157		ops = &pfifo_fast_ops;
1158
1159	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
1160	if (!qdisc)
1161		return;
1162
1163	if (!netif_is_multiqueue(dev))
1164		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1165	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
1166}
1167
1168static void attach_default_qdiscs(struct net_device *dev)
1169{
1170	struct netdev_queue *txq;
1171	struct Qdisc *qdisc;
1172
1173	txq = netdev_get_tx_queue(dev, 0);
1174
1175	if (!netif_is_multiqueue(dev) ||
1176	    dev->priv_flags & IFF_NO_QUEUE) {
1177		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
1178		qdisc = rtnl_dereference(txq->qdisc_sleeping);
1179		rcu_assign_pointer(dev->qdisc, qdisc);
1180		qdisc_refcount_inc(qdisc);
1181	} else {
1182		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
1183		if (qdisc) {
1184			rcu_assign_pointer(dev->qdisc, qdisc);
1185			qdisc->ops->attach(qdisc);
1186		}
1187	}
1188	qdisc = rtnl_dereference(dev->qdisc);
1189
1190	/* Detect default qdisc setup/init failure and fall back to "noqueue" */
1191	if (qdisc == &noop_qdisc) {
1192		netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
1193			    default_qdisc_ops->id, noqueue_qdisc_ops.id);
1194		netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
1195		dev->priv_flags |= IFF_NO_QUEUE;
1196		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
1197		qdisc = rtnl_dereference(txq->qdisc_sleeping);
1198		rcu_assign_pointer(dev->qdisc, qdisc);
1199		qdisc_refcount_inc(qdisc);
1200		dev->priv_flags ^= IFF_NO_QUEUE;
1201	}
1202
1203#ifdef CONFIG_NET_SCHED
1204	if (qdisc != &noop_qdisc)
1205		qdisc_hash_add(qdisc, false);
1206#endif
1207}
1208
1209static void transition_one_qdisc(struct net_device *dev,
1210				 struct netdev_queue *dev_queue,
1211				 void *_need_watchdog)
1212{
1213	struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1214	int *need_watchdog_p = _need_watchdog;
1215
1216	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
1217		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
1218
1219	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
1220	if (need_watchdog_p) {
1221		WRITE_ONCE(dev_queue->trans_start, 0);
1222		*need_watchdog_p = 1;
1223	}
1224}
1225
1226void dev_activate(struct net_device *dev)
1227{
1228	int need_watchdog;
1229
1230	/* No queueing discipline is attached to the device;
1231	 * create a default one for devices that need queueing,
1232	 * and the noqueue qdisc for virtual interfaces.
1233	 */
1234
1235	if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
1236		attach_default_qdiscs(dev);
1237
1238	if (!netif_carrier_ok(dev))
1239		/* Delay activation until next carrier-on event */
1240		return;
1241
1242	need_watchdog = 0;
1243	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
1244	if (dev_ingress_queue(dev))
1245		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
1246
1247	if (need_watchdog) {
1248		netif_trans_update(dev);
1249		dev_watchdog_up(dev);
1250	}
1251}
1252EXPORT_SYMBOL(dev_activate);
1253
1254static void qdisc_deactivate(struct Qdisc *qdisc)
1255{
1256	if (qdisc->flags & TCQ_F_BUILTIN)
1257		return;
1258
1259	set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
1260}
1261
1262static void dev_deactivate_queue(struct net_device *dev,
1263				 struct netdev_queue *dev_queue,
1264				 void *_qdisc_default)
1265{
1266	struct Qdisc *qdisc_default = _qdisc_default;
1267	struct Qdisc *qdisc;
1268
1269	qdisc = rtnl_dereference(dev_queue->qdisc);
1270	if (qdisc) {
1271		qdisc_deactivate(qdisc);
1272		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1273	}
1274}
1275
1276static void dev_reset_queue(struct net_device *dev,
1277			    struct netdev_queue *dev_queue,
1278			    void *_unused)
1279{
1280	struct Qdisc *qdisc;
1281	bool nolock;
1282
1283	qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1284	if (!qdisc)
1285		return;
1286
1287	nolock = qdisc->flags & TCQ_F_NOLOCK;
1288
1289	if (nolock)
1290		spin_lock_bh(&qdisc->seqlock);
1291	spin_lock_bh(qdisc_lock(qdisc));
1292
1293	qdisc_reset(qdisc);
1294
1295	spin_unlock_bh(qdisc_lock(qdisc));
1296	if (nolock) {
1297		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
1298		clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
1299		spin_unlock_bh(&qdisc->seqlock);
1300	}
1301}
1302
1303static bool some_qdisc_is_busy(struct net_device *dev)
1304{
1305	unsigned int i;
1306
1307	for (i = 0; i < dev->num_tx_queues; i++) {
1308		struct netdev_queue *dev_queue;
1309		spinlock_t *root_lock;
1310		struct Qdisc *q;
1311		int val;
1312
1313		dev_queue = netdev_get_tx_queue(dev, i);
1314		q = rtnl_dereference(dev_queue->qdisc_sleeping);
1315
1316		root_lock = qdisc_lock(q);
1317		spin_lock_bh(root_lock);
1318
1319		val = (qdisc_is_running(q) ||
1320		       test_bit(__QDISC_STATE_SCHED, &q->state));
1321
1322		spin_unlock_bh(root_lock);
1323
1324		if (val)
1325			return true;
1326	}
1327	return false;
1328}
1329
1330/**
1331 * 	dev_deactivate_many - deactivate transmissions on several devices
1332 * 	@head: list of devices to deactivate
1333 *
1334 *	This function returns only when all outstanding transmissions
1335 *	have completed, unless all devices are in dismantle phase.
1336 */
1337void dev_deactivate_many(struct list_head *head)
1338{
1339	struct net_device *dev;
1340
1341	list_for_each_entry(dev, head, close_list) {
1342		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
1343					 &noop_qdisc);
1344		if (dev_ingress_queue(dev))
1345			dev_deactivate_queue(dev, dev_ingress_queue(dev),
1346					     &noop_qdisc);
1347
1348		dev_watchdog_down(dev);
1349	}
1350
1351	/* Wait for outstanding qdisc-less dev_queue_xmit calls or
1352	 * outstanding qdisc enqueuing calls.
1353	 * This is avoided if all devices are in dismantle phase :
1354	 * Caller will call synchronize_net() for us
1355	 */
1356	synchronize_net();
1357
1358	list_for_each_entry(dev, head, close_list) {
1359		netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
1360
1361		if (dev_ingress_queue(dev))
1362			dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
1363	}
1364
1365	/* Wait for outstanding qdisc_run calls. */
1366	list_for_each_entry(dev, head, close_list) {
1367		while (some_qdisc_is_busy(dev)) {
1368			/* wait_event() would avoid this sleep-loop but would
1369			 * require expensive checks in the fast paths of packet
1370			 * processing which isn't worth it.
1371			 */
1372			schedule_timeout_uninterruptible(1);
1373		}
1374	}
1375}
1376
1377void dev_deactivate(struct net_device *dev)
1378{
1379	LIST_HEAD(single);
1380
1381	list_add(&dev->close_list, &single);
1382	dev_deactivate_many(&single);
1383	list_del(&single);
1384}
1385EXPORT_SYMBOL(dev_deactivate);
1386
1387static int qdisc_change_tx_queue_len(struct net_device *dev,
1388				     struct netdev_queue *dev_queue)
1389{
1390	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
1391	const struct Qdisc_ops *ops = qdisc->ops;
1392
1393	if (ops->change_tx_queue_len)
1394		return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
1395	return 0;
1396}
1397
1398void dev_qdisc_change_real_num_tx(struct net_device *dev,
1399				  unsigned int new_real_tx)
1400{
1401	struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);
1402
1403	if (qdisc->ops->change_real_num_tx)
1404		qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
1405}
1406
1407void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
1408{
1409#ifdef CONFIG_NET_SCHED
1410	struct net_device *dev = qdisc_dev(sch);
1411	struct Qdisc *qdisc;
1412	unsigned int i;
1413
1414	for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
1415		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
1416		/* Only update the default qdiscs we created;
1417		 * qdiscs with handles are always hashed.
1418		 */
1419		if (qdisc != &noop_qdisc && !qdisc->handle)
1420			qdisc_hash_del(qdisc);
1421	}
1422	for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
1423		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
1424		if (qdisc != &noop_qdisc && !qdisc->handle)
1425			qdisc_hash_add(qdisc, false);
1426	}
1427#endif
1428}
1429EXPORT_SYMBOL(mq_change_real_num_tx);
1430
1431int dev_qdisc_change_tx_queue_len(struct net_device *dev)
1432{
1433	bool up = dev->flags & IFF_UP;
1434	unsigned int i;
1435	int ret = 0;
1436
1437	if (up)
1438		dev_deactivate(dev);
1439
1440	for (i = 0; i < dev->num_tx_queues; i++) {
1441		ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);
1442
1443		/* TODO: revert changes on a partial failure */
1444		if (ret)
1445			break;
1446	}
1447
1448	if (up)
1449		dev_activate(dev);
1450	return ret;
1451}
1452
1453static void dev_init_scheduler_queue(struct net_device *dev,
1454				     struct netdev_queue *dev_queue,
1455				     void *_qdisc)
1456{
1457	struct Qdisc *qdisc = _qdisc;
1458
1459	rcu_assign_pointer(dev_queue->qdisc, qdisc);
1460	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
1461}
1462
1463void dev_init_scheduler(struct net_device *dev)
1464{
1465	rcu_assign_pointer(dev->qdisc, &noop_qdisc);
1466	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
1467	if (dev_ingress_queue(dev))
1468		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1469
1470	timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
1471}
1472
1473void dev_shutdown(struct net_device *dev)
1474{
1475	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
1476	if (dev_ingress_queue(dev))
1477		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1478	qdisc_put(rtnl_dereference(dev->qdisc));
1479	rcu_assign_pointer(dev->qdisc, &noop_qdisc);
1480
1481	WARN_ON(timer_pending(&dev->watchdog_timer));
1482}
1483
1484/**
1485 * psched_ratecfg_precompute__() - Pre-compute values for reciprocal division
1486 * @rate:   Rate to compute reciprocal division values of
1487 * @mult:   Multiplier for reciprocal division
1488 * @shift:  Shift for reciprocal division
1489 *
1490 * The multiplier and shift for reciprocal division by rate are stored
1491 * in mult and shift.
1492 *
1493 * The deal here is to replace a divide by a reciprocal one
1494 * in the fast path (a reciprocal divide is a multiply and a shift).
1495 *
1496 * The normal formula would be:
1497 *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
1498 *
1499 * We compute mult/shift to use instead:
1500 *  time_in_ns = (len * mult) >> shift;
1501 *
1502 * We try to get the highest possible mult value for accuracy,
1503 * but have to make sure no overflows will ever happen.
1504 *
1505 * reciprocal_value() is not used here it doesn't handle 64-bit values.
1506 */
1507static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift)
1508{
1509	u64 factor = NSEC_PER_SEC;
1510
1511	*mult = 1;
1512	*shift = 0;
1513
1514	if (rate <= 0)
1515		return;
1516
1517	for (;;) {
1518		*mult = div64_u64(factor, rate);
1519		if (*mult & (1U << 31) || factor & (1ULL << 63))
1520			break;
1521		factor <<= 1;
1522		(*shift)++;
1523	}
1524}
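/* Example: for rate = 125000000 bytes/s (1 Gbit/s) the loop above stops once
 * mult reaches 2^31, after factor has been doubled 28 times.  The result is
 * mult = 2147483648 and shift = 28, so the fast path computes
 *	time_in_ns = (len * 2147483648) >> 28 = len * 8
 * i.e. exactly 8 ns per byte, without any division.
 */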
1525
1526void psched_ratecfg_precompute(struct psched_ratecfg *r,
1527			       const struct tc_ratespec *conf,
1528			       u64 rate64)
1529{
1530	memset(r, 0, sizeof(*r));
1531	r->overhead = conf->overhead;
1532	r->mpu = conf->mpu;
1533	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
1534	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
1535	psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift);
1536}
1537EXPORT_SYMBOL(psched_ratecfg_precompute);
1538
1539void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64)
1540{
1541	r->rate_pkts_ps = pktrate64;
1542	psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift);
1543}
1544EXPORT_SYMBOL(psched_ppscfg_precompute);
1545
1546void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
1547			  struct tcf_proto *tp_head)
1548{
1549	/* Protected with chain0->filter_chain_lock.
1550	 * Can't access chain directly because tp_head can be NULL.
1551	 */
1552	struct mini_Qdisc *miniq_old =
1553		rcu_dereference_protected(*miniqp->p_miniq, 1);
1554	struct mini_Qdisc *miniq;
1555
1556	if (!tp_head) {
1557		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
1558	} else {
1559		miniq = miniq_old != &miniqp->miniq1 ?
1560			&miniqp->miniq1 : &miniqp->miniq2;
1561
1562		/* We need to make sure that readers won't see the miniq
1563		 * we are about to modify. So ensure that at least one RCU
1564		 * grace period has elapsed since the miniq was made
1565		 * inactive.
1566		 */
1567		if (IS_ENABLED(CONFIG_PREEMPT_RT))
1568			cond_synchronize_rcu(miniq->rcu_state);
1569		else if (!poll_state_synchronize_rcu(miniq->rcu_state))
1570			synchronize_rcu_expedited();
1571
1572		miniq->filter_list = tp_head;
1573		rcu_assign_pointer(*miniqp->p_miniq, miniq);
1574	}
1575
1576	if (miniq_old)
1577		/* This is the counterpart of the rcu sync above. We need to
1578		 * block potential new users of miniq_old until all readers
1579		 * have stopped seeing it.
1580		 */
1581		miniq_old->rcu_state = start_poll_synchronize_rcu();
1582}
1583EXPORT_SYMBOL(mini_qdisc_pair_swap);
1584
1585void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
1586				struct tcf_block *block)
1587{
1588	miniqp->miniq1.block = block;
1589	miniqp->miniq2.block = block;
1590}
1591EXPORT_SYMBOL(mini_qdisc_pair_block_init);
1592
1593void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
1594			  struct mini_Qdisc __rcu **p_miniq)
1595{
1596	miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
1597	miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
1598	miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
1599	miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
1600	miniqp->miniq1.rcu_state = get_state_synchronize_rcu();
1601	miniqp->miniq2.rcu_state = miniqp->miniq1.rcu_state;
1602	miniqp->p_miniq = p_miniq;
1603}
1604EXPORT_SYMBOL(mini_qdisc_pair_init);
1605