// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *              - Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/if_macvlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <trace/events/qdisc.h>
#include <trace/events/net.h>
#include <net/xfrm.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

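/* Called when a dequeue is aborted because the tx queue looked
 * frozen/stopped: drop STATE_MISSED, then re-check the queue and
 * re-set the bit if the queue is in fact running, so that a
 * reschedule triggered by netif_tx_wake_queue() is never lost.
 */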
static void qdisc_maybe_clear_missed(struct Qdisc *q,
				     const struct netdev_queue *txq)
{
	clear_bit(__QDISC_STATE_MISSED, &q->state);

	/* Make sure the below netif_xmit_frozen_or_stopped()
	 * check happens after clearing STATE_MISSED.
	 */
	smp_mb__after_atomic();

	/* Check netif_xmit_frozen_or_stopped() again to make sure
	 * STATE_MISSED is re-set in case the STATE_MISSED set by
	 * netif_tx_wake_queue()'s rescheduling of net_tx_action()
	 * was cleared by the above clear_bit().
	 */
	if (!netif_xmit_frozen_or_stopped(txq))
		set_bit(__QDISC_STATE_MISSED, &q->state);
}

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

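/* Sentinel returned by __skb_dequeue_bad_txq() when the head skb cannot
 * be sent because its tx queue is frozen/stopped; it lets callers tell
 * "stop dequeuing for now" apart from "bad-txq list is empty".
 */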
#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)

static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
	const struct netdev_queue *txq = q->dev_queue;
	spinlock_t *lock = NULL;
	struct sk_buff *skb;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	skb = skb_peek(&q->skb_bad_txq);
	if (skb) {
		/* check the reason for requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->skb_bad_txq);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = SKB_XOFF_MAGIC;
			qdisc_maybe_clear_missed(q, txq);
		}
	}

	if (lock)
		spin_unlock(lock);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
{
	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);

	if (unlikely(skb))
		skb = __skb_dequeue_bad_txq(q);

	return skb;
}

static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
					     struct sk_buff *skb)
{
	spinlock_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	__skb_queue_tail(&q->skb_bad_txq, skb);

	if (qdisc_is_percpu_stats(q)) {
		qdisc_qstats_cpu_backlog_inc(q, skb);
		qdisc_qstats_cpu_qlen_inc(q);
	} else {
		qdisc_qstats_backlog_inc(q, skb);
		q->q.qlen++;
	}

	if (lock)
		spin_unlock(lock);
}

static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	spinlock_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	while (skb) {
		struct sk_buff *next = skb->next;

		__skb_queue_tail(&q->gso_skb, skb);

		/* it's still part of the queue */
		if (qdisc_is_percpu_stats(q)) {
			qdisc_qstats_cpu_requeues_inc(q);
			qdisc_qstats_cpu_backlog_inc(q, skb);
			qdisc_qstats_cpu_qlen_inc(q);
		} else {
			q->qstats.requeues++;
			qdisc_qstats_backlog_inc(q, skb);
			q->q.qlen++;
		}

		skb = next;
	}
	if (lock)
		spin_unlock(lock);
	__netif_schedule(q);
}

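/* Opportunistically pull more packets off the qdisc, chaining them via
 * skb->next, until the remaining BQL byte budget for the tx queue
 * (qdisc_avail_bulklimit()) is exhausted.  A GSO skb counts as a single
 * packet here, but its full length is charged against the byte budget.
 */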
static void try_bulk_dequeue_skb(struct Qdisc *q,
				 struct sk_buff *skb,
				 const struct netdev_queue *txq,
				 int *packets)
{
	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

	while (bytelimit > 0) {
		struct sk_buff *nskb = q->dequeue(q);

		if (!nskb)
			break;

		bytelimit -= nskb->len; /* covers GSO len */
		skb->next = nskb;
		skb = nskb;
		(*packets)++; /* GSO counts as one pkt */
	}
	skb_mark_not_on_list(skb);
}

/* This variant of try_bulk_dequeue_skb() makes sure
 * all skbs in the chain are for the same txq
 */
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
				      struct sk_buff *skb,
				      int *packets)
{
	int mapping = skb_get_queue_mapping(skb);
	struct sk_buff *nskb;
	int cnt = 0;

	do {
		nskb = q->dequeue(q);
		if (!nskb)
			break;
		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
			qdisc_enqueue_skb_bad_txq(q, nskb);
			break;
		}
		skb->next = nskb;
		skb = nskb;
	} while (++cnt < 8);
	(*packets) += cnt;
	skb_mark_not_on_list(skb);
}

/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
				   int *packets)
{
	const struct netdev_queue *txq = q->dev_queue;
	struct sk_buff *skb = NULL;

	*packets = 1;
	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
		spinlock_t *lock = NULL;

		if (q->flags & TCQ_F_NOLOCK) {
			lock = qdisc_lock(q);
			spin_lock(lock);
		}

		skb = skb_peek(&q->gso_skb);

		/* skb may be NULL if another CPU pulls gso_skb off
		 * between the empty check and taking the lock.
		 */
		if (!skb) {
			if (lock)
				spin_unlock(lock);
			goto validate;
		}

		/* skbs in gso_skb were already validated */
		*validate = false;
		if (xfrm_offload(skb))
			*validate = true;
		/* check the reason for requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->gso_skb);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
			qdisc_maybe_clear_missed(q, txq);
		}
		if (lock)
			spin_unlock(lock);
		goto trace;
	}
validate:
	*validate = true;

	if ((q->flags & TCQ_F_ONETXQUEUE) &&
	    netif_xmit_frozen_or_stopped(txq)) {
		qdisc_maybe_clear_missed(q, txq);
		return skb;
	}

	skb = qdisc_dequeue_skb_bad_txq(q);
	if (unlikely(skb)) {
		if (skb == SKB_XOFF_MAGIC)
			return NULL;
		goto bulk;
	}
	skb = q->dequeue(q);
	if (skb) {
bulk:
		if (qdisc_may_bulk(q))
			try_bulk_dequeue_skb(q, skb, txq, packets);
		else
			try_bulk_dequeue_skb_slow(q, skb, packets);
	}
trace:
	trace_qdisc_dequeue(q, txq, *packets, skb);
	return skb;
}

/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Owning the qdisc running seqcount guarantees that
 * only one CPU can execute this function.
 *
 * Returns to the caller:
 *				false  - hardware queue frozen; back off
 *				true   - feel free to send more pkts
 */
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	/* And release qdisc */
	if (root_lock)
		spin_unlock(root_lock);

	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
	if (validate)
		skb = validate_xmit_skb_list(skb, dev, &again);

#ifdef CONFIG_XFRM_OFFLOAD
	if (unlikely(again)) {
		if (root_lock)
			spin_lock(root_lock);

		dev_requeue_skb(skb, q);
		return false;
	}
#endif

	if (likely(skb)) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
		else
			qdisc_maybe_clear_missed(q, txq);

		HARD_TX_UNLOCK(dev, txq);
	} else {
		if (root_lock)
			spin_lock(root_lock);
		return true;
	}

	if (root_lock)
		spin_lock(root_lock);

	if (!dev_xmit_complete(ret)) {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		dev_requeue_skb(skb, q);
		return false;
	}

	return true;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * The running seqcount guarantees that only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 *  netif_tx_lock serializes accesses to the device driver.
 *
 *  qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 *  if one is grabbed, another must be free.
 *
 * Note that this procedure can be called by a watchdog timer
 *
 * Returns to the caller:
 *				false - queue is empty or throttled.
 *				true  - queue is not empty.
 *
 */
static inline bool qdisc_restart(struct Qdisc *q, int *packets)
{
	spinlock_t *root_lock = NULL;
	struct netdev_queue *txq;
	struct net_device *dev;
	struct sk_buff *skb;
	bool validate;

	/* Dequeue packet */
	skb = dequeue_skb(q, &validate, packets);
	if (unlikely(!skb))
		return false;

	if (!(q->flags & TCQ_F_NOLOCK))
		root_lock = qdisc_lock(q);

	dev = qdisc_dev(q);
	txq = skb_get_tx_queue(dev, skb);

	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}

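/* Drain the qdisc for up to dev_tx_weight packets; if the quota runs
 * out while work remains, defer the rest to the NET_TX softirq via
 * __netif_schedule() instead of hogging this CPU.
 */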
void __qdisc_run(struct Qdisc *q)
{
	int quota = READ_ONCE(dev_tx_weight);
	int packets;

	while (qdisc_restart(q, &packets)) {
		quota -= packets;
		if (quota <= 0) {
			__netif_schedule(q);
			break;
		}
	}
}

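/* Return the latest trans_start (in jiffies) over all tx queues of a
 * device, mapping vlan and macvlan devices to their underlying real
 * device first.
 */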
unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res;
	unsigned int i;

	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);
	else if (netif_is_macvlan(dev))
		dev = macvlan_dev_real_dev(dev);
	res = netdev_get_tx_queue(dev, 0)->trans_start;
	for (i = 1; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}

	return res;
}
EXPORT_SYMBOL(dev_trans_start);

static void dev_watchdog(struct timer_list *t)
{
	struct net_device *dev = from_timer(dev, t, watchdog_timer);

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				trans_start = txq->trans_start;
				if (netif_xmit_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					txq->trans_timeout++;
					break;
				}
			}

			if (some_queue_timedout) {
				trace_net_dev_xmit_timeout(dev, i);
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
				       dev->name, netdev_drivername(dev), i);
				dev->netdev_ops->ndo_tx_timeout(dev, i);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

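/* Arm the tx watchdog timer if the driver implements ndo_tx_timeout(),
 * defaulting watchdog_timeo to 5 seconds.  mod_timer() returning 0
 * means the timer was not already pending, so take a reference on the
 * device that the matching dev_put() in dev_watchdog() will release.
 */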
void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}
EXPORT_SYMBOL_GPL(__netdev_watchdog_up);

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 * Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_up_count);
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_down_count);
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);
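
/* A minimal sketch of how a driver is expected to drive the two helpers
 * above from its link-change interrupt; my_dev_link_up() is an
 * illustrative placeholder, not a real API:
 *
 *	static void my_dev_link_irq(struct net_device *dev)
 *	{
 *		if (my_dev_link_up(dev))
 *			netif_carrier_on(dev);
 *		else
 *			netif_carrier_off(dev);
 *	}
 */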

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
	.qdisc_sleeping	=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
	.running	=	SEQCNT_ZERO(noop_qdisc.running),
	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
	.gso_skb = {
		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
		.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
		.qlen = 0,
		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
	},
	.skb_bad_txq = {
		.next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
		.prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
		.qlen = 0,
		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
	},
};
EXPORT_SYMBOL(noop_qdisc);

static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	/* register_qdisc() assigns a default of noop_enqueue if unset,
	 * but __dev_queue_xmit() treats noqueue only as such
	 * if this is NULL - so clear it here. */
	qdisc->enqueue = NULL;
	return 0;
}

struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.init		=	noqueue_init,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

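/* Map TC_PRIO_* values (skb->priority & TC_PRIO_MAX) to one of the
 * three pfifo_fast bands; lower bands are dequeued first, so band 0
 * (e.g. TC_PRIO_INTERACTIVE) takes strict priority over bands 1 and 2.
 */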
static const u8 prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- rings for priority bands
 */
struct pfifo_fast_priv {
	struct skb_array q[PFIFO_FAST_BANDS];
};

static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
					  int band)
{
	return &priv->q[band];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			      struct sk_buff **to_free)
{
	int band = prio2band[skb->priority & TC_PRIO_MAX];
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct skb_array *q = band2list(priv, band);
	unsigned int pkt_len = qdisc_pkt_len(skb);
	int err;

	err = skb_array_produce(q, skb);

	if (unlikely(err)) {
		if (qdisc_is_percpu_stats(qdisc))
			return qdisc_drop_cpu(skb, qdisc, to_free);
		else
			return qdisc_drop(skb, qdisc, to_free);
	}

	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	bool need_retry = true;
	int band;

retry:
	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		if (__skb_array_empty(q))
			continue;

		skb = __skb_array_consume(q);
	}
	if (likely(skb)) {
		qdisc_update_stats_at_dequeue(qdisc, skb);
	} else if (need_retry &&
		   test_bit(__QDISC_STATE_MISSED, &qdisc->state)) {
		/* Delay clearing STATE_MISSED here to reduce the
		 * overhead of the second spin_trylock() in
		 * qdisc_run_begin() and of __netif_schedule() being
		 * called in qdisc_run_end().
		 */
		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);

		/* Make sure dequeuing happens after clearing
		 * STATE_MISSED.
		 */
		smp_mb__after_atomic();

		need_retry = false;

		goto retry;
	} else {
		WRITE_ONCE(qdisc->empty, true);
	}

	return skb;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	int band;

	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		skb = __skb_array_peek(q);
	}

	return skb;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int i, band;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
		struct skb_array *q = band2list(priv, band);
		struct sk_buff *skb;

		/* The ring may be NULL if the destroy path runs after a
		 * failed skb_array_init() in pfifo_fast_init().
		 */
		if (!q->ring.queue)
			continue;

		while ((skb = __skb_array_consume(q)) != NULL)
			kfree_skb(skb);
	}

	if (qdisc_is_percpu_stats(qdisc)) {
		for_each_possible_cpu(i) {
			struct gnet_stats_queue *q;

			q = per_cpu_ptr(qdisc->cpu_qstats, i);
			q->backlog = 0;
			q->qlen = 0;
		}
	}
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int prio;

	/* guard against zero length rings */
	if (!qlen)
		return -EINVAL;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);
		int err;

		err = skb_array_init(q, qlen, GFP_KERNEL);
		if (err)
			return -ENOMEM;
	}

	/* Can bypass the queue discipline */
	qdisc->flags |= TCQ_F_CAN_BYPASS;
	return 0;
}

static void pfifo_fast_destroy(struct Qdisc *sch)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		/* The ring may be NULL if the destroy path runs after a
		 * failed skb_array_init() in pfifo_fast_init().
		 */
		if (!q->ring.queue)
			continue;
		/* Destroy ring but no need to kfree_skb because a call to
		 * pfifo_fast_reset() has already done that work.
		 */
		ptr_ring_cleanup(&q->ring, NULL);
	}
}

static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
					  unsigned int new_len)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	struct skb_array *bands[PFIFO_FAST_BANDS];
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		bands[prio] = q;
	}

	return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
					 GFP_KERNEL);
}

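/* pfifo_fast runs lockless (TCQ_F_NOLOCK) with per-CPU stats
 * (TCQ_F_CPUSTATS): each band is an skb_array (ptr_ring), so the qdisc
 * root lock is not taken in the enqueue/dequeue fast path.
 */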
struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	sizeof(struct pfifo_fast_priv),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.peek		=	pfifo_fast_peek,
	.init		=	pfifo_fast_init,
	.destroy	=	pfifo_fast_destroy,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.change_tx_queue_len =  pfifo_fast_change_tx_queue_len,
	.owner		=	THIS_MODULE,
	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
};
EXPORT_SYMBOL(pfifo_fast_ops);

static struct lock_class_key qdisc_tx_busylock;
static struct lock_class_key qdisc_running_key;

struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct Qdisc *sch;
	unsigned int size = sizeof(*sch) + ops->priv_size;
	int err = -ENOBUFS;
	struct net_device *dev;

	if (!dev_queue) {
		NL_SET_ERR_MSG(extack, "No device queue given");
		err = -EINVAL;
		goto errout;
	}

	dev = dev_queue->dev;
	sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue));

	if (!sch)
		goto errout;
	__skb_queue_head_init(&sch->gso_skb);
	__skb_queue_head_init(&sch->skb_bad_txq);
	qdisc_skb_head_init(&sch->q);
	spin_lock_init(&sch->q.lock);

	if (ops->static_flags & TCQ_F_CPUSTATS) {
		sch->cpu_bstats =
			netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!sch->cpu_bstats)
			goto errout1;

		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!sch->cpu_qstats) {
			free_percpu(sch->cpu_bstats);
			goto errout1;
		}
	}

	spin_lock_init(&sch->busylock);
	lockdep_set_class(&sch->busylock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	/* seqlock has the same scope as busylock, for NOLOCK qdiscs */
	spin_lock_init(&sch->seqlock);
	lockdep_set_class(&sch->seqlock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	seqcount_init(&sch->running);
	lockdep_set_class(&sch->running,
			  dev->qdisc_running_key ?: &qdisc_running_key);

	sch->ops = ops;
	sch->flags = ops->static_flags;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	sch->empty = true;
	dev_hold(dev);
	refcount_set(&sch->refcnt, 1);

	return sch;
errout1:
	kfree(sch);
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops,
				unsigned int parentid,
				struct netlink_ext_ack *extack)
{
	struct Qdisc *sch;

	if (!try_module_get(ops->owner)) {
		NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
		return NULL;
	}

	sch = qdisc_alloc(dev_queue, ops, extack);
	if (IS_ERR(sch)) {
		module_put(ops->owner);
		return NULL;
	}
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL, extack) == 0) {
		trace_qdisc_create(ops, dev_queue->dev, parentid);
		return sch;
	}

	qdisc_put(sch);
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;
	struct sk_buff *skb, *tmp;

	trace_qdisc_reset(qdisc);

	if (ops->reset)
		ops->reset(qdisc);

	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
		__skb_unlink(skb, &qdisc->gso_skb);
		kfree_skb_list(skb);
	}

	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
		__skb_unlink(skb, &qdisc->skb_bad_txq);
		kfree_skb_list(skb);
	}

	qdisc->q.qlen = 0;
	qdisc->qstats.backlog = 0;
}
EXPORT_SYMBOL(qdisc_reset);

void qdisc_free(struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc)) {
		free_percpu(qdisc->cpu_bstats);
		free_percpu(qdisc->cpu_qstats);
	}

	kfree(qdisc);
}

static void qdisc_free_cb(struct rcu_head *head)
{
	struct Qdisc *q = container_of(head, struct Qdisc, rcu);

	qdisc_free(q);
}

static void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops  *ops = qdisc->ops;

#ifdef CONFIG_NET_SCHED
	qdisc_hash_del(qdisc);

	qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
	gen_kill_estimator(&qdisc->rate_est);

	qdisc_reset(qdisc);

	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	trace_qdisc_destroy(qdisc);

	call_rcu(&qdisc->rcu, qdisc_free_cb);
}

void qdisc_put(struct Qdisc *qdisc)
{
	if (!qdisc)
		return;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !refcount_dec_and_test(&qdisc->refcnt))
		return;

	qdisc_destroy(qdisc);
}
EXPORT_SYMBOL(qdisc_put);

/* Version of qdisc_put() that is called with the rtnl mutex unlocked.
 * Intended as an optimization: this function only takes the rtnl lock
 * if the qdisc reference counter reaches zero.
 */

void qdisc_put_unlocked(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
		return;

	qdisc_destroy(qdisc);
	rtnl_unlock();
}
EXPORT_SYMBOL(qdisc_put_unlocked);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_put(qdisc);
	}
}

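/* Pick the default qdisc for one tx queue: noqueue for IFF_NO_QUEUE
 * (virtual) devices, pfifo_fast for CAN devices regardless of the
 * configured default, and default_qdisc_ops for everything else.
 */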
static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;
	const struct Qdisc_ops *ops = default_qdisc_ops;

	if (dev->priv_flags & IFF_NO_QUEUE)
		ops = &noqueue_qdisc_ops;
	else if (dev->type == ARPHRD_CAN)
		ops = &pfifo_fast_ops;

	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
	if (!qdisc)
		return;

	if (!netif_is_multiqueue(dev))
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) ||
	    dev->priv_flags & IFF_NO_QUEUE) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		qdisc = txq->qdisc_sleeping;
		rcu_assign_pointer(dev->qdisc, qdisc);
		qdisc_refcount_inc(qdisc);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
		if (qdisc) {
			rcu_assign_pointer(dev->qdisc, qdisc);
			qdisc->ops->attach(qdisc);
		}
	}
	qdisc = rtnl_dereference(dev->qdisc);

	/* Detect default qdisc setup/init failure and fall back to "noqueue" */
11198c2ecf20Sopenharmony_ci	if (qdisc == &noop_qdisc) {
11208c2ecf20Sopenharmony_ci		netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
11218c2ecf20Sopenharmony_ci			    default_qdisc_ops->id, noqueue_qdisc_ops.id);
11228c2ecf20Sopenharmony_ci		netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
11238c2ecf20Sopenharmony_ci		dev->priv_flags |= IFF_NO_QUEUE;
11248c2ecf20Sopenharmony_ci		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
11258c2ecf20Sopenharmony_ci		qdisc = txq->qdisc_sleeping;
11268c2ecf20Sopenharmony_ci		rcu_assign_pointer(dev->qdisc, qdisc);
11278c2ecf20Sopenharmony_ci		qdisc_refcount_inc(qdisc);
11288c2ecf20Sopenharmony_ci		dev->priv_flags ^= IFF_NO_QUEUE;
11298c2ecf20Sopenharmony_ci	}
11308c2ecf20Sopenharmony_ci
11318c2ecf20Sopenharmony_ci#ifdef CONFIG_NET_SCHED
11328c2ecf20Sopenharmony_ci	if (qdisc != &noop_qdisc)
11338c2ecf20Sopenharmony_ci		qdisc_hash_add(qdisc, false);
11348c2ecf20Sopenharmony_ci#endif
11358c2ecf20Sopenharmony_ci}
11368c2ecf20Sopenharmony_ci
static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to the device yet; create a
	 * default one for devices which need queueing, and the noqueue
	 * qdisc for virtual interfaces.
	 */

	if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		netif_trans_update(dev);
		dev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(dev_activate);

static void qdisc_deactivate(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;

	set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
}

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = rtnl_dereference(dev_queue->qdisc);
	if (qdisc) {
		qdisc_deactivate(qdisc);
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
	}
}

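/* Reset a queue's sleeping qdisc once traffic has drained.  For
 * lockless (TCQ_F_NOLOCK) qdiscs the seqlock serializes us against a
 * concurrent qdisc_run(), and STATE_MISSED is cleared under it so no
 * stale reschedule fires after the reset.
 */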
static void dev_reset_queue(struct net_device *dev,
			    struct netdev_queue *dev_queue,
			    void *_unused)
{
	struct Qdisc *qdisc;
	bool nolock;

	qdisc = dev_queue->qdisc_sleeping;
	if (!qdisc)
		return;

	nolock = qdisc->flags & TCQ_F_NOLOCK;

	if (nolock)
		spin_lock_bh(&qdisc->seqlock);
	spin_lock_bh(qdisc_lock(qdisc));

	qdisc_reset(qdisc);

	spin_unlock_bh(qdisc_lock(qdisc));
	if (nolock) {
		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
		spin_unlock_bh(&qdisc->seqlock);
	}
}

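/* Scan every tx queue and report whether any qdisc still has a
 * qdisc_run() in progress or a net_tx_action() softirq pending.
 */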
static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;

		root_lock = qdisc_lock(q);
		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

/**
 *	dev_deactivate_many - deactivate transmissions on several devices
 *	@head: list of devices to deactivate
 *
 *	This function returns only when all outstanding transmissions
 *	have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
	struct net_device *dev;

	list_for_each_entry(dev, head, close_list) {
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &noop_qdisc);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &noop_qdisc);

		dev_watchdog_down(dev);
	}

	/* Wait for outstanding qdisc-less dev_queue_xmit calls or
	 * outstanding qdisc enqueuing calls.
	 * This is avoided if all devices are in dismantle phase:
	 * the caller will call synchronize_net() for us.
	 */
	synchronize_net();

	list_for_each_entry(dev, head, close_list) {
		netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);

		if (dev_ingress_queue(dev))
			dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
	}

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, close_list) {
		while (some_qdisc_is_busy(dev)) {
			/* wait_event() would avoid this sleep-loop but would
			 * require expensive checks in the fast paths of packet
			 * processing, which isn't worth it.
			 */
			schedule_timeout_uninterruptible(1);
		}
	}
}

void dev_deactivate(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	dev_deactivate_many(&single);
	list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);

static int qdisc_change_tx_queue_len(struct net_device *dev,
				     struct netdev_queue *dev_queue)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->change_tx_queue_len)
		return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
	return 0;
}

void dev_qdisc_change_real_num_tx(struct net_device *dev,
				  unsigned int new_real_tx)
{
	struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);

	if (qdisc->ops->change_real_num_tx)
		qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
}

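/* Propagate a tx_queue_len change to every queue's root qdisc.  The
 * device is deactivated around the update so the queues are quiescent
 * while their limits are rewritten, then reactivated.
 */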
int dev_qdisc_change_tx_queue_len(struct net_device *dev)
{
	bool up = dev->flags & IFF_UP;
	unsigned int i;
	int ret = 0;

	if (up)
		dev_deactivate(dev);

	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);

		/* TODO: revert changes on a partial failure */
		if (ret)
			break;
	}

	if (up)
		dev_activate(dev);
	return ret;
}

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	rcu_assign_pointer(dev_queue->qdisc, qdisc);
	dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
	rcu_assign_pointer(dev->qdisc, &noop_qdisc);
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

	timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
}

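/* Tear down all qdiscs on unregister: point every queue back at
 * noop_qdisc, drop the reference on the root qdisc, and make sure the
 * tx watchdog timer is no longer pending.
 */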
void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
	qdisc_put(rtnl_dereference(dev->qdisc));
	rcu_assign_pointer(dev->qdisc, &noop_qdisc);

	WARN_ON(timer_pending(&dev->watchdog_timer));
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64)
{
	memset(r, 0, sizeof(*r));
	r->overhead = conf->overhead;
	r->mpu = conf->mpu;
	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
	r->mult = 1;
	/*
	 * The idea here is to replace the divide in the fast path with a
	 * reciprocal divide (a multiply and a shift).
	 *
	 * The normal formula would be:
	 *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
	 *
	 * We compute mult/shift to use instead:
	 *  time_in_ns = (len * mult) >> shift;
	 *
	 * We try to get the highest possible mult value for accuracy,
	 * but have to make sure no overflows will ever happen.
	 */
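	/* Worked example (illustrative numbers): for rate_bytes_ps =
	 * 125000 (1 Mbit/s), the loop below starts from mult =
	 * NSEC_PER_SEC / 125000 = 8000 and doubles factor until bit 31
	 * of mult is set, stopping at mult = 8000 << 19 = 4194304000
	 * with shift = 19.  A consumer such as psched_l2t_ns() then
	 * computes (len * 4194304000) >> 19 = len * 8000 ns, i.e.
	 * exactly 8000 ns per byte at 1 Mbit/s, with no divide in the
	 * fast path.
	 */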
	if (r->rate_bytes_ps > 0) {
		u64 factor = NSEC_PER_SEC;

		for (;;) {
			r->mult = div64_u64(factor, r->rate_bytes_ps);
			if (r->mult & (1U << 31) || factor & (1ULL << 63))
				break;
			factor <<= 1;
			r->shift++;
		}
	}
}
EXPORT_SYMBOL(psched_ratecfg_precompute);

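/* mini_Qdisc is a double-buffered, RCU-protected view of a qdisc's
 * filter chain used on the ingress/clsact fast path.  Each pair holds
 * two buffers (miniq1/miniq2); swaps flip between them, and the empty
 * RCU callback below only marks when the retired buffer's last readers
 * are gone, so the next swap can reuse it after rcu_barrier().
 */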
static void mini_qdisc_rcu_func(struct rcu_head *head)
{
}

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head)
{
	/* Protected with chain0->filter_chain_lock.
	 * Can't access chain directly because tp_head can be NULL.
	 */
	struct mini_Qdisc *miniq_old =
		rcu_dereference_protected(*miniqp->p_miniq, 1);
	struct mini_Qdisc *miniq;

	if (!tp_head) {
		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
		/* Wait for any in-flight RCU callback before it is freed. */
		rcu_barrier();
		return;
	}

	miniq = !miniq_old || miniq_old == &miniqp->miniq2 ?
		&miniqp->miniq1 : &miniqp->miniq2;

	/* We need to make sure that readers won't see the miniq
	 * we are about to modify. So wait until the previous call_rcu
	 * callback is done.
	 */
	rcu_barrier();
	miniq->filter_list = tp_head;
	rcu_assign_pointer(*miniqp->p_miniq, miniq);

	if (miniq_old)
		/* This is the counterpart of the rcu barriers above. We need
		 * to block potential new users of miniq_old until all readers
		 * have stopped seeing it.
		 */
		call_rcu(&miniq_old->rcu, mini_qdisc_rcu_func);
}
EXPORT_SYMBOL(mini_qdisc_pair_swap);

void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block)
{
	miniqp->miniq1.block = block;
	miniqp->miniq2.block = block;
}
EXPORT_SYMBOL(mini_qdisc_pair_block_init);

void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq)
{
	miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
	miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
	miniqp->p_miniq = p_miniq;
}
EXPORT_SYMBOL(mini_qdisc_pair_init);