/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32 data[256];
	struct qdisc_rate_table *next;
	int refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_MISSED,
};

struct qdisc_size_table {
	struct rcu_head rcu;
	struct list_head list;
	struct tc_sizespec szopts;
	int refcnt;
	u16 data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff *head;
	struct sk_buff *tail;
	__u32 qlen;
	spinlock_t lock;
};

struct Qdisc {
	int			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue : It can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing next packet.
				      * It is true for MQ/MQPRIO slaves, or non
				      * multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy :
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table __rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
	int			pad;
	refcount_t		refcnt;

	/*
	 * For performance sake on SMP, we put highly modified fields at the end
	 */
	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc		*next_sched;
	struct sk_buff_head	skb_bad_txq;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
	spinlock_t		seqlock;

	/* for NOLOCK qdisc, true if there are no enqueued skbs */
	bool			empty;
	struct rcu_head		rcu;

	/* private data */
	long privdata[] ____cacheline_aligned;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

/* Intended to be used by unlocked users, when concurrent qdisc release is
 * possible.
 */

static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return qdisc;
	if (refcount_inc_not_zero(&qdisc->refcnt))
		return qdisc;
	return NULL;
}

static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		return spin_is_locked(&qdisc->seqlock);
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc))
		return READ_ONCE(qdisc->empty);
	return !READ_ONCE(qdisc->q.qlen);
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (spin_trylock(&qdisc->seqlock))
			goto nolock_empty;

		/* No need to insist if the MISSED flag was already set.
		 * Note that test_and_set_bit() also gives us memory ordering
		 * guarantees wrt potential earlier enqueue() and below
		 * spin_trylock(), both of which are necessary to prevent races
		 */
		if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
			return false;

		/* Try to take the lock again to make sure that we will either
		 * grab it or the CPU that still has it will see MISSED set
		 * when testing it in qdisc_run_end()
		 */
		if (!spin_trylock(&qdisc->seqlock))
			return false;

nolock_empty:
		WRITE_ONCE(qdisc->empty, false);
	} else if (qdisc_is_running(qdisc)) {
		return false;
	}
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
	if (qdisc->flags & TCQ_F_NOLOCK) {
		spin_unlock(&qdisc->seqlock);

		/* spin_unlock() only has store-release semantic. The unlock
		 * and test_bit() ordering is a store-load ordering, so a full
		 * memory barrier is needed here.
		 */
		smp_mb();

		if (unlikely(test_bit(__QDISC_STATE_MISSED,
				      &qdisc->state))) {
			clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
			__netif_schedule(qdisc);
		}
	}
}
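
/* Example: callers are expected to pair qdisc_run_begin() and
 * qdisc_run_end() around the dequeue loop; the qdisc_run() wrapper in
 * <net/pkt_sched.h> does roughly:
 *
 *	if (qdisc_run_begin(q)) {
 *		__qdisc_run(q);
 *		qdisc_run_end(q);
 *	}
 *
 * For TCQ_F_NOLOCK qdiscs, setting the MISSED bit above guarantees that
 * when the trylock fails, the CPU still owning seqlock will notice it in
 * qdisc_run_end() and reschedule the qdisc, so no enqueued packet is left
 * behind.
 */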

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

struct Qdisc_class_ops {
	unsigned int		flags;
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **,
					 struct netlink_ext_ack *extack);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *,
					  struct netlink_ext_ack *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
					     unsigned long arg,
					     struct netlink_ext_ack *extack);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
	QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops *cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;
	unsigned int		static_flags;

	int			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
					struct netlink_ext_ack *extack);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *sch,
					  struct nlattr *arg,
					  struct netlink_ext_ack *extack);
	void			(*attach)(struct Qdisc *sch);
	int			(*change_tx_queue_len)(struct Qdisc *, unsigned int);
	void			(*change_real_num_tx)(struct Qdisc *sch,
						      unsigned int new_real_tx);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void			(*ingress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	void			(*egress_block_set)(struct Qdisc *sch,
						    u32 block_index);
	u32			(*ingress_block_get)(struct Qdisc *sch);
	u32			(*egress_block_get)(struct Qdisc *sch);

	struct module		*owner;
};
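
/* Example: a minimal classless qdisc can reuse the generic helpers defined
 * further down in this header for most of its ops.  A sketch along the
 * lines of sch_fifo (the example_* callback names are placeholders):
 *
 *	static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *		.id		= "example",
 *		.priv_size	= 0,
 *		.enqueue	= example_enqueue,
 *		.dequeue	= qdisc_dequeue_head,
 *		.peek		= qdisc_peek_head,
 *		.init		= example_init,
 *		.reset		= qdisc_reset_queue,
 *		.change		= example_init,
 *		.dump		= example_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * The table is made known to the scheduler core with
 * register_qdisc(&example_qdisc_ops).
 */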

struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;

		/* used in the skb_tc_reinsert function */
		struct {
			bool		ingress;
			struct gnet_stats_queue *qstats;
		};
	};
};

struct tcf_chain;

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *tp, bool rtnl_held,
					   struct netlink_ext_ack *extack);

	void *			(*get)(struct tcf_proto *, u32 handle);
	void			(*put)(struct tcf_proto *tp, void *f);
	int			(*change)(struct net *net, struct sk_buff *,
					  struct tcf_proto *, unsigned long,
					  u32 handle, struct nlattr **,
					  void **, bool, bool,
					  struct netlink_ext_ack *);
	int			(*delete)(struct tcf_proto *tp, void *arg,
					  bool *last, bool rtnl_held,
					  struct netlink_ext_ack *);
	bool			(*delete_empty)(struct tcf_proto *tp);
	void			(*walk)(struct tcf_proto *tp,
					struct tcf_walker *arg, bool rtnl_held);
	int			(*reoffload)(struct tcf_proto *tp, bool add,
					     flow_setup_cb_t *cb, void *cb_priv,
					     struct netlink_ext_ack *extack);
	void			(*hw_add)(struct tcf_proto *tp,
					  void *type_data);
	void			(*hw_del)(struct tcf_proto *tp,
					  void *type_data);
	void			(*bind_class)(void *, u32, unsigned long,
					      void *, unsigned long);
	void *			(*tmplt_create)(struct net *net,
						struct tcf_chain *chain,
						struct nlattr **tca,
						struct netlink_ext_ack *extack);
	void			(*tmplt_destroy)(void *tmplt_priv);

	/* rtnetlink specific */
	int			(*dump)(struct net *, struct tcf_proto *, void *,
					struct sk_buff *skb, struct tcmsg *,
					bool);
	int			(*terse_dump)(struct net *net,
					      struct tcf_proto *tp, void *fh,
					      struct sk_buff *skb,
					      struct tcmsg *t, bool rtnl_held);
	int			(*tmplt_dump)(struct sk_buff *skb,
					      struct net *net,
					      void *tmplt_priv);

	struct module		*owner;
	int			flags;
};

/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
 * conditions can occur when filters are inserted/deleted simultaneously.
 */
enum tcf_proto_ops_flags {
	TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;

	/* called under RCU BH lock */
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	void			*data;
	const struct tcf_proto_ops *ops;
	struct tcf_chain	*chain;
	/* Lock protects tcf_proto shared state and can be used by unlocked
	 * classifiers to protect their private data.
	 */
	spinlock_t		lock;
	bool			deleting;
	refcount_t		refcnt;
	struct rcu_head		rcu;
	struct hlist_node	destroy_ht_node;
};

struct qdisc_skb_cb {
	struct {
		unsigned int	pkt_len;
		u16		slave_dev_queue_mapping;
		u16		tc_classid;
	};
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
	u16			mru;
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	/* Protects filter_chain. */
	struct mutex filter_chain_lock;
	struct tcf_proto __rcu *filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
	unsigned int action_refcnt;
	bool explicitly_created;
	bool flushing;
	const struct tcf_proto_ops *tmplt_ops;
	void *tmplt_priv;
	struct rcu_head rcu;
};

struct tcf_block {
	/* Lock protects tcf_block and lifetime-management data of chains
	 * attached to the block (refcnt, action_refcnt, explicitly_created).
	 */
	struct mutex lock;
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	u32 classid; /* which class this block belongs to */
	refcount_t refcnt;
	struct net *net;
	struct Qdisc *q;
	struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
	struct flow_block flow_block;
	struct list_head owner_list;
	bool keep_dst;
	atomic_t offloadcnt; /* Number of offloaded filters */
	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
	unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
	struct {
		struct tcf_chain *chain;
		struct list_head filter_chain_list;
	} chain0;
	struct rcu_head rcu;
	DECLARE_HASHTABLE(proto_destroy_ht, 7);
	struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};

#ifdef CONFIG_PROVE_LOCKING
static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return lockdep_is_held(&tp->lock);
}
#else
static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return true;
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return true;
}
#endif /* #ifdef CONFIG_PROVE_LOCKING */

#define tcf_chain_dereference(p, chain) \
	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp) \
	rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))
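
/* Example: code that already holds the relevant lock uses these instead of
 * plain rcu_dereference(), e.g. walking a chain under filter_chain_lock:
 *
 *	mutex_lock(&chain->filter_chain_lock);
 *	for (tp = tcf_chain_dereference(chain->filter_chain, chain);
 *	     tp; tp = tcf_chain_dereference(tp->next, chain))
 *		...;
 *	mutex_unlock(&chain->filter_chain_lock);
 *
 * This keeps lockdep and sparse happy while documenting which lock
 * protects the RCU pointer.
 */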

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen_cpu(const struct Qdisc *q)
{
	return this_cpu_ptr(q->cpu_qstats)->qlen;
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (qdisc_is_percpu_stats(q)) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
	return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
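
/* Example: classful qdiscs typically embed Qdisc_class_common in their own
 * per-class structure and resolve a classid with container_of().  A sketch,
 * assuming the qdisc's private data carries a struct Qdisc_class_hash
 * (called clhash here) and a hypothetical struct example_class with a
 * "common" member:
 *
 *	struct Qdisc_class_common *clc;
 *
 *	clc = qdisc_class_find(&q->clhash, classid);
 *	if (clc)
 *		cl = container_of(clc, struct example_class, common);
 *
 * qdisc_class_hash_grow() is normally called after inserting classes so
 * the hash table keeps pace with their number.
 */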

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_qdisc_change_real_num_tx(struct net_device *dev,
				  unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			      void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			  void *type_data)
{
	q->flags &= ~TCQ_F_OFFLOADED;
	return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
			   struct Qdisc *new, struct Qdisc *old,
			   enum tc_setup_type type, void *type_data,
			   struct netlink_ext_ack *extack)
{
}
#endif
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs greater than index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (!qdisc_is_empty(q)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}
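
/* Example: classful qdiscs normally hand a packet to a child through
 * qdisc_enqueue() rather than calling ->enqueue directly, so that a size
 * table attached to the child is applied to the packet length, e.g.:
 *
 *	ret = qdisc_enqueue(skb, child, to_free);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			qdisc_qstats_drop(sch);
 *		return ret;
 *	}
 *
 * net_xmit_drop_count() filters out __NET_XMIT_STOLEN so stolen packets
 * are not counted as drops by the parent.
 */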

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
{
	__u32 qlen = qdisc_qlen_sum(sch);

	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
}

static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
					      __u32 *backlog)
{
	struct gnet_stats_queue qstats = { 0 };
	__u32 len = qdisc_qlen_sum(sch);

	__gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
	*qlen = qstats.qlen;
	*backlog = qstats.backlog;
}

static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_reset(sch);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	__qdisc_enqueue_tail(skb, &sch->q);
	qdisc_qstats_backlog_inc(sch, skb);
	return NET_XMIT_SUCCESS;
}

static inline void __qdisc_enqueue_head(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}
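
/* Example: together these helpers are enough for a bare FIFO data path,
 * along the lines of sch_fifo (example_enqueue is a placeholder;
 * qdisc_drop() is defined further below):
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 *
 * with qdisc_dequeue_head() serving directly as the ->dequeue callback.
 */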

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline void __qdisc_drop_all(struct sk_buff *skb,
				    struct sk_buff **to_free)
{
	if (skb->prev)
		skb->prev->next = *to_free;
	else
		skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
						 struct sk_buff *skb)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_backlog_dec(sch, skb);
		qdisc_bstats_cpu_update(sch, skb);
		qdisc_qstats_cpu_qlen_dec(sch);
	} else {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	}
}

static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
						 unsigned int pkt_len)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_qlen_inc(sch);
		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
	} else {
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
	}
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		if (qdisc_is_percpu_stats(sch)) {
			qdisc_qstats_cpu_backlog_dec(sch, skb);
			qdisc_qstats_cpu_qlen_dec(sch);
		} else {
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
		}
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL)
		qdisc_purge_queue(old);
	sch_tree_unlock(sch);

	return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop_all(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}
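
/* Example: with cell_log = 3 and no alignment or overhead configured, a
 * 1000 byte packet indexes slot 1000 >> 3 = 125, so qdisc_l2t() returns
 * rtab->data[125], the transmission time precomputed by userspace for that
 * size bucket.  Slots above 255 are extrapolated from data[255].
 */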

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u16	mpu;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (len < r->mpu)
		len = r->mpu;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field
	 * Qdisc using 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->mpu = r->mpu;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
 * The fast path only needs to access filter list and to update stats
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct tcf_block *block;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
	struct rcu_head rcu;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block);

/* Make sure qdisc is no longer in SCHED state. */
static inline void qdisc_synchronize(const struct Qdisc *q)
{
	while (test_bit(__QDISC_STATE_SCHED, &q->state))
		msleep(1);
}

#endif