/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/sch_generic.h>
#include <net/net_namespace.h>
#include <uapi/linux/pkt_sched.h>

#define DEFAULT_TX_QUEUE_LEN	1000
#define STAB_SIZE_LOG_MAX	30

struct qdisc_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};

static inline void *qdisc_priv(struct Qdisc *q)
{
	return &q->privdata;
}

/*
   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth.

   A normal IP packet is ~512 bytes, hence:

   0.5 Kbyte / 1 Mbyte/sec = 0.5 msec, so we need a 50 usec timer for
   10 Mbit ethernet.

   A 10 msec resolution limits us to < 50 Kbit/sec.

   The result: a [34]86 is not a good choice for a QoS router :-(

   Things are not so bad, though, because we may use an artificial
   clock evaluated by integration of the network data flow
   in the most critical places.
 */

typedef u64	psched_time_t;
typedef long	psched_tdiff_t;

/* Avoid doing 64 bit divide */
#define PSCHED_SHIFT			6
#define PSCHED_TICKS2NS(x)		((s64)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)		((x) >> PSCHED_SHIFT)

#define PSCHED_TICKS_PER_SEC		PSCHED_NS2TICKS(NSEC_PER_SEC)
#define PSCHED_PASTPERFECT		0

static inline psched_time_t psched_get_time(void)
{
	return PSCHED_NS2TICKS(ktime_get_ns());
}

static inline psched_tdiff_t
psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
{
	return min(tv1 - tv2, bound);
}

struct qdisc_watchdog {
	u64		last_expires;
	struct hrtimer	timer;
	struct Qdisc	*qdisc;
};

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
				 clockid_t clockid);
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);

void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
				      u64 delta_ns);

static inline void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd,
					      u64 expires)
{
	return qdisc_watchdog_schedule_range_ns(wd, expires, 0ULL);
}

static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
					   psched_time_t expires)
{
	qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
}

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
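
/*
 * Usage sketch (illustrative only, not part of this header's API): a
 * rate-limiting qdisc's ->dequeue() typically arms the watchdog when the
 * head packet is not yet eligible to be sent, so that the hrtimer
 * reschedules the qdisc once enough time has passed. The helper name and
 * the eligible_ns parameter below are hypothetical; tbf_dequeue() in
 * net/sched/sch_tbf.c follows this pattern for real.
 */
static inline struct sk_buff *
example_dequeue_or_sleep(struct qdisc_watchdog *wd, struct sk_buff *skb,
			 u64 eligible_ns)
{
	/* Watchdog expiry times are plain nanoseconds, not psched ticks;
	 * convert with PSCHED_TICKS2NS() if needed.
	 */
	if (skb && eligible_ns > ktime_get_ns()) {
		qdisc_watchdog_schedule_ns(wd, eligible_ns);
		return NULL;	/* retry when the timer fires */
	}
	return skb;
}
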
extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;

int fifo_set_limit(struct Qdisc *q, unsigned int limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit,
			       struct netlink_ext_ack *extack);

int register_qdisc(struct Qdisc_ops *qops);
int unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);

void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab,
					struct netlink_ext_ack *extack);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate);
void __qdisc_run(struct Qdisc *q);

static inline void qdisc_run(struct Qdisc *q)
{
	if (qdisc_run_begin(q)) {
		__qdisc_run(q);
		qdisc_run_end(q);
	}
}

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* Calculate the maximal size of a packet seen by the hard_start_xmit
   routine of this device.
 */
static inline unsigned int psched_mtu(const struct net_device *dev)
{
	return READ_ONCE(dev->mtu) + dev->hard_header_len;
}

static inline struct net *qdisc_net(struct Qdisc *q)
{
	return dev_net(q->dev_queue->dev);
}

struct tc_cbs_qopt_offload {
	u8 enable;
	s32 queue;
	s32 hicredit;
	s32 locredit;
	s32 idleslope;
	s32 sendslope;
};

struct tc_etf_qopt_offload {
	u8 enable;
	s32 queue;
};

struct tc_taprio_sched_entry {
	u8 command; /* TC_TAPRIO_CMD_* */

	/* The gate_mask in the offloading side refers to traffic classes */
	u32 gate_mask;
	u32 interval;
};

struct tc_taprio_qopt_offload {
	u8 enable;
	ktime_t base_time;
	u64 cycle_time;
	u64 cycle_time_extension;

	size_t num_entries;
	struct tc_taprio_sched_entry entries[];
};

/* Reference counting */
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload);

/* Ensure skb_mstamp_ns, which might have been populated with the txtime, is
 * not mistaken for a software timestamp, because this will otherwise prevent
 * the dispatch of hardware timestamps to the socket.
 */
static inline void skb_txtime_consumed(struct sk_buff *skb)
{
	skb->tstamp = ktime_set(0, 0);
}

#endif
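
/*
 * Module usage sketch (illustrative, not defined in this header): a qdisc
 * implementation registers its struct Qdisc_ops on module load and
 * unregisters it on unload. "example_qdisc_ops" is a hypothetical name;
 * net/sched/sch_prio.c does exactly this with prio_qdisc_ops.
 *
 *	static int __init example_init(void)
 *	{
 *		return register_qdisc(&example_qdisc_ops);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_qdisc(&example_qdisc_ops);
 *	}
 *	module_init(example_init);
 *	module_exit(example_exit);
 */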