Lines matching defs:sch — every occurrence of the struct Qdisc *sch handle in net/sched/sch_fq.c (the fq packet scheduler), listed by source line.
380 static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
387 skb->dev = qdisc_dev(sch);
394 static void fq_dequeue_skb(struct Qdisc *sch, struct fq_flow *flow,
397 fq_erase_head(sch, flow, skb);
400 qdisc_qstats_backlog_dec(sch, skb);
401 sch->q.qlen--;
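The first cluster (lines 380-401) is the dequeue-side bookkeeping. fq_erase_head() unlinks an skb either from the flow's linear list or from its time-ordered rbtree; the rbtree branch must restore skb->dev (line 387) because skb->rbnode shares a union with skb->next/prev/dev in struct sk_buff. fq_dequeue_skb() then charges the removal to the qdisc-level counters. Below is a minimal sketch of that counter discipline, simplified to the linear-list case; struct toy_flow and toy_dequeue_skb() are invented names for illustration, not kernel API.

#include <net/sch_generic.h>

/* Pared-down flow, following the shape of sch_fq's struct fq_flow. */
struct toy_flow {
        struct sk_buff *head;   /* oldest packet of this flow */
        int qlen;
};

static void toy_dequeue_skb(struct Qdisc *sch, struct toy_flow *flow,
                            struct sk_buff *skb)
{
        flow->head = skb->next;         /* unlink from the flow */
        skb_mark_not_on_list(skb);      /* clear skb->next before handing up */
        flow->qlen--;
        qdisc_qstats_backlog_dec(sch, skb);     /* byte backlog */
        sch->q.qlen--;                          /* packet count */
}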
442 static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
445 struct fq_sched_data *q = qdisc_priv(sch);
448 if (unlikely(sch->q.qlen >= sch->limit))
449 return qdisc_drop(skb, sch, to_free);
464 return qdisc_drop(skb, sch, to_free);
476 return qdisc_drop(skb, sch, to_free);
480 qdisc_qstats_backlog_inc(sch, skb);
494 sch->q.qlen++;
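Lines 442-494 are fq_enqueue(): a hard packet cap against sch->limit, qdisc_drop() on every rejection path (lines 449, 464, 476), and symmetric byte/packet accounting on acceptance. Here is a sketch of the same skeleton over a single FIFO; struct toy_sched_data and toy_enqueue() are invented (fq's real fq_sched_data is far richer), and the later sketches below reuse this struct.

#include <linux/rbtree.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct toy_sched_data {
        struct sk_buff_head q;          /* one FIFO standing in for fq's flows */
        struct rb_root *flow_table;     /* stand-in for fq's hash of rbtrees */
        u32 quantum;
        u32 log;                        /* log2 of flow_table size */
        u32 flows;
        struct qdisc_watchdog watchdog;
};

static int toy_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free)
{
        struct toy_sched_data *q = qdisc_priv(sch);

        if (unlikely(sch->q.qlen >= sch->limit))
                return qdisc_drop(skb, sch, to_free);   /* accounted as a drop */

        qdisc_qstats_backlog_inc(sch, skb);     /* bytes in */
        __skb_queue_tail(&q->q, skb);
        sch->q.qlen++;                          /* packets in */
        return NET_XMIT_SUCCESS;
}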
526 static struct sk_buff *fq_dequeue(struct Qdisc *sch)
528 struct fq_sched_data *q = qdisc_priv(sch);
536 if (!sch->q.qlen)
541 fq_dequeue_skb(sch, &q->internal, skb);
584 fq_dequeue_skb(sch, f, skb);
642 qdisc_bstats_update(sch, skb);
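In fq_dequeue() (lines 526-642), sch->q.qlen gates the empty fast path, and qdisc_bstats_update() credits the transmit statistics only once an skb is actually handed back to the caller. The same shape over the toy FIFO:

static struct sk_buff *toy_dequeue(struct Qdisc *sch)
{
        struct toy_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;

        if (!sch->q.qlen)       /* nothing queued, fast exit */
                return NULL;

        skb = __skb_dequeue(&q->q);
        if (skb) {
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
                qdisc_bstats_update(sch, skb);  /* sent-side stats */
        }
        return skb;
}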
662 static void fq_reset(struct Qdisc *sch)
664 struct fq_sched_data *q = qdisc_priv(sch);
670 sch->q.qlen = 0;
671 sch->qstats.backlog = 0;
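fq_reset() (lines 662-671) frees everything still queued and zeroes both qdisc counters so the qdisc is consistent after a flush. Sketch:

static void toy_reset(struct Qdisc *sch)
{
        struct toy_sched_data *q = qdisc_priv(sch);

        __skb_queue_purge(&q->q);       /* free every pending skb */
        sch->q.qlen = 0;
        sch->qstats.backlog = 0;
}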
747 static int fq_resize(struct Qdisc *sch, u32 log)
749 struct fq_sched_data *q = qdisc_priv(sch);
759 netdev_queue_numa_node_read(sch->dev_queue));
766 sch_tree_lock(sch);
775 sch_tree_unlock(sch);
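fq_resize() (lines 747-775) allocates the new flow table NUMA-local to the device queue, then swaps it in under sch_tree_lock() so concurrent enqueue/dequeue never observe a half-built table. A sketch under the same discipline; rehashing of the old entries is elided (the real code walks both rbtree arrays):

#include <linux/mm.h>   /* kvmalloc_node(), kvfree() */

static int toy_resize(struct Qdisc *sch, u32 log)
{
        struct toy_sched_data *q = qdisc_priv(sch);
        struct rb_root *new_table, *old_table;
        u32 idx;

        /* The possibly-sleeping allocation happens before taking the lock. */
        new_table = kvmalloc_node(sizeof(*new_table) << log, GFP_KERNEL,
                                  netdev_queue_numa_node_read(sch->dev_queue));
        if (!new_table)
                return -ENOMEM;

        for (idx = 0; idx < (1U << log); idx++)
                new_table[idx] = RB_ROOT;

        sch_tree_lock(sch);
        old_table = q->flow_table;      /* real code rehashes old entries here */
        q->flow_table = new_table;
        q->log = log;
        sch_tree_unlock(sch);

        kvfree(old_table);
        return 0;
}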
806 static int fq_change(struct Qdisc *sch, struct nlattr *opt,
809 struct fq_sched_data *q = qdisc_priv(sch);
820 sch_tree_lock(sch);
833 sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);
899 sch_tree_unlock(sch);
900 err = fq_resize(sch, fq_log);
901 sch_tree_lock(sch);
903 while (sch->q.qlen > sch->limit) {
904 struct sk_buff *skb = fq_dequeue(sch);
912 qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
914 sch_tree_unlock(sch);
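fq_change() (lines 806-914) applies netlink parameters under sch_tree_lock(), drops the lock around fq_resize() (lines 899-901, since the allocation may sleep), then trims whatever exceeds the new limit and reports the loss upward with qdisc_tree_reduce_backlog() so parent qdiscs keep accurate counts. The trim loop at lines 903-912 is the reusable idiom; a sketch, meant to run with the tree lock held:

#include <linux/rtnetlink.h>    /* rtnl_kfree_skbs() */

/* Drain packets above sch->limit via the qdisc's own dequeue, then
 * propagate the reduction to the parent hierarchy. */
static void toy_trim_to_limit(struct Qdisc *sch)
{
        unsigned int drop_count = 0, drop_len = 0;

        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = toy_dequeue(sch);

                if (!skb)
                        break;
                drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);      /* deferred free under RTNL */
                drop_count++;
        }
        qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
}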
918 static void fq_destroy(struct Qdisc *sch)
920 struct fq_sched_data *q = qdisc_priv(sch);
922 fq_reset(sch);
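fq_destroy() (lines 918-922) tears down in the safe order: flush queued packets first, free the flow table, then cancel the watchdog timer. Sketch, reusing the toy helpers above:

static void toy_destroy(struct Qdisc *sch)
{
        struct toy_sched_data *q = qdisc_priv(sch);

        toy_reset(sch);                 /* free queued skbs first */
        kvfree(q->flow_table);
        qdisc_watchdog_cancel(&q->watchdog);
}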
927 static int fq_init(struct Qdisc *sch, struct nlattr *opt,
930 struct fq_sched_data *q = qdisc_priv(sch);
933 sch->limit = 10000;
935 q->quantum = 2 * psched_mtu(qdisc_dev(sch));
936 q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
957 qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
960 err = fq_change(sch, opt, extack);
962 err = fq_resize(sch, q->fq_trees_log);
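fq_init() (lines 927-962) seeds defaults from the device MTU, binds its watchdog to CLOCK_MONOTONIC (fq stamps packets with the monotonic clock, so its wakeup timer must tick on the same clock), and funnels any netlink options through fq_change(), falling back to a bare fq_resize(). Sketch; toy_change() is a hypothetical counterpart of fq_change():

static int toy_init(struct Qdisc *sch, struct nlattr *opt,
                    struct netlink_ext_ack *extack)
{
        struct toy_sched_data *q = qdisc_priv(sch);

        sch->limit = 10000;     /* default packet cap, as in fq (line 933) */
        q->quantum = 2 * psched_mtu(qdisc_dev(sch));
        __skb_queue_head_init(&q->q);

        qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);

        if (opt)
                return toy_change(sch, opt, extack);    /* hypothetical */
        return toy_resize(sch, q->log);
}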
967 static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
969 struct fq_sched_data *q = qdisc_priv(sch);
983 if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
1008 static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
1010 struct fq_sched_data *q = qdisc_priv(sch);
1013 sch_tree_lock(sch);
1032 sch_tree_unlock(sch);
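The two dump paths close the loop: fq_dump() (lines 967-983) serializes the live configuration, reading sch->limit straight into TCA_FQ_PLIMIT, while fq_dump_stats() (lines 1008-1032) snapshots its counters under sch_tree_lock() so userspace never sees a torn read. A sketch of both; note nla_nest_start_noflag() exists on kernels >= 5.2 (older trees use nla_nest_start()), and q->flows is the toy stand-in for fq's flow counter:

#include <linux/pkt_sched.h>    /* TCA_FQ_PLIMIT, struct tc_fq_qd_stats */

static int toy_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct nlattr *opts;

        opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (!opts)
                return -EMSGSIZE;
        if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit))
                goto nla_put_failure;
        return nla_nest_end(skb, opts);

nla_put_failure:
        return -EMSGSIZE;
}

static int toy_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct toy_sched_data *q = qdisc_priv(sch);
        struct tc_fq_qd_stats st = {};

        sch_tree_lock(sch);     /* consistent snapshot of counters */
        st.flows = q->flows;
        sch_tree_unlock(sch);

        return gnet_stats_copy_app(d, &st, sizeof(st));
}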