Lines Matching refs:cl
213 struct htb_class *cl;
224 cl = htb_find(skb->priority, sch);
225 if (cl) {
226 if (cl->level == 0)
227 return cl;
229 tcf = rcu_dereference_bh(cl->filter_list);
247 cl = (void *)res.class;
248 if (!cl) {
251 cl = htb_find(res.classid, sch);
252 if (!cl)
255 if (!cl->level)
256 return cl; /* we hit leaf; return it */
259 tcf = rcu_dereference_bh(cl->filter_list);
262 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
263 if (!cl || cl->level)
265 return cl;
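The matched lines 213–265 above belong to the classification path: try skb->priority as a classid, accept it only if it names a leaf (level == 0), otherwise walk the filter chain and finally fall back to the qdisc's default class. Below is a minimal standalone sketch of that lookup-then-fallback flow; struct cls, find_cls and classify_stub are names invented for the sketch, and the tcf filter walk is elided.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct cls { uint32_t classid; int level; };   /* stand-in for htb_class */

    static struct cls *find_cls(uint32_t id, struct cls *tab, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            if (tab[i].classid == id)
                return &tab[i];
        return NULL;
    }

    static struct cls *classify_stub(uint32_t skb_priority, uint32_t defcls,
                                     struct cls *tab, size_t n)
    {
        struct cls *cl = find_cls(skb_priority, tab, n);

        if (cl && cl->level == 0)
            return cl;                  /* direct hit on a leaf */
        /* ... filter chain walk elided ... */
        cl = find_cls(defcls, tab, n);  /* fall back to the default class */
        if (!cl || cl->level)
            return NULL;                /* no usable leaf: treat as direct */
        return cl;
    }

    int main(void)
    {
        struct cls tab[] = { { 0x10001, 1 }, { 0x10010, 0 } };
        struct cls *cl = classify_stub(0x12345, 0x10010, tab, 2);
        printf("picked classid %#x\n", cl ? (unsigned)cl->classid : 0u);
        return 0;
    }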
275 struct htb_class *cl, int prio)
284 if (cl->common.classid > c->common.classid)
289 rb_link_node(&cl->node[prio], parent, p);
290 rb_insert_color(&cl->node[prio], root);
297 * change its mode in cl->pq_key microseconds. Make sure that class is not
301 struct htb_class *cl, s64 delay)
303 struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
305 cl->pq_key = q->now + delay;
306 if (cl->pq_key == q->now)
307 cl->pq_key++;
310 if (q->near_ev_cache[cl->level] > cl->pq_key)
311 q->near_ev_cache[cl->level] = cl->pq_key;
317 if (cl->pq_key >= c->pq_key)
322 rb_link_node(&cl->pq_node, parent, p);
323 rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
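Lines 297–323 queue a class on the per-level wait tree: the wake-up key is pq_key = now + delay (nudged by one so it never equals now), and near_ev_cache[] remembers the earliest pending key per level so the dequeue path can skip event processing early. A small standalone sketch of just that key/cache bookkeeping follows; the rb-tree insertion itself is omitted and the names are simplified stand-ins.

    #include <stdio.h>
    #include <stdint.h>

    #define LEVELS 8

    /* earliest pending event per level, initialised to "far future" */
    static int64_t near_ev_cache[LEVELS];

    /* Compute the wake-up key for a class and keep the per-level cache of the
       nearest event up to date, mirroring the pq_key/near_ev_cache lines above. */
    static int64_t schedule_wakeup(int level, int64_t now, int64_t delay)
    {
        int64_t pq_key = now + delay;

        if (pq_key == now)          /* never schedule "right now" */
            pq_key++;

        if (near_ev_cache[level] > pq_key)
            near_ev_cache[level] = pq_key;

        return pq_key;              /* caller inserts into wait_pq by this key */
    }

    int main(void)
    {
        for (int i = 0; i < LEVELS; i++)
            near_ev_cache[i] = INT64_MAX;

        printf("key=%lld\n", (long long)schedule_wakeup(0, 1000, 0));
        printf("key=%lld\n", (long long)schedule_wakeup(0, 1000, 250));
        printf("nearest level-0 event: %lld\n", (long long)near_ev_cache[0]);
        return 0;
    }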
344 struct htb_class *cl, int mask)
346 q->row_mask[cl->level] |= mask;
350 htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
373 struct htb_class *cl, int mask)
376 struct htb_level *hlevel = &q->hlevel[cl->level];
383 if (hprio->ptr == cl->node + prio)
386 htb_safe_rb_erase(cl->node + prio, &hprio->row);
390 q->row_mask[cl->level] &= ~m;
397 * for priorities it is participating on. cl->cmode must be new
398 * (activated) mode. It does nothing if cl->prio_activity == 0.
400 static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
402 struct htb_class *p = cl->parent;
403 long m, mask = cl->prio_activity;
405 while (cl->cmode == HTB_MAY_BORROW && p && mask) {
420 htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
423 cl = p;
424 p = cl->parent;
427 if (cl->cmode == HTB_CAN_SEND && mask)
428 htb_add_class_to_row(q, cl, mask);
434 * cl->cmode must represent old mode (before deactivation). It does
435 * nothing if cl->prio_activity == 0. Class is removed from all feed
438 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
440 struct htb_class *p = cl->parent;
441 long m, mask = cl->prio_activity;
443 while (cl->cmode == HTB_MAY_BORROW && p && mask) {
450 if (p->inner.clprio[prio].ptr == cl->node + prio) {
455 p->inner.clprio[prio].last_ptr_id = cl->common.classid;
459 htb_safe_rb_erase(cl->node + prio,
467 cl = p;
468 p = cl->parent;
471 if (cl->cmode == HTB_CAN_SEND && mask)
472 htb_remove_class_from_row(q, cl, mask);
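Lines 397–472 connect and disconnect a class's priority bitmask along its ancestor chain: while the class may only borrow, each still-set priority is hooked into the parent's feed, bits the parent already feeds stop propagating, and whatever mask survives up to a CAN_SEND ancestor is added to that level's row. The standalone sketch below mirrors only the activation direction; struct cls, feed_nonempty and row_mask are simplified stand-ins for the kernel structures.

    #include <stdio.h>

    enum cmode { CANT_SEND, MAY_BORROW, CAN_SEND };

    /* simplified stand-in for struct htb_class (invented for this sketch) */
    struct cls {
        struct cls *parent;
        enum cmode  cmode;
        unsigned    prio_activity;     /* bitmask of active priorities */
        int         feed_nonempty[8];  /* stands in for inner.clprio[].feed */
        int         level;
    };

    static unsigned row_mask[8];       /* per-level row mask, as in htb_sched */

    /* Propagate a leaf's priority mask towards the root: a priority stops
       propagating once an ancestor already feeds it, and whatever mask
       survives up to a CAN_SEND ancestor is added to that level's row. */
    static void activate_prios(struct cls *cl)
    {
        struct cls *p = cl->parent;
        unsigned mask = cl->prio_activity;

        while (cl->cmode == MAY_BORROW && p && mask) {
            for (int prio = 0; prio < 8; prio++) {
                if (!(mask & (1u << prio)))
                    continue;
                if (p->feed_nonempty[prio])     /* parent already fed: stop here */
                    mask &= ~(1u << prio);
                p->feed_nonempty[prio] = 1;     /* connect cl into parent's feed */
            }
            p->prio_activity |= mask;
            cl = p;
            p = cl->parent;
        }
        if (cl->cmode == CAN_SEND && mask)
            row_mask[cl->level] |= mask;        /* self-feeding ancestor joins the row */
    }

    int main(void)
    {
        struct cls root = { .cmode = CAN_SEND, .level = 1 };
        struct cls leaf = { .parent = &root, .cmode = MAY_BORROW,
                            .prio_activity = 1u << 2, .level = 0 };

        activate_prios(&leaf);
        printf("row_mask[1]=%#x root.prio_activity=%#x\n",
               row_mask[1], root.prio_activity);
        return 0;
    }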
475 static inline s64 htb_lowater(const struct htb_class *cl)
478 return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
482 static inline s64 htb_hiwater(const struct htb_class *cl)
485 return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
494 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
495 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
496 * from now to time when cl will change its state.
498 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
499 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
503 htb_class_mode(struct htb_class *cl, s64 *diff)
507 if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
512 if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
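Lines 475–512 are the heart of the mode computation: a class that cannot even cover the packet from its ceiling bucket is HTB_CANT_SEND, one whose own-rate bucket is healthy is HTB_CAN_SEND, and anything in between is HTB_MAY_BORROW. The low/high water helpers quoted at lines 475–485 add hysteresis so the mode does not flip exactly at zero tokens. A minimal sketch follows, assuming the hysteresis variant of those helpers; field and function names are simplified and the *diff back-channel the kernel uses for scheduling the next event is dropped.

    #include <stdio.h>
    #include <stdint.h>

    enum cmode { CANT_SEND, MAY_BORROW, CAN_SEND };

    /* simplified stand-in for struct htb_class; all names invented here */
    struct cls {
        enum cmode cmode;
        int64_t tokens, ctokens;     /* own-rate and ceil-rate buckets */
        int64_t buffer, cbuffer;     /* burst allowances */
    };

    /* Hysteresis thresholds as in htb_lowater()/htb_hiwater(): once a class is
       sending (or borrowing) it may dip as far as -buffer before the mode
       flips back, which limits mode transitions per time unit. */
    static int64_t lowater(const struct cls *c)
    { return c->cmode != CANT_SEND ? -c->cbuffer : 0; }

    static int64_t hiwater(const struct cls *c)
    { return c->cmode == CAN_SEND ? -c->buffer : 0; }

    /* diff is the token gain accumulated since the last update (elapsed time
       converted to tokens); the return value is the new mode. */
    static enum cmode class_mode(const struct cls *c, int64_t diff)
    {
        if (c->ctokens + diff < lowater(c))
            return CANT_SEND;        /* even the ceiling bucket is exhausted */
        if (c->tokens + diff >= hiwater(c))
            return CAN_SEND;         /* own rate bucket is healthy */
        return MAY_BORROW;           /* over own rate but under the ceiling */
    }

    int main(void)
    {
        struct cls c = { .cmode = CAN_SEND, .tokens = -50, .ctokens = 400,
                         .buffer = 100, .cbuffer = 200 };
        /* tokens = -50 is still >= -buffer, so hysteresis keeps it sending */
        printf("mode=%d\n", class_mode(&c, 0));
        return 0;
    }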
525 * be different from old one and cl->pq_key has to be valid if changing
529 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
531 enum htb_cmode new_mode = htb_class_mode(cl, diff);
533 if (new_mode == cl->cmode)
537 cl->overlimits++;
541 if (cl->prio_activity) { /* not necessary: speed optimization */
542 if (cl->cmode != HTB_CANT_SEND)
543 htb_deactivate_prios(q, cl);
544 cl->cmode = new_mode;
546 htb_activate_prios(q, cl);
548 cl->cmode = new_mode;
552 * htb_activate - inserts leaf cl into appropriate active feeds
558 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
560 WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
562 if (!cl->prio_activity) {
563 cl->prio_activity = 1 << cl->prio;
564 htb_activate_prios(q, cl);
569 * htb_deactivate - remove leaf cl from active feeds
574 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
576 WARN_ON(!cl->prio_activity);
578 htb_deactivate_prios(q, cl);
579 cl->prio_activity = 0;
588 struct htb_class *cl = htb_classify(skb, sch, &ret);
590 if (cl == HTB_DIRECT) {
599 } else if (!cl) {
605 } else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
609 cl->drops++;
613 htb_activate(q, cl);
621 static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
623 s64 toks = diff + cl->tokens;
625 if (toks > cl->buffer)
626 toks = cl->buffer;
627 toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
628 if (toks <= -cl->mbuffer)
629 toks = 1 - cl->mbuffer;
631 cl->tokens = toks;
634 static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
636 s64 toks = diff + cl->ctokens;
638 if (toks > cl->cbuffer)
639 toks = cl->cbuffer;
640 toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
641 if (toks <= -cl->mbuffer)
642 toks = 1 - cl->mbuffer;
644 cl->ctokens = toks;
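Lines 621–644 do one token-bucket step per charge: the time-based gain diff is added, capped at the burst buffer, the transmit cost of the packet (length converted to time at the configured rate) is subtracted, and the result is floored just above -mbuffer so debt stays bounded. The sketch below is standalone; len_to_ns is a deliberately crude stand-in for psched_l2t_ns(), which in the kernel uses a precomputed rate table.

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for psched_l2t_ns(): time to transmit `bytes` at `rate`
       bytes/s, in nanoseconds. */
    static int64_t len_to_ns(uint64_t rate_bytes_ps, unsigned bytes)
    {
        return (int64_t)((bytes * 1000000000ULL) / rate_bytes_ps);
    }

    /* One token-bucket update, mirroring htb_accnt_tokens()/_ctokens():
       add the elapsed-time gain, cap at the burst buffer, subtract the cost
       of the packet, and never fall more than mbuffer below zero. */
    static int64_t account(int64_t tokens, int64_t diff, int64_t buffer,
                           int64_t mbuffer, uint64_t rate, unsigned bytes)
    {
        int64_t toks = diff + tokens;

        if (toks > buffer)
            toks = buffer;                       /* no more burst than allowed */
        toks -= len_to_ns(rate, bytes);
        if (toks <= -mbuffer)
            toks = 1 - mbuffer;                  /* bounded debt */
        return toks;
    }

    int main(void)
    {
        /* 1500-byte packet at 1 MB/s, 10 ms of accumulated credit,
           20 ms burst buffer, 60 s mbuffer (values picked for the example) */
        int64_t t = account(0, 10000000, 20000000, 60000000000LL,
                            1000000, 1500);
        printf("tokens after charge: %lld ns\n", (long long)t);
        return 0;
    }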
650 * Routine assumes that packet "bytes" long was dequeued from leaf cl
658 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
665 while (cl) {
666 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
667 if (cl->level >= level) {
668 if (cl->level == level)
669 cl->xstats.lends++;
670 htb_accnt_tokens(cl, bytes, diff);
672 cl->xstats.borrows++;
673 cl->tokens += diff; /* we moved t_c; update tokens */
675 htb_accnt_ctokens(cl, bytes, diff);
676 cl->t_c = q->now;
678 old_mode = cl->cmode;
680 htb_change_class_mode(q, cl, &diff);
681 if (old_mode != cl->cmode) {
683 htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
684 if (cl->cmode != HTB_CAN_SEND)
685 htb_add_to_wait_tree(q, cl, diff);
689 if (cl->level)
690 bstats_update(&cl->bstats, skb);
692 cl = cl->parent;
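Lines 650–692 charge a dequeued packet up the class hierarchy: ancestors at or above the level the packet was actually sent from pay from their own rate bucket (the one exactly at that level counts a lend), ancestors below it only count a borrow and have their own bucket merely refreshed, and every class always pays from its ceil bucket. The tiny sketch below only classifies who pays what; the mode recomputation and wait-queue moves that follow in the kernel loop are omitted.

    #include <stdio.h>

    /* Which buckets get charged for a packet dequeued from a leaf that
       borrowed at `borrow_level`: ancestors at or above that level pay from
       their own rate bucket (plus ceil), ancestors below it pay ceil only. */
    static const char *charge_kind(int class_level, int borrow_level)
    {
        if (class_level >= borrow_level)
            return class_level == borrow_level ? "lend (own rate + ceil)"
                                               : "own rate + ceil";
        return "borrow (ceil only)";
    }

    int main(void)
    {
        /* a 3-deep hierarchy where the packet was sent at level 2 */
        for (int lvl = 0; lvl <= 2; lvl++)
            printf("class at level %d: %s\n", lvl, charge_kind(lvl, 2));
        return 0;
    }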
701 * Note: Applied are events that have cl->pq_key <= q->now.
714 struct htb_class *cl;
721 cl = rb_entry(p, struct htb_class, pq_node);
722 if (cl->pq_key > q->now)
723 return cl->pq_key;
726 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
727 htb_change_class_mode(q, cl, &diff);
728 if (cl->cmode != HTB_CAN_SEND)
729 htb_add_to_wait_tree(q, cl, diff);
749 struct htb_class *cl =
752 if (id > cl->common.classid) {
754 } else if (id < cl->common.classid) {
807 struct htb_class *cl;
810 cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
811 if (!cl->level)
812 return cl;
813 clp = &cl->inner.clprio[prio];
830 struct htb_class *cl, *start;
835 start = cl = htb_lookup_leaf(hprio, prio);
839 if (unlikely(!cl))
847 if (unlikely(cl->leaf.q->q.qlen == 0)) {
849 htb_deactivate(q, cl);
857 if (cl == start) /* fix start if we just deleted it */
859 cl = next;
863 skb = cl->leaf.q->dequeue(cl->leaf.q);
867 qdisc_warn_nonwc("htb", cl->leaf.q);
868 htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
870 cl = htb_lookup_leaf(hprio, prio);
872 } while (cl != start);
875 bstats_update(&cl->bstats, skb);
876 cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
877 if (cl->leaf.deficit[level] < 0) {
878 cl->leaf.deficit[level] += cl->quantum;
879 htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
885 if (!cl->leaf.q->q.qlen)
886 htb_deactivate(q, cl);
887 htb_charge_class(q, cl, level, skb);
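Lines 807–887 are the dequeue path; the deficit lines (876–879) implement deficit round robin within one priority: the chosen class pays the packet length out of its per-level deficit, and once the deficit goes negative it is topped up by the class quantum and the row pointer advances to the next class. A self-contained sketch of that step, using a plain circular array where HTB keeps an rb-tree per level and priority.

    #include <stdio.h>

    /* Deficit round robin step: each class spends its deficit on packet
       bytes; once it goes negative it receives a fresh quantum and the
       scheduler moves on to the next class in the row. */
    struct cls { const char *name; int deficit; int quantum; };

    static int drr_charge(struct cls *c, int pkt_len, int *cursor, int nclasses)
    {
        c->deficit -= pkt_len;
        if (c->deficit < 0) {
            c->deficit += c->quantum;            /* refill for the next round */
            *cursor = (*cursor + 1) % nclasses;  /* advance to the next class */
            return 1;                            /* turn ended */
        }
        return 0;                                /* same class keeps the turn */
    }

    int main(void)
    {
        struct cls row[2] = { { "A", 0, 1500 }, { "B", 0, 3000 } };
        int cur = 0;

        /* B's quantum is twice A's, so B ends up with roughly twice the service */
        for (int i = 0; i < 6; i++) {
            struct cls *c = &row[cur];
            printf("dequeue 1000B from %s (deficit %d)\n", c->name, c->deficit);
            drr_charge(c, 1000, &cur, 2);
        }
        return 0;
    }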
955 struct htb_class *cl;
959 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
960 if (cl->level)
961 memset(&cl->inner, 0, sizeof(cl->inner));
963 if (cl->leaf.q)
964 qdisc_reset(cl->leaf.q);
966 cl->prio_activity = 0;
967 cl->cmode = HTB_CAN_SEND;
1078 struct htb_class *cl = (struct htb_class *)arg;
1085 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1086 tcm->tcm_handle = cl->common.classid;
1087 if (!cl->level && cl->leaf.q)
1088 tcm->tcm_info = cl->leaf.q->handle;
1096 psched_ratecfg_getrate(&opt.rate, &cl->rate);
1097 opt.buffer = PSCHED_NS2TICKS(cl->buffer);
1098 psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
1099 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
1100 opt.quantum = cl->quantum;
1101 opt.prio = cl->prio;
1102 opt.level = cl->level;
1105 if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
1106 nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
1109 if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
1110 nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
1124 struct htb_class *cl = (struct htb_class *)arg;
1126 .drops = cl->drops,
1127 .overlimits = cl->overlimits,
1131 if (!cl->level && cl->leaf.q)
1132 qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
1134 cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
1136 cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
1140 d, NULL, &cl->bstats) < 0 ||
1141 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1145 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1151 struct htb_class *cl = (struct htb_class *)arg;
1153 if (cl->level)
1157 cl->common.classid, extack)) == NULL)
1160 *old = qdisc_replace(sch, new, &cl->leaf.q);
1166 struct htb_class *cl = (struct htb_class *)arg;
1167 return !cl->level ? cl->leaf.q : NULL;
1172 struct htb_class *cl = (struct htb_class *)arg;
1174 htb_deactivate(qdisc_priv(sch), cl);
1177 static inline int htb_parent_last_child(struct htb_class *cl)
1179 if (!cl->parent)
1182 if (cl->parent->children > 1)
1188 static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
1191 struct htb_class *parent = cl->parent;
1193 WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
1208 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1210 if (!cl->level) {
1211 WARN_ON(!cl->leaf.q);
1212 qdisc_put(cl->leaf.q);
1214 gen_kill_estimator(&cl->rate_est);
1215 tcf_block_put(cl->block);
1216 kfree(cl);
1223 struct htb_class *cl;
1236 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1237 tcf_block_put(cl->block);
1238 cl->block = NULL;
1242 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1244 htb_destroy_class(sch, cl);
1253 struct htb_class *cl = (struct htb_class *)arg;
1261 if (cl->children || cl->filter_cnt)
1264 if (!cl->level && htb_parent_last_child(cl)) {
1266 cl->parent->common.classid,
1273 if (!cl->level)
1274 qdisc_purge_queue(cl->leaf.q);
1277 qdisc_class_hash_remove(&q->clhash, &cl->common);
1278 if (cl->parent)
1279 cl->parent->children--;
1281 if (cl->prio_activity)
1282 htb_deactivate(q, cl);
1284 if (cl->cmode != HTB_CAN_SEND)
1285 htb_safe_rb_erase(&cl->pq_node,
1286 &q->hlevel[cl->level].wait_pq);
1289 htb_parent_to_leaf(q, cl, new_q);
1293 htb_destroy_class(sch, cl);
1303 struct htb_class *cl = (struct htb_class *)*arg, *parent;
1339 if (!cl) { /* new class */
1368 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1369 if (!cl)
1372 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
1374 kfree(cl);
1378 err = gen_new_estimator(&cl->bstats, NULL,
1379 &cl->rate_est,
1384 tcf_block_put(cl->block);
1385 kfree(cl);
1390 cl->children = 0;
1391 RB_CLEAR_NODE(&cl->pq_node);
1394 RB_CLEAR_NODE(&cl->node[prio]);
1420 cl->leaf.q = new_q ? new_q : &noop_qdisc;
1422 cl->common.classid = classid;
1423 cl->parent = parent;
1426 cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1427 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
1428 cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
1429 cl->t_c = ktime_get_ns();
1430 cl->cmode = HTB_CAN_SEND;
1433 qdisc_class_hash_insert(&q->clhash, &cl->common);
1436 if (cl->leaf.q != &noop_qdisc)
1437 qdisc_hash_add(cl->leaf.q, true);
1440 err = gen_replace_estimator(&cl->bstats, NULL,
1441 &cl->rate_est,
1455 psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
1456 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
1459 * is really leaf before changing cl->leaf !
1461 if (!cl->level) {
1462 u64 quantum = cl->rate.rate_bytes_ps;
1465 cl->quantum = min_t(u64, quantum, INT_MAX);
1467 if (!hopt->quantum && cl->quantum < 1000) {
1469 cl->quantum = 1000;
1471 if (!hopt->quantum && cl->quantum > 200000) {
1473 cl->quantum = 200000;
1476 cl->quantum = hopt->quantum;
1477 if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
1478 cl->prio = TC_HTB_NUMPRIO - 1;
1481 cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
1482 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
1489 cl->common.classid, (warn == -1 ? "small" : "big"));
1493 *arg = (unsigned long)cl;
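Lines 1455–1493 finish class setup: for a leaf without an explicit quantum the value is derived from the configured rate and clamped, with a warning, into the [1000, 200000] byte range (after being capped at INT_MAX), while an explicit hopt->quantum is taken as-is; prio is likewise clamped to TC_HTB_NUMPRIO - 1. The sketch below reproduces that clamping; the division by the qdisc-wide rate2quantum factor is not among the matched lines, so treat it as an assumption (10 is HTB's traditional default for that factor).

    #include <stdio.h>
    #include <stdint.h>
    #include <limits.h>

    /* Derive a leaf's DRR quantum from its rate, following the clamping in
       the lines above.  The rate2quantum divisor is an assumption here. */
    static int leaf_quantum(uint64_t rate_bytes_ps, unsigned rate2quantum,
                            unsigned cfg_quantum)
    {
        uint64_t q;

        if (cfg_quantum)
            return (int)cfg_quantum;         /* explicit quantum wins */

        q = rate_bytes_ps / rate2quantum;    /* assumed divisor, see lead-in */
        if (q > INT_MAX)
            q = INT_MAX;
        if (q < 1000)
            q = 1000;                        /* kernel warns: quantum is small */
        if (q > 200000)
            q = 200000;                      /* kernel warns: quantum is big */
        return (int)q;
    }

    int main(void)
    {
        printf("%d\n", leaf_quantum(1250, 10, 0));        /* low rate -> floor 1000 */
        printf("%d\n", leaf_quantum(12500000, 10, 0));    /* high rate -> cap 200000 */
        printf("%d\n", leaf_quantum(12500000, 10, 3000)); /* explicit quantum */
        return 0;
    }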
1504 struct htb_class *cl = (struct htb_class *)arg;
1506 return cl ? cl->block : q->block;
1512 struct htb_class *cl = htb_find(classid, sch);
1514 /*if (cl && !cl->level) return 0;
1523 if (cl)
1524 cl->filter_cnt++;
1525 return (unsigned long)cl;
1530 struct htb_class *cl = (struct htb_class *)arg;
1532 if (cl)
1533 cl->filter_cnt--;
1539 struct htb_class *cl;
1546 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1551 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {