Lines matching refs: cl (occurrences of the cbq_class pointer 'cl' in net/sched/sch_cbq.c)
163 #define L2T(cl, len) qdisc_l2t((cl)->R_tab, len)
181 struct cbq_class *cl;
183 for (cl = this->tparent; cl; cl = cl->tparent) {
184 struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
210 struct cbq_class *cl = NULL;
219 (cl = cbq_class_lookup(q, prio)) != NULL)
220 return cl;
237 cl = (void *)res.class;
238 if (!cl) {
240 cl = cbq_class_lookup(q, res.classid);
241 else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
242 cl = defmap[TC_PRIO_BESTEFFORT];
244 if (cl == NULL)
247 if (cl->level >= head->level)
257 return cbq_reclassify(skb, cl);
260 if (cl->level == 0)
261 return cl;
268 head = cl;
272 cl = head;
278 !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
279 !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
282 return cl;
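
The classification hits above (source lines 210-282) end in a default-map fallback: when the tc filter verdict does not resolve to a leaf class, the code tries the split node's defaults[] slot for the packet priority and then the best-effort slot. A minimal userspace sketch of that fallback, assuming the kernel values TC_PRIO_BESTEFFORT = 0 and TC_PRIO_MAX = 15; struct cls and pick_default are illustrative names, not the kernel's.

    #define TC_PRIO_BESTEFFORT 0
    #define TC_PRIO_MAX        15

    struct cls;                      /* stand-in for struct cbq_class */

    /* Fallback used when filters give no leaf class: try the slot for this
     * priority first, then the best-effort slot; the caller sees NULL if
     * both slots are empty. */
    static struct cls *pick_default(struct cls *defaults[TC_PRIO_MAX + 1],
                                    unsigned int prio)
    {
        struct cls *cl = defaults[prio & TC_PRIO_MAX];

        if (!cl)
            cl = defaults[TC_PRIO_BESTEFFORT];
        return cl;
    }
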
291 static inline void cbq_activate_class(struct cbq_class *cl)
293 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
294 int prio = cl->cpriority;
298 q->active[prio] = cl;
301 cl->next_alive = cl_tail->next_alive;
302 cl_tail->next_alive = cl;
304 cl->next_alive = cl;
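
cbq_activate_class (source lines 291-304) keeps each priority's run list as a circular singly linked list threaded through next_alive, with q->active[prio] pointing at the tail; the new class becomes the tail and is spliced in after the old one. A sketch of that insertion on a simplified node type, not the kernel structures.

    struct node { struct node *next_alive; };

    /* Insert cl into the circular run list whose tail is *tailp.
     * Empty list: *tailp == NULL; single member: a node pointing at itself. */
    static void activate(struct node **tailp, struct node *cl)
    {
        struct node *tail = *tailp;

        *tailp = cl;                     /* the new class becomes the tail */
        if (tail) {                      /* splice in right after the old tail */
            cl->next_alive = tail->next_alive;
            tail->next_alive = cl;
        } else {
            cl->next_alive = cl;         /* first member points at itself */
        }
    }
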
319 struct cbq_class *cl;
323 cl = cl_prev->next_alive;
324 if (cl == this) {
325 cl_prev->next_alive = cl->next_alive;
326 cl->next_alive = NULL;
328 if (cl == q->active[prio]) {
330 if (cl == q->active[prio]) {
338 } while ((cl_prev = cl) != q->active[prio]);
342 cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
346 if (toplevel > cl->level) {
350 if (cl->undertime < now) {
351 q->toplevel = cl->level;
354 } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
364 struct cbq_class *cl = cbq_classify(skb, sch, &ret);
367 q->rx_class = cl;
369 if (cl == NULL) {
376 ret = qdisc_enqueue(skb, cl->q, to_free);
379 cbq_mark_toplevel(q, cl);
380 if (!cl->next_alive)
381 cbq_activate_class(cl);
387 cbq_mark_toplevel(q, cl);
388 cl->qstats.drops++;
394 static void cbq_overlimit(struct cbq_class *cl)
396 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
397 psched_tdiff_t delay = cl->undertime - q->now;
399 if (!cl->delayed) {
400 delay += cl->offtime;
407 * place, apparently they forgot to shift it by cl->ewma_log.
409 if (cl->avgidle < 0)
410 delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
411 if (cl->avgidle < cl->minidle)
412 cl->avgidle = cl->minidle;
415 cl->undertime = q->now + delay;
417 cl->xstats.overactions++;
418 cl->delayed = 1;
431 for (b = cl->borrow; b; b = b->borrow) {
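
The cbq_overlimit hits (source lines 394-431) compute how long an over-limit class must stay quiet: the base is undertime - now, plus offtime the first time the class is delayed, minus a correction of (1 - 2^-ewma_log) * (-avgidle) when avgidle has gone negative, with avgidle clamped to minidle. A standalone numeric sketch of that arithmetic; the variable names mirror the fields but the values are made up.

    #include <stdio.h>

    int main(void)
    {
        long now = 0, undertime = 800;       /* over limit until t = 800 */
        long offtime = 1000, avgidle = -512, minidle = -1000;
        unsigned int ewma_log = 5;
        int delayed = 0;

        long delay = undertime - now;
        if (!delayed) {
            delay += offtime;
            /* subtract (1 - W) * (-avgidle), W = 2^-ewma_log */
            if (avgidle < 0)
                delay -= (-avgidle) - ((-avgidle) >> ewma_log);
            if (avgidle < minidle)
                avgidle = minidle;
        }
        undertime = now + delay;
        printf("delay=%ld undertime=%ld\n", delay, undertime);  /* 1304 1304 */
        return 0;
    }
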
447 struct cbq_class *cl;
455 cl = cl_prev->next_alive;
456 if (now - cl->penalized > 0) {
457 cl_prev->next_alive = cl->next_alive;
458 cl->next_alive = NULL;
459 cl->cpriority = cl->priority;
460 cl->delayed = 0;
461 cbq_activate_class(cl);
463 if (cl == q->active[prio]) {
465 if (cl == q->active[prio]) {
471 cl = cl_prev->next_alive;
472 } else if (sched - cl->penalized > 0)
473 sched = cl->penalized;
474 } while ((cl_prev = cl) != q->active[prio]);
529 cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
532 if (cl && q->toplevel >= borrowed->level) {
533 if (cl->q->q.qlen > 1) {
554 struct cbq_class *cl = this;
564 for ( ; cl; cl = cl->share) {
565 long avgidle = cl->avgidle;
568 cl->bstats.packets++;
569 cl->bstats.bytes += len;
578 idle = now - cl->last;
580 avgidle = cl->maxidle;
582 idle -= L2T(cl, len);
585 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
586 * cl->avgidle == true_avgidle/W,
589 avgidle += idle - (avgidle>>cl->ewma_log);
595 if (avgidle < cl->minidle)
596 avgidle = cl->minidle;
598 cl->avgidle = avgidle;
606 * idle = (1 - W)*(-cl->avgidle);
608 idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
620 idle += L2T(cl, len);
622 cl->undertime = now + idle;
626 cl->undertime = PSCHED_PASTPERFECT;
627 if (avgidle > cl->maxidle)
628 cl->avgidle = cl->maxidle;
630 cl->avgidle = avgidle;
632 if ((s64)(now - cl->last) > 0)
633 cl->last = now;
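
The cbq_update hits (source lines 554-633) are the core of the link-sharing estimator: idle is the gap since the last transmission minus the time the packet was entitled to (L2T), and avgidle is an EWMA of idle stored pre-scaled by 1/W, so the textbook update avg = (1-W)*avg + W*idle becomes the shift form avgidle += idle - (avgidle >> ewma_log). A standalone sketch comparing the scaled integer update with the unscaled EWMA it corresponds to; the sample values and helper name are invented.

    #include <stdio.h>

    /* Scaled EWMA as in cbq_update(): the stored value is true_avgidle / W,
     * with W = 2^-ewma_log, so one update is an add, a subtract and a shift. */
    static long ewma_scaled(long avgidle, long idle, unsigned int ewma_log)
    {
        return avgidle + idle - (avgidle >> ewma_log);
    }

    int main(void)
    {
        unsigned int ewma_log = 3;           /* W = 1/8 */
        double W = 1.0 / 8.0, true_avg = 0.0;
        long scaled = 0;                     /* plays the role of cl->avgidle */
        long idle_samples[] = { 40, -16, 8, 24 };

        for (int i = 0; i < 4; i++) {
            scaled = ewma_scaled(scaled, idle_samples[i], ewma_log);
            true_avg = (1.0 - W) * true_avg + W * idle_samples[i];
            /* the two track each other; the integer shift truncates slightly */
            printf("scaled=%ld  scaled*W=%.2f  true=%.2f\n",
                   scaled, scaled * W, true_avg);
        }
        return 0;
    }
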
640 cbq_under_limit(struct cbq_class *cl)
642 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
643 struct cbq_class *this_cl = cl;
645 if (cl->tparent == NULL)
646 return cl;
648 if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
649 cl->delayed = 0;
650 return cl;
664 cl = cl->borrow;
665 if (!cl) {
670 if (cl->level > q->toplevel)
672 } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);
674 cl->delayed = 0;
675 return cl;
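
cbq_under_limit (source lines 640-675) decides whether a class may transmit now: the root class always may, an under-limit class (undertime unset or already passed) may, and otherwise the code climbs the borrow chain looking for an under-limit ancestor, giving up when the chain ends or rises above q->toplevel. A simplified sketch of that walk with illustrative types; PASTPERFECT stands in for PSCHED_PASTPERFECT and the delayed/overlimit bookkeeping is omitted.

    #include <stddef.h>

    #define PASTPERFECT 0                    /* stand-in for PSCHED_PASTPERFECT */

    struct klass {
        struct klass *tparent, *borrow;
        long long undertime;
        int level;
    };

    /* Return the class allowed to send on behalf of cl, or NULL when the whole
     * borrow chain is over limit and the caller must treat cl as overlimit. */
    static struct klass *under_limit(struct klass *cl, long long now, int toplevel)
    {
        if (!cl->tparent)                    /* the root (link) class */
            return cl;
        if (cl->undertime == PASTPERFECT || now >= cl->undertime)
            return cl;

        do {
            cl = cl->borrow;
            if (!cl || cl->level > toplevel) /* nobody left to borrow from */
                return NULL;
        } while (cl->undertime != PASTPERFECT && now < cl->undertime);

        return cl;
    }
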
682 struct cbq_class *cl_tail, *cl_prev, *cl;
687 cl = cl_prev->next_alive;
694 struct cbq_class *borrow = cl;
696 if (cl->q->q.qlen &&
697 (borrow = cbq_under_limit(cl)) == NULL)
700 if (cl->deficit <= 0) {
705 cl->deficit += cl->quantum;
709 skb = cl->q->dequeue(cl->q);
712 * It could occur even if cl->q->q.qlen != 0
713 * f.e. if cl->q == "tbf"
718 cl->deficit -= qdisc_pkt_len(skb);
719 q->tx_class = cl;
721 if (borrow != cl) {
724 cl->xstats.borrows++;
727 cl->xstats.borrows += qdisc_pkt_len(skb);
732 if (cl->deficit <= 0) {
733 q->active[prio] = cl;
734 cl = cl->next_alive;
735 cl->deficit += cl->quantum;
740 if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
744 cl_prev->next_alive = cl->next_alive;
745 cl->next_alive = NULL;
748 if (cl == cl_tail) {
753 if (cl == cl_tail) {
757 if (cl->q->q.qlen)
758 cbq_activate_class(cl);
764 if (cl->q->q.qlen)
765 cbq_activate_class(cl);
767 cl = cl_prev;
771 cl_prev = cl;
772 cl = cl->next_alive;
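
The cbq_dequeue_prio hits (source lines 682-772) implement a deficit round robin within one priority: a class whose deficit has dropped to zero or below is topped up by its quantum and skipped for this pass, and each dequeued packet's length is charged against the deficit. A compact userspace sketch of just that accounting over an array of queues; the types, field layout and fixed packet size are invented for the example.

    #include <stdio.h>

    struct wrr_class {
        long deficit, quantum;
        int backlog;                  /* packets waiting, each pktlen bytes */
        int pktlen;
    };

    /* One pass over the classes: recharge and skip an exhausted class,
     * otherwise send one packet and pay for it, as cbq_dequeue_prio() does. */
    static int drr_pass(struct wrr_class *cls, int n)
    {
        int sent = 0;

        for (int i = 0; i < n; i++) {
            struct wrr_class *cl = &cls[i];

            if (!cl->backlog)
                continue;
            if (cl->deficit <= 0) {           /* out of credit this round */
                cl->deficit += cl->quantum;
                continue;
            }
            cl->backlog--;                    /* "dequeue" one packet */
            cl->deficit -= cl->pktlen;
            sent++;
        }
        return sent;
    }

    int main(void)
    {
        struct wrr_class cls[2] = {
            { .deficit = 1000, .quantum = 1000, .backlog = 5, .pktlen = 600 },
            { .deficit = 3000, .quantum = 3000, .backlog = 5, .pktlen = 600 },
        };

        for (int round = 0; round < 6; round++)
            printf("round %d: sent %d\n", round, drr_pass(cls, 2));
        return 0;
    }
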
871 struct cbq_class *cl;
873 cl = this->children;
874 if (cl) {
876 if (cl->level > level)
877 level = cl->level;
878 } while ((cl = cl->sibling) != this->children);
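
cbq_adjust_levels (source lines 871-878) walks the circular sibling list of a class's children and sets the class's level to one more than the highest child level; leaves sit at level 0. The same computation over a plain array, purely illustrative.

    /* Level of a class that has children: one more than its highest child,
     * mirroring the sibling walk in cbq_adjust_levels() without the list. */
    static int parent_level(const int *child_levels, int nchildren)
    {
        int level = 0;

        for (int i = 0; i < nchildren; i++)
            if (child_levels[i] > level)
                level = child_levels[i];
        return level + 1;
    }
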
886 struct cbq_class *cl;
893 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
897 if (cl->priority == prio) {
898 cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
901 if (cl->quantum <= 0 ||
902 cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
904 cl->common.classid, cl->quantum);
905 cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
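
cbq_normalize_quanta (source lines 886-905) converts weights into per-round byte quanta: quantum = weight * allot * nclasses[prio] / quanta[prio], where quanta[prio] is the sum of the weights at that priority (maintained by cbq_addprio/cbq_rmprio further down), so quanta stay proportional to weight and average out to allot; a result outside (0, 32*MTU] is warned about and replaced with mtu/2 + 1. A worked example with assumed numbers.

    #include <stdio.h>

    int main(void)
    {
        /* Two classes at one priority, allot 1514 bytes each, weights 1 and 3:
         * q->nclasses[prio] == 2, q->quanta[prio] == 1 + 3 == 4. */
        long allot = 1514, nclasses = 2, weight_sum = 4;
        long weights[2] = { 1, 3 };

        for (int i = 0; i < 2; i++)
            printf("quantum[%d] = %ld\n", i,
                   weights[i] * allot * nclasses / weight_sum);
        /* prints 757 and 2271: proportional to weight, mean equal to allot */
        return 0;
    }
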
911 static void cbq_sync_defmap(struct cbq_class *cl)
913 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
914 struct cbq_class *split = cl->split;
922 if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
947 static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
952 split = cl->split;
959 for (split = cl->tparent; split; split = split->tparent)
967 if (cl->split != split) {
968 cl->defmap = 0;
969 cbq_sync_defmap(cl);
970 cl->split = split;
971 cl->defmap = def & mask;
973 cl->defmap = (cl->defmap & ~mask) | (def & mask);
975 cbq_sync_defmap(cl);
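
cbq_change_defmap (source lines 947-975) touches only the priority slots selected by the mask: defmap = (defmap & ~mask) | (def & mask), after which cbq_sync_defmap pushes the result into the split node's defaults[] array. A one-function numeric example of the mask arithmetic, with made-up values.

    /* Only the bits selected by 'mask' change; for example defmap 0x00f0 with
     * def 0x0003 and mask 0x000f becomes (0x00f0 & ~0x000f) | 0x0003 == 0x00f3. */
    static unsigned int update_defmap(unsigned int defmap,
                                      unsigned int def, unsigned int mask)
    {
        return (defmap & ~mask) | (def & mask);
    }
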
980 struct cbq_class *cl, **clp;
987 cl = *clp;
989 if (cl == this) {
990 *clp = cl->sibling;
993 clp = &cl->sibling;
994 } while ((cl = *clp) != this->sibling);
1029 struct cbq_class *cl;
1046 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
1047 qdisc_reset(cl->q);
1049 cl->next_alive = NULL;
1050 cl->undertime = PSCHED_PASTPERFECT;
1051 cl->avgidle = cl->maxidle;
1052 cl->deficit = cl->quantum;
1053 cl->cpriority = cl->priority;
1059 static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
1062 cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
1063 cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
1066 cl->ewma_log = lss->ewma_log;
1068 cl->avpkt = lss->avpkt;
1070 cl->minidle = -(long)lss->minidle;
1072 cl->maxidle = lss->maxidle;
1073 cl->avgidle = lss->maxidle;
1076 cl->offtime = lss->offtime;
1080 static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
1082 q->nclasses[cl->priority]--;
1083 q->quanta[cl->priority] -= cl->weight;
1084 cbq_normalize_quanta(q, cl->priority);
1087 static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
1089 q->nclasses[cl->priority]++;
1090 q->quanta[cl->priority] += cl->weight;
1091 cbq_normalize_quanta(q, cl->priority);
1094 static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
1096 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
1099 cl->allot = wrr->allot;
1101 cl->weight = wrr->weight;
1103 cl->priority = wrr->priority - 1;
1104 cl->cpriority = cl->priority;
1105 if (cl->priority >= cl->priority2)
1106 cl->priority2 = TC_CBQ_MAXPRIO - 1;
1109 cbq_addprio(q, cl);
1113 static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
1115 cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
1231 static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
1235 if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
1244 static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
1250 if (cl->borrow == NULL)
1252 if (cl->share == NULL)
1254 opt.ewma_log = cl->ewma_log;
1255 opt.level = cl->level;
1256 opt.avpkt = cl->avpkt;
1257 opt.maxidle = cl->maxidle;
1258 opt.minidle = (u32)(-cl->minidle);
1259 opt.offtime = cl->offtime;
1270 static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
1277 opt.allot = cl->allot;
1278 opt.priority = cl->priority + 1;
1279 opt.cpriority = cl->cpriority + 1;
1280 opt.weight = cl->weight;
1290 static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
1295 if (cl->split || cl->defmap) {
1296 opt.split = cl->split ? cl->split->common.classid : 0;
1297 opt.defmap = cl->defmap;
1309 static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
1311 if (cbq_dump_lss(skb, cl) < 0 ||
1312 cbq_dump_rate(skb, cl) < 0 ||
1313 cbq_dump_wrr(skb, cl) < 0 ||
1314 cbq_dump_fopt(skb, cl) < 0)
1349 struct cbq_class *cl = (struct cbq_class *)arg;
1352 if (cl->tparent)
1353 tcm->tcm_parent = cl->tparent->common.classid;
1356 tcm->tcm_handle = cl->common.classid;
1357 tcm->tcm_info = cl->q->handle;
1362 if (cbq_dump_attr(skb, cl) < 0)
1376 struct cbq_class *cl = (struct cbq_class *)arg;
1379 cl->xstats.avgidle = cl->avgidle;
1380 cl->xstats.undertime = 0;
1381 qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
1383 if (cl->undertime != PSCHED_PASTPERFECT)
1384 cl->xstats.undertime = cl->undertime - q->now;
1387 d, NULL, &cl->bstats) < 0 ||
1388 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1389 gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
1392 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1398 struct cbq_class *cl = (struct cbq_class *)arg;
1402 cl->common.classid, extack);
1407 *old = qdisc_replace(sch, new, &cl->q);
1413 struct cbq_class *cl = (struct cbq_class *)arg;
1415 return cl->q;
1420 struct cbq_class *cl = (struct cbq_class *)arg;
1422 cbq_deactivate_class(cl);
1432 static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
1436 WARN_ON(cl->filters);
1438 tcf_block_put(cl->block);
1439 qdisc_put(cl->q);
1440 qdisc_put_rtab(cl->R_tab);
1441 gen_kill_estimator(&cl->rate_est);
1442 if (cl != &q->link)
1443 kfree(cl);
1450 struct cbq_class *cl;
1462 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
1463 tcf_block_put(cl->block);
1464 cl->block = NULL;
1468 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
1470 cbq_destroy_class(sch, cl);
1481 struct cbq_class *cl = (struct cbq_class *)*arg;
1496 if (cl) {
1499 if (cl->tparent &&
1500 cl->tparent->common.classid != parentid) {
1504 if (!cl->tparent && parentid != TC_H_ROOT) {
1518 err = gen_replace_estimator(&cl->bstats, NULL,
1519 &cl->rate_est,
1533 if (cl->next_alive != NULL)
1534 cbq_deactivate_class(cl);
1537 qdisc_put_rtab(cl->R_tab);
1538 cl->R_tab = rtab;
1542 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
1545 cbq_rmprio(q, cl);
1546 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
1550 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
1552 if (cl->q->q.qlen)
1553 cbq_activate_class(cl);
1609 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1610 if (cl == NULL)
1613 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
1615 kfree(cl);
1620 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
1626 tcf_block_put(cl->block);
1627 kfree(cl);
1632 cl->R_tab = rtab;
1634 cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
1636 if (!cl->q)
1637 cl->q = &noop_qdisc;
1639 qdisc_hash_add(cl->q, true);
1641 cl->common.classid = classid;
1642 cl->tparent = parent;
1643 cl->qdisc = sch;
1644 cl->allot = parent->allot;
1645 cl->quantum = cl->allot;
1646 cl->weight = cl->R_tab->rate.rate;
1649 cbq_link_class(cl);
1650 cl->borrow = cl->tparent;
1651 if (cl->tparent != &q->link)
1652 cl->share = cl->tparent;
1654 cl->minidle = -0x7FFFFFFF;
1655 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
1656 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
1657 if (cl->ewma_log == 0)
1658 cl->ewma_log = q->link.ewma_log;
1659 if (cl->maxidle == 0)
1660 cl->maxidle = q->link.maxidle;
1661 if (cl->avpkt == 0)
1662 cl->avpkt = q->link.avpkt;
1664 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
1669 *arg = (unsigned long)cl;
1680 struct cbq_class *cl = (struct cbq_class *)arg;
1682 if (cl->filters || cl->children || cl == &q->link)
1687 qdisc_purge_queue(cl->q);
1689 if (cl->next_alive)
1690 cbq_deactivate_class(cl);
1692 if (q->tx_borrowed == cl)
1694 if (q->tx_class == cl) {
1699 if (q->rx_class == cl)
1703 cbq_unlink_class(cl);
1704 cbq_adjust_levels(cl->tparent);
1705 cl->defmap = 0;
1706 cbq_sync_defmap(cl);
1708 cbq_rmprio(q, cl);
1711 cbq_destroy_class(sch, cl);
1719 struct cbq_class *cl = (struct cbq_class *)arg;
1721 if (cl == NULL)
1722 cl = &q->link;
1724 return cl->block;
1732 struct cbq_class *cl = cbq_class_lookup(q, classid);
1734 if (cl) {
1735 if (p && p->level <= cl->level)
1737 cl->filters++;
1738 return (unsigned long)cl;
1745 struct cbq_class *cl = (struct cbq_class *)arg;
1747 cl->filters--;
1753 struct cbq_class *cl;
1760 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
1765 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {