/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *		991129:	- Bug fix with grio mode
 *			- a better single AvgQ mode with Grio (WRED)
 *			- A finer grained VQ dequeue based on suggestions
 *
 * For all the glorious comments look at include/net/red.h
 */
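/* Illustrative userspace configuration. The flags below follow iproute2's
 * tc-gred syntax as best recalled and may differ between versions; consult
 * the tc-gred documentation before use.
 *
 *	# size the table: 4 DPs, DP 0 is the default, RIO ("grio") mode on
 *	tc qdisc add dev eth0 root gred setup DPs 4 default 0 grio
 *
 *	# program one virtual queue
 *	tc qdisc change dev eth0 root gred DP 0 limit 60KB min 15KB max 45KB \
 *		avpkt 1000 burst 20 bandwidth 10Mbit probability 0.02 prio 2
 */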
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>
#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)
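/* Overview: every packet is mapped to one of up to MAX_DPs "virtual queues"
 * (VQs, also called DPs) through the low bits of skb->tc_index, and each VQ
 * runs its own RED instance on top of the qdisc's single FIFO.  With grio
 * enabled (RIO mode) a VQ's drop decision also adds in the average queue
 * lengths of VQs with a numerically lower prio; if two or more VQs end up
 * sharing the same prio the qdisc further switches to WRED mode, where one
 * average queue length (wred_set) is shared by all VQs. */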
struct gred_sched_data;

struct gred_sched_data {
	u32	limit;		/* HARD maximal queue length */
	u32	DP;		/* the drop parameters (virtual queue number) */
	u32	bytesin;	/* bytes seen on virtualQ so far */
	u32	packetsin;	/* packets seen on virtualQ so far */
	u32	backlog;	/* bytes on the virtualQ */
	u8	prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_stats stats;
};

enum { GRED_WRED_MODE = 1, GRED_RIO_MODE };

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		DPs;	/* number of configured DPs */
	u32		def;	/* default DP */
	struct red_parms wred_set;
};
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}
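/* Returns 1 iff at least two distinct VQs share the same prio; callers use
 * this to decide whether the qdisc should run in WRED mode. */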
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i, n;

	/* Really ugly O(n^2), but it shouldn't be needed too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		if (q == NULL)
			continue;

		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}
static inline void gred_load_wred_set(struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->parms.qavg = table->wred_set.qavg;
	q->parms.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->parms.qavg;
}
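/* Enqueue: map the skb to its VQ (falling back to the default DP), update the
 * RED average queue length (shared through wred_set in WRED mode, biased by
 * lower-prio VQs in RIO mode) and then either queue the packet, early-drop it
 * (prob_drop/forced_drop) or tail-drop it once the hard per-VQ byte limit is
 * exceeded. */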
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		if ((q = t->tab[dp]) == NULL) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* Fix tc_index? Could be controversial but needed for
		 * requeueing. */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* Sum up the qavgs of all prios below ours to get the new qavg. */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->parms))
				qavg += t->tab[i]->parms.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += skb->len;

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		q->stats.prob_drop++;
		goto congestion_drop;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		q->stats.forced_drop++;
		goto congestion_drop;
	}

	if (q->backlog + skb->len <= q->limit) {
		q->backlog += skb->len;
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
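/* Requeue puts the skb back at the head of the FIFO; the only GRED-specific
 * work is re-adding its length to the owning VQ's backlog, which is why
 * gred_enqueue rewrites tc_index for packets that fell back to the default
 * DP. */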
static int gred_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched *t = qdisc_priv(sch);
	struct gred_sched_data *q;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		if (net_ratelimit())
			printk(KERN_WARNING "GRED: Unable to relocate VQ 0x%x "
			       "for requeue, screwing up backlog.\n",
			       tc_index_to_dp(skb));
	} else {
		if (red_is_idling(&q->parms))
			red_end_of_idle_period(&q->parms);
		q->backlog += skb->len;
	}

	return qdisc_requeue(skb, sch);
}
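/* Dequeue only has to keep the per-VQ byte backlog in sync and start the RED
 * idle period once a VQ - or, in WRED mode, the whole qdisc - runs empty;
 * the actual packet queue is the qdisc's single FIFO. */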
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);
	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				printk(KERN_WARNING "GRED: Unable to relocate "
				       "VQ 0x%x after dequeue, screwing up "
				       "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= skb->len;
			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}
		return skb;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return NULL;
}
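/* ->drop() reclaims space by discarding from the tail of the FIFO, adjusting
 * the victim VQ's backlog and accounting the packet in the "other" drop
 * counter. */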
static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = skb->len;
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				printk(KERN_WARNING "GRED: Unable to relocate "
				       "VQ 0x%x while dropping, screwing up "
				       "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= len;
			q->stats.other++;
			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}
		qdisc_drop(skb, sch);
		return len;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return 0;
}
static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;
		red_restart(&q->parms);
		q->backlog = 0;
	}
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}
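/* Configuration is a two step process: a TCA_GRED_DPS "setup" message first
 * sizes the table (number of DPs, default DP, grio on/off), after which each
 * individual VQ is programmed with a TCA_GRED_PARMS/TCA_GRED_STAB pair via
 * gred_change(). */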
static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt))
		return -EINVAL;

	sopt = RTA_DATA(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			printk(KERN_WARNING "GRED: Warning: Destroying "
			       "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}
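/* Allocate a VQ on first use and (re)program its RED parameters; stab is the
 * 256 byte lookup table handed through to red_set_parms(). */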
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;

	if (table->tab[dp] == NULL) {
		table->tab[dp] = kmalloc(sizeof(*q), GFP_KERNEL);
		if (table->tab[dp] == NULL)
			return -ENOMEM;
		memset(table->tab[dp], 0, sizeof(*q));
	}

	q = table->tab[dp];
	q->DP = dp;
	q->prio = prio;
	q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->parms);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab);

	return 0;
}
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct rtattr *tb[TCA_GRED_MAX];
	int err = -EINVAL, prio = GRED_DEF_PRIO;
	u8 *stab;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_GRED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
	stab = RTA_DATA(tb[TCA_GRED_STAB-1]);

	if (ctl->DP >= table->DPs)
		return -EINVAL;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
	if (err < 0)
		goto errout_locked;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
	return err;
}
static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct rtattr *tb[TCA_GRED_MAX];

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);
}
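/* Dump all MAX_DPs slots so userspace always sees a fixed size array; empty
 * slots are marked by reporting a DP value of MAX_DPs or above (see the hack
 * below). */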
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct rtattr *parms, *opts = NULL;
	int i;
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
	parms = RTA_NEST(skb, TCA_GRED_PARMS);

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message.
			 * This is how we indicate to tc that there is no VQ
			 * at this DP. */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table)) {
			q->parms.qidlestart =
				table->tab[table->def]->parms.qidlestart;
			q->parms.qavg = table->tab[table->def]->parms.qavg;
		}

		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);

append_opt:
		RTA_APPEND(skb, sizeof(opt), &opt);
	}

	RTA_NEST_END(skb, parms);

	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}
static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}
static struct Qdisc_ops gred_qdisc_ops = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.requeue	=	gred_requeue,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};
static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");