[PATCH] make 1-bit bitfields unsigned

diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 6c6cac65255f703abf6b15d439dd10a3b7d83eb0..215e68c2b615f1f55edb544a1dbb6a47d99928a6 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -104,7 +104,7 @@ struct htb_class {
        /* topology */
        int level;              /* our level (see above) */
        struct htb_class *parent;       /* parent class */
-       struct list_head hlist; /* classid hash list item */
+       struct hlist_node hlist;        /* classid hash list item */
        struct list_head sibling;       /* sibling list item */
        struct list_head children;      /* children list */
 
@@ -163,8 +163,8 @@ static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
 
 struct htb_sched {
        struct list_head root;  /* root classes list */
-       struct list_head hash[HTB_HSIZE];       /* hashed by classid */
-       struct list_head drops[TC_HTB_NUMPRIO]; /* active leaves (for drops) */
+       struct hlist_head hash[HTB_HSIZE];      /* hashed by classid */
+       struct list_head drops[TC_HTB_NUMPRIO]; /* active leaves (for drops) */
 
        /* self list - roots of self generating tree */
        struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
@@ -220,12 +220,13 @@ static inline int htb_hash(u32 h)
 static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
 {
        struct htb_sched *q = qdisc_priv(sch);
-       struct list_head *p;
+       struct hlist_node *p;
+       struct htb_class *cl;
+
        if (TC_H_MAJ(handle) != sch->handle)
                return NULL;
 
-       list_for_each(p, q->hash + htb_hash(handle)) {
-               struct htb_class *cl = list_entry(p, struct htb_class, hlist);
+       hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
                if (cl->classid == handle)
                        return cl;
        }
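
For reference, a minimal kernel-style sketch (with a hypothetical ex_class structure and bucket count, not taken from sch_htb.c) of the list_head-to-hlist conversion applied in this hunk: an hlist_head is a single pointer, so the HTB_HSIZE bucket array shrinks to half its former size, and the four-argument hlist_for_each_entry() of this kernel era walks a bucket much as list_for_each() plus list_entry() did.

#include <linux/types.h>
#include <linux/list.h>

#define EX_HSIZE 16                             /* hypothetical bucket count */

struct ex_class {
        u32 classid;
        struct hlist_node hlist;                /* bucket linkage */
};

/* static zero-initialisation (.first == NULL) is the empty hlist state */
static struct hlist_head ex_hash[EX_HSIZE];

static struct ex_class *ex_find(u32 classid)
{
        struct ex_class *cl;
        struct hlist_node *p;                   /* cursor needed by the old 4-arg macro */

        hlist_for_each_entry(cl, p, &ex_hash[classid % EX_HSIZE], hlist) {
                if (cl->classid == classid)
                        return cl;
        }
        return NULL;
}
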
@@ -365,7 +366,7 @@ static void htb_add_to_wait_tree(struct htb_sched *q,
  * When we are past last key we return NULL.
  * Average complexity is 2 steps per call.
  */
-static void htb_next_rb_node(struct rb_node **n)
+static inline void htb_next_rb_node(struct rb_node **n)
 {
        *n = rb_next(*n);
 }
@@ -387,6 +388,18 @@ static inline void htb_add_class_to_row(struct htb_sched *q,
        }
 }
 
+/* If this triggers, it is a bug in this code, but it need not be fatal */
+static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
+{
+       if (RB_EMPTY_NODE(rb)) {
+               WARN_ON(1);
+       } else {
+               rb_erase(rb, root);
+               RB_CLEAR_NODE(rb);
+       }
+}
+
+
 /**
  * htb_remove_class_from_row - removes class from its row
  *
@@ -400,10 +413,12 @@ static inline void htb_remove_class_from_row(struct htb_sched *q,
 
        while (mask) {
                int prio = ffz(~mask);
+
                mask &= ~(1 << prio);
                if (q->ptr[cl->level][prio] == cl->node + prio)
                        htb_next_rb_node(q->ptr[cl->level] + prio);
-               rb_erase(cl->node + prio, q->row[cl->level] + prio);
+
+               htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
                if (!q->row[cl->level][prio].rb_node)
                        m |= 1 << prio;
        }
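
The htb_safe_rb_erase() wrapper introduced above only works if every rb_node starts out in, and is returned to, the "not linked" state that RB_EMPTY_NODE() tests for; the class-creation hunk further down adds the matching RB_CLEAR_NODE() calls. A minimal sketch of the idiom with a hypothetical node type, not HTB code:

#include <linux/kernel.h>
#include <linux/rbtree.h>

struct ex_node {
        struct rb_node rb;
};

static void ex_node_init(struct ex_node *n)
{
        RB_CLEAR_NODE(&n->rb);                  /* mark "not in any tree" */
}

static void ex_safe_erase(struct ex_node *n, struct rb_root *root)
{
        if (RB_EMPTY_NODE(&n->rb)) {            /* already unlinked: erasing again would corrupt the tree */
                WARN_ON(1);
                return;
        }
        rb_erase(&n->rb, root);
        RB_CLEAR_NODE(&n->rb);                  /* so a second erase is caught, not fatal */
}
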
@@ -471,7 +486,7 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
                                p->un.inner.ptr[prio] = NULL;
                        }
 
-                       rb_erase(cl->node + prio, p->un.inner.feed + prio);
+                       htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);
 
                        if (!p->un.inner.feed[prio].rb_node)
                                mask |= 1 << prio;
@@ -675,7 +690,9 @@ static void htb_rate_timer(unsigned long arg)
 {
        struct Qdisc *sch = (struct Qdisc *)arg;
        struct htb_sched *q = qdisc_priv(sch);
-       struct list_head *p;
+       struct hlist_node *p;
+       struct htb_class *cl;
+
 
        /* lock queue so that we can muck with it */
        spin_lock_bh(&sch->dev->queue_lock);
@@ -686,9 +703,8 @@ static void htb_rate_timer(unsigned long arg)
        /* scan and recompute one bucket at time */
        if (++q->recmp_bucket >= HTB_HSIZE)
                q->recmp_bucket = 0;
-       list_for_each(p, q->hash + q->recmp_bucket) {
-               struct htb_class *cl = list_entry(p, struct htb_class, hlist);
 
+       hlist_for_each_entry(cl, p, q->hash + q->recmp_bucket, hlist) {
                RT_GEN(cl->sum_bytes, cl->rate_bytes);
                RT_GEN(cl->sum_packets, cl->rate_packets);
        }
@@ -737,7 +753,7 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
                htb_change_class_mode(q, cl, &diff);
                if (old_mode != cl->cmode) {
                        if (old_mode != HTB_CAN_SEND)
-                               rb_erase(&cl->pq_node, q->wait_pq + cl->level);
+                               htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
                        if (cl->cmode != HTB_CAN_SEND)
                                htb_add_to_wait_tree(q, cl, diff);
                }
@@ -770,17 +786,16 @@ static long htb_do_events(struct htb_sched *q, int level)
        for (i = 0; i < 500; i++) {
                struct htb_class *cl;
                long diff;
-               struct rb_node *p = q->wait_pq[level].rb_node;
+               struct rb_node *p = rb_first(&q->wait_pq[level]);
+
                if (!p)
                        return 0;
-               while (p->rb_left)
-                       p = p->rb_left;
 
                cl = rb_entry(p, struct htb_class, pq_node);
                if (time_after(cl->pq_key, q->jiffies)) {
                        return cl->pq_key - q->jiffies;
                }
-               rb_erase(p, q->wait_pq + level);
+               htb_safe_rb_erase(p, q->wait_pq + level);
                diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
                htb_change_class_mode(q, cl, &diff);
                if (cl->cmode != HTB_CAN_SEND)
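
As a side note, the open-coded "walk rb_left until NULL" loop removed above is exactly what rb_first() computes. A minimal sketch (hypothetical ex_event type) of sorted traversal with rb_first()/rb_next():

#include <linux/rbtree.h>

struct ex_event {
        unsigned long key;
        struct rb_node node;
};

static void ex_walk(struct rb_root *root)
{
        struct rb_node *p;

        /* rb_first() returns the leftmost (smallest-key) node; rb_next()
         * continues the in-order traversal. */
        for (p = rb_first(root); p; p = rb_next(p)) {
                struct ex_event *e = rb_entry(p, struct ex_event, node);

                (void)e;                        /* events come out in ascending e->key order */
        }
}
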
@@ -1041,10 +1056,10 @@ static void htb_reset(struct Qdisc *sch)
        int i;
 
        for (i = 0; i < HTB_HSIZE; i++) {
-               struct list_head *p;
-               list_for_each(p, q->hash + i) {
-                       struct htb_class *cl =
-                           list_entry(p, struct htb_class, hlist);
+               struct hlist_node *p;
+               struct htb_class *cl;
+
+               hlist_for_each_entry(cl, p, q->hash + i, hlist) {
                        if (cl->level)
                                memset(&cl->un.inner, 0, sizeof(cl->un.inner));
                        else {
@@ -1091,7 +1106,7 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
 
        INIT_LIST_HEAD(&q->root);
        for (i = 0; i < HTB_HSIZE; i++)
-               INIT_LIST_HEAD(q->hash + i);
+               INIT_HLIST_HEAD(q->hash + i);
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops + i);
 
@@ -1208,17 +1223,14 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
        struct htb_class *cl = (struct htb_class *)arg;
 
        if (cl && !cl->level) {
-               if (new == NULL && (new = qdisc_create_dflt(sch->dev,
-                                                           &pfifo_qdisc_ops))
+               if (new == NULL &&
+                   (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+                                            cl->classid))
                    == NULL)
                        return -ENOBUFS;
                sch_tree_lock(sch);
                if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
-                       if (cl->prio_activity)
-                               htb_deactivate(qdisc_priv(sch), cl);
-
-                       /* TODO: is it correct ? Why CBQ doesn't do it ? */
-                       sch->q.qlen -= (*old)->q.qlen;
+                       qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
                        qdisc_reset(*old);
                }
                sch_tree_unlock(sch);
@@ -1233,6 +1245,14 @@ static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
        return (cl && !cl->level) ? cl->un.leaf.q : NULL;
 }
 
+static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
+{
+       struct htb_class *cl = (struct htb_class *)arg;
+
+       if (cl->un.leaf.q->q.qlen == 0)
+               htb_deactivate(qdisc_priv(sch), cl);
+}
+
 static unsigned long htb_get(struct Qdisc *sch, u32 classid)
 {
        struct htb_class *cl = htb_find(classid, sch);
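
The htb_qlen_notify() callback added above is driven by qdisc_tree_decrease_qlen(), which the earlier htb_graft() hunk now calls instead of adjusting sch->q.qlen by hand. Only as a simplified sketch (this is not the actual sch_api.c implementation, and error handling is largely elided): such a helper walks up the qdisc hierarchy, tells each parent class that its child lost packets, and corrects the ancestors' queue-length counters, letting HTB deactivate a leaf that has become empty.

#include <net/pkt_sched.h>

static void ex_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
{
        struct Qdisc_class_ops *cops;
        unsigned long cl;
        u32 parentid;

        while ((parentid = sch->parent) != 0) {
                sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));   /* parent qdisc */
                if (sch == NULL)
                        break;
                cops = sch->ops->cl_ops;
                if (cops && cops->qlen_notify) {
                        cl = cops->get(sch, parentid);              /* class owning the child */
                        cops->qlen_notify(sch, cl);                 /* e.g. htb_qlen_notify() */
                        cops->put(sch, cl);
                }
                sch->q.qlen -= n;
        }
}
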
@@ -1254,9 +1274,9 @@ static void htb_destroy_filters(struct tcf_proto **fl)
 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 {
        struct htb_sched *q = qdisc_priv(sch);
+
        if (!cl->level) {
                BUG_TRAP(cl->un.leaf.q);
-               sch->q.qlen -= cl->un.leaf.q->q.qlen;
                qdisc_destroy(cl->un.leaf.q);
        }
        qdisc_put_rtab(cl->rate);
@@ -1269,14 +1289,14 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
                                                  struct htb_class, sibling));
 
        /* note: this delete may happen twice (see htb_delete) */
-       list_del(&cl->hlist);
+       hlist_del_init(&cl->hlist);
        list_del(&cl->sibling);
 
        if (cl->prio_activity)
                htb_deactivate(q, cl);
 
        if (cl->cmode != HTB_CAN_SEND)
-               rb_erase(&cl->pq_node, q->wait_pq + cl->level);
+               htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
 
        kfree(cl);
 }
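
The "may happen twice" comment above is why the patch uses hlist_del_init() rather than plain hlist_del(): after the first call the node is re-initialised to the unhashed state, so a second call is a harmless no-op instead of touching poisoned pointers. A small sketch with a hypothetical ex_entry type:

#include <linux/list.h>

struct ex_entry {
        struct hlist_node hlist;
};

static void ex_unhash(struct ex_entry *e)
{
        /* hlist_del_init() checks hlist_unhashed() first and re-initialises
         * the node afterwards, so reaching this from both htb_delete() and
         * htb_destroy_class() is safe. */
        hlist_del_init(&e->hlist);
}
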
@@ -1307,6 +1327,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 {
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = (struct htb_class *)arg;
+       unsigned int qlen;
 
        // TODO: why don't allow to delete subtree ? references ? does
        // tc subsys quarantee us that in htb_destroy it holds no class
@@ -1317,7 +1338,14 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
        sch_tree_lock(sch);
 
        /* delete from hash and active; remainder in destroy_class */
-       list_del_init(&cl->hlist);
+       hlist_del_init(&cl->hlist);
+
+       if (!cl->level) {
+               qlen = cl->un.leaf.q->q.qlen;
+               qdisc_reset(cl->un.leaf.q);
+               qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
+       }
+
        if (cl->prio_activity)
                htb_deactivate(q, cl);
 
@@ -1365,6 +1393,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 
        if (!cl) {              /* new class */
                struct Qdisc *new_q;
+               int prio;
+
                /* check for valid classid */
                if (!classid || TC_H_MAJ(classid ^ sch->handle)
                    || htb_find(classid, sch))
@@ -1381,25 +1411,32 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 
                cl->refcnt = 1;
                INIT_LIST_HEAD(&cl->sibling);
-               INIT_LIST_HEAD(&cl->hlist);
+               INIT_HLIST_NODE(&cl->hlist);
                INIT_LIST_HEAD(&cl->children);
                INIT_LIST_HEAD(&cl->un.leaf.drop_list);
+               RB_CLEAR_NODE(&cl->pq_node);
+
+               for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
+                       RB_CLEAR_NODE(&cl->node[prio]);
 
                /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
                   so that can't be used inside of sch_tree_lock
                   -- thanks to Karlis Peisenieks */
-               new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+               new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
                sch_tree_lock(sch);
                if (parent && !parent->level) {
+                       unsigned int qlen = parent->un.leaf.q->q.qlen;
+
                        /* turn parent into inner node */
-                       sch->q.qlen -= parent->un.leaf.q->q.qlen;
+                       qdisc_reset(parent->un.leaf.q);
+                       qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
                        qdisc_destroy(parent->un.leaf.q);
                        if (parent->prio_activity)
                                htb_deactivate(q, parent);
 
                        /* remove from evt list because of level change */
                        if (parent->cmode != HTB_CAN_SEND) {
-                               rb_erase(&parent->pq_node, q->wait_pq);
+                               htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
                                parent->cmode = HTB_CAN_SEND;
                        }
                        parent->level = (parent->parent ? parent->parent->level
@@ -1420,7 +1457,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                cl->cmode = HTB_CAN_SEND;
 
                /* attach to the hash list and parent's family */
-               list_add_tail(&cl->hlist, q->hash + htb_hash(classid));
+               hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
                list_add_tail(&cl->sibling,
                              parent ? &parent->children : &q->root);
        } else
@@ -1520,10 +1557,10 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
                return;
 
        for (i = 0; i < HTB_HSIZE; i++) {
-               struct list_head *p;
-               list_for_each(p, q->hash + i) {
-                       struct htb_class *cl =
-                           list_entry(p, struct htb_class, hlist);
+               struct hlist_node *p;
+               struct htb_class *cl;
+
+               hlist_for_each_entry(cl, p, q->hash + i, hlist) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
@@ -1540,6 +1577,7 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 static struct Qdisc_class_ops htb_class_ops = {
        .graft          =       htb_graft,
        .leaf           =       htb_leaf,
+       .qlen_notify    =       htb_qlen_notify,
        .get            =       htb_get,
        .put            =       htb_put,
        .change         =       htb_change_class,