net_sched: Add qdisc __NET_XMIT_BYPASS flag
author     Jarek Poplawski <jarkao2@gmail.com>
           Tue, 5 Aug 2008 05:39:11 +0000 (22:39 -0700)
committer  David S. Miller <davem@davemloft.net>
           Tue, 5 Aug 2008 05:39:11 +0000 (22:39 -0700)
Patrick McHardy <kaber@trash.net> noticed that it would be nice to
handle NET_XMIT_BYPASS as NET_XMIT_SUCCESS with an internal qdisc flag,
__NET_XMIT_BYPASS, and to remove the mapping from dev_queue_xmit().

David Miller <davem@davemloft.net> spotted a serious bug in the first
version of this patch.
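
A minimal user-space sketch of the resulting return-code scheme (plain C;
toy_enqueue() and toy_dev_queue_xmit() are hypothetical stand-ins rather
than kernel functions, the __NET_XMIT_* values mirror this patch, and
NET_XMIT_MASK is assumed to be 0xffff per the comment added to
sch_generic.h referencing linux/netdevice.h):

    #include <stdio.h>

    /* Public return codes visible to callers of dev_queue_xmit(). */
    #define NET_XMIT_SUCCESS    0x00
    #define NET_XMIT_DROP       0x01
    #define NET_XMIT_MASK       0xffff      /* assumed value; strips qdisc-internal bits */

    /* Internal qdisc flags from this patch; they sit above NET_XMIT_MASK. */
    #define __NET_XMIT_STOLEN   0x00010000
    #define __NET_XMIT_BYPASS   0x00020000

    /* Hypothetical qdisc enqueue: a bypassed packet is now reported as
     * success plus the internal flag, instead of the old NET_XMIT_BYPASS. */
    static int toy_enqueue(int bypass)
    {
            return bypass ? NET_XMIT_SUCCESS | __NET_XMIT_BYPASS : NET_XMIT_SUCCESS;
    }

    /* Hypothetical caller: masking with NET_XMIT_MASK keeps the internal
     * flags from leaking, so the explicit NET_XMIT_BYPASS -> NET_XMIT_SUCCESS
     * mapping removed from net/core/dev.c is no longer needed. */
    static int toy_dev_queue_xmit(int bypass)
    {
            return toy_enqueue(bypass) & NET_XMIT_MASK;
    }

    int main(void)
    {
            int ret = toy_enqueue(1);

            /* Qdisc side: the sch_*.c call sites now test the flag with '&'. */
            if (ret & __NET_XMIT_BYPASS)
                    printf("qdisc side: bypass flag set, counted as a drop\n");

            /* Caller side: only NET_XMIT_SUCCESS (0) is visible after masking. */
            printf("caller side: bypass rc = %d\n", toy_dev_queue_xmit(1));
            printf("caller side: normal rc = %d\n", toy_dev_queue_xmit(0));
            return 0;
    }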

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/sch_generic.h
net/core/dev.c
net/sched/sch_atm.c
net/sched/sch_cbq.c
net/sched/sch_dsmark.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_netem.c
net/sched/sch_prio.c
net/sched/sch_sfq.c

diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index f15b045a85e95d300514757a18fa2fbb67c80df9..a7abfda3e447896a0be5b06ed1809aa6e0b185fb 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -343,14 +343,14 @@ static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
        return qdisc_skb_cb(skb)->pkt_len;
 }
 
-#ifdef CONFIG_NET_CLS_ACT
-/* additional qdisc xmit flags */
+/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
 enum net_xmit_qdisc_t {
        __NET_XMIT_STOLEN = 0x00010000,
+       __NET_XMIT_BYPASS = 0x00020000,
 };
 
+#ifdef CONFIG_NET_CLS_ACT
 #define net_xmit_drop_count(e) ((e) & __NET_XMIT_STOLEN ? 0 : 1)
-
 #else
 #define net_xmit_drop_count(e) (1)
 #endif
diff --git a/net/core/dev.c b/net/core/dev.c
index fc6c9881eca8faa8ca5283cb3efb5df16991796b..01993ad74e760235267e0ade106bdcac16ae6890 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1805,7 +1805,6 @@ gso:
 
                spin_unlock(root_lock);
 
-               rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
                goto out;
        }
 
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 27dd773481bc10a641149b5184072aca3670c28d..43d37256c15e3864655364ee96822a834266b89e 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -457,7 +457,7 @@ drop: __maybe_unused
                return 0;
        }
        tasklet_schedule(&p->task);
-       return NET_XMIT_BYPASS;
+       return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 /*
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 765ae5659000affa3e4a204cfc7dbd27c4bedd93..4e261ce62f48071dc3a0569cbfd0d5915a9577dc 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -230,7 +230,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
            (cl = cbq_class_lookup(q, prio)) != NULL)
                return cl;
 
-       *qerr = NET_XMIT_BYPASS;
+       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        for (;;) {
                int result = 0;
                defmap = head->defaults;
@@ -377,7 +377,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        q->rx_class = cl;
 #endif
        if (cl == NULL) {
-               if (ret == NET_XMIT_BYPASS)
+               if (ret & __NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return ret;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 7170275d9f9939927bfe398cd8934d98ce8ba94a..edd1298f85f673a975a87787f6d75bb1c5e6939f 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -268,7 +268,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 drop:
        kfree_skb(skb);
        sch->qstats.drops++;
-       return NET_XMIT_BYPASS;
+       return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 5cf9ae716118fc30666e2846f22b837392990d48..c2b8d9cce3d27175376f425cea18c9d86b971623 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1159,7 +1159,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                if (cl->level == 0)
                        return cl;
 
-       *qerr = NET_XMIT_BYPASS;
+       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        tcf = q->root.filter_list;
        while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
@@ -1578,7 +1578,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
        cl = hfsc_classify(skb, sch, &err);
        if (cl == NULL) {
-               if (err == NET_XMIT_BYPASS)
+               if (err & __NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return err;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 538d79b489ae0ec680ce3d509a94f70741b365f3..be35422711a35aef3c3b6dfcc007557d89c168e6 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -214,7 +214,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
        if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
                return cl;
 
-       *qerr = NET_XMIT_BYPASS;
+       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        tcf = q->filter_list;
        while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
@@ -567,7 +567,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                }
 #ifdef CONFIG_NET_CLS_ACT
        } else if (!cl) {
-               if (ret == NET_XMIT_BYPASS)
+               if (ret & __NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return ret;
@@ -612,7 +612,7 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
                }
 #ifdef CONFIG_NET_CLS_ACT
        } else if (!cl) {
-               if (ret == NET_XMIT_BYPASS)
+               if (ret & __NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return ret;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 6cd6f2bc749e7e29a87905fc43018484f72a4ede..fb0294d0b55e6e71ed87c9b7e1206e755ad5561b 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -176,7 +176,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        if (count == 0) {
                sch->qstats.drops++;
                kfree_skb(skb);
-               return NET_XMIT_BYPASS;
+               return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        }
 
        skb_orphan(skb);
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index adb1a52b77d3872ffa9dd77c31ef34fef231b1fc..eac197610edf28b6b7bb019769c260d9f723fbe9 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -38,7 +38,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
        struct tcf_result res;
        int err;
 
-       *qerr = NET_XMIT_BYPASS;
+       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        if (TC_H_MAJ(skb->priority) != sch->handle) {
                err = tc_classify(skb, q->filter_list, &res);
 #ifdef CONFIG_NET_CLS_ACT
@@ -74,7 +74,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #ifdef CONFIG_NET_CLS_ACT
        if (qdisc == NULL) {
 
-               if (ret == NET_XMIT_BYPASS)
+               if (ret & __NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return ret;
@@ -103,7 +103,7 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
        qdisc = prio_classify(skb, sch, &ret);
 #ifdef CONFIG_NET_CLS_ACT
        if (qdisc == NULL) {
-               if (ret == NET_XMIT_BYPASS)
+               if (ret & __NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return ret;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 3a456e1b829ab3298bd8b51a9ef5ee82f473d35d..6e041d10dbdb944c1f79d2fd24194e35d66f5ba0 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -171,7 +171,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
        if (!q->filter_list)
                return sfq_hash(q, skb) + 1;
 
-       *qerr = NET_XMIT_BYPASS;
+       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tc_classify(skb, q->filter_list, &res);
        if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
@@ -285,7 +285,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
        hash = sfq_classify(skb, sch, &ret);
        if (hash == 0) {
-               if (ret == NET_XMIT_BYPASS)
+               if (ret & __NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return ret;
@@ -339,7 +339,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
        hash = sfq_classify(skb, sch, &ret);
        if (hash == 0) {
-               if (ret == NET_XMIT_BYPASS)
+               if (ret & __NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return ret;