__QDISC_STATE_SCHED,
};
+struct qdisc_size_table {
+ struct list_head list;
+ struct tc_sizespec szopts;
+ int refcnt;
+ u16 data[];
+};
+
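The size table maps a packet's true length to a link-layer-adjusted
length (e.g. ATM cell framing), which then replaces skb->len for
accounting. A minimal sketch of the lookup, simplified from
qdisc_calculate_pkt_len() in net/sched/sch_api.c; the upstream
handling of slots beyond tsize is omitted here:

/* Sketch: derive the adjusted packet length from the size table.
 * Simplified; out-of-range slots are not handled as upstream does. */
static void stab_lookup_sketch(struct sk_buff *skb,
			       struct qdisc_size_table *stab)
{
	int pkt_len = skb->len + stab->szopts.overhead;
	int slot = (pkt_len + stab->szopts.cell_align) >> stab->szopts.cell_log;

	if (slot >= 0 && slot < stab->szopts.tsize)
		pkt_len = stab->data[slot];

	qdisc_skb_cb(skb)->pkt_len = pkt_len > 0 ? pkt_len : 1;
}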
struct Qdisc
{
int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
#define TCQ_F_INGRESS 4
int padded;
struct Qdisc_ops *ops;
+ struct qdisc_size_table *stab;
u32 handle;
u32 parent;
atomic_t refcnt;
int (*reshape_fail)(struct sk_buff *skb,
struct Qdisc *q);
+ void *u32_node;
+
/* This field is deprecated, but it is still used by CBQ
 * and it will live until a better solution is invented.
*/
struct Qdisc *__parent;
};
+struct qdisc_skb_cb {
+ unsigned int pkt_len;
+ char data[];
+};
+
+static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
+{
+ return (struct qdisc_skb_cb *)skb->cb;
+}
+
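qdisc_skb_cb() overlays the skb->cb[] scratch area, and the data[]
tail is where an individual qdisc can keep private per-packet state,
as long as it still fits within sizeof(skb->cb). A hedged sketch of
that pattern; my_skb_cb is illustrative, not part of the patch:

/* Sketch: qdisc-private per-packet state carved out of the data[]
 * tail.  'my_skb_cb' is hypothetical. */
struct my_skb_cb {
	u32 class_id;
};

static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
{
	return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
}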
static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}
+/* The qdisc root lock is a mechanism by which the top level
+ * of a qdisc tree can be locked from any qdisc node in the
+ * forest. This allows changing the configuration of some
+ * aspect of the qdisc tree while blocking out asynchronous
+ * qdisc access in the packet processing paths.
+ *
+ * It is only legal to do this when the root will not change
+ * on us. Otherwise we'll potentially lock the wrong qdisc
+ * root. This is enforced by holding the RTNL semaphore, which
+ * all users of this lock accessor must do.
+ */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
struct Qdisc *root = qdisc_root(qdisc);
+ ASSERT_RTNL();
return qdisc_lock(root);
}
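A hypothetical caller, showing the pattern the comment above
describes; example_reconfigure() is illustrative, not from the patch:

/* Sketch: serialize a configuration change against the packet
 * processing paths.  example_reconfigure() is hypothetical. */
static void example_reconfigure(struct Qdisc *q)
{
	spinlock_t *root_lock;

	ASSERT_RTNL();			/* the accessor requires RTNL */
	root_lock = qdisc_root_lock(q);

	spin_lock_bh(root_lock);	/* blocks enqueue/dequeue paths */
	/* ... modify the qdisc tree's configuration ... */
	spin_unlock_bh(root_lock);
}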
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
struct netdev_queue *dev_queue,
struct Qdisc_ops *ops, u32 parentid);
+extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
+ struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);
return true;
}
+static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
+{
+ return qdisc_skb_cb(skb)->pkt_len;
+}
+
+/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
+enum net_xmit_qdisc_t {
+ __NET_XMIT_STOLEN = 0x00010000,
+ __NET_XMIT_BYPASS = 0x00020000,
+};
+
+#ifdef CONFIG_NET_CLS_ACT
+#define net_xmit_drop_count(e) ((e) & __NET_XMIT_STOLEN ? 0 : 1)
+#else
+#define net_xmit_drop_count(e) (1)
+#endif
+
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+#ifdef CONFIG_NET_SCHED
+ if (sch->stab)
+ qdisc_calculate_pkt_len(skb, sch->stab);
+#endif
+ return sch->enqueue(skb, sch);
+}
+
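The STOLEN bit matters to classful qdiscs accounting a child's
enqueue result: a packet consumed by an action was not lost, so it
must not bump the drop counter. A sketch of the expected caller
pattern; parent_enqueue_sketch() and 'child' are illustrative:

/* Sketch: propagate a child's enqueue status without counting
 * stolen packets as drops. */
static int parent_enqueue_sketch(struct sk_buff *skb, struct Qdisc *sch,
				 struct Qdisc *child)
{
	int ret = qdisc_enqueue(skb, child);

	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))	/* stolen != dropped */
			sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}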
+static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
+{
+ qdisc_skb_cb(skb)->pkt_len = skb->len;
+ return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
+}
+
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff_head *list)
{
__skb_queue_tail(list, skb);
- sch->qstats.backlog += skb->len;
- sch->bstats.bytes += skb->len;
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+ sch->bstats.bytes += qdisc_pkt_len(skb);
sch->bstats.packets++;
	return NET_XMIT_SUCCESS;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);
if (likely(skb != NULL))
- sch->qstats.backlog -= skb->len;
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
return skb;
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);
if (likely(skb != NULL))
- sch->qstats.backlog -= skb->len;
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
return skb;
}

static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
				  struct sk_buff_head *list)
{
__skb_queue_head(list, skb);
- sch->qstats.backlog += skb->len;
+ sch->qstats.backlog += qdisc_pkt_len(skb);
sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list, it
* is up to the caller to correct it
*/
- skb_queue_purge(list);
+ __skb_queue_purge(list);
}
static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);
if (likely(skb != NULL)) {
- unsigned int len = skb->len;
+ unsigned int len = qdisc_pkt_len(skb);
kfree_skb(skb);
return len;
	}

	return 0;
}