 static int
 pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
 	struct sk_buff_head *list = qdisc_priv(qdisc);
 
 	list += prio2band[skb->priority&TC_PRIO_MAX];
 
-	if (list->qlen < qdisc->dev->tx_queue_len) {
-		__skb_queue_tail(list, skb);
+	if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
 		qdisc->q.qlen++;
-		qdisc->bstats.bytes += skb->len;
-		qdisc->bstats.packets++;
-		return 0;
+		return __qdisc_enqueue_tail(skb, qdisc, list);
 	}
-	qdisc->qstats.drops++;
-	kfree_skb(skb);
-	return NET_XMIT_DROP;
+
+	return qdisc_drop(skb, qdisc);
 }
 
 static struct sk_buff *
 pfifo_fast_dequeue(struct Qdisc* qdisc)
 {
 	int prio;
 	struct sk_buff_head *list = qdisc_priv(qdisc);
-	struct sk_buff *skb;
 
 	for (prio = 0; prio < 3; prio++, list++) {
-		skb = __skb_dequeue(list);
+		struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);
 		if (skb) {
 			qdisc->q.qlen--;
 			return skb;
 		}
 	}
 
 	return NULL;
 }
 
 static int
 pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
 	struct sk_buff_head *list = qdisc_priv(qdisc);
 
 	list += prio2band[skb->priority&TC_PRIO_MAX];
 
-	__skb_queue_head(list, skb);
 	qdisc->q.qlen++;
-	qdisc->qstats.requeues++;
-	return 0;
+	return __qdisc_requeue(skb, qdisc, list);
 }
 
 static void
 pfifo_fast_reset(struct Qdisc* qdisc)
 {
 	int prio;
 	struct sk_buff_head *list = qdisc_priv(qdisc);
 
 	for (prio=0; prio < 3; prio++)
-		skb_queue_purge(list+prio);
+		__qdisc_reset_queue(qdisc, list + prio);
+
+	qdisc->qstats.backlog = 0;
 	qdisc->q.qlen = 0;
 }
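
For reference, the __qdisc_*() and qdisc_drop() calls this patch switches to are the generic queue-management helpers from include/net/sch_generic.h. The bodies below are a sketch reconstructed for illustration, not part of this patch; they show where the bstats/qstats bookkeeping that used to be open-coded in pfifo_fast is assumed to live now:

/* Sketch of the generic helpers (assumed to match include/net/sch_generic.h
 * from the same series; signatures inferred from the call sites above). */

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += skb->len;	/* byte backlog tracked centrally */
	sch->bstats.bytes += skb->len;		/* replaces the open-coded bstats updates */
	sch->bstats.packets++;

	return NET_XMIT_SUCCESS;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (skb != NULL)
		sch->qstats.backlog -= skb->len;

	return skb;
}

static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
				  struct sk_buff_head *list)
{
	__skb_queue_head(list, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;		/* replaces the open-coded requeues++ */

	return NET_XMIT_SUCCESS;
}

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/* Purges the list only; the caller is expected to fix up
	 * qstats.backlog itself, which is why pfifo_fast_reset()
	 * now zeroes it explicitly after the loop. */
	skb_queue_purge(list);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;	/* replaces the open-coded drops++ / kfree_skb() */

	return NET_XMIT_DROP;
}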