Optimize teql_enqueue so that it first checks limits before enqueueing.

This avoids adding the skb to the queue only to unlink it again on the
drop path. Because the length check now runs before __skb_queue_tail(),
the comparison changes from "qlen <= tx_queue_len" to the equivalent
"qlen < tx_queue_len".
Signed-off-by: Krishna Kumar <krkumar2@in.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 	struct net_device *dev = sch->dev;
 	struct teql_sched_data *q = qdisc_priv(sch);
 
-	__skb_queue_tail(&q->q, skb);
-	if (q->q.qlen <= dev->tx_queue_len) {
+	if (q->q.qlen < dev->tx_queue_len) {
+		__skb_queue_tail(&q->q, skb);
 		sch->bstats.bytes += skb->len;
 		sch->bstats.packets++;
 		return 0;
 	}
-	__skb_unlink(skb, &q->q);
 	kfree_skb(skb);
 	sch->qstats.drops++;
 	return NET_XMIT_DROP;
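
For illustration only, the short userspace program below (hypothetical
names; not kernel code) checks that the new pre-enqueue test
"qlen < tx_queue_len" admits exactly the same packets as the old
post-enqueue test "qlen <= tx_queue_len". A plain counter stands in for
the skb list: enqueue_then_check() models the old path and
check_then_enqueue() the new one.

#include <stdio.h>
#include <stdbool.h>

struct queue {
	unsigned int qlen;	/* current depth, stands in for q->q.qlen */
	unsigned int limit;	/* capacity, stands in for dev->tx_queue_len */
};

/* Old logic: enqueue first, test "qlen <= limit", unlink on failure. */
static bool enqueue_then_check(struct queue *q)
{
	q->qlen++;			/* models __skb_queue_tail() */
	if (q->qlen <= q->limit)
		return true;		/* accepted */
	q->qlen--;			/* models __skb_unlink() on the drop path */
	return false;			/* dropped */
}

/* New logic: test "qlen < limit" first; touch the queue only on success. */
static bool check_then_enqueue(struct queue *q)
{
	if (q->qlen < q->limit) {
		q->qlen++;		/* models __skb_queue_tail() */
		return true;
	}
	return false;			/* dropped without ever being queued */
}

int main(void)
{
	for (unsigned int limit = 0; limit <= 3; limit++) {
		struct queue a = { 0, limit }, b = { 0, limit };
		for (int i = 0; i < 5; i++)
			if (enqueue_then_check(&a) != check_then_enqueue(&b))
				printf("mismatch at limit=%u, packet %d\n",
				       limit, i);
		printf("limit=%u: both variants queued %u packets\n",
		       limit, a.qlen);
	}
	return 0;
}

Both variants accept the same packets, but the new ordering never touches
the queue on the drop path, saving the __skb_queue_tail()/__skb_unlink()
pair for every dropped packet.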