	struct tcf_proto_ops	*ops;
};
+static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
+{
+	return qdisc->dev_queue->qdisc;
+}
+
+static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
+{
+	struct Qdisc *root = qdisc_root(qdisc);
+
+	return &root->dev_queue->lock;
+}
+
static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
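The two new helpers express locking in terms of the root qdisc of the TX queue a qdisc hangs off: qdisc_root() returns that root, and qdisc_root_lock() returns the queue lock protecting it. For a qdisc that is itself attached as a queue's root, qdisc_root(q) == q and qdisc_root_lock(q) == &q->dev_queue->lock, so the converted call sites below behave exactly as before in that common case; the helpers simply stop call sites from hard-coding which netdev_queue's lock guards the tree. A minimal usage sketch (not part of the patch; the function name is made up):

#include <linux/spinlock.h>
#include <net/sch_generic.h>

/* Hypothetical example: serialize an update of qdisc state against the
 * rest of the qdisc tree by taking the root qdisc's lock. */
static void example_touch_qdisc(struct Qdisc *sch)
{
	spinlock_t *root_lock = qdisc_root_lock(sch);

	spin_lock_bh(root_lock);	/* same lock the tree's data path holds */
	sch->qstats.overlimits++;	/* state of this kind is what the lock protects */
	spin_unlock_bh(root_lock);
}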
if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
if (tca[TCA_RATE]) {
err = gen_new_estimator(&sch->bstats, &sch->rate_est,
- &sch->dev_queue->lock,
+ qdisc_root_lock(sch),
tca[TCA_RATE]);
if (err) {
/*
}
if (tca[TCA_RATE])
gen_replace_estimator(&sch->bstats, &sch->rate_est,
- &sch->dev_queue->lock, tca[TCA_RATE]);
+ qdisc_root_lock(sch), tca[TCA_RATE]);
return 0;
}
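The reason the estimator calls change: gen_new_estimator() and gen_replace_estimator() store the spinlock they are passed, and the estimator timer takes it whenever it samples bstats, so it has to be the lock the qdisc's data path actually holds while updating those counters, i.e. the root qdisc's lock rather than a directly referenced queue lock. The same substitution recurs below for the CBQ, HFSC and HTB class estimators. A sketch of the attach pattern (hypothetical wrapper, grounded in the call above):

#include <net/gen_stats.h>
#include <net/sch_generic.h>

/* Hypothetical wrapper: attach a rate estimator to a qdisc.  The third
 * argument is the lock the estimator timer holds while reading bstats. */
static int example_attach_estimator(struct Qdisc *sch, struct nlattr *rate_opt)
{
	return gen_new_estimator(&sch->bstats, &sch->rate_est,
				 qdisc_root_lock(sch), rate_opt);
}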
q->qstats.qlen = q->q.qlen;
if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
- TCA_XSTATS, &q->dev_queue->lock, &d) < 0)
+ TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
goto nla_put_failure;
if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
goto nla_put_failure;
if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
- TCA_XSTATS, &q->dev_queue->lock, &d) < 0)
+ TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
goto nla_put_failure;
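gnet_stats_start_copy_compat() takes the lock it is handed and keeps it held until gnet_stats_finish_copy(), so the dump paths must pass the lock that protects the counters being copied; both the qdisc and the class stats dumps therefore switch to qdisc_root_lock(). A condensed sketch of the sequence (not a verbatim copy of tc_fill_qdisc(); the function name is made up and error unwinding is simplified):

#include <linux/rtnetlink.h>
#include <net/gen_stats.h>
#include <net/sch_generic.h>

static int example_dump_qdisc_stats(struct sk_buff *skb, struct Qdisc *q)
{
	struct gnet_dump d;

	/* Locks qdisc_root_lock(q) and holds it across the copies below. */
	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
		return -1;

	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
		return -1;

	/* Drops the root lock and finalizes the TCA_STATS* attributes. */
	return gnet_stats_finish_copy(&d);
}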
if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
+ spinlock_t *root_lock = qdisc_root_lock(sch);
struct cbq_sched_data *q = qdisc_priv(sch);
- spin_lock_bh(&sch->dev_queue->lock);
+ spin_lock_bh(root_lock);
if (q->rx_class == cl)
q->rx_class = NULL;
- spin_unlock_bh(&sch->dev_queue->lock);
+ spin_unlock_bh(root_lock);
#endif
cbq_destroy_class(sch, cl);
if (tca[TCA_RATE])
gen_replace_estimator(&cl->bstats, &cl->rate_est,
- &sch->dev_queue->lock,
+ qdisc_root_lock(sch),
tca[TCA_RATE]);
return 0;
}
if (tca[TCA_RATE])
gen_new_estimator(&cl->bstats, &cl->rate_est,
- &sch->dev_queue->lock, tca[TCA_RATE]);
+ qdisc_root_lock(sch), tca[TCA_RATE]);
*arg = (unsigned long)cl;
return 0;
{
int ret = NETDEV_TX_BUSY;
struct net_device *dev;
+ spinlock_t *root_lock;
struct sk_buff *skb;
/* Dequeue packet */
if (unlikely((skb = dequeue_skb(q)) == NULL))
return 0;
- /* And release queue */
- spin_unlock(&txq->lock);
+ root_lock = qdisc_root_lock(q);
+
+ /* And release qdisc */
+ spin_unlock(root_lock);
dev = txq->dev;
ret = dev_hard_start_xmit(skb, dev, txq);
HARD_TX_UNLOCK(dev, txq);
- spin_lock(&txq->lock);
+ spin_lock(root_lock);
switch (ret) {
case NETDEV_TX_OK:
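In qdisc_restart() the lock that protects dequeueing and the return-status handling is now obtained via qdisc_root_lock() instead of being the TX queue lock taken directly; as before, it is dropped around the driver transmit so the driver's TX lock never nests inside it. A condensed paraphrase of the flow, reusing the symbols from the hunk above (HARD_TX_LOCK/HARD_TX_UNLOCK, dev_hard_start_xmit()); requeue handling and the stopped-queue check are left out, and the function name is made up:

/* Caller already holds root_lock when this runs. */
static inline int example_restart(struct Qdisc *q, struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_root_lock(q);
	struct net_device *dev = txq->dev;
	struct sk_buff *skb;
	int ret = NETDEV_TX_BUSY;

	skb = q->dequeue(q);
	if (unlikely(skb == NULL))
		return 0;

	/* Release the qdisc while the packet is handed to the driver. */
	spin_unlock(root_lock);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	ret = dev_hard_start_xmit(skb, dev, txq);
	HARD_TX_UNLOCK(dev, txq);

	/* Re-acquire before acting on the transmit status. */
	spin_lock(root_lock);

	return ret;
}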
.owner = THIS_MODULE,
};
+static struct netdev_queue noop_netdev_queue = {
+	.lock		=	__SPIN_LOCK_UNLOCKED(noop_netdev_queue.lock),
+	.qdisc		=	&noop_qdisc,
+};
+
struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
+	.dev_queue	=	&noop_netdev_queue,
};
EXPORT_SYMBOL(noop_qdisc);
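qdisc_root_lock() unconditionally dereferences the root's dev_queue, and qdisc_root() can legitimately return noop_qdisc (for instance while a device is deactivated and its TX queues point at it). noop_qdisc therefore gains a static dummy netdev_queue whose qdisc pointer refers back to noop_qdisc itself, so both helpers stay safe on it. Illustration only (hypothetical helper; it would have to live in net/sched/sch_generic.c, where noop_netdev_queue is static):

static bool noop_helpers_are_safe(void)
{
	/* The dummy queue makes these identities hold by construction. */
	return qdisc_root(&noop_qdisc) == &noop_qdisc &&
	       qdisc_root_lock(&noop_qdisc) == &noop_netdev_queue.lock;
}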
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *dev_queue;
+ spinlock_t *root_lock;
struct Qdisc *q;
int val;
dev_queue = netdev_get_tx_queue(dev, i);
q = dev_queue->qdisc;
+ root_lock = qdisc_root_lock(q);
if (lock)
- spin_lock_bh(&dev_queue->lock);
+ spin_lock_bh(root_lock);
val = test_bit(__QDISC_STATE_RUNNING, &q->state);
if (lock)
- spin_unlock_bh(&dev_queue->lock);
+ spin_unlock_bh(root_lock);
if (val)
return true;
if (tca[TCA_RATE])
gen_replace_estimator(&cl->bstats, &cl->rate_est,
- &sch->dev_queue->lock,
+ qdisc_root_lock(sch),
tca[TCA_RATE]);
return 0;
}
if (tca[TCA_RATE])
gen_new_estimator(&cl->bstats, &cl->rate_est,
- &sch->dev_queue->lock, tca[TCA_RATE]);
+ qdisc_root_lock(sch), tca[TCA_RATE]);
*arg = (unsigned long)cl;
return 0;
}
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
+ spinlock_t *root_lock = qdisc_root_lock(sch);
struct htb_sched *q = qdisc_priv(sch);
struct nlattr *nest;
struct tc_htb_glob gopt;
- spin_lock_bh(&sch->dev_queue->lock);
+ spin_lock_bh(root_lock);
gopt.direct_pkts = q->direct_pkts;
gopt.version = HTB_VER;
NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
nla_nest_end(skb, nest);
- spin_unlock_bh(&sch->dev_queue->lock);
+ spin_unlock_bh(root_lock);
return skb->len;
nla_put_failure:
- spin_unlock_bh(&sch->dev_queue->lock);
+ spin_unlock_bh(root_lock);
nla_nest_cancel(skb, nest);
return -1;
}
struct sk_buff *skb, struct tcmsg *tcm)
{
struct htb_class *cl = (struct htb_class *)arg;
+ spinlock_t *root_lock = qdisc_root_lock(sch);
struct nlattr *nest;
struct tc_htb_opt opt;
- spin_lock_bh(&sch->dev_queue->lock);
+ spin_lock_bh(root_lock);
tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
tcm->tcm_handle = cl->common.classid;
if (!cl->level && cl->un.leaf.q)
NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
nla_nest_end(skb, nest);
- spin_unlock_bh(&sch->dev_queue->lock);
+ spin_unlock_bh(root_lock);
return skb->len;
nla_put_failure:
- spin_unlock_bh(&sch->dev_queue->lock);
+ spin_unlock_bh(root_lock);
nla_nest_cancel(skb, nest);
return -1;
}
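Both HTB dump paths hold the root qdisc's lock while they snapshot configuration into the netlink message, so what ->dump() and ->dump_class() report cannot race with a concurrent change. Since the hunks above are truncated, here is the consolidated shape of the pattern (a sketch only: the function name is made up, it would have to live in net/sched/sch_htb.c where struct htb_sched is defined, and most attribute contents are trimmed):

static int example_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	spinlock_t *root_lock = qdisc_root_lock(sch);
	struct htb_sched *q = qdisc_priv(sch);
	struct tc_htb_glob gopt = { .version = HTB_VER };
	struct nlattr *nest;

	spin_lock_bh(root_lock);

	/* Read qdisc state and emit it while the tree cannot change. */
	gopt.direct_pkts = q->direct_pkts;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
	nla_nest_end(skb, nest);

	spin_unlock_bh(root_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(root_lock);
	nla_nest_cancel(skb, nest);
	return -1;
}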
goto failure;
gen_new_estimator(&cl->bstats, &cl->rate_est,
- &sch->dev_queue->lock,
+ qdisc_root_lock(sch),
tca[TCA_RATE] ? : &est.nla);
cl->refcnt = 1;
cl->children = 0;
} else {
if (tca[TCA_RATE])
gen_replace_estimator(&cl->bstats, &cl->rate_est,
- &sch->dev_queue->lock,
+ qdisc_root_lock(sch),
tca[TCA_RATE]);
sch_tree_lock(sch);
}
* skb will be queued.
*/
if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
- struct Qdisc *rootq = sch->dev_queue->qdisc;
+ struct Qdisc *rootq = qdisc_root(sch);
u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
q->duplicate = 0;
struct netem_sched_data *q = qdisc_priv(sch);
unsigned long n = nla_len(attr)/sizeof(__s16);
const __s16 *data = nla_data(attr);
+ spinlock_t *root_lock;
struct disttable *d;
int i;
for (i = 0; i < n; i++)
d->table[i] = data[i];
- spin_lock_bh(&sch->dev_queue->lock);
+ root_lock = qdisc_root_lock(sch);
+
+ spin_lock_bh(root_lock);
d = xchg(&q->delay_dist, d);
- spin_unlock_bh(&sch->dev_queue->lock);
+ spin_unlock_bh(root_lock);
kfree(d);
return 0;
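get_dist_table() builds the replacement distribution table with no lock held, publishes it with a pointer swap under the root qdisc's lock so the enqueue path never sees a half-initialized table, and frees the old table only after dropping the lock. A condensed sketch of that publish-then-free step (the function name is made up and it would have to live in net/sched/sch_netem.c, where struct netem_sched_data and struct disttable are defined; allocation and the copy loop are elided):

static int example_swap_dist(struct Qdisc *sch, struct disttable *new)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_root_lock(sch);
	struct disttable *old;

	/* Only the pointer swap needs to run under the root lock. */
	spin_lock_bh(root_lock);
	old = xchg(&q->delay_dist, new);
	spin_unlock_bh(root_lock);

	kfree(old);
	return 0;
}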