Add netif_addr_{lock,unlock}{,_bh}() helpers.
Use them to protect operations that operate on or read
the network device unicast and multicast address lists.
Also use them in cases where the code simply wants to
block calls into the driver's ->set_rx_mode() and
->set_multicast_list() methods.
Signed-off-by: David S. Miller <davem@davemloft.net>
14 files changed:
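For context, every call site in the hunks below follows the same pattern: the new address-list lock nests just inside the existing TX lock around code that reads dev->uc_list/dev->mc_list or calls ->set_rx_mode()/->set_multicast_list(). A minimal sketch of that pattern (the function name here is made up for illustration and is not part of the patch):

#include <linux/netdevice.h>

/* Hypothetical example, not from the patch: shows the lock nesting
 * used throughout the hunks below.  The address-list lock is taken
 * just inside the TX lock, so walking the unicast/multicast lists or
 * reprogramming the hardware RX filter is serialised against
 * concurrent address-list updates.
 */
static void example_rx_mode_update(struct net_device *dev)
{
	netif_tx_lock_bh(dev);		/* existing TX lock */
	netif_addr_lock(dev);		/* new: protects uc_list/mc_list */

	/* ... read dev->uc_list / dev->mc_list and program the filter ... */

	netif_addr_unlock(dev);
	netif_tx_unlock_bh(dev);
}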
local_irq_save(flags);
netif_tx_lock(dev);
+ netif_addr_lock(dev);
spin_lock(&priv->lock);
/*
}
spin_unlock(&priv->lock);
+ netif_addr_unlock(dev);
netif_tx_unlock(dev);
local_irq_restore(flags);
dvb_net_feed_stop(dev);
priv->rx_mode = RX_MODE_UNI;
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
if (dev->flags & IFF_PROMISC) {
dprintk("%s: promiscuous mode\n", dev->name);
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
dvb_net_feed_start(dev);
}
}
netif_tx_lock_bh(bond_dev);
+ netif_addr_lock(bond_dev);
/* upload master's mc_list to new slave */
for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) {
dev_mc_add (slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
}
+ netif_addr_unlock(bond_dev);
netif_tx_unlock_bh(bond_dev);
}
/* flush master's mc_list from slave */
netif_tx_lock_bh(bond_dev);
+ netif_addr_lock(bond_dev);
bond_mc_list_flush(bond_dev, slave_dev);
+ netif_addr_unlock(bond_dev);
netif_tx_unlock_bh(bond_dev);
}
/* flush master's mc_list from slave */
netif_tx_lock_bh(bond_dev);
+ netif_addr_lock(bond_dev);
bond_mc_list_flush(bond_dev, slave_dev);
+ netif_addr_unlock(bond_dev);
netif_tx_unlock_bh(bond_dev);
}
bond_work_cancel_all(bond);
netif_tx_lock_bh(bond_dev);
+ netif_addr_lock(bond_dev);
bond_mc_list_destroy(bond);
+ netif_addr_unlock(bond_dev);
netif_tx_unlock_bh(bond_dev);
/* Release the bonded slaves */
bond_release_all(bond_dev);
*/
nv_disable_irq(dev);
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rxtx(dev);
/* restart rx engine */
nv_start_rxtx(dev);
spin_unlock(&np->lock);
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
nv_enable_irq(dev);
}
if (netif_running(dev)) {
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
spin_lock_irq(&np->lock);
/* stop rx engine */
/* restart rx engine */
nv_start_rx(dev);
spin_unlock_irq(&np->lock);
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
} else {
nv_copy_mac_to_hw(dev);
printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
if (netif_running(dev)) {
netif_tx_lock_bh(dev);
printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
if (netif_running(dev)) {
netif_tx_lock_bh(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rxtx(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rxtx(dev);
/* restart rx engine */
nv_start_rxtx(dev);
spin_unlock(&np->lock);
/* restart rx engine */
nv_start_rxtx(dev);
spin_unlock(&np->lock);
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
}
}
netif_tx_unlock_bh(dev);
}
}
nv_disable_irq(dev);
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
/* with plain spinlock lockdep complains */
spin_lock_irqsave(&np->lock, flags);
/* stop engines */
*/
nv_stop_rxtx(dev);
spin_unlock_irqrestore(&np->lock, flags);
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
}
if (netif_running(dev)) {
nv_disable_irq(dev);
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rxtx(dev);
spin_unlock(&np->lock);
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
printk(KERN_INFO "%s: link down.\n", dev->name);
}
if (netif_running(dev)) {
nv_disable_irq(dev);
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rxtx(dev);
/* restart engines */
nv_start_rxtx(dev);
spin_unlock(&np->lock);
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
nv_enable_irq(dev);
}
if (netif_running(dev)) {
nv_disable_irq(dev);
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rxtx(dev);
spin_unlock(&np->lock);
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
}
napi_disable(&np->napi);
#endif
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
spin_lock_irq(&np->lock);
nv_disable_hw_interrupts(dev, np->irqmask);
if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
/* drain rx queue */
nv_drain_rxtx(dev);
spin_unlock_irq(&np->lock);
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
}
struct sockaddr_ax25 *sa = addr;
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
return 0;
struct sockaddr_ax25 *sa = addr;
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
return 0;
static inline void emac_netif_stop(struct emac_instance *dev)
{
netif_tx_lock_bh(dev->ndev);
+ netif_addr_lock(dev->ndev);
dev->no_mcast = 1;
+ netif_addr_unlock(dev->ndev);
netif_tx_unlock_bh(dev->ndev);
dev->ndev->trans_start = jiffies; /* prevent tx timeout */
mal_poll_disable(dev->mal, &dev->commac);
static inline void emac_netif_start(struct emac_instance *dev)
{
netif_tx_lock_bh(dev->ndev);
+ netif_addr_lock(dev->ndev);
dev->no_mcast = 0;
if (dev->mcast_pending && netif_running(dev->ndev))
__emac_set_multicast_list(dev);
+ netif_addr_unlock(dev->ndev);
netif_tx_unlock_bh(dev->ndev);
netif_wake_queue(dev->ndev);
/* Serialise against efx_set_multicast_list() */
if (efx_dev_registered(efx)) {
netif_tx_lock_bh(efx->net_dev);
+ netif_addr_lock(efx->net_dev);
+ netif_addr_unlock(efx->net_dev);
netif_tx_unlock_bh(efx->net_dev);
}
}
return nr_addrs;
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
for (mc_list = dev->mc_list; mc_list; mc_list = mc_list->next) {
if (mac_in_list(cmd->maclist, nr_addrs, mc_list->dmi_addr)) {
lbs_deb_net("mcast address %s:%s skipped\n", dev->name,
print_mac(mac, mc_list->dmi_addr));
i++;
}
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
if (mc_list)
return -EOVERFLOW;
netif_tx_unlock_bh(dev);
}
+static inline void netif_addr_lock(struct net_device *dev)
+{
+ spin_lock(&dev->addr_list_lock);
+}
+
+static inline void netif_addr_lock_bh(struct net_device *dev)
+{
+ spin_lock_bh(&dev->addr_list_lock);
+}
+
+static inline void netif_addr_unlock(struct net_device *dev)
+{
+ spin_unlock(&dev->addr_list_lock);
+}
+
+static inline void netif_addr_unlock_bh(struct net_device *dev)
+{
+ spin_unlock_bh(&dev->addr_list_lock);
+}
+
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
extern void ether_setup(struct net_device *dev);
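As a usage sketch for the _bh variants defined above (the function below is hypothetical and not part of the patch), a driver could walk its multicast list with only the new address-list lock held:

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Hypothetical example, not from the patch: counts the entries on
 * dev->mc_list under the new BH-disabling address-list lock. */
static void example_count_mc_addrs(struct net_device *dev)
{
	struct dev_mc_list *dmi;
	int n = 0;

	netif_addr_lock_bh(dev);
	for (dmi = dev->mc_list; dmi; dmi = dmi->next)
		n++;
	netif_addr_unlock_bh(dev);

	printk(KERN_DEBUG "%s: %d multicast addresses\n", dev->name, n);
}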
void dev_set_rx_mode(struct net_device *dev)
{
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
__dev_set_rx_mode(dev);
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
}
ASSERT_RTNL();
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
if (!err)
__dev_set_rx_mode(dev);
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
return err;
}
ASSERT_RTNL();
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
if (!err)
__dev_set_rx_mode(dev);
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
return err;
}
int err = 0;
netif_tx_lock_bh(to);
+ netif_addr_lock(to);
err = __dev_addr_sync(&to->uc_list, &to->uc_count,
&from->uc_list, &from->uc_count);
if (!err)
__dev_set_rx_mode(to);
+ netif_addr_unlock(to);
netif_tx_unlock_bh(to);
return err;
}
void dev_unicast_unsync(struct net_device *to, struct net_device *from)
{
netif_tx_lock_bh(from);
+ netif_addr_lock(from);
__dev_addr_unsync(&to->uc_list, &to->uc_count,
&from->uc_list, &from->uc_count);
__dev_set_rx_mode(to);
+ netif_addr_unlock(from);
netif_tx_unlock_bh(from);
}
EXPORT_SYMBOL(dev_unicast_unsync);
static void dev_addr_discard(struct net_device *dev)
{
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
__dev_addr_discard(&dev->uc_list);
dev->uc_count = 0;
__dev_addr_discard(&dev->mc_list);
dev->mc_count = 0;
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
}
int err;
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
err = __dev_addr_delete(&dev->mc_list, &dev->mc_count,
addr, alen, glbl);
if (!err) {
__dev_set_rx_mode(dev);
}
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
return err;
}
int err;
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
if (!err)
__dev_set_rx_mode(dev);
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
return err;
}
int err = 0;
netif_tx_lock_bh(to);
+ netif_addr_lock(to);
err = __dev_addr_sync(&to->mc_list, &to->mc_count,
&from->mc_list, &from->mc_count);
if (!err)
__dev_set_rx_mode(to);
+ netif_addr_unlock(to);
netif_tx_unlock_bh(to);
return err;
void dev_mc_unsync(struct net_device *to, struct net_device *from)
{
netif_tx_lock_bh(from);
+ netif_addr_lock(from);
__dev_addr_unsync(&to->mc_list, &to->mc_count,
&from->mc_list, &from->mc_count);
__dev_set_rx_mode(to);
+ netif_addr_unlock(from);
netif_tx_unlock_bh(from);
}
EXPORT_SYMBOL(dev_mc_unsync);
return 0;
netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
for (m = dev->mc_list; m; m = m->next) {
int i;
+ netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
return 0;
}
local->fif_other_bss++;
netif_tx_lock_bh(local->mdev);
+ netif_addr_lock(local->mdev);
ieee80211_configure_filter(local);
+ netif_addr_unlock(local->mdev);
netif_tx_unlock_bh(local->mdev);
break;
case IEEE80211_IF_TYPE_STA:
local->fif_other_bss--;
netif_tx_lock_bh(local->mdev);
+ netif_addr_lock(local->mdev);
ieee80211_configure_filter(local);
+ netif_addr_unlock(local->mdev);
netif_tx_unlock_bh(local->mdev);
break;
case IEEE80211_IF_TYPE_MESH_POINT:
netif_tx_lock_bh(local->mdev);
+ netif_addr_lock(local->mdev);
local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC;
local->ops->configure_filter(local_to_hw(local),
FIF_BCN_PRBRESP_PROMISC,
&local->filter_flags,
local->mdev->mc_count,
local->mdev->mc_list);
+ netif_addr_unlock(local->mdev);
netif_tx_unlock_bh(local->mdev);
rcu_read_lock();
local->scan_dev = dev;
netif_tx_lock_bh(local->mdev);
+ netif_addr_lock(local->mdev);
local->filter_flags |= FIF_BCN_PRBRESP_PROMISC;
local->ops->configure_filter(local_to_hw(local),
FIF_BCN_PRBRESP_PROMISC,
&local->filter_flags,
local->mdev->mc_count,
local->mdev->mc_list);
+ netif_addr_unlock(local->mdev);
netif_tx_unlock_bh(local->mdev);
/* TODO: start scan as soon as all nullfunc frames are ACKed */