X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fnet%2Fforcedeth.c;h=62b38a4494b8c3660fecd27b34d216fbf946c571;hb=58dfd9c16e88b9d790e7df8f2c5b03b7887db54c;hp=4ab39c554d0d995c684434a6b78758b498a30091;hpb=c99ce7ee75db7836e2faba932affd2aadd1e942f;p=linux-2.6

diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 4ab39c554d..62b38a4494 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -669,9 +669,9 @@ static const struct register_test nv_registers_test[] = {
  * critical parts:
  * - rx is (pseudo-) lockless: it relies on the single-threading provided
  *   by the arch code for interrupts.
- * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
+ * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
  *   needs dev->priv->lock :-(
- * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
+ * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
  */
 
 /* in dev: base, irq */
@@ -1405,7 +1405,7 @@ static void drain_ring(struct net_device *dev)
 
 /*
  * nv_start_xmit: dev->hard_start_xmit function
- * Called with dev->xmit_lock held.
+ * Called with netif_tx_lock held.
  */
 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -1599,7 +1599,7 @@ static void nv_tx_done(struct net_device *dev)
 
 /*
  * nv_tx_timeout: dev->tx_timeout function
- * Called with dev->xmit_lock held.
+ * Called with netif_tx_lock held.
  */
 static void nv_tx_timeout(struct net_device *dev)
 {
@@ -1930,7 +1930,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		 * Changing the MTU is a rare event, it shouldn't matter.
 		 */
 		nv_disable_irq(dev);
-		spin_lock_bh(&dev->xmit_lock);
+		netif_tx_lock_bh(dev);
 		spin_lock(&np->lock);
 		/* stop engines */
 		nv_stop_rx(dev);
@@ -1958,7 +1958,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		nv_start_rx(dev);
 		nv_start_tx(dev);
 		spin_unlock(&np->lock);
-		spin_unlock_bh(&dev->xmit_lock);
+		netif_tx_unlock_bh(dev);
 		nv_enable_irq(dev);
 	}
 	return 0;
@@ -1993,7 +1993,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
 	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
 
 	if (netif_running(dev)) {
-		spin_lock_bh(&dev->xmit_lock);
+		netif_tx_lock_bh(dev);
 		spin_lock_irq(&np->lock);
 
 		/* stop rx engine */
@@ -2005,7 +2005,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
 		/* restart rx engine */
 		nv_start_rx(dev);
 		spin_unlock_irq(&np->lock);
-		spin_unlock_bh(&dev->xmit_lock);
+		netif_tx_unlock_bh(dev);
 	} else {
 		nv_copy_mac_to_hw(dev);
 	}
@@ -2014,7 +2014,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
 
 /*
  * nv_set_multicast: dev->set_multicast function
- * Called with dev->xmit_lock held.
+ * Called with netif_tx_lock held.
  */
 static void nv_set_multicast(struct net_device *dev)
 {
@@ -2991,13 +2991,13 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 	netif_carrier_off(dev);
 	if (netif_running(dev)) {
 		nv_disable_irq(dev);
-		spin_lock_bh(&dev->xmit_lock);
+		netif_tx_lock_bh(dev);
 		spin_lock(&np->lock);
 		/* stop engines */
 		nv_stop_rx(dev);
 		nv_stop_tx(dev);
 		spin_unlock(&np->lock);
-		spin_unlock_bh(&dev->xmit_lock);
+		netif_tx_unlock_bh(dev);
 	}
 
 	if (ecmd->autoneg == AUTONEG_ENABLE) {
@@ -3131,13 +3131,13 @@ static int nv_nway_reset(struct net_device *dev)
 		netif_carrier_off(dev);
 		if (netif_running(dev)) {
 			nv_disable_irq(dev);
-			spin_lock_bh(&dev->xmit_lock);
+			netif_tx_lock_bh(dev);
 			spin_lock(&np->lock);
 			/* stop engines */
 			nv_stop_rx(dev);
 			nv_stop_tx(dev);
 			spin_unlock(&np->lock);
-			spin_unlock_bh(&dev->xmit_lock);
+			netif_tx_unlock_bh(dev);
 			printk(KERN_INFO "%s: link down.\n", dev->name);
 		}
 
@@ -3244,7 +3244,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 
 	if (netif_running(dev)) {
 		nv_disable_irq(dev);
-		spin_lock_bh(&dev->xmit_lock);
+		netif_tx_lock_bh(dev);
 		spin_lock(&np->lock);
 		/* stop engines */
 		nv_stop_rx(dev);
@@ -3303,7 +3303,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 		nv_start_rx(dev);
 		nv_start_tx(dev);
 		spin_unlock(&np->lock);
-		spin_unlock_bh(&dev->xmit_lock);
+		netif_tx_unlock_bh(dev);
 		nv_enable_irq(dev);
 	}
 	return 0;
@@ -3339,13 +3339,13 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
 	netif_carrier_off(dev);
 	if (netif_running(dev)) {
 		nv_disable_irq(dev);
-		spin_lock_bh(&dev->xmit_lock);
+		netif_tx_lock_bh(dev);
 		spin_lock(&np->lock);
 		/* stop engines */
 		nv_stop_rx(dev);
 		nv_stop_tx(dev);
 		spin_unlock(&np->lock);
-		spin_unlock_bh(&dev->xmit_lock);
+		netif_tx_unlock_bh(dev);
 	}
 
 	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
@@ -3729,7 +3729,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 	if (test->flags & ETH_TEST_FL_OFFLINE) {
 		if (netif_running(dev)) {
 			netif_stop_queue(dev);
-			spin_lock_bh(&dev->xmit_lock);
+			netif_tx_lock_bh(dev);
 			spin_lock_irq(&np->lock);
 			nv_disable_hw_interrupts(dev, np->irqmask);
 			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
@@ -3745,7 +3745,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 			nv_drain_rx(dev);
 			nv_drain_tx(dev);
 			spin_unlock_irq(&np->lock);
-			spin_unlock_bh(&dev->xmit_lock);
+			netif_tx_unlock_bh(dev);
 		}
 
 		if (!nv_register_test(dev)) {
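
The conversion is the same in every reconfiguration path touched above: the generic TX lock is taken with netif_tx_lock_bh()/netif_tx_unlock_bh() instead of spin_lock_bh(&dev->xmit_lock), and the driver-private np->lock stays nested inside it. Below is a minimal sketch of that lock ordering, modeled on nv_change_mtu(); nv_example_reconfigure() is a hypothetical helper for illustration only and is not part of this patch.

/* Sketch only: mirrors the lock ordering used by nv_change_mtu() above.
 * nv_example_reconfigure() is a hypothetical helper, not in forcedeth.c. */
static void nv_example_reconfigure(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_disable_irq(dev);
	netif_tx_lock_bh(dev);		/* replaces spin_lock_bh(&dev->xmit_lock) */
	spin_lock(&np->lock);		/* driver-private lock nests inside */

	/* stop engines, reprogram hardware, restart engines */
	nv_stop_rx(dev);
	nv_stop_tx(dev);
	/* ... hardware reconfiguration would go here ... */
	nv_start_rx(dev);
	nv_start_tx(dev);

	spin_unlock(&np->lock);
	netif_tx_unlock_bh(dev);	/* replaces spin_unlock_bh(&dev->xmit_lock) */
	nv_enable_irq(dev);
}

The locks are released in the reverse order of acquisition, matching the unlock sequences in the nv_change_mtu() and nv_set_ringparam() hunks.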