diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c635de52526c9ddcd155cbf12d87b315a116c80f..c12720895ecf90c049723f3cf9d854e78586ebeb 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -58,25 +58,27 @@ static void queue_process(struct work_struct *work)
 
        while ((skb = skb_dequeue(&npinfo->txq))) {
                struct net_device *dev = skb->dev;
+               struct netdev_queue *txq;
 
                if (!netif_device_present(dev) || !netif_running(dev)) {
                        __kfree_skb(skb);
                        continue;
                }
 
+               txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+
                local_irq_save(flags);
-               netif_tx_lock(dev);
-               if ((netif_queue_stopped(dev) ||
-                    netif_subqueue_stopped(dev, skb)) ||
-                    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+               __netif_tx_lock(txq, smp_processor_id());
+               if (netif_tx_queue_stopped(txq) ||
+                   dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
                        skb_queue_head(&npinfo->txq, skb);
-                       netif_tx_unlock(dev);
+                       __netif_tx_unlock(txq);
                        local_irq_restore(flags);
 
                        schedule_delayed_work(&npinfo->tx_work, HZ/10);
                        return;
                }
-               netif_tx_unlock(dev);
+               __netif_tx_unlock(txq);
                local_irq_restore(flags);
        }
 }
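
The hunk above converts queue_process() from the single per-device TX lock to the lock of the specific netdev_queue the skb was mapped to. A minimal sketch of that pattern, using only the helpers visible in the diff (xmit_one() is a hypothetical wrapper, not part of the patch, and the caller is assumed to have disabled local IRQs, as queue_process() does with local_irq_save()):

/* Hypothetical helper illustrating the per-queue TX locking used above. */
static int xmit_one(struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_queue *txq;
	int status = NETDEV_TX_BUSY;

	/* pick the TX queue this skb was mapped to */
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	__netif_tx_lock(txq, smp_processor_id());
	if (!netif_tx_queue_stopped(txq))
		status = dev->hard_start_xmit(skb, dev);
	__netif_tx_unlock(txq);

	return status;	/* NETDEV_TX_OK on success, otherwise requeue */
}
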
@@ -278,17 +280,19 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
        /* don't get messages out of order, and no recursion */
        if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
+               struct netdev_queue *txq;
                unsigned long flags;
 
+               txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+
                local_irq_save(flags);
                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
-                       if (netif_tx_trylock(dev)) {
-                               if (!netif_queue_stopped(dev) &&
-                                   !netif_subqueue_stopped(dev, skb))
+                       if (__netif_tx_trylock(txq)) {
+                               if (!netif_tx_queue_stopped(txq))
                                        status = dev->hard_start_xmit(skb, dev);
-                               netif_tx_unlock(dev);
+                               __netif_tx_unlock(txq);
 
                                if (status == NETDEV_TX_OK)
                                        break;
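
The same conversion happens in netpoll_send_skb(): the busy-poll path now takes the per-queue lock opportunistically with __netif_tx_trylock(). These helpers are thin wrappers around the queue's own spinlock; a rough sketch of what the lock/trylock pair amounts to, with field names assumed from the 2.6.26-era struct netdev_queue (the real definitions live in include/linux/netdevice.h):

/* Simplified sketch, not the kernel's actual inline helpers. */
static inline void tx_lock_sketch(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);		/* one lock per TX queue        */
	txq->xmit_lock_owner = cpu;		/* lets netpoll detect recursion */
}

static inline int tx_trylock_sketch(struct netdev_queue *txq)
{
	if (!spin_trylock(&txq->_xmit_lock))
		return 0;
	txq->xmit_lock_owner = smp_processor_id();
	return 1;
}

Recording the owning CPU is what the netpoll_owner_active(dev) test earlier in this hunk relies on to avoid sending from a context that already holds the queue lock.
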
@@ -390,9 +394,7 @@ static void arp_reply(struct sk_buff *skb)
        if (skb->dev->flags & IFF_NOARP)
                return;
 
-       if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
-                                (2 * skb->dev->addr_len) +
-                                (2 * sizeof(u32)))))
+       if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
                return;
 
        skb_reset_network_header(skb);
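
The open-coded ARP length above (fixed header plus two hardware addresses plus two IPv4 addresses) is replaced by the arp_hdr_len() helper from <linux/if_arp.h>, which computes the same value. Roughly, as a sketch equivalent to the removed expression:

/* Sketch of what arp_hdr_len() evaluates to; mirrors the open-coded
 * expression removed above (arp_hdr_len_sketch is illustrative only). */
static inline int arp_hdr_len_sketch(struct net_device *dev)
{
	return sizeof(struct arphdr) +	/* fixed ARP header           */
	       2 * dev->addr_len +	/* sender + target HW address */
	       2 * sizeof(u32);		/* sender + target IPv4       */
}
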
@@ -420,8 +422,8 @@ static void arp_reply(struct sk_buff *skb)
            ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
                return;
 
-       size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
-       send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
+       size = arp_hdr_len(skb->dev);
+       send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
                            LL_RESERVED_SPACE(np->dev));
 
        if (!send_skb)
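
The allocation sizing changes to match: the reply skb is now sized with arp_hdr_len() plus LL_ALLOCATED_SPACE() (link-layer headroom plus any tailroom the device asked for), while LL_RESERVED_SPACE() remains the amount reserved as headroom. A hedged sketch of the resulting buffer layout, using plain alloc_skb() in place of netpoll's internal find_skb() pool helper (build_arp_reply_skb is a hypothetical name):

/* Sketch only: find_skb() in netpoll.c recycles skbs from a private pool;
 * alloc_skb() is used here just to show the headroom/tailroom layout. */
static struct sk_buff *build_arp_reply_skb(struct net_device *dev,
					   unsigned int size)
{
	struct sk_buff *skb;

	skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* headroom for the link-layer header */
	skb_put(skb, size);				/* ARP payload; device tailroom remains */
	return skb;
}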