err.no Git - linux-2.6/commitdiff
[BNX2]: Restructure RX fast path handling.
author Michael Chan <mchan@broadcom.com>
Wed, 12 Dec 2007 19:17:01 +0000 (11:17 -0800)
committer David S. Miller <davem@davemloft.net>
Mon, 28 Jan 2008 22:57:28 +0000 (14:57 -0800)
Add a new function to handle new SKB allocation and to prepare the
completed SKB.  This makes it easier to add support for non-linear
SKBs.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
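
A note on the new helper's calling convention: bnx2_rx_skb() takes both
ring indices packed into a single u32, with the consumer index in the
high 16 bits and the producer index in the low 16 bits, matching the
(sw_ring_cons << 16) | sw_ring_prod expression at the call site. A
minimal userspace sketch of that packing (the index values here are
made up for illustration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t sw_ring_cons = 0x0123;	/* hypothetical values */
		uint16_t sw_ring_prod = 0x0124;

		/* Pack as the call site in bnx2_rx_int() does. */
		uint32_t ring_idx =
			((uint32_t)sw_ring_cons << 16) | sw_ring_prod;

		/* Unpack as bnx2_rx_skb() does. */
		uint16_t prod = ring_idx & 0xffff;
		uint16_t cons = (uint16_t)(ring_idx >> 16);

		printf("cons=%#x prod=%#x\n", cons, prod);
		return 0;
	}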
drivers/net/bnx2.c

index dfe50c286d95fb5ff2c3b3f853920096647b5ffc..14119fb5964d634bba3dd1b3a56075fe07bec69b 100644 (file)
@@ -2379,6 +2379,27 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
 }
 
+static int
+bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
+           dma_addr_t dma_addr, u32 ring_idx)
+{
+       int err;
+       u16 prod = ring_idx & 0xffff;
+
+       err = bnx2_alloc_rx_skb(bp, prod);
+       if (unlikely(err)) {
+               bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
+               return err;
+       }
+
+       skb_reserve(skb, bp->rx_offset);
+       pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
+                        PCI_DMA_FROMDEVICE);
+
+       skb_put(skb, len);
+       return 0;
+}
+
 static inline u16
 bnx2_get_hw_rx_cons(struct bnx2 *bp)
 {
@@ -2434,7 +2455,8 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
                        L2_FHDR_ERRORS_TOO_SHORT |
                        L2_FHDR_ERRORS_GIANT_FRAME)) {
 
-                       goto reuse_rx;
+                       bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
+                       goto next_rx;
                }
 
                /* Since we don't have a jumbo ring, copy small packets
@@ -2444,8 +2466,11 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
                        struct sk_buff *new_skb;
 
                        new_skb = netdev_alloc_skb(bp->dev, len + 2);
-                       if (new_skb == NULL)
-                               goto reuse_rx;
+                       if (new_skb == NULL) {
+                               bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
+                                                 sw_ring_prod);
+                               goto next_rx;
+                       }
 
                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
@@ -2457,20 +2482,9 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
                                sw_ring_cons, sw_ring_prod);
 
                        skb = new_skb;
-               }
-               else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
-                       pci_unmap_single(bp->pdev, dma_addr,
-                               bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
-
-                       skb_reserve(skb, bp->rx_offset);
-                       skb_put(skb, len);
-               }
-               else {
-reuse_rx:
-                       bnx2_reuse_rx_skb(bp, skb,
-                               sw_ring_cons, sw_ring_prod);
+               } else if (unlikely(bnx2_rx_skb(bp, skb, len, dma_addr,
+                                   (sw_ring_cons << 16) | sw_ring_prod)))
                        goto next_rx;
-               }
 
                skb->protocol = eth_type_trans(skb, bp->dev);
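
With bnx2_rx_skb() factored out, the reuse_rx label in bnx2_rx_int() is
gone: both failure paths (frame errors and a failed copy-buffer
allocation) recycle the buffer inline via bnx2_reuse_rx_skb() and jump
to next_rx, while the non-copy path collapses into a single helper call
that recycles the buffer itself on error. A simplified, hypothetical
sketch of the resulting per-packet flow (stub types and helpers, not
the driver's real code):

	#include <stdbool.h>
	#include <stddef.h>

	struct sk_buff;	/* opaque here; stands in for the kernel type */

	/* Hypothetical stubs for the driver's real helpers. */
	static bool frame_has_errors(unsigned int status)
	{ return status != 0; }
	static struct sk_buff *copy_small_packet(struct sk_buff *skb,
						 unsigned int len)
	{ (void)len; return skb; }
	static int rx_skb(struct sk_buff *skb, unsigned int len)
	{ (void)skb; (void)len; return 0; }
	static void reuse_rx_skb(struct sk_buff *skb) { (void)skb; }
	static void deliver(struct sk_buff *skb) { (void)skb; }

	/* Per-packet flow after the restructure: error and allocation-
	 * failure paths recycle the buffer and skip delivery; otherwise
	 * the packet is copied (small frames) or handed off whole. */
	static void handle_one_rx(struct sk_buff *skb, unsigned int len,
				  unsigned int status, bool small)
	{
		if (frame_has_errors(status)) {
			reuse_rx_skb(skb);	/* was: goto reuse_rx */
			return;			/* i.e. goto next_rx */
		}

		if (small) {
			struct sk_buff *new_skb =
				copy_small_packet(skb, len);
			if (new_skb == NULL) {
				reuse_rx_skb(skb);
				return;
			}
			skb = new_skb;
		} else if (rx_skb(skb, len)) {
			/* bnx2_rx_skb() already recycled the buffer */
			return;
		}

		deliver(skb);	/* eth_type_trans() etc. in the driver */
	}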