	for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];

-		bnapi->status_blk = (void *)
+		bnapi->status_blk_msix = (void *)
			((unsigned long) bp->status_blk +
			 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
		bnapi->int_num = i << 24;
	}

	return cons;
}
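
+/* bnx2_tx_int() now takes a NAPI budget and returns the number of tx
+ * packets reclaimed; a budget of 0, as used on the bnx2_poll_work()
+ * path further below, effectively means "no limit".
+ */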
-static void
-bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
+static int
+bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons;
+	int tx_pkt = 0;

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = bnapi->tx_cons;

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
+		tx_pkt++;
+		if (tx_pkt == budget)
+			break;

		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
+
+	return tx_pkt;
}
static void

	return IRQ_HANDLED;
}
+static irqreturn_t
+bnx2_tx_msix(int irq, void *dev_instance)
+{
+	struct net_device *dev = dev_instance;
+	struct bnx2 *bp = netdev_priv(dev);
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
+
+	prefetch(bnapi->status_blk_msix);
+
+	/* Return here if interrupt is disabled. */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0))
+		return IRQ_HANDLED;
+
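+	/* Schedule NAPI so that bnx2_tx_poll() runs in softirq context. */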
+	netif_rx_schedule(dev, &bnapi->napi);
+	return IRQ_HANDLED;
+}
+
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

	return 0;
}
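
+/* NAPI poll handler for the tx-only MSI-X vector: reclaim completed tx
+ * buffers until the budget is exhausted or the ring is idle, then
+ * complete NAPI and re-arm the vector by acking the last status index.
+ */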
+static int bnx2_tx_poll(struct napi_struct *napi, int budget)
+{
+	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
+	struct bnx2 *bp = bnapi->bp;
+	int work_done = 0;
+	struct status_block_msix *sblk = bnapi->status_blk_msix;
+
+	do {
+		work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
+		if (unlikely(work_done >= budget))
+			return work_done;
+
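+		/* Record the status index before checking for more work;
+		 * it is written back in the INT_ACK below to tell the chip
+		 * how far processing has advanced. */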
+		bnapi->last_status_idx = sblk->status_idx;
+		rmb();
+	} while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);
+
+	netif_rx_complete(bp->dev, napi);
+	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+	       bnapi->last_status_idx);
+	return work_done;
+}
+
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{

	}

	if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
-		bnx2_tx_int(bp, bnapi);
+		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
static void
bnx2_enable_msix(struct bnx2 *bp)
{
+	int i, rc;
+	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
+
	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
+
+	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+		msix_ent[i].entry = i;
+		msix_ent[i].vector = 0;
+	}
+
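+	/* If fewer than BNX2_MAX_MSIX_VEC vectors can be allocated, bail
+	 * out with USING_MSIX_FLAG still clear so the caller can fall back
+	 * to MSI or INTx. */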
+	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
+	if (rc != 0)
+		return;
+
+	bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
+	bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;
+
+	strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
+	strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
+	strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
+	strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");
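+	/* These per-vector names ("ethX-base", "ethX-tx") presumably get
+	 * passed to request_irq() elsewhere and appear in
+	 * /proc/interrupts. */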
+
+	bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
+	bp->flags |= USING_MSIX_FLAG | ONE_SHOT_MSI_FLAG;
+	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
+		bp->irq_tbl[i].vector = msix_ent[i].vector;
}
static void

			bnx2_enable_int(bp);
		}
	}
-	if (bp->flags & USING_MSI_FLAG) {
+	if (bp->flags & USING_MSI_FLAG)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
-	}
+	else if (bp->flags & USING_MSIX_FLAG)
+		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_start_queue(dev);
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
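+	/* bp->tx_vec presumably selects BNX2_TX_VEC when MSI-X is active
+	 * and vector 0 otherwise; the assignment is not in this hunk. */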
-	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];

	if (unlikely(bnx2_tx_avail(bp, bnapi) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		bnapi->bp = bp;
	}
	netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
+	netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
+		       64);
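+	/* The tx NAPI context shares the base context's weight (64) and is
+	 * scheduled only from bnx2_tx_msix(). */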
}

static int __devinit