#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8000
#define NVREG_IRQMASK_THROUGHPUT	0x00df
-#define NVREG_IRQMASK_CPU		0x0040
+#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
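/*
 * NVREG_IRQMASK_CPU is the interrupt set used when the driver runs in
 * CPU (timer-driven) mode. Assuming the usual forcedeth bit values
 * (NVREG_IRQ_TIMER 0x0020, NVREG_IRQ_LINK 0x0040), 0x0060 listens for
 * both timer and link events, where the old 0x0040 covered link
 * changes only.
 */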
while (np->put_rx.orig != less_rx) {
struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
if (skb) {
- skb->dev = dev;
np->put_rx_ctx->skb = skb;
- np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
- skb->end-skb->data, PCI_DMA_FROMDEVICE);
- np->put_rx_ctx->dma_len = skb->end-skb->data;
+ np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
+ skb->data,
+ skb_tailroom(skb),
+ PCI_DMA_FROMDEVICE);
+ np->put_rx_ctx->dma_len = skb_tailroom(skb);
np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
wmb();
np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
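/*
 * A freshly allocated skb is empty (tail == data), so here
 * skb_tailroom(skb) equals the old skb->end - skb->data and the mapped
 * length is unchanged; the helper just avoids dereferencing skb->end,
 * which is an offset rather than a pointer once
 * NET_SKBUFF_DATA_USES_OFFSET is in effect. The skb->dev assignment
 * can go because eth_type_trans() sets skb->dev on receive. The hunk
 * below makes the identical change in nv_alloc_rx_optimized().
 */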
while (np->put_rx.ex != less_rx) {
struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
if (skb) {
- skb->dev = dev;
np->put_rx_ctx->skb = skb;
- np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
- skb->end-skb->data, PCI_DMA_FROMDEVICE);
- np->put_rx_ctx->dma_len = skb->end-skb->data;
+ np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
+ skb->data,
+ skb_tailroom(skb),
+ PCI_DMA_FROMDEVICE);
+ np->put_rx_ctx->dma_len = skb_tailroom(skb);
np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
wmb();
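/*
 * The second wmb() and the lines that follow are from nv_drain_rx(), a
 * separate hunk. On teardown the unmap length must match what was
 * passed to pci_map_single(); (skb_end_pointer(skb) - skb->data)
 * recomputes that full end - data size without assuming the skb is
 * still empty, an assumption skb_tailroom() would bake in since it
 * shrinks after any skb_put().
 */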
wmb();
if (np->rx_skb[i].skb) {
pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
- np->rx_skb[i].skb->end-np->rx_skb[i].skb->data,
- PCI_DMA_FROMDEVICE);
+ (skb_end_pointer(np->rx_skb[i].skb) -
+ np->rx_skb[i].skb->data),
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb(np->rx_skb[i].skb);
np->rx_skb[i].skb = NULL;
}
ret = 0;
goto out;
}
+ test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
+ skb_tailroom(tx_skb),
+ PCI_DMA_FROMDEVICE);
pkt_data = skb_put(tx_skb, pkt_len);
for (i = 0; i < pkt_len; i++)
pkt_data[i] = (u8)(i & 0xff);
- test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
- tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
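/*
 * In nv_loopback_test() the mapping is hoisted above skb_put(): once
 * skb_put() advances tail by pkt_len, skb_tailroom() no longer equals
 * end - data, so mapping the still-empty skb first keeps the DMA
 * length identical to the old tx_skb->end - tx_skb->data computation.
 */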
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
}
pci_unmap_page(np->pci_dev, test_dma_addr,
- tx_skb->end-tx_skb->data,
+ (skb_end_pointer(tx_skb) - tx_skb->data),
PCI_DMA_TODEVICE);
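/*
 * The unmap mirrors the full end - data length used at map time. The
 * direction mismatch (mapped FROMDEVICE, unmapped TODEVICE) is carried
 * over unchanged from the existing loopback-test code.
 */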
dev_kfree_skb_any(tx_skb);
out:
writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
spin_unlock_irq(&np->lock);
-};
-
-static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
-{
- /* nothing to do */
-};
+}
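/*
 * nv_vlan_rx_kill_vid() was an empty stub: the driver only tracks the
 * vlan group set up by nv_vlan_rx_register(), so there is nothing to
 * do per VID. The stub, its hookup in the probe path (removed in a
 * later hunk), and the stray ';' after the function body all go away.
 */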
/* The mgmt unit and driver use a semaphore to access the phy during init */
static int nv_mgmt_acquire_sema(struct net_device *dev)
drain_ring(dev);
- if (np->wolenabled)
+ if (np->wolenabled) {
+ writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
nv_start_rx(dev);
+ }
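/*
 * nv_close(): with Wake-on-LAN armed, the receiver is restarted so the
 * MAC can still see wake frames while the interface is down. Writing
 * NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR first points the packet filter at
 * the NIC's own address, so that receive path only accepts frames
 * addressed to us.
 */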
/* FIXME: power down nic */
np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
dev->vlan_rx_register = nv_vlan_rx_register;
- dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
}
np->msi_flags = 0;
np->wolenabled = 0;
if (id->driver_data & DEV_HAS_POWER_CNTRL) {
- u8 revision_id;
- pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id);
/* take phy and nic out of low power mode */
powerstate = readl(base + NvRegPowerState2);
powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
- revision_id >= 0xA3)
+ pci_dev->revision >= 0xA3)
powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
writel(powerstate, base + NvRegPowerState2);
}
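/*
 * The PCI core caches the revision ID in pci_dev->revision at
 * enumeration time, making the explicit PCI_REVISION_ID config-space
 * read and the local revision_id variable redundant. Revision A3 and
 * newer of these two chips need the extra POWERUP_REV_A3 bit to come
 * out of low power mode.
 */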