X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fnet%2F8139cp.c;h=e4b3c5c88542dbf8be95324c5a4c2007b1e6f033;hb=e254e9bff5283aad1af6d74d2a312ee011b84d61;hp=a7573dd92f260819b9570bb485f224304aa58858;hpb=fcec34565827f2edb29d124498aa8f561455f15d;p=linux-2.6 diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index a7573dd92f..e4b3c5c885 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c @@ -54,12 +54,14 @@ #include #include +#include #include #include #include #include #include #include +#include #include #include #include @@ -91,16 +93,17 @@ KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE MODULE_AUTHOR("Jeff Garzik "); MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver"); +MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); static int debug = -1; -MODULE_PARM (debug, "i"); +module_param(debug, int, 0); MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number"); /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). The RTL chips use a 64 element hash table based on the Ethernet CRC. */ static int multicast_filter_limit = 32; -MODULE_PARM (multicast_filter_limit, "i"); +module_param(multicast_filter_limit, int, 0); MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses"); #define PFX DRV_NAME ": " @@ -315,7 +318,7 @@ struct cp_desc { struct ring_info { struct sk_buff *skb; dma_addr_t mapping; - unsigned frag; + u32 len; }; struct cp_dma_stats { @@ -397,6 +400,9 @@ struct cp_private { static void __cp_set_rx_mode (struct net_device *dev); static void cp_tx (struct cp_private *cp); static void cp_clean_rings (struct cp_private *cp); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void cp_poll_controller(struct net_device *dev); +#endif static struct pci_device_id cp_pci_tbl[] = { { PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139, @@ -691,6 +697,19 @@ cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs) return IRQ_HANDLED; } +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling receive - used by netconsole and other diagnostic tools + * to allow network i/o with interrupts disabled. 
+ */
+static void cp_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	cp_interrupt(dev->irq, dev, NULL);
+	enable_irq(dev->irq);
+}
+#endif
+
 static void cp_tx (struct cp_private *cp)
 {
 	unsigned tx_head = cp->tx_head;
@@ -710,7 +729,7 @@ static void cp_tx (struct cp_private *cp)
 			BUG();
 
 		pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
-				 skb->len, PCI_DMA_TODEVICE);
+				 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
 
 		if (status & LastFrag) {
 			if (status & (TxError | TxFIFOUnder)) {
@@ -801,7 +820,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 			else if (ip->protocol == IPPROTO_UDP)
 				flags |= IPCS | UDPCS;
 			else
-				BUG();
+				WARN_ON(1);	/* we need a WARN() */
 		}
 
 		txd->opts1 = cpu_to_le32(flags);
@@ -809,7 +828,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 
 		cp->tx_skb[entry].skb = skb;
 		cp->tx_skb[entry].mapping = mapping;
-		cp->tx_skb[entry].frag = 0;
+		cp->tx_skb[entry].len = len;
 		entry = NEXT_TX(entry);
 	} else {
 		struct cp_desc *txd;
@@ -827,7 +846,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 					       first_len, PCI_DMA_TODEVICE);
 		cp->tx_skb[entry].skb = skb;
 		cp->tx_skb[entry].mapping = first_mapping;
-		cp->tx_skb[entry].frag = 1;
+		cp->tx_skb[entry].len = first_len;
 		entry = NEXT_TX(entry);
 
 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -870,7 +889,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 
 			cp->tx_skb[entry].skb = skb;
 			cp->tx_skb[entry].mapping = mapping;
-			cp->tx_skb[entry].frag = frag + 2;
+			cp->tx_skb[entry].len = len;
 			entry = NEXT_TX(entry);
 		}
 
@@ -1084,7 +1103,6 @@ static int cp_refill_rx (struct cp_private *cp)
 		cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
 			skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		cp->rx_skb[i].skb = skb;
-		cp->rx_skb[i].frag = 0;
 
 		cp->rx_ring[i].opts2 = 0;
 		cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
@@ -1136,9 +1154,6 @@ static void cp_clean_rings (struct cp_private *cp)
 {
 	unsigned i;
 
-	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
-	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
-
 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
 		if (cp->rx_skb[i].skb) {
 			pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
@@ -1150,13 +1165,18 @@ static void cp_clean_rings (struct cp_private *cp)
 	for (i = 0; i < CP_TX_RING_SIZE; i++) {
 		if (cp->tx_skb[i].skb) {
 			struct sk_buff *skb = cp->tx_skb[i].skb;
+
 			pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
-					 skb->len, PCI_DMA_TODEVICE);
-			dev_kfree_skb(skb);
+					 cp->tx_skb[i].len, PCI_DMA_TODEVICE);
+			if (le32_to_cpu(cp->tx_ring[i].opts1) & LastFrag)
+				dev_kfree_skb(skb);
 			cp->net_stats.tx_dropped++;
 		}
 	}
 
+	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
+	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+
 	memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
 	memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
 }
@@ -1496,22 +1516,22 @@ static void cp_get_ethtool_stats (struct net_device *dev,
 				  struct ethtool_stats *estats, u64 *tmp_stats)
 {
 	struct cp_private *cp = netdev_priv(dev);
-	unsigned int work = 100;
 	int i;
 
+	memset(cp->nic_stats, 0, sizeof(struct cp_dma_stats));
+
 	/* begin NIC statistics dump */
 	cpw32(StatsAddr + 4, (cp->nic_stats_dma >> 16) >> 16);
 	cpw32(StatsAddr, (cp->nic_stats_dma & 0xffffffff) | DumpStats);
 	cpr32(StatsAddr);
 
-	while (work-- > 0) {
+	for (i = 0; i < 1000; i++) {
 		if ((cpr32(StatsAddr) & DumpStats) == 0)
 			break;
-		cpu_relax();
+		udelay(10);
 	}
-
-	if (cpr32(StatsAddr) & DumpStats)
-		return /* -EIO */;
+	cpw32(StatsAddr, 0);
+	cpw32(StatsAddr + 4, 0);
 
 	i = 0;
 	tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_ok);
@@ -1713,19 +1733,19 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	/* Configure DMA attributes. */
 	if ((sizeof(dma_addr_t) > 4) &&
-	    !pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL) &&
-	    !pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
+	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
+	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
 		pci_using_dac = 1;
 	} else {
 		pci_using_dac = 0;
-		rc = pci_set_dma_mask(pdev, 0xffffffffULL);
+		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 		if (rc) {
 			printk(KERN_ERR PFX "No usable DMA configuration, "
 			       "aborting.\n");
 			goto err_out_res;
 		}
-		rc = pci_set_consistent_dma_mask(pdev, 0xffffffffULL);
+		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 		if (rc) {
 			printk(KERN_ERR PFX "No usable consistent DMA configuration, "
 			       "aborting.\n");
@@ -1761,6 +1781,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->get_stats = cp_get_stats;
 	dev->do_ioctl = cp_ioctl;
 	dev->poll = cp_rx_poll;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = cp_poll_controller;
+#endif
 	dev->weight = 16;	/* arbitrary? from NAPI_HOWTO.txt. */
 #ifdef BROKEN
 	dev->change_mtu = cp_change_mtu;
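
The poll_controller hook this patch adds is what lets netconsole and similar diagnostic tools drive the NIC while interrupts are disabled: mask the device's IRQ line, run the normal interrupt handler synchronously, then unmask. A minimal sketch of that pattern for a generic 2.6-era driver follows; the my_drv_* names are hypothetical, and the three-argument ISR signature with struct pt_regs is the pre-2.6.19 convention this driver uses.

#include <linux/netdevice.h>
#include <linux/interrupt.h>

/* Normal ISR; the real body (ack the chip, schedule NAPI) is elided. */
static irqreturn_t my_drv_interrupt(int irq, void *dev_instance,
				    struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Called by netpoll with interrupts off: borrow the ISR directly. */
static void my_drv_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);			/* keep the real IRQ from racing us */
	my_drv_interrupt(dev->irq, dev, NULL);	/* pt_regs are unused here */
	enable_irq(dev->irq);
}
#endif

/* At probe time, alongside the other net_device method pointers: */
static void my_drv_wire_up(struct net_device *dev)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = my_drv_poll_controller;
#endif
}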
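The ring_info change from a frag counter to a len field exists because pci_unmap_single() must be given the same length that was handed to pci_map_single(). For a scatter/gather transmit each descriptor maps only a slice of the skb, so skb->len is the wrong unmap length, and only the descriptor carrying LastFrag should free the skb. A sketch of that bookkeeping with illustrative names (tx_slot, tx_map_one and tx_complete_one are not the driver's identifiers):

#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/types.h>

struct tx_slot {
	struct sk_buff *skb;	/* owning skb (shared by all of its fragments) */
	dma_addr_t mapping;	/* bus address returned by pci_map_single() */
	u32 len;		/* exact number of bytes mapped for this slot */
};

static void tx_map_one(struct pci_dev *pdev, struct tx_slot *slot,
		       struct sk_buff *skb, void *data, u32 len)
{
	slot->skb = skb;
	slot->len = len;	/* record the per-fragment length, not skb->len */
	slot->mapping = pci_map_single(pdev, data, len, PCI_DMA_TODEVICE);
}

static void tx_complete_one(struct pci_dev *pdev, struct tx_slot *slot,
			    int is_last_frag)
{
	/* unmap with the length recorded at map time */
	pci_unmap_single(pdev, slot->mapping, slot->len, PCI_DMA_TODEVICE);
	if (is_last_frag)	/* free the skb exactly once, on its last fragment */
		dev_kfree_skb(slot->skb);
	slot->skb = NULL;
}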
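The ethtool-statistics hunk replaces an unbounded spin on the DumpStats bit with a bounded poll: up to 1000 iterations of udelay(10), roughly a 10 ms budget, after which the dump address registers are cleared regardless. A generic sketch of that wait-for-hardware-bit pattern; readl(), udelay() and -ETIMEDOUT are standard kernel facilities, while the register pointer and busy bit are placeholders:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/io.h>

/* Poll a memory-mapped status register until busy_bit clears, giving the
 * hardware a bounded amount of time instead of spinning forever. */
static int wait_for_bit_clear(void __iomem *reg, u32 busy_bit)
{
	int i;

	for (i = 0; i < 1000; i++) {
		if (!(readl(reg) & busy_bit))
			return 0;	/* device finished its DMA dump */
		udelay(10);		/* ~10 ms total worst case */
	}
	return -ETIMEDOUT;		/* report failure rather than hang */
}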
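Finally, the probe-path change swaps the raw 0xffffffff... constants for DMA_64BIT_MASK and DMA_32BIT_MASK, the symbols defined in linux/dma-mapping.h in this era. The negotiation itself is the usual "try 64-bit, fall back to 32-bit" sequence; a condensed sketch using the 2.6-era pci_set_dma_mask() API (setup_dma_masks is an illustrative name, not a function in this driver):

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Returns 0 on success and sets *using_dac when 64-bit addressing is usable. */
static int setup_dma_masks(struct pci_dev *pdev, int *using_dac)
{
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		*using_dac = 1;			/* buffers may live above 4GB */
		return 0;
	}

	*using_dac = 0;
	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		return -EIO;			/* no usable DMA configuration */
	return pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
}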