X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fnet%2Fsundance.c;h=6b8f4baf87fd309ff33978ebc62876887fa26d7a;hb=8a84fc15ae5cafcc366dd85cf8e1ab2040679abc;hp=ac17377b3e9fd4aa8cde1af0b85e0b0b594570ce;hpb=e2a305ecb5734f24d3a4496605a6fdf27ddf7108;p=linux-2.6

diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index ac17377b3e..6b8f4baf87 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -17,12 +17,14 @@
 	Support and updates available at
 	http://www.scyld.com/network/sundance.html
 	[link no longer provides useful info -jgarzik]
+	Archives of the mailing list are still available at
+	http://www.beowulf.org/pipermail/netdrivers/
 
 */
 
 #define DRV_NAME	"sundance"
-#define DRV_VERSION	"1.1"
-#define DRV_RELDATE	"27-Jun-2006"
+#define DRV_VERSION	"1.2"
+#define DRV_RELDATE	"11-Sep-2006"
 
 /* The user-configurable values.
@@ -107,7 +109,7 @@ static char *media[MAX_UNITS];
 #endif
 
 /* These identify the driver base version and may not be removed. */
-static char version[] __devinitdata =
+static char version[] =
 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "  Written by Donald Becker\n"
 KERN_INFO "  http://www.scyld.com/network/sundance.html\n";
 
@@ -429,7 +431,7 @@ static int __set_mac_addr(struct net_device *dev);
 static struct net_device_stats *get_stats(struct net_device *dev);
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static int netdev_close(struct net_device *dev);
-static struct ethtool_ops ethtool_ops;
+static const struct ethtool_ops ethtool_ops;
 
 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
 {
@@ -646,7 +648,7 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
 	/* Reset the chip to erase previous misconfiguration. */
 	if (netif_msg_hw(np))
 		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
-	iowrite16(0x00ff, ioaddr + ASICCtrl + 2);
+	sundance_reset(dev, 0x00ff << 16);
 	if (netif_msg_hw(np))
 		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
 
@@ -905,7 +907,7 @@ static void tx_timeout(struct net_device *dev)
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem *ioaddr = np->base;
 	unsigned long flag;
-	
+
 	netif_stop_queue(dev);
 	tasklet_disable(&np->tx_tasklet);
 	iowrite16(0, ioaddr + IntrEnable);
@@ -922,13 +924,13 @@ static void tx_timeout(struct net_device *dev)
 			le32_to_cpu(np->tx_ring[i].next_desc),
 			le32_to_cpu(np->tx_ring[i].status),
 			(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
-			le32_to_cpu(np->tx_ring[i].frag[0].addr), 
+			le32_to_cpu(np->tx_ring[i].frag[0].addr),
 			le32_to_cpu(np->tx_ring[i].frag[0].length));
 	}
-	printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n", 
-		ioread32(np->base + TxListPtr), 
+	printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
+		ioread32(np->base + TxListPtr),
 		netif_queue_stopped(dev));
-	printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n", 
+	printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
 		np->cur_tx, np->cur_tx % TX_RING_SIZE,
 		np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
 	printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
@@ -1000,9 +1002,9 @@ static void tx_poll (unsigned long data)
 	struct net_device *dev = (struct net_device *)data;
 	struct netdev_private *np = netdev_priv(dev);
 	unsigned head = np->cur_task % TX_RING_SIZE;
-	struct netdev_desc *txdesc = 
+	struct netdev_desc *txdesc =
 		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
-	
+
 	/* Chain the next pointer */
 	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
 		int entry = np->cur_task % TX_RING_SIZE;
@@ -1072,21 +1074,16 @@ reset_tx (struct net_device *dev)
 	struct sk_buff *skb;
 	int i;
 	int irq = in_interrupt();
-	
+
 	/* Reset tx logic, TxListPtr will be cleaned */
 	iowrite16 (TxDisable, ioaddr + MACCtrl1);
-	iowrite16 (TxReset | DMAReset | FIFOReset | NetworkReset,
-			ioaddr + ASICCtrl + 2);
-	for (i=50; i > 0; i--) {
-		if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
-			break;
-		mdelay(1);
-	}
+	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
+
 	/* free all tx skbuff */
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		skb = np->tx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(np->pci_dev, 
+			pci_unmap_single(np->pci_dev,
 				np->tx_ring[i].frag[0].addr, skb->len,
 				PCI_DMA_TODEVICE);
 			if (irq)
@@ -1103,7 +1100,7 @@ reset_tx (struct net_device *dev)
 	return 0;
 }
 
-/* The interrupt handler cleans up after the Tx thread, 
+/* The interrupt handler cleans up after the Tx thread,
    and schedule a Rx thread work */
 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
 {
@@ -1184,8 +1181,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
 		} else {
 			hw_frame_id = ioread8(ioaddr + TxFrameId);
 		}
-			
-		if (np->pci_rev_id >= 0x14) {			
+
+		if (np->pci_rev_id >= 0x14) {
 			spin_lock(&np->lock);
 			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
 				int entry = np->dirty_tx % TX_RING_SIZE;
@@ -1197,7 +1194,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
 				    !(le32_to_cpu(np->tx_ring[entry].status)
 				      & 0x00010000))
 					break;
-				if (sw_frame_id == (hw_frame_id + 1) % 
+				if (sw_frame_id == (hw_frame_id + 1) %
 				    TX_RING_SIZE)
 					break;
 				skb = np->tx_skbuff[entry];
@@ -1216,7 +1213,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
 			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
 				int entry = np->dirty_tx % TX_RING_SIZE;
 				struct sk_buff *skb;
-				if (!(le32_to_cpu(np->tx_ring[entry].status) 
+				if (!(le32_to_cpu(np->tx_ring[entry].status)
 				      & 0x00010000))
 					break;
 				skb = np->tx_skbuff[entry];
@@ -1231,7 +1228,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
 			}
 			spin_unlock(&np->lock);
 		}
-		
+
 		if (netif_queue_stopped(dev) &&
 		    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
 			/* The ring is no longer full, clear busy flag. */
@@ -1467,8 +1464,6 @@ static void set_rx_mode(struct net_device *dev)
 	int i;
 
 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
-		/* Unconditionally log net taps. */
-		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
 		memset(mc_filter, 0xff, sizeof(mc_filter));
 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
 	} else if ((dev->mc_count > multicast_filter_limit)
@@ -1574,7 +1569,7 @@ static void set_msglevel(struct net_device *dev, u32 val)
 	np->msg_enable = val;
 }
 
-static struct ethtool_ops ethtool_ops = {
+static const struct ethtool_ops ethtool_ops = {
 	.begin = check_if_running,
 	.get_drvinfo = get_drvinfo,
 	.get_settings = get_settings,
@@ -1603,18 +1598,18 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 	case SIOCDEVPRIVATE:
 		for (i=0; i<TX_RING_SIZE; i++) {
 			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
-				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)), 
+				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
 				le32_to_cpu(np->tx_ring[i].next_desc),
 				le32_to_cpu(np->tx_ring[i].status),
-				(le32_to_cpu(np->tx_ring[i].status) >> 2) 
+				(le32_to_cpu(np->tx_ring[i].status) >> 2)
 					& 0xff,
-				le32_to_cpu(np->tx_ring[i].frag[0].addr), 
+				le32_to_cpu(np->tx_ring[i].frag[0].addr),
 				le32_to_cpu(np->tx_ring[i].frag[0].length));
 		}
-		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n", 
-			ioread32(np->base + TxListPtr), 
+		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
+			ioread32(np->base + TxListPtr),
 			netif_queue_stopped(dev));
-		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n", 
+		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
 			np->cur_tx, np->cur_tx % TX_RING_SIZE,
 			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
 		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
@@ -1622,7 +1617,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 		printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
 		return 0;
 	}
-	
+
 	return rc;
 }
 
@@ -1736,7 +1731,7 @@ static int __init sundance_init(void)
 #ifdef MODULE
 	printk(version);
 #endif
-	return pci_module_init(&sundance_driver);
+	return pci_register_driver(&sundance_driver);
 }
 
 static void __exit sundance_exit(void)
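
Note on the reset consolidation: the hunks above replace two open-coded ASICCtrl reset
sequences (in sundance_probe1() and reset_tx()) with calls to sundance_reset(), whose body
is not part of this diff. The sketch below is reconstructed from the polling loop removed
from reset_tx() and from the new call sites, which pass the reset bits shifted into the
upper half of the 32-bit ASICCtrl register; it is an approximation for illustration, not
the driver's actual helper.

/* Hypothetical reconstruction of the shared reset helper, assuming a 32-bit
 * ASICCtrl access (the new call sites shift the reset bits left by 16) and
 * the ~50 ms ResetBusy poll that the removed reset_tx() code used. */
static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base + ASICCtrl;
	int countdown;

	/* Issue the reset: OR the requested reset bits into ASICCtrl. */
	iowrite32(reset_cmd | ioread32(ioaddr), ioaddr);

	/* ResetBusy lives in the upper 16 bits of the 32-bit view. */
	for (countdown = 50; countdown > 0; countdown--) {
		if (!(ioread32(ioaddr) & (ResetBusy << 16)))
			return;
		mdelay(1);
	}
	if (netif_msg_hw(np))
		printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
}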