/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */

Written 1999-2000 by Donald Becker.

This software may be used and distributed according to the terms of
the GNU General Public License (GPL), incorporated herein by reference.
Drivers based on or derived from this code fall under the GPL and must
retain the authorship, copyright and license notice.  This file is not
a complete program and may only be used when the entire operating
system is licensed under the GPL.

The author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation
410 Severn Ave., Suite 210

Support and updates available at
http://www.scyld.com/network/sundance.html
[link no longer provides useful info -jgarzik]
Archives of the mailing list are still available at
http://www.beowulf.org/pipermail/netdrivers/
#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.2"
#define DRV_RELDATE	"11-Sep-2006"
/* The user-configurable values.
   These may be modified when a driver module is loaded. */
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl = 1;
/* media[] specifies the media type the NIC operates at.
	 autosense	Autosensing active media.
	 10mbps_hd	10Mbps half duplex.
	 10mbps_fd	10Mbps full duplex.
	 100mbps_hd	100Mbps half duplex.
	 100mbps_fd	100Mbps full duplex.
	 0		Autosensing active media.
	 1		10Mbps half duplex.
	 2		10Mbps full duplex.
	 3		100Mbps half duplex.
	 4		100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used. */
#define RX_RING_SIZE	64
#define RX_BUDGET	32
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct netdev_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct netdev_desc))
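
/* Illustrative only (not in the original source): because the ring sizes
   are powers of two, an index computed with '%' costs a single AND rather
   than a division, e.g.: */
#if 0	/* example */
static inline unsigned tx_ring_slot(unsigned cur_tx)
{
	return cur_tx % TX_RING_SIZE;	/* compiles to: cur_tx & (TX_RING_SIZE - 1) */
}
#endif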
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(4*HZ)
#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */
/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */

#include <linux/delay.h>
#include <linux/spinlock.h>
#ifndef _COMPAT_WITH_OLD_KERNEL
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#else
#include "crc32.h"
#include "ethtool.h"
#include "mii.h"
#include "compat.h"
#endif
/* These identify the driver base version and may not be removed. */
static char version[] =
	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "  Written by Donald Becker\n"
	KERN_INFO "  http://www.scyld.com/network/sundance.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

IIIb/c. Transmit/Receive Structure
This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.
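
A minimal sketch of the decision, as rx_poll() below implements it:

	if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != NULL)
		/* copy the frame into the small skb; the ring buffer is reused */
	else
		/* pass the full-sized ring skbuff up the protocol stack */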
A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.
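
In code this is simply the skb_reserve(skb, 2) done when Rx buffers are
allocated in init_ring() and refill_rx() below:

	skb = dev_alloc_skb(np->rx_buf_sz);
	skb_reserve(skb, 2);	/* shift skb->data so the IP header lands aligned */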
IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and the
'dev->tbusy' flag.  It sets the tbusy flag whenever it's queuing a Tx packet.
If the next queue slot is empty, it clears the tbusy flag when finished;
otherwise it sets the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
The Sundance ST201 datasheet, preliminary version.
The Kendin KS8723 datasheet, preliminary version.
The ICplus IP100 datasheet, preliminary version.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif
static const struct pci_device_id sundance_pci_tbl[] = {
	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
enum {
	netdev_io_size = 128
};

struct pci_id_info {
	const char *name;
};
static const struct pci_id_info pci_id_tbl[] __devinitdata = {
	{"D-Link DFE-550TX FAST Ethernet Adapter"},
	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
	{"D-Link DFE-580TX 4 port Server Adapter"},
	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
	{"D-Link DL10050-based FAST Ethernet Adapter"},
	{"Sundance Technology Alta"},
	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
	{ }	/* terminate list. */
};
/* This driver was written to use PCI memory space; however, x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device: such names can only partially document the semantics and would
   make the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
	DMACtrl = 0x00,
	TxListPtr = 0x04,
	TxDMABurstThresh = 0x08,
	TxDMAUrgentThresh = 0x09,
	TxDMAPollPeriod = 0x0a,
	RxDMAStatus = 0x0c,
	RxListPtr = 0x10,
	DebugCtrl0 = 0x1a,
	DebugCtrl1 = 0x1c,
	RxDMABurstThresh = 0x14,
	RxDMAUrgentThresh = 0x15,
	RxDMAPollPeriod = 0x16,
	LEDCtrl = 0x1a,
	ASICCtrl = 0x30,
	EEData = 0x34,
	EECtrl = 0x36,
	FlashAddr = 0x40,
	FlashData = 0x44,
	TxStatus = 0x46,
	TxFrameId = 0x47,
	DownCounter = 0x18,
	IntrClear = 0x4a,
	IntrEnable = 0x4c,
	IntrStatus = 0x4e,
	MACCtrl0 = 0x50,
	MACCtrl1 = 0x52,
	StationAddr = 0x54,
	MaxFrameSize = 0x5A,
	RxMode = 0x5c,
	MIICtrl = 0x5e,
	MulticastFilter0 = 0x60,
	MulticastFilter1 = 0x64,
	RxOctetsLow = 0x68,
	RxOctetsHigh = 0x6a,
	TxOctetsLow = 0x6c,
	TxOctetsHigh = 0x6e,
	TxFramesOK = 0x70,
	RxFramesOK = 0x72,
	StatsCarrierError = 0x74,
	StatsLateColl = 0x75,
	StatsMultiColl = 0x76,
	StatsOneColl = 0x77,
	StatsTxDefer = 0x78,
	RxMissed = 0x79,
	StatsTxXSDefer = 0x7a,
	StatsTxAbort = 0x7b,
	StatsBcastTx = 0x7c,
	StatsBcastRx = 0x7d,
	StatsMcastTx = 0x7e,
	StatsMcastRx = 0x7f,
	/* Aliased and bogus values! */
	RxStatus = 0x0c,
};
enum ASICCtrl_HiWord_bit {
	GlobalReset = 0x0001,
	RxReset = 0x0002,
	TxReset = 0x0004,
	DMAReset = 0x0008,
	FIFOReset = 0x0010,
	NetworkReset = 0x0020,
	HostReset = 0x0040,
	ResetBusy = 0x0400,
};
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
	IntrDrvRqst=0x0040,
	StatsMax=0x0080, LinkChange=0x0100,
	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};

/* Bits in the RxMode register. */
enum rx_mode_bits {
	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};

/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
	StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};
/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	u32 next_desc;
	u32 status;
	struct desc_frag { u32 addr, length; } frag[1];
};
/* Bits in netdev_desc.status */
enum desc_status_bits {
	DescOwn=0x8000,
	DescEndPacket=0x4000,
	DescEndRing=0x2000,
	LastFrag=0x80000000,
	DescIntrOnTx=0x8000,
	DescIntrOnDMADone=0x80000000,
	DisableAlign = 0x00000001,
};
#define PRIV_ALIGN	15	/* Required alignment mask */
/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
   within the structure. */
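/* Illustrative only (not from this driver): an alignment mask such as
   PRIV_ALIGN is conventionally applied with the round-up idiom

	aligned = ((unsigned long)ptr + PRIV_ALIGN) & ~((unsigned long)PRIV_ALIGN);

   which yields 16-byte alignment for a mask of 15. */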
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
	struct net_device_stats stats;
	struct timer_list timer;		/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	spinlock_t rx_lock;			/* Group with Tx control cache line. */
	int msg_enable;
	int chip_id;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
	unsigned int flowctrl:1;
	unsigned int default_port:4;		/* Last dev->if_port value. */
	unsigned int an_enable:1;
	unsigned int speed;
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int budget;
	int cur_task;
	/* Multicast and receive mode. */
	spinlock_t mcastlock;			/* SMP lock multicast updates. */
	u16 mcast_filter[4];
	/* MII transceiver section. */
	struct mii_if_info mii_if;
	int mii_preamble_required;
	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
	struct pci_dev *pci_dev;
	void __iomem *base;
	unsigned char pci_rev_id;
};
/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET	0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
			IntrDrvRqst | IntrTxDone | StatsMax | \
			LinkChange)
static int  change_mtu(struct net_device *dev, int new_mtu);
static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int  start_tx(struct sk_buff *skb, struct net_device *dev);
static int  reset_tx(struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx(struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int  __set_mac_addr(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int  netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  netdev_close(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;
static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base + ASICCtrl;
	int countdown;

	/* ST201 documentation states ASICCtrl is a 32-bit register */
	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
	/* ST201 documentation states reset can take up to 1 ms */
	countdown = 10 + 1;
	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
		if (--countdown == 0) {
			printk(KERN_WARNING "%s : reset not completed!\n", dev->name);
			break;
		}
		udelay(100);
	}
}
static int __devinit sundance_probe1 (struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int irq;
	int i;
	void __iomem *ioaddr;
	u16 mii_ctl;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif
	int phy, phy_idx = 0;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);

	irq = pdev->irq;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
	if (!ioaddr)
		goto err_out_res;
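
	/* The station address is stored as three little-endian 16-bit
	   words starting at EEPROM_SA_OFFSET. */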
	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] =
			le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->base = ioaddr;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->change_mtu = &change_mtu;
	pci_set_drvdata(pdev, dev);

	pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %p, ",
		dev->name, pci_id_tbl[chip_idx].name, ioaddr);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
	np->phys[0] = 1;		/* Default setting */
	np->mii_preamble_required++;
	/*
	 * It seems some PHYs don't deal well with address 0 being accessed
	 * first, so leave address zero to the end of the loop (32 & 31 == 0).
	 */
	for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) {
		int phyx = phy & 0x1f;
		int mii_status = mdio_read(dev, phyx, MII_BMSR);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
				"0x%4.4x advertising %4.4x.\n",
				dev->name, phyx, mii_status, np->mii_if.advertising);
		}
	}
	np->mii_preamble_required--;

	if (phy_idx == 0) {
		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
			dev->name, ioread32(ioaddr + ASICCtrl));
		goto err_out_unregister;
	}

	np->mii_if.phy_id = np->phys[0];
	/* Parse override configuration */
	np->an_enable = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0
				   || strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (flowctrl == 1)
			np->flowctrl = 1;
	}

	/* Fibre PHY? */
	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
		if (np->an_enable) {
			np->speed = 100;
			np->mii_if.full_duplex = 1;
			np->an_enable = 0;
		}
	}
	/* Reset PHY */
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay (300);
	/* If flow control enabled, we need to advertise it. */
	if (np->flowctrl)
		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl = 0;
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
		printk (KERN_INFO "Override speed=%d, %s duplex\n",
			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
	}

	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
	sundance_reset(dev, 0x00ff << 16);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

	card_idx++;
	return 0;
err_out_unregister:
	unregister_netdev(dev);
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev(dev);
	return -ENODEV;
}
static int change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
		return -EINVAL;
	if (netif_running(dev))
		return -EBUSY;
	dev->mtu = new_mtu;
	return 0;
}
#define eeprom_delay(ee_addr)	ioread32(ee_addr)	/* a dummy read provides the needed delay */

/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int __devinit eeprom_read(void __iomem *ioaddr, int location)
{
	int boguscnt = 10000;		/* Typical 1900 ticks. */
	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
	do {
		eeprom_delay(ioaddr + EECtrl);
		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
			return ioread16(ioaddr + EEData);
		}
	} while (--boguscnt > 0);
	return 0;
}
/* MII transceiver control section.
   Read and write the MII registers using software-generated serial
   MDIO protocol.  See the MII specifications or DP83840A data sheet
   for details.

   The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
   met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
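
/* The command words shifted out below follow IEEE 802.3 clause 22 framing.
   For mdio_read(), (0xf6 << 10) | (phy_id << 5) | location supplies, MSB
   first: 11 (idle), 01 (start), 10 (read op), the 5-bit PHY address and the
   5-bit register; turnaround and the 16 data bits are then clocked back in.
   For mdio_write(), (0x5002 << 16) encodes 01 (start), 01 (write op), then
   the PHY address, register, 10 (turnaround) and the 16 data bits. */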
/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite8(MDIO_WRITE1, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return (retval>>1) & 0xffff;	/* Drop the trailing idle bit. */
}
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}
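
/* Illustrative only (not in the original driver): the accessors above can
   be combined in the usual way, e.g. to read a PHY's 32-bit identifier. */
#if 0	/* example */
static u32 sundance_phy_id(struct net_device *dev, int phy)
{
	return (mdio_read(dev, phy, MII_PHYSID1) << 16) |
		mdio_read(dev, phy, MII_PHYSID2);
}
#endif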
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	/* Do we need to reset the chip??? */

	i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			dev->name, dev->irq);
	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
	/* The Tx list pointer is written as packets are queued. */

	/* Initialize other registers. */
	__set_mac_addr(dev);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
	if (dev->mtu > 2047)
		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

	/* Configure the PCI bus bursts and FIFO thresholds. */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	spin_lock_init(&np->mcastlock);

	set_rx_mode(dev);
	iowrite16(0, ioaddr + IntrEnable);
	iowrite16(0, ioaddr + DownCounter);
	/* Set the chip to poll every N*320nsec. */
	iowrite8(100, ioaddr + RxDMAPollPeriod);
	iowrite8(127, ioaddr + TxDMAPollPeriod);
	/* Fix DFE-580TX packet drop issue */
	if (np->pci_rev_id >= 0x14)
		iowrite8(0x01, ioaddr + DebugCtrl1);
	netif_start_queue(dev);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
			"MAC Control %x, %4.4x %4.4x.\n",
			dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
			ioread32(ioaddr + MACCtrl0),
			ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;	/* timer handler */
	add_timer(&np->timer);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);

	return 0;
}
static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	/* Force media */
	if (!np->an_enable || mii_lpa == 0xffff) {
		if (np->mii_if.full_duplex)
			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
				ioaddr + MACCtrl0);
		return;
	}

	/* Autonegotiation */
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (netif_msg_link(np))
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				"negotiated capability %4.4x.\n", dev->name,
				duplex ? "full" : "half", np->phys[0], negotiated);
		/* Note: the parentheses are required; '|' binds tighter than '?:'. */
		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
	}
}
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int next_tick = 10*HZ;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
			"Tx %x Rx %x.\n",
			dev->name, ioread16(ioaddr + IntrEnable),
			ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
	}
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flag;

	netif_stop_queue(dev);
	tasklet_disable(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
		"TxFrameId %2.2x,"
		" resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
		ioread8(ioaddr + TxFrameId));

	{
		int i;
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	}
	spin_lock_irqsave(&np->lock, flag);

	/* Stop and restart the chip's Tx processes. */
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flag);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE));
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
	/* Record how far the ring was actually filled, so refill_rx()
	   can top it up later: cur_rx - dirty_rx counts the empty slots. */
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
}
static void tx_poll (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	if (ioread32 (np->base + TxListPtr) == 0)
		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);
}
static int
start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
	np->cur_tx++;
	mb();
	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
			&& !netif_queue_stopped(dev)) {
		/* do nothing */
	} else {
		netif_stop_queue (dev);
	}
	dev->trans_start = jiffies;
	if (netif_msg_tx_queued(np)) {
		printk (KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return 0;
}
/* Reset hardware tx and free all of tx buffers */
static int
reset_tx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;
	int irq = in_interrupt();

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);

	/* free all tx skbuff */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;

		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, PCI_DMA_TODEVICE);
			if (irq)
				dev_kfree_skb_irq (skb);
			else
				dev_kfree_skb (skb);
			np->tx_skbuff[i] = NULL;
			np->stats.tx_dropped++;
		}
	}
	np->cur_tx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->last_tx = NULL;
	iowrite8(127, ioaddr + TxDMAPollPeriod);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}
/* The interrupt handler cleans up after the Tx thread and
   schedules Rx work via a tasklet. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int hw_frame_id;
	int tx_cnt;
	int tx_status;
	int handled = 0;
	int i;

	do {
		int intr_status = ioread16(ioaddr + IntrStatus);
		iowrite16(intr_status, ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				dev->name, intr_status);

		if (!(intr_status & DEFAULT_INTR))
			break;

		handled = 1;

		if (intr_status & (IntrRxDMADone)) {
			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
					ioaddr + IntrEnable);
			if (np->budget < 0)
				np->budget = RX_BUDGET;
			tasklet_schedule(&np->rx_tasklet);
		}
		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
			tx_status = ioread16 (ioaddr + TxStatus);
			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk
					    ("%s: Transmit status is %2.2x.\n",
					     dev->name, tx_status);
				if (tx_status & 0x1e) {
					if (netif_msg_tx_err(np))
						printk("%s: Transmit error status %4.4x.\n",
							dev->name, tx_status);
					np->stats.tx_errors++;
					if (tx_status & 0x10)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x08)
						np->stats.collisions++;
					if (tx_status & 0x04)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x02)
						np->stats.tx_window_errors++;

					/*
					** This reset has been verified on
					** DFE-580TX boards! phdm@macqel.be.
					*/
					if (tx_status & 0x10) {	/* TxUnderrun */
						/* Restart Tx FIFO and transmitter */
						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
						/* No need to reset the Tx pointer here */
					}
					/* Restart the Tx.  Make sure Tx is enabled first. */
					i = 10;
					do {
						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
							break;
						mdelay(1);
					} while (--i);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				iowrite16 (0, ioaddr + TxStatus);
				if (tx_cnt < 0) {
					iowrite32(5000, ioaddr + DownCounter);
					break;
				}
				tx_status = ioread16 (ioaddr + TxStatus);
			}
			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
			hw_frame_id = ioread8(ioaddr + TxFrameId);
		}

		if (np->pci_rev_id >= 0x14) {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				int sw_frame_id;
				sw_frame_id = (le32_to_cpu(
					np->tx_ring[entry].status) >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
					!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				if (sw_frame_id == (hw_frame_id + 1) %
					TX_RING_SIZE)
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		} else {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
			netif_wake_queue (dev);
		}
		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
	} while (0);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
static void rx_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	void __iomem *ioaddr = np->base;
	int received = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		int pkt_len;

		if (--boguscnt < 0) {
			goto not_done;
		}
		if (!(frame_status & DescOwn))
			break;
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				frame_status);
		if (frame_status & 0x001f4000) {
			/* There was an error. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					frame_status);
			np->stats.rx_errors++;
			if (frame_status & 0x00100000) np->stats.rx_length_errors++;
			if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
			if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					" status %8.8x.\n",
					dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					", bogus_cnt %d.\n",
					pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,
					le32_to_cpu(desc->frag[0].addr),
					np->rx_buf_sz,
					PCI_DMA_FROMDEVICE);

				eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
				pci_dma_sync_single_for_device(np->pci_dev,
					le32_to_cpu(desc->frag[0].addr),
					np->rx_buf_sz,
					PCI_DMA_FROMDEVICE);
				skb_put(skb, pkt_len);
			} else {
				pci_unmap_single(np->pci_dev,
					le32_to_cpu(desc->frag[0].addr),
					np->rx_buf_sz,
					PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
			dev->last_rx = jiffies;
		}
		entry = (entry + 1) % RX_RING_SIZE;
		received++;
	}
	np->cur_rx = entry;
	refill_rx (dev);
	np->budget -= received;
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	return;

not_done:
	np->cur_rx = entry;
	refill_rx (dev);
	if (!received)
		received = 1;
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	tasklet_schedule(&np->rx_tasklet);
	return;
}
static void refill_rx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry;

	/* Refill the Rx ring buffers. */
	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				pci_map_single(np->pci_dev, skb->data,
					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
	}
}
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		if (np->an_enable) {
			mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
			mii_lpa = mdio_read (dev, np->phys[0], MII_LPA);
			mii_advertise &= mii_lpa;
			printk (KERN_INFO "%s: Link changed: ", dev->name);
			if (mii_advertise & ADVERTISE_100FULL) {
				np->speed = 100;
				printk ("100Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_100HALF) {
				np->speed = 100;
				printk ("100Mbps, half duplex\n");
			} else if (mii_advertise & ADVERTISE_10FULL) {
				np->speed = 10;
				printk ("10Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_10HALF) {
				np->speed = 10;
				printk ("10Mbps, half duplex\n");
			} else
				printk ("\n");
		} else {
			mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
			speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
			np->speed = speed;
			printk (KERN_INFO "%s: Link changed: %dMbps, ",
				dev->name, speed);
			printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
				"full" : "half");
		}
		check_duplex (dev);
		if (np->flowctrl && np->mii_if.full_duplex) {
			iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
				ioaddr + MulticastFilter1+2);
			iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
				ioaddr + MACCtrl0);
		}
	}
	if (intr_status & StatsMax) {
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	/* We should lock this segment of code for SMP eventually, although
	   the vulnerability window is very small and statistics are
	   non-critical. */
	/* The chip reports only the frames it silently dropped. */
	np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
	np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	np->stats.collisions += ioread8(ioaddr + StatsLateColl);
	np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
	np->stats.collisions += ioread8(ioaddr + StatsOneColl);
	np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
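	/* Read and discard the statistics registers we do not tally;
	   reading clears the hardware counters so they cannot wrap. */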
	ioread8(ioaddr + StatsTxDefer);
	for (i = StatsTxDefer; i <= StatsMcastRx; i++)
		ioread8(ioaddr + i);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;

	return &np->stats;
}
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];			/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit)
			|| (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (dev->mc_count) {
		struct dev_mc_list *mclist;
		int bit;
		int index;
		int crc;
		memset (mc_filter, 0, sizeof (mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
}
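
/* Illustrative, self-contained sketch (not part of the driver) of the
   64-bin hash computed above: ether_crc_le() is the little-endian
   Ethernet CRC-32 (poly 0xedb88320, init 0xffffffff, no final inversion),
   and the filter bin is built from the six most significant CRC bits. */
#if 0	/* example only */
static unsigned int alta_hash_bin(const unsigned char *addr)
{
	unsigned int crc = 0xffffffff;
	unsigned int index = 0;
	int byte, bit;

	for (byte = 0; byte < 6; byte++) {	/* ETH_ALEN bytes */
		crc ^= addr[byte];
		for (bit = 0; bit < 8; bit++)	/* LSB-first CRC-32 */
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	/* Mirror the loop in set_rx_mode(): take the six MSBs, reversed. */
	for (bit = 0; bit < 6; bit++, crc <<= 1)
		if (crc & 0x80000000)
			index |= 1 << bit;
	return index;		/* 0..63 -> bit position in mc_filter[] */
}
#endif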
static int __set_mac_addr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 addr16;

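	/* Pack the 6-byte station address into three little-endian
	   16-bit register writes. */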
	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	iowrite16(addr16, np->base + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	iowrite16(addr16, np->base + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	iowrite16(addr16, np->base + StationAddr+4);
	return 0;
}
static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}

static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_perm_addr = ethtool_op_get_perm_addr,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int rc;
	int i;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);
	switch (cmd) {
	case SIOCDEVPRIVATE:
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2)
					& 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
		printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
		return 0;
	}

	return rc;
}
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			"Rx %4.4x Int %2.2x.\n",
			dev->name, ioread8(ioaddr + TxStatus),
			ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

	/* Wait and kill tasklet */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
			(int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
				i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				np->tx_ring[i].frag[0].length);
		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
			(int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				le32_to_cpu(np->rx_ring[i].frag[0].addr),
				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}
static void __devexit sundance_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev(dev);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
			np->rx_ring_dma);
		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
			np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
	}
}
static struct pci_driver sundance_driver = {
	.name		= DRV_NAME,
	.id_table	= sundance_pci_tbl,
	.probe		= sundance_probe1,
	.remove		= __devexit_p(sundance_remove1),
};
static int __init sundance_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&sundance_driver);
}
static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);