1 /* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
3 Written/copyright 1997-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 This driver is for the SMC83c170/175 "EPIC" series, as used on the
13 SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
20 Information and updates available at
21 http://www.scyld.com/network/epic100.html
23 ---------------------------------------------------------------------
25 Linux kernel-specific changes:
28 * Merge becker version 1.09 (4/08/2000)
31 * Major bugfix to 1.09 driver (Francis Romieu)
34 * Merge becker test version 1.09 (5/29/2000)
37 * Fix locking (jgarzik)
38 * Limit 83c175 probe to ethernet-class PCI devices (rgooch)
41 * Merge becker version 1.11
42 * Move pci_enable_device before any PCI BAR len checks
48 * ethtool driver info support (jgarzik)
51 * ethtool media get/set support (jgarzik)
54 * revert MII transceiver init change (jgarzik)
57 * implement ETHTOOL_[GS]SET, _NWAY_RST, _[GS]MSGLVL, _GLINK (jgarzik)
58 * replace some MII-related magic numbers with constants
61 * fix power-up sequence
64 * revert version 1.1.12, power-up sequence "fix"
66 LK1.1.14 (Krzysztof Halasa):
67 * fix spurious bad initializations
68 * pound phy a la SMSC's app note on the subject
71 * fix power up/down for ethtool that broke in 1.11
75 #define DRV_NAME "epic100"
76 #define DRV_VERSION "1.11+LK1.1.14+AC1.1.14"
77 #define DRV_RELDATE "June 2, 2004"
79 /* The user-configurable values.
80 These may be modified when a driver module is loaded.*/
/* Debug verbosity; NOTE(review): this comment says 0..7 but the
   MODULE_PARM_DESC below says 0-5 — confirm the real range. */
82 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
84 /* Used to pass the full-duplex flag, etc. */
85 #define MAX_UNITS 8 /* More are supported, limit only on options */
/* Per-card media option and forced-duplex settings; -1 means "unset". */
86 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
87 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
89 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
90 Setting to > 1518 effectively disables this feature. */
91 static int rx_copybreak;
93 /* Operational parameters that are set at compile time. */
95 /* Keep the ring sizes a power of two for operational efficiency.
96 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
97 Making the Tx ring too large decreases the effectiveness of channel
98 bonding and packet priority.
99 There are no ill effects from too-large receive rings. */
100 #define TX_RING_SIZE 256
101 #define TX_QUEUE_LEN 240 /* Limit ring entries actually used. */
102 #define RX_RING_SIZE 256
/* NOTE(review): these two macro bodies are unparenthesized; safe in the
   current uses but fragile if ever combined with other operators. */
103 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
104 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc)
106 /* Operational parameters that usually are not changed. */
107 /* Time in jiffies before concluding the transmitter is hung. */
108 #define TX_TIMEOUT (2*HZ)
110 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
112 /* Bytes transferred to chip before transmission starts. */
113 /* Initial threshold, increased on underflow, rounded down to 4 byte units. */
114 #define TX_FIFO_THRESH 256
115 #define RX_FIFO_THRESH 1 /* 0-3, 0==32, 64,96, or 3==128 bytes */
117 #include <linux/module.h>
118 #include <linux/kernel.h>
119 #include <linux/string.h>
120 #include <linux/timer.h>
121 #include <linux/errno.h>
122 #include <linux/ioport.h>
123 #include <linux/slab.h>
124 #include <linux/interrupt.h>
125 #include <linux/pci.h>
126 #include <linux/delay.h>
127 #include <linux/netdevice.h>
128 #include <linux/etherdevice.h>
129 #include <linux/skbuff.h>
130 #include <linux/init.h>
131 #include <linux/spinlock.h>
132 #include <linux/ethtool.h>
133 #include <linux/mii.h>
134 #include <linux/crc32.h>
135 #include <linux/bitops.h>
137 #include <asm/uaccess.h>
139 /* These identify the driver base version and may not be removed. */
140 static char version[] __devinitdata =
141 DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
142 static char version2[] __devinitdata =
143 " http://www.scyld.com/network/epic100.html\n";
144 static char version3[] __devinitdata =
145 " (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
147 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
148 MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
149 MODULE_LICENSE("GPL");
/* Module load-time parameters; permissions 0 hide them from sysfs. */
151 module_param(debug, int, 0);
152 module_param(rx_copybreak, int, 0);
153 module_param_array(options, int, NULL, 0);
154 module_param_array(full_duplex, int, NULL, 0);
155 MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
156 MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
157 MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
158 MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
163 I. Board Compatibility
165 This device driver is designed for the SMC "EPIC/100", the SMC
166 single-chip Ethernet controllers for PCI. This chip is used on
167 the SMC EtherPower II boards.
169 II. Board-specific settings
171 PCI bus devices are configured by the system at boot time, so no jumpers
172 need to be set on the board. The system BIOS will assign the
173 PCI INTA signal to a (preferably otherwise unused) system IRQ line.
174 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
177 III. Driver operation
183 http://www.smsc.com/main/datasheets/83c171.pdf
184 http://www.smsc.com/main/datasheets/83c175.pdf
185 http://scyld.com/expert/NWay.html
186 http://www.national.com/pf/DP/DP83840A.html
/* Per-chip capability bits recorded in epic_chip_info.drv_flags. */
193 enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
195 #define EPIC_TOTAL_SIZE 0x100
/* Static description of each supported chip variant. */
205 struct epic_chip_info {
207 int io_size; /* Needed for I/O region check or ioremap(). */
208 int drv_flags; /* Driver use, intended as capability flags. */
212 /* indexed by chip_t */
213 static const struct epic_chip_info pci_id_tbl[] = {
214 { "SMSC EPIC/100 83c170",
215 EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN },
216 { "SMSC EPIC/100 83c170",
217 EPIC_TOTAL_SIZE, TYPE2_INTR },
218 { "SMSC EPIC/C 83c175",
219 EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN },
/* PCI match table; driver_data indexes pci_id_tbl above. The first entry
   matches one specific subsystem ID, the 83c175 entry is additionally
   restricted to ethernet-class devices (see changelog). */
223 static struct pci_device_id epic_pci_tbl[] = {
224 { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
225 { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
226 { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
227 PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
230 MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
248 /* Offsets to registers, using the (ugh) SMC names. */
249 enum epic_registers {
250 COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
252 TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
253 MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
254 LAN0=64, /* MAC address. */
255 MC0=80, /* Multicast filter table. */
256 RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
257 PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
260 /* Interrupt register bits, using my own meaningful names. */
262 TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
263 PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
264 RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
265 TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
266 RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
/* COMMAND register bits. */
269 StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
270 StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
273 #define EpicRemoved 0xffffffff /* Chip failed or removed (CardBus) */
/* Events handled in the NAPI poll path vs. directly in the ISR. */
275 #define EpicNapiEvent (TxEmpty | TxDone | \
276 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
277 #define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent)
/* Map dev->if_port media index to an MII BMCR value; 0 = autonegotiate. */
279 static const u16 media2miictl[16] = {
280 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
281 0, 0, 0, 0, 0, 0, 0, 0 };
283 /* The EPIC100 Rx and Tx buffer descriptors. */
/* Hardware DMA descriptors; fields are little-endian on the wire
   (bodies elided in this view). */
285 struct epic_tx_desc {
292 struct epic_rx_desc {
299 enum desc_status_bits {
303 #define PRIV_ALIGN 15 /* Required alignment mask */
/* Per-device driver state, hung off net_device->priv. */
304 struct epic_private {
305 struct epic_rx_desc *rx_ring;
306 struct epic_tx_desc *tx_ring;
307 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
308 struct sk_buff* tx_skbuff[TX_RING_SIZE];
309 /* The addresses of receive-in-place skbuffs. */
310 struct sk_buff* rx_skbuff[RX_RING_SIZE];
/* DMA (bus) addresses of the two descriptor rings. */
312 dma_addr_t tx_ring_dma;
313 dma_addr_t rx_ring_dma;
316 spinlock_t lock; /* Group with Tx control cache line. */
317 spinlock_t napi_lock;
318 unsigned int reschedule_in_poll;
/* Ring cursors: cur_* is the producer index, dirty_* the consumer. */
319 unsigned int cur_tx, dirty_tx;
321 unsigned int cur_rx, dirty_rx;
323 unsigned int rx_buf_sz; /* Based on MTU+slack. */
325 struct pci_dev *pci_dev; /* PCI bus location. */
326 int chip_id, chip_flags;
328 struct net_device_stats stats;
329 struct timer_list timer; /* Media selection timer. */
331 unsigned char mc_filter[8];
332 signed char phys[4]; /* MII device addresses. */
333 u16 advertising; /* NWay media advertisement */
335 struct mii_if_info mii;
336 unsigned int tx_full:1; /* The Tx queue is full. */
337 unsigned int default_port:4; /* Last dev->if_port value. */
/* Forward declarations for the net_device methods and helpers below. */
340 static int epic_open(struct net_device *dev);
341 static int read_eeprom(long ioaddr, int location);
342 static int mdio_read(struct net_device *dev, int phy_id, int location);
343 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
344 static void epic_restart(struct net_device *dev);
345 static void epic_timer(unsigned long data);
346 static void epic_tx_timeout(struct net_device *dev);
347 static void epic_init_ring(struct net_device *dev);
348 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
349 static int epic_rx(struct net_device *dev, int budget);
350 static int epic_poll(struct net_device *dev, int *budget);
351 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
352 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
353 static struct ethtool_ops netdev_ethtool_ops;
354 static int epic_close(struct net_device *dev);
355 static struct net_device_stats *epic_get_stats(struct net_device *dev);
356 static void set_rx_mode(struct net_device *dev);
/* PCI probe: bring up one EPIC/100 board — map registers, allocate the
   descriptor rings, read the MAC address, scan the MII bus and register
   the net_device. Returns 0 or a negative errno (error paths partly
   elided in this view). */
360 static int __devinit epic_init_one (struct pci_dev *pdev,
361 const struct pci_device_id *ent)
363 static int card_idx = -1;
365 int chip_idx = (int) ent->driver_data;
367 struct net_device *dev;
368 struct epic_private *ep;
369 int i, ret, option = 0, duplex = 0;
373 /* when built into the kernel, we only print version if device is found */
375 static int printed_version;
376 if (!printed_version++)
377 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
378 version, version2, version3);
383 ret = pci_enable_device(pdev);
388 if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) {
389 printk (KERN_ERR "card %d: no PCI region space\n", card_idx);
391 goto err_out_disable;
394 pci_set_master(pdev);
396 ret = pci_request_regions(pdev, DRV_NAME);
398 goto err_out_disable;
402 dev = alloc_etherdev(sizeof (*ep));
404 printk (KERN_ERR "card %d: no memory for eth device\n", card_idx);
405 goto err_out_free_res;
407 SET_MODULE_OWNER(dev);
408 SET_NETDEV_DEV(dev, &pdev->dev);
/* BAR 0 is I/O space, BAR 1 is the memory-mapped alternative. */
411 ioaddr = pci_resource_start (pdev, 0);
413 ioaddr = pci_resource_start (pdev, 1);
414 ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
416 printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx);
417 goto err_out_free_netdev;
421 pci_set_drvdata(pdev, dev);
/* Wire up the generic MII helper callbacks. */
424 ep->mii.mdio_read = mdio_read;
425 ep->mii.mdio_write = mdio_write;
426 ep->mii.phy_id_mask = 0x1f;
427 ep->mii.reg_num_mask = 0x1f;
/* Coherent DMA memory for the Tx then Rx descriptor rings. */
429 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
431 goto err_out_iounmap;
432 ep->tx_ring = (struct epic_tx_desc *)ring_space;
433 ep->tx_ring_dma = ring_dma;
435 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
437 goto err_out_unmap_tx;
438 ep->rx_ring = (struct epic_rx_desc *)ring_space;
439 ep->rx_ring_dma = ring_dma;
/* Media option: dev->mem_start (LILO-style) overrides module params. */
441 if (dev->mem_start) {
442 option = dev->mem_start;
443 duplex = (dev->mem_start & 16) ? 1 : 0;
444 } else if (card_idx >= 0 && card_idx < MAX_UNITS) {
445 if (options[card_idx] >= 0)
446 option = options[card_idx];
447 if (full_duplex[card_idx] >= 0)
448 duplex = full_duplex[card_idx];
451 dev->base_addr = ioaddr;
454 spin_lock_init(&ep->lock);
455 spin_lock_init(&ep->napi_lock);
456 ep->reschedule_in_poll = 0;
458 /* Bring the chip out of low-power mode. */
459 outl(0x4200, ioaddr + GENCTL);
460 /* Magic?! If we don't set this bit the MII interface won't work. */
461 /* This magic is documented in SMSC app note 7.15 */
462 for (i = 16; i > 0; i--)
463 outl(0x0008, ioaddr + TEST1);
465 /* Turn on the MII transceiver. */
466 outl(0x12, ioaddr + MIICfg);
468 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
469 outl(0x0200, ioaddr + GENCTL);
471 /* Note: the '175 does not have a serial EEPROM. */
/* Station address lives in the LAN0 registers, one 16-bit word per
   32-bit register slot. */
472 for (i = 0; i < 3; i++)
473 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));
476 printk(KERN_DEBUG DRV_NAME "(%s): EEPROM contents\n",
478 for (i = 0; i < 64; i++)
479 printk(" %4.4x%s", read_eeprom(ioaddr, i),
480 i % 16 == 15 ? "\n" : "");
484 ep->chip_id = chip_idx;
485 ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
/* irq_mask (assignment target elided here): bus-error bit differs
   between the '170 and '175 interrupt layouts. */
487 (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
488 | CntFull | TxUnderrun | EpicNapiEvent;
490 /* Find the connected MII xcvrs.
491 Doing this in open() would allow detecting external xcvrs later, but
492 takes much time and no cards have external MII. */
494 int phy, phy_idx = 0;
495 for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
496 int mii_status = mdio_read(dev, phy, MII_BMSR);
497 if (mii_status != 0xffff && mii_status != 0x0000) {
498 ep->phys[phy_idx++] = phy;
499 printk(KERN_INFO DRV_NAME "(%s): MII transceiver #%d control "
500 "%4.4x status %4.4x.\n",
501 pci_name(pdev), phy, mdio_read(dev, phy, 0), mii_status);
504 ep->mii_phy_cnt = phy_idx;
/* NOTE(review): 'phy' is read after the scan loop — presumably guarded
   by a phy_idx check on an elided line; confirm in the full source. */
507 ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
508 printk(KERN_INFO DRV_NAME "(%s): Autonegotiation advertising %4.4x link "
510 pci_name(pdev), ep->mii.advertising, mdio_read(dev, phy, 5));
511 } else if ( ! (ep->chip_flags & NO_MII)) {
512 printk(KERN_WARNING DRV_NAME "(%s): ***WARNING***: No MII transceiver found!\n",
514 /* Use the known PHY address of the EPII. */
517 ep->mii.phy_id = ep->phys[0];
520 /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
521 if (ep->chip_flags & MII_PWRDWN)
522 outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
523 outl(0x0008, ioaddr + GENCTL);
525 /* The lower four bits are the media type. */
527 ep->mii.force_media = ep->mii.full_duplex = 1;
528 printk(KERN_INFO DRV_NAME "(%s): Forced full duplex operation requested.\n",
531 dev->if_port = ep->default_port = option;
533 /* The Epic-specific entries in the device structure. */
534 dev->open = &epic_open;
535 dev->hard_start_xmit = &epic_start_xmit;
536 dev->stop = &epic_close;
537 dev->get_stats = &epic_get_stats;
538 dev->set_multicast_list = &set_rx_mode;
539 dev->do_ioctl = &netdev_ioctl;
540 dev->ethtool_ops = &netdev_ethtool_ops;
541 dev->watchdog_timeo = TX_TIMEOUT;
542 dev->tx_timeout = &epic_tx_timeout;
543 dev->poll = epic_poll;
546 ret = register_netdev(dev);
548 goto err_out_unmap_rx;
550 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
551 dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
552 for (i = 0; i < 5; i++)
553 printk("%2.2x:", dev->dev_addr[i]);
554 printk("%2.2x.\n", dev->dev_addr[i]);
/* Unwind labels (label lines elided): free rings, unmap, release
   regions, disable the device — reverse order of acquisition. */
560 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
562 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
570 pci_release_regions(pdev);
572 pci_disable_device(pdev);
576 /* Serial EEPROM section. */
578 /* EEPROM_Ctrl bits. */
579 #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
580 #define EE_CS 0x02 /* EEPROM chip select. */
581 #define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */
582 #define EE_WRITE_0 0x01
583 #define EE_WRITE_1 0x09
584 #define EE_DATA_READ 0x10 /* EEPROM chip data out. */
585 #define EE_ENB (0x0001 | EE_CS)
587 /* Delay between EEPROM clock transitions.
588 This serves to flush the operation to the PCI bus.
/* The dummy read doubles as the required settling delay. */
591 #define eeprom_delay() inl(ee_addr)
593 /* The EEPROM commands include the always-set leading bit. */
594 #define EE_WRITE_CMD (5 << 6)
595 #define EE_READ64_CMD (6 << 6)
596 #define EE_READ256_CMD (6 << 8)
597 #define EE_ERASE_CMD (7 << 6)
/* Mask off every chip interrupt source by clearing INTMASK. */
599 static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
601 long ioaddr = dev->base_addr;
603 outl(0x00000000, ioaddr + INTMASK);
/* Flush a posted PCI write by reading back any chip register. */
606 static inline void __epic_pci_commit(long ioaddr)
609 inl(ioaddr + INTMASK);
/* Disable only the NAPI-handled interrupt sources, then flush the
   posted write so the mask takes effect before the caller proceeds. */
613 static inline void epic_napi_irq_off(struct net_device *dev,
614 struct epic_private *ep)
616 long ioaddr = dev->base_addr;
618 outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
619 __epic_pci_commit(ioaddr);
/* Re-enable the NAPI interrupt sources at the end of a poll cycle. */
622 static inline void epic_napi_irq_on(struct net_device *dev,
623 struct epic_private *ep)
625 long ioaddr = dev->base_addr;
627 /* No need to commit possible posted write */
628 outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
/* Bit-bang one 16-bit word out of the serial EEPROM at @location.
   The command width depends on EEPROM size, detected from EECTL bit 6. */
631 static int __devinit read_eeprom(long ioaddr, int location)
635 long ee_addr = ioaddr + EECTL;
636 int read_cmd = location |
637 (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
/* Pulse chip select to start a new transaction. */
639 outl(EE_ENB & ~EE_CS, ee_addr);
640 outl(EE_ENB, ee_addr);
642 /* Shift the read command bits out. */
643 for (i = 12; i >= 0; i--) {
644 short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
645 outl(EE_ENB | dataval, ee_addr);
647 outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
650 outl(EE_ENB, ee_addr);
/* Clock 16 data bits back in, MSB first. */
652 for (i = 16; i > 0; i--) {
653 outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
655 retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
656 outl(EE_ENB, ee_addr);
660 /* Terminate the EEPROM access. */
661 outl(EE_ENB & ~EE_CS, ee_addr);
666 #define MII_WRITEOP 2
/* Read one MII management register; polls MIICtrl until the chip clears
   the read-op bit, then returns the 16-bit result from MIIData. */
667 static int mdio_read(struct net_device *dev, int phy_id, int location)
669 long ioaddr = dev->base_addr;
670 int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
673 outl(read_cmd, ioaddr + MIICtrl);
674 /* Typical operation takes 25 loops. */
675 for (i = 400; i > 0; i--) {
677 if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
678 /* Work around read failure bug. */
/* A 0xffff result on PHY 1 regs 0-5 is retried once — presumably a
   known chip erratum; the retry's re-read path is elided here. */
679 if (phy_id == 1 && location < 6
680 && inw(ioaddr + MIIData) == 0xffff) {
681 outl(read_cmd, ioaddr + MIICtrl);
684 return inw(ioaddr + MIIData);
/* Write @value to one MII management register and spin until the chip
   reports the write operation complete (bounded by the loop counter). */
690 static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
692 long ioaddr = dev->base_addr;
695 outw(value, ioaddr + MIIData);
696 outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
697 for (i = 10000; i > 0; i--) {
699 if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
/* net_device open(): reset the chip, grab the IRQ, power up the MII,
   program MAC address and thresholds, set duplex from the link partner,
   start Rx, enable interrupts and arm the media-watch timer. */
706 static int epic_open(struct net_device *dev)
708 struct epic_private *ep = dev->priv;
709 long ioaddr = dev->base_addr;
713 /* Soft reset the chip. */
714 outl(0x4001, ioaddr + GENCTL);
716 if ((retval = request_irq(dev->irq, &epic_interrupt, IRQF_SHARED, dev->name, dev)))
721 outl(0x4000, ioaddr + GENCTL);
722 /* This magic is documented in SMSC app note 7.15 */
723 for (i = 16; i > 0; i--)
724 outl(0x0008, ioaddr + TEST1);
726 /* Pull the chip out of low-power mode, enable interrupts, and set for
727 PCI read multiple. The MIIcfg setting and strange write order are
728 required by the details of which bits are reset and the transceiver
729 wiring on the Ositech CardBus card.
/* if_port 1 selects the serial (10base2) transceiver path. */
732 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
734 if (ep->chip_flags & MII_PWRDWN)
735 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
/* GENCTL value differs for big-endian hosts (byte-swap bit). */
737 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
738 outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
739 inl(ioaddr + GENCTL);
740 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
742 outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
743 inl(ioaddr + GENCTL);
744 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
747 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
/* Load the station address into the LAN0 registers. */
749 for (i = 0; i < 3; i++)
750 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
752 ep->tx_threshold = TX_FIFO_THRESH;
753 outl(ep->tx_threshold, ioaddr + TxThresh);
/* Forced media: program the PHY directly instead of autonegotiating. */
755 if (media2miictl[dev->if_port & 15]) {
757 mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
758 if (dev->if_port == 1) {
760 printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
762 dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
/* Autonegotiated media: derive duplex from the link-partner ability. */
765 int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
766 if (mii_lpa != 0xffff) {
767 if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
768 ep->mii.full_duplex = 1;
769 else if (! (mii_lpa & LPA_LPACK))
770 mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
772 printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
773 " register read of %4.4x.\n", dev->name,
774 ep->mii.full_duplex ? "full" : "half",
775 ep->phys[0], mii_lpa);
779 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
/* Tell the chip where the descriptor rings live. */
780 outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
781 outl(ep->tx_ring_dma, ioaddr + PTxCDAR);
783 /* Start the chip's Rx process. */
785 outl(StartRx | RxQueued, ioaddr + COMMAND);
787 netif_start_queue(dev);
789 /* Enable interrupts by setting the interrupt mask. */
790 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
791 | CntFull | TxUnderrun
792 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
795 printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
797 dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
798 ep->mii.full_duplex ? "full" : "half");
800 /* Set the timer to switch to check for link beat and perhaps switch
801 to an alternate media type. */
802 init_timer(&ep->timer);
803 ep->timer.expires = jiffies + 3*HZ;
804 ep->timer.data = (unsigned long)dev;
805 ep->timer.function = &epic_timer; /* timer handler */
806 add_timer(&ep->timer);
811 /* Reset the chip to recover from a PCI transaction error.
812 This may occur at interrupt time. */
/* Quiesce the chip: stop the queue, mask interrupts, halt DMA, harvest
   the hardware error counters and drain any pending Rx packets. */
813 static void epic_pause(struct net_device *dev)
815 long ioaddr = dev->base_addr;
816 struct epic_private *ep = dev->priv;
818 netif_stop_queue (dev);
820 /* Disable interrupts by clearing the interrupt mask. */
821 outl(0x00000000, ioaddr + INTMASK);
822 /* Stop the chip's Tx and Rx DMA processes. */
823 outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);
825 /* Update the error counts. */
/* 0xffff from COMMAND means the chip is gone (CardBus removal). */
826 if (inw(ioaddr + COMMAND) != 0xffff) {
827 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
828 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
829 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
832 /* Remove the packets on the Rx queue. */
833 epic_rx(dev, RX_RING_SIZE);
/* Re-initialize the chip after a reset (e.g. PCI bus error or Tx
   timeout), resuming the rings at their current indices rather than
   from the start. Mirrors the setup sequence in epic_open(). */
836 static void epic_restart(struct net_device *dev)
838 long ioaddr = dev->base_addr;
839 struct epic_private *ep = dev->priv;
842 /* Soft reset the chip. */
843 outl(0x4001, ioaddr + GENCTL);
845 printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
846 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
849 /* This magic is documented in SMSC app note 7.15 */
850 for (i = 16; i > 0; i--)
851 outl(0x0008, ioaddr + TEST1);
853 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
854 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
856 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
858 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
859 if (ep->chip_flags & MII_PWRDWN)
860 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
/* Reload station address and Tx parameters. */
862 for (i = 0; i < 3; i++)
863 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
865 ep->tx_threshold = TX_FIFO_THRESH;
866 outl(ep->tx_threshold, ioaddr + TxThresh);
867 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
/* Point the chip at the descriptor each ring is currently working on,
   not the ring base, so in-flight indices remain valid. */
868 outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
869 sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
870 outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
871 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);
873 /* Start the chip's Rx process. */
875 outl(StartRx | RxQueued, ioaddr + COMMAND);
877 /* Enable interrupts by setting the interrupt mask. */
878 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
879 | CntFull | TxUnderrun
880 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
882 printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
883 " interrupt %4.4x.\n",
884 dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
885 (int)inl(ioaddr + INTSTAT));
/* Poll the MII link partner and switch the chip's Tx duplex setting if
   the negotiated duplex has changed. No-op under forced media or on a
   bogus (0xffff) PHY read. */
889 static void check_media(struct net_device *dev)
891 struct epic_private *ep = dev->priv;
892 long ioaddr = dev->base_addr;
893 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
894 int negotiated = mii_lpa & ep->mii.advertising;
/* 0x0100 = 100FULL; 0x01C0==0x0040 isolates 10FULL. */
895 int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
897 if (ep->mii.force_media)
899 if (mii_lpa == 0xffff) /* Bogus read */
901 if (ep->mii.full_duplex != duplex) {
902 ep->mii.full_duplex = duplex;
903 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
904 " partner capability of %4.4x.\n", dev->name,
905 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
906 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
/* Periodic media-watch timer: dump status when debugging, re-check the
   link (via check_media, call elided here) and re-arm for 5 seconds. */
910 static void epic_timer(unsigned long data)
912 struct net_device *dev = (struct net_device *)data;
913 struct epic_private *ep = dev->priv;
914 long ioaddr = dev->base_addr;
915 int next_tick = 5*HZ;
918 printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
919 dev->name, (int)inl(ioaddr + TxSTAT));
920 printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
921 "IntStatus %4.4x RxStatus %4.4x.\n",
922 dev->name, (int)inl(ioaddr + INTMASK),
923 (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
928 ep->timer.expires = jiffies + next_tick;
929 add_timer(&ep->timer);
/* Watchdog callback invoked when a transmit has been pending longer
   than dev->watchdog_timeo: restart the transmitter (or the whole chip
   on the elided path), count the error and wake the queue. */
932 static void epic_tx_timeout(struct net_device *dev)
934 struct epic_private *ep = dev->priv;
935 long ioaddr = dev->base_addr;
938 printk(KERN_WARNING "%s: Transmit timeout using MII device, "
939 "Tx status %4.4x.\n",
940 dev->name, (int)inw(ioaddr + TxSTAT));
942 printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
943 dev->name, ep->dirty_tx, ep->cur_tx);
946 if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */
947 ep->stats.tx_fifo_errors++;
948 outl(RestartTx, ioaddr + COMMAND);
/* Kick the Tx DMA engine in case descriptors are still queued. */
951 outl(TxQueued, dev->base_addr + COMMAND);
954 dev->trans_start = jiffies;
955 ep->stats.tx_errors++;
957 netif_wake_queue(dev);
960 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
961 static void epic_init_ring(struct net_device *dev)
963 struct epic_private *ep = dev->priv;
967 ep->dirty_tx = ep->cur_tx = 0;
968 ep->cur_rx = ep->dirty_rx = 0;
969 ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
971 /* Initialize all Rx descriptors. */
972 for (i = 0; i < RX_RING_SIZE; i++) {
973 ep->rx_ring[i].rxstatus = 0;
974 ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
975 ep->rx_ring[i].next = ep->rx_ring_dma +
976 (i+1)*sizeof(struct epic_rx_desc);
977 ep->rx_skbuff[i] = NULL;
979 /* Mark the last entry as wrapping the ring. */
980 ep->rx_ring[i-1].next = ep->rx_ring_dma;
982 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
983 for (i = 0; i < RX_RING_SIZE; i++) {
984 struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
985 ep->rx_skbuff[i] = skb;
988 skb->dev = dev; /* Mark as being used by this device. */
989 skb_reserve(skb, 2); /* 16 byte align the IP header. */
990 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
991 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
/* Hand the descriptor to the chip only once its buffer is mapped. */
992 ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
/* If allocation stopped early, dirty_rx records the shortfall. */
994 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
996 /* The Tx buffer descriptor is filled in as needed, but we
997 do need to clear the ownership bit. */
998 for (i = 0; i < TX_RING_SIZE; i++) {
999 ep->tx_skbuff[i] = NULL;
1000 ep->tx_ring[i].txstatus = 0x0000;
1001 ep->tx_ring[i].next = ep->tx_ring_dma +
1002 (i+1)*sizeof(struct epic_tx_desc);
1004 ep->tx_ring[i-1].next = ep->tx_ring_dma;
/* hard_start_xmit: queue one skb on the Tx ring. The interrupt policy
   is encoded in the control word: most packets request no Tx-done
   interrupt; one is requested per half-queue to bound cleanup latency. */
1008 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
1010 struct epic_private *ep = dev->priv;
1011 int entry, free_count;
1013 unsigned long flags;
/* Pad runt frames to the Ethernet minimum before DMA mapping. */
1015 if (skb_padto(skb, ETH_ZLEN))
1018 /* Caution: the write order is important here, set the field with the
1019 "ownership" bit last. */
1021 /* Calculate the next Tx descriptor entry. */
1022 spin_lock_irqsave(&ep->lock, flags);
1023 free_count = ep->cur_tx - ep->dirty_tx;
1024 entry = ep->cur_tx % TX_RING_SIZE;
1026 ep->tx_skbuff[entry] = skb;
1027 ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
1028 skb->len, PCI_DMA_TODEVICE);
1029 if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
1030 ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
1031 } else if (free_count == TX_QUEUE_LEN/2) {
1032 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
1033 } else if (free_count < TX_QUEUE_LEN - 1) {
1034 ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
1036 /* Leave room for an additional entry. */
1037 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
1040 ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
/* Ownership transfer to the chip — must be the last descriptor write. */
1041 ep->tx_ring[entry].txstatus =
1042 ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
1043 | cpu_to_le32(DescOwn);
/* Queue-full path (condition elided): stop accepting packets. */
1047 netif_stop_queue(dev);
1049 spin_unlock_irqrestore(&ep->lock, flags);
1050 /* Trigger an immediate transmit demand. */
1051 outl(TxQueued, dev->base_addr + COMMAND);
1053 dev->trans_start = jiffies;
1055 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
1056 "flag %2.2x Tx status %8.8x.\n",
1057 dev->name, (int)skb->len, entry, ctrl_word,
1058 (int)inl(dev->base_addr + TxSTAT));
/* Classify a failed Tx descriptor's status bits into the appropriate
   net_device_stats error counters. */
1063 static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
1066 struct net_device_stats *stats = &ep->stats;
1068 #ifndef final_version
1069 /* There was an major error, log it. */
1071 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
/* Status-bit to counter mapping (chip-specific bit meanings). */
1075 if (status & 0x1050)
1076 stats->tx_aborted_errors++;
1077 if (status & 0x0008)
1078 stats->tx_carrier_errors++;
1079 if (status & 0x0040)
1080 stats->tx_window_errors++;
1081 if (status & 0x0010)
1082 stats->tx_fifo_errors++;
/* Reclaim completed Tx descriptors: free skbs, update stats, and wake
   the queue when enough ring space has been recovered. */
1085 static void epic_tx(struct net_device *dev, struct epic_private *ep)
1087 unsigned int dirty_tx, cur_tx;
1090 * Note: if this lock becomes a problem we can narrow the locked
1091 * region at the cost of occasionally grabbing the lock more times.
1093 cur_tx = ep->cur_tx;
1094 for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
1095 struct sk_buff *skb;
1096 int entry = dirty_tx % TX_RING_SIZE;
1097 int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
1099 if (txstatus & DescOwn)
1100 break; /* It still hasn't been Txed */
/* Bit 0 set = transmitted OK; otherwise record the error detail. */
1102 if (likely(txstatus & 0x0001)) {
1103 ep->stats.collisions += (txstatus >> 8) & 15;
1104 ep->stats.tx_packets++;
1105 ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
1107 epic_tx_error(dev, ep, txstatus);
1109 /* Free the original skb. */
1110 skb = ep->tx_skbuff[entry];
1111 pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
1112 skb->len, PCI_DMA_TODEVICE);
1113 dev_kfree_skb_irq(skb);
1114 ep->tx_skbuff[entry] = NULL;
1117 #ifndef final_version
/* Sanity check: consumer should never lap the producer. */
1118 if (cur_tx - dirty_tx > TX_RING_SIZE) {
1120 "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1121 dev->name, dirty_tx, cur_tx, ep->tx_full);
1122 dirty_tx += TX_RING_SIZE;
1125 ep->dirty_tx = dirty_tx;
1126 if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
1127 /* The ring is no longer full, allow new TX entries. */
1129 netif_wake_queue(dev);
1133 /* The interrupt handler does all of the Rx thread work and cleans up
1134 after the Tx thread. */
/*
 * epic_interrupt - top-half IRQ handler.
 * Acknowledges the pending interrupt sources, hands Rx/Tx work off to the
 * NAPI poll routine, and deals with the uncommon error events inline.
 * Returns IRQ_HANDLED when the interrupt was ours.
 */
1135 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1137 struct net_device *dev = dev_instance;
1138 struct epic_private *ep = dev->priv;
1139 long ioaddr = dev->base_addr;
1140 unsigned int handled = 0;
1143 status = inl(ioaddr + INTSTAT);
1144 /* Acknowledge all of the current interrupt sources ASAP. */
1145 outl(status & EpicNormalEvent, ioaddr + INTSTAT);
1148 printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
1149 "intstat=%#8.8x.\n", dev->name, status,
1150 (int)inl(ioaddr + INTSTAT));
1153 if ((status & IntrSummary) == 0)
/* Defer Rx/Tx processing to epic_poll(): mask NAPI interrupt sources and
   schedule the poll, unless a reschedule is already pending from poll. */
1158 if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
1159 spin_lock(&ep->napi_lock);
1160 if (netif_rx_schedule_prep(dev)) {
1161 epic_napi_irq_off(dev, ep);
1162 __netif_rx_schedule(dev);
1164 ep->reschedule_in_poll++;
1165 spin_unlock(&ep->napi_lock);
1167 status &= ~EpicNapiEvent;
1169 /* Check uncommon events all at once. */
1170 if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
1171 if (status == EpicRemoved)
1174 /* Always update the error counts to avoid overhead later. */
1175 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1176 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1177 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1179 if (status & TxUnderrun) { /* Tx FIFO underflow. */
1180 ep->stats.tx_fifo_errors++;
/* Raise the Tx FIFO threshold, then restart the transmitter. */
1181 outl(ep->tx_threshold += 128, ioaddr + TxThresh);
1182 /* Restart the transmit process. */
1183 outl(RestartTx, ioaddr + COMMAND);
1185 if (status & PCIBusErr170) {
1186 printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
1191 /* Clear all error sources. */
1192 outl(status & 0x7f18, ioaddr + INTSTAT);
1197 printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
1201 return IRQ_RETVAL(handled);
/*
 * epic_rx - NAPI receive processing; consumes at most 'budget' frames.
 * Frames shorter than rx_copybreak are copied into a freshly allocated
 * small skb so the DMA ring buffer can be reused in place; larger frames
 * are unmapped and handed up directly, and the vacated ring slots are
 * refilled in the second loop.  (The work counter and return statement
 * fall on lines not shown in this truncated listing.)
 */
1204 static int epic_rx(struct net_device *dev, int budget)
1206 struct epic_private *ep = dev->priv;
1207 int entry = ep->cur_rx % RX_RING_SIZE;
1208 int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
1212 printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
1213 ep->rx_ring[entry].rxstatus);
1215 if (rx_work_limit > budget)
1216 rx_work_limit = budget;
1218 /* If we own the next entry, it's a new packet. Send it up. */
1219 while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
1220 int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);
1223 printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
1224 if (--rx_work_limit < 0)
/* Error path: oversized (spanning) frames and hardware-flagged errors. */
1226 if (status & 0x2006) {
1228 printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
1230 if (status & 0x2000) {
1231 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
1232 "multiple buffers, status %4.4x!\n", dev->name, status);
1233 ep->stats.rx_length_errors++;
1234 } else if (status & 0x0006)
1235 /* Rx Frame errors are counted in hardware. */
1236 ep->stats.rx_errors++;
1238 /* Malloc up new buffer, compatible with net-2e. */
1239 /* Omit the four octet CRC from the length. */
1240 short pkt_len = (status >> 16) - 4;
1241 struct sk_buff *skb;
1243 if (pkt_len > PKT_BUF_SZ - 4) {
1244 printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
1246 dev->name, status, pkt_len);
1249 /* Check if the packet is long enough to accept without copying
1250 to a minimally-sized skbuff. */
1251 if (pkt_len < rx_copybreak
1252 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1254 skb_reserve(skb, 2); /* 16 byte align the IP header */
1255 pci_dma_sync_single_for_cpu(ep->pci_dev,
1256 ep->rx_ring[entry].bufaddr,
1258 PCI_DMA_FROMDEVICE);
1259 eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0);
1260 skb_put(skb, pkt_len);
/* Hand the buffer back to the device; the ring skb stays mapped. */
1261 pci_dma_sync_single_for_device(ep->pci_dev,
1262 ep->rx_ring[entry].bufaddr,
1264 PCI_DMA_FROMDEVICE);
1266 pci_unmap_single(ep->pci_dev,
1267 ep->rx_ring[entry].bufaddr,
1268 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1269 skb_put(skb = ep->rx_skbuff[entry], pkt_len);
1270 ep->rx_skbuff[entry] = NULL;
1272 skb->protocol = eth_type_trans(skb, dev);
1273 netif_receive_skb(skb);
1274 dev->last_rx = jiffies;
1275 ep->stats.rx_packets++;
1276 ep->stats.rx_bytes += pkt_len;
1279 entry = (++ep->cur_rx) % RX_RING_SIZE;
1282 /* Refill the Rx ring buffers. */
1283 for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
1284 entry = ep->dirty_rx % RX_RING_SIZE;
1285 if (ep->rx_skbuff[entry] == NULL) {
1286 struct sk_buff *skb;
1287 skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
1290 skb->dev = dev; /* Mark as being used by this device. */
1291 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1292 ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
1293 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
/* Return the descriptor to the chip last, after the buffer is mapped. */
1296 ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
/*
 * epic_rx_err - handle receiver error conditions reported in INTSTAT.
 * Counts a missed frame on overflow and restarts the Rx queue when the
 * receiver stalled (overflow or ring-full).
 */
1301 static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1303 long ioaddr = dev->base_addr;
1306 status = inl(ioaddr + INTSTAT);
/* Chip reads as all-ones once removed (e.g. CardBus eject) -- bail out. */
1308 if (status == EpicRemoved)
1310 if (status & RxOverflow) /* Missed a Rx frame. */
1311 ep->stats.rx_errors++;
1312 if (status & (RxOverflow | RxFull))
1313 outw(RxQueued, ioaddr + COMMAND);
/*
 * epic_poll - NAPI poll callback.
 * Runs receive work within the smaller of *budget and the device quota,
 * handles Rx error recovery, then completes the poll and re-enables the
 * NAPI interrupt sources -- unless an interrupt asked for a reschedule
 * while we were polling (reschedule_in_poll).  Returns nonzero when more
 * work remains.
 */
1316 static int epic_poll(struct net_device *dev, int *budget)
1318 struct epic_private *ep = dev->priv;
1319 int work_done = 0, orig_budget;
1320 long ioaddr = dev->base_addr;
1322 orig_budget = (*budget > dev->quota) ? dev->quota : *budget;
1328 work_done += epic_rx(dev, *budget);
1330 epic_rx_err(dev, ep);
1332 *budget -= work_done;
1333 dev->quota -= work_done;
1335 if (netif_running(dev) && (work_done < orig_budget)) {
1336 unsigned long flags;
1339 /* A bit baroque but it avoids a (space hungry) spin_unlock */
1341 spin_lock_irqsave(&ep->napi_lock, flags);
1343 more = ep->reschedule_in_poll;
/* No pending reschedule: ack the NAPI events and unmask them. */
1345 __netif_rx_complete(dev);
1346 outl(EpicNapiEvent, ioaddr + INTSTAT);
1347 epic_napi_irq_on(dev, ep);
1349 ep->reschedule_in_poll--;
1351 spin_unlock_irqrestore(&ep->napi_lock, flags);
1357 return (work_done >= orig_budget);
/*
 * epic_close - stop the interface.
 * Quiesces the queue and timer, disables interrupts, frees the IRQ, then
 * releases every Rx/Tx ring buffer (unmapping its DMA mapping) and parks
 * the chip in low-power mode.
 */
1360 static int epic_close(struct net_device *dev)
1362 long ioaddr = dev->base_addr;
1363 struct epic_private *ep = dev->priv;
1364 struct sk_buff *skb;
1367 netif_stop_queue(dev);
1370 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
1371 dev->name, (int)inl(ioaddr + INTSTAT));
1373 del_timer_sync(&ep->timer);
1375 epic_disable_int(dev, ep);
1377 free_irq(dev->irq, dev);
1381 /* Free all the skbuffs in the Rx queue. */
1382 for (i = 0; i < RX_RING_SIZE; i++) {
1383 skb = ep->rx_skbuff[i];
1384 ep->rx_skbuff[i] = NULL;
1385 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
1386 ep->rx_ring[i].buflength = 0;
1388 pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
1389 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1392 ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
/* Tx ring: only slots with an outstanding skb need unmapping. */
1394 for (i = 0; i < TX_RING_SIZE; i++) {
1395 skb = ep->tx_skbuff[i];
1396 ep->tx_skbuff[i] = NULL;
1399 pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
1400 skb->len, PCI_DMA_TODEVICE);
1404 /* Green! Leave the chip in low-power mode. */
1405 outl(0x0008, ioaddr + GENCTL);
/*
 * epic_get_stats - return the interface statistics.
 * The on-chip miss/alignment/CRC counters are latched into ep->stats only
 * while the interface is up (registers are unreadable when powered down).
 * The return of &ep->stats falls on a line not shown in this listing.
 */
1410 static struct net_device_stats *epic_get_stats(struct net_device *dev)
1412 struct epic_private *ep = dev->priv;
1413 long ioaddr = dev->base_addr;
1415 if (netif_running(dev)) {
1416 /* Update the error counts. */
1417 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1418 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1419 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1425 /* Set or clear the multicast filter for this adaptor.
1426 Note that we only use exclusion around actually queueing the
1427 new frame, not around filling ep->setup_frame. This is non-deterministic
1428 when re-entered but still correct. */
1430 static void set_rx_mode(struct net_device *dev)
1432 long ioaddr = dev->base_addr;
1433 struct epic_private *ep = dev->priv;
1434 unsigned char mc_filter[8]; /* Multicast hash filter */
1437 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1438 outl(0x002C, ioaddr + RxCtrl);
1439 /* Unconditionally log net taps. */
1440 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1441 memset(mc_filter, 0xff, sizeof(mc_filter));
1442 } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
1443 /* There is apparently a chip bug, so the multicast filter
1444 is never enabled. */
1445 /* Too many to filter perfectly -- accept all multicasts. */
1446 memset(mc_filter, 0xff, sizeof(mc_filter));
1447 outl(0x000C, ioaddr + RxCtrl);
1448 } else if (dev->mc_count == 0) {
1449 outl(0x0004, ioaddr + RxCtrl);
1451 } else { /* Never executed, for now. */
1452 struct dev_mc_list *mclist;
1454 memset(mc_filter, 0, sizeof(mc_filter));
1455 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1456 i++, mclist = mclist->next) {
1457 unsigned int bit_nr =
1458 ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
1459 mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1462 /* ToDo: perhaps we need to stop the Tx and Rx process here? */
1463 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1464 for (i = 0; i < 4; i++)
1465 outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
1466 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1471 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1473 struct epic_private *np = dev->priv;
1475 strcpy (info->driver, DRV_NAME);
1476 strcpy (info->version, DRV_VERSION);
1477 strcpy (info->bus_info, pci_name(np->pci_dev));
/*
 * netdev_get_settings - ethtool get_settings hook.
 * Reads the current MII link configuration under the driver lock.
 * (The rc declaration and return are on lines not shown here.)
 */
1480 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1482 struct epic_private *np = dev->priv;
1485 spin_lock_irq(&np->lock);
1486 rc = mii_ethtool_gset(&np->mii, cmd);
1487 spin_unlock_irq(&np->lock);
/*
 * netdev_set_settings - ethtool set_settings hook.
 * Applies a new MII link configuration under the driver lock.
 * (The rc declaration and return are on lines not shown here.)
 */
1492 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1494 struct epic_private *np = dev->priv;
1497 spin_lock_irq(&np->lock);
1498 rc = mii_ethtool_sset(&np->mii, cmd);
1499 spin_unlock_irq(&np->lock);
/* netdev_nway_reset - ethtool hook: restart MII autonegotiation. */
1504 static int netdev_nway_reset(struct net_device *dev)
1506 struct epic_private *np = dev->priv;
1507 return mii_nway_restart(&np->mii);
/* netdev_get_link - ethtool hook: report MII link-up status. */
1510 static u32 netdev_get_link(struct net_device *dev)
1512 struct epic_private *np = dev->priv;
1513 return mii_link_ok(&np->mii);
/*
 * netdev_get_msglevel / netdev_set_msglevel - ethtool message-level hooks.
 * NOTE(review): both bodies fall on lines not shown in this truncated
 * listing; presumably they read/write the driver debug level -- confirm
 * against the full source.
 */
1516 static u32 netdev_get_msglevel(struct net_device *dev)
1521 static void netdev_set_msglevel(struct net_device *dev, u32 value)
/*
 * ethtool_begin - called before any ethtool operation.
 * If the interface is down the chip sleeps in low-power mode, so wake it
 * (GENCTL 0x0200) and enable the MII/EEPROM power bits in NVCTL first;
 * ethtool_complete() undoes this.
 */
1526 static int ethtool_begin(struct net_device *dev)
1528 unsigned long ioaddr = dev->base_addr;
1529 /* power-up, if interface is down */
1530 if (! netif_running(dev)) {
1531 outl(0x0200, ioaddr + GENCTL);
1532 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
/*
 * ethtool_complete - called after an ethtool operation finishes.
 * Restores the low-power state (GENCTL 0x0008) that ethtool_begin()
 * lifted, but only when the interface is down.
 */
1537 static void ethtool_complete(struct net_device *dev)
1539 unsigned long ioaddr = dev->base_addr;
1540 /* power-down, if interface is down */
1541 if (! netif_running(dev)) {
1542 outl(0x0008, ioaddr + GENCTL);
1543 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
/* ethtool operations table wired up to the MII-library-backed hooks above;
   begin/complete bracket each operation with power-up/power-down. */
1547 static struct ethtool_ops netdev_ethtool_ops = {
1548 .get_drvinfo = netdev_get_drvinfo,
1549 .get_settings = netdev_get_settings,
1550 .set_settings = netdev_set_settings,
1551 .nway_reset = netdev_nway_reset,
1552 .get_link = netdev_get_link,
1553 .get_msglevel = netdev_get_msglevel,
1554 .set_msglevel = netdev_set_msglevel,
1555 .get_sg = ethtool_op_get_sg,
1556 .get_tx_csum = ethtool_op_get_tx_csum,
1557 .begin = ethtool_begin,
1558 .complete = ethtool_complete
/*
 * netdev_ioctl - handle SIOC[GS]MII* ioctls via the generic MII layer.
 * When the interface is down the chip is temporarily powered up around
 * the MII access and powered back down afterwards (same GENCTL/NVCTL
 * sequence as the ethtool begin/complete hooks).
 */
1561 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1563 struct epic_private *np = dev->priv;
1564 long ioaddr = dev->base_addr;
1565 struct mii_ioctl_data *data = if_mii(rq);
1568 /* power-up, if interface is down */
1569 if (! netif_running(dev)) {
1570 outl(0x0200, ioaddr + GENCTL);
1571 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
1574 /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
1575 spin_lock_irq(&np->lock);
1576 rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
1577 spin_unlock_irq(&np->lock);
1579 /* power-down, if interface is down */
1580 if (! netif_running(dev)) {
1581 outl(0x0008, ioaddr + GENCTL);
1582 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
/*
 * epic_remove_one - PCI hot-unplug / driver-unload teardown.
 * Releases the DMA descriptor rings, unregisters the netdev, unmaps the
 * registers, and releases/disables the PCI device.
 * NOTE(review): the descriptor rings are freed *before* unregister_netdev();
 * if the interface could still be up at this point that ordering looks
 * unsafe (rings freed while the device is live) -- confirm against the
 * probe/open paths before relying on it.
 */
1588 static void __devexit epic_remove_one (struct pci_dev *pdev)
1590 struct net_device *dev = pci_get_drvdata(pdev);
1591 struct epic_private *ep = dev->priv;
1593 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1594 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1595 unregister_netdev(dev);
1597 iounmap((void*) dev->base_addr);
1599 pci_release_regions(pdev);
1601 pci_disable_device(pdev);
1602 pci_set_drvdata(pdev, NULL);
1603 /* pci_power_off(pdev, -1); */
/*
 * epic_suspend - PM suspend hook.
 * Nothing to do when the interface is down; otherwise park the chip in
 * low-power mode (GENCTL 0x0008).
 */
1609 static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
1611 struct net_device *dev = pci_get_drvdata(pdev);
1612 long ioaddr = dev->base_addr;
1614 if (!netif_running(dev))
1617 /* Put the chip into low-power mode. */
1618 outl(0x0008, ioaddr + GENCTL);
1619 /* pci_power_off(pdev, -1); */
/*
 * epic_resume - PM resume hook.
 * Nothing to do when the interface is down; the restart path for a
 * running interface falls on lines not shown in this listing.
 */
1624 static int epic_resume (struct pci_dev *pdev)
1626 struct net_device *dev = pci_get_drvdata(pdev);
1628 if (!netif_running(dev))
1631 /* pci_power_on(pdev); */
1635 #endif /* CONFIG_PM */
/* PCI driver glue: probe/remove plus the optional power-management hooks. */
1638 static struct pci_driver epic_driver = {
1640 .id_table = epic_pci_tbl,
1641 .probe = epic_init_one,
1642 .remove = __devexit_p(epic_remove_one),
1644 .suspend = epic_suspend,
1645 .resume = epic_resume,
1646 #endif /* CONFIG_PM */
/*
 * epic_init - module entry point: print the version banner and register
 * the PCI driver.
 */
1650 static int __init epic_init (void)
1652 /* when a module, this is printed whether or not devices are found in probe */
1654 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
1655 version, version2, version3);
1658 return pci_module_init (&epic_driver);
/* epic_cleanup - module exit point: unregister the PCI driver. */
1662 static void __exit epic_cleanup (void)
1664 pci_unregister_driver (&epic_driver);
/* Standard kernel module entry/exit registration. */
1668 module_init(epic_init);
1669 module_exit(epic_cleanup);