2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
18 See the file COPYING in this distribution for more information.
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
35 #define net_drv(p, arg...) if (netif_msg_drv(p)) \
37 #define net_probe(p, arg...) if (netif_msg_probe(p)) \
39 #define net_link(p, arg...) if (netif_msg_link(p)) \
41 #define net_intr(p, arg...) if (netif_msg_intr(p)) \
43 #define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
46 #ifdef CONFIG_SIS190_NAPI
47 #define NAPI_SUFFIX "-NAPI"
49 #define NAPI_SUFFIX ""
52 #define DRV_VERSION "1.2" NAPI_SUFFIX
53 #define DRV_NAME "sis190"
54 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
55 #define PFX DRV_NAME ": "
57 #ifdef CONFIG_SIS190_NAPI
58 #define sis190_rx_skb netif_receive_skb
59 #define sis190_rx_quota(count, quota) min(count, quota)
61 #define sis190_rx_skb netif_rx
62 #define sis190_rx_quota(count, quota) count
65 #define MAC_ADDR_LEN 6
67 #define NUM_TX_DESC 64 /* [8..1024] */
68 #define NUM_RX_DESC 64 /* [8..8192] */
69 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
70 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
71 #define RX_BUF_SIZE 1536
72 #define RX_BUF_MASK 0xfff8
74 #define SIS190_REGS_SIZE 0x80
75 #define SIS190_TX_TIMEOUT (6*HZ)
76 #define SIS190_PHY_TIMEOUT (10*HZ)
77 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
78 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
81 /* Enhanced PHY access register bit definitions */
82 #define EhnMIIread 0x0000
83 #define EhnMIIwrite 0x0020
84 #define EhnMIIdataShift 16
85 #define EhnMIIpmdShift 6 /* 7016 only */
86 #define EhnMIIregShift 11
87 #define EhnMIIreq 0x0010
88 #define EhnMIInotDone 0x0010
90 /* Write/read MMIO register */
91 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
92 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
93 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
94 #define SIS_R8(reg) readb (ioaddr + (reg))
95 #define SIS_R16(reg) readw (ioaddr + (reg))
96 #define SIS_R32(reg) readl (ioaddr + (reg))
98 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
100 enum sis190_registers {
102 TxDescStartAddr = 0x04,
103 rsv0 = 0x08, // reserved
104 TxSts = 0x0c, // unused (Control/Status)
106 RxDescStartAddr = 0x14,
107 rsv1 = 0x18, // reserved
108 RxSts = 0x1c, // unused
112 IntrTimer = 0x2c, // unused (Interupt Timer)
113 PMControl = 0x30, // unused (Power Mgmt Control/Status)
114 rsv2 = 0x34, // reserved
117 StationControl = 0x40,
119 GIoCR = 0x48, // unused (GMAC IO Compensation)
120 GIoCtrl = 0x4c, // unused (GMAC IO Control)
122 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
123 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
124 rsv3 = 0x5c, // reserved
128 // Undocumented = 0x6c,
130 RxWolData = 0x74, // unused (Rx WOL Data Access)
131 RxMPSControl = 0x78, // unused (Rx MPS Control)
132 rsv4 = 0x7c, // reserved
135 enum sis190_register_content {
137 SoftInt = 0x40000000, // unused
138 Timeup = 0x20000000, // unused
139 PauseFrame = 0x00080000, // unused
140 MagicPacket = 0x00040000, // unused
141 WakeupFrame = 0x00020000, // unused
142 LinkChange = 0x00010000,
143 RxQEmpty = 0x00000080,
145 TxQ1Empty = 0x00000020, // unused
146 TxQ1Int = 0x00000010,
147 TxQ0Empty = 0x00000008, // unused
148 TxQ0Int = 0x00000004,
154 CmdRxEnb = 0x08, // unused
156 RxBufEmpty = 0x01, // unused
159 Cfg9346_Lock = 0x00, // unused
160 Cfg9346_Unlock = 0xc0, // unused
163 AcceptErr = 0x20, // unused
164 AcceptRunt = 0x10, // unused
165 AcceptBroadcast = 0x0800,
166 AcceptMulticast = 0x0400,
167 AcceptMyPhys = 0x0200,
168 AcceptAllPhys = 0x0100,
172 RxCfgDMAShift = 8, // 0x1a in RxControl ?
175 TxInterFrameGapShift = 24,
176 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
186 LinkStatus = 0x02, // unused
187 FullDup = 0x01, // unused
190 TBILinkOK = 0x02000000, // unused
207 enum _DescStatusBit {
209 OWNbit = 0x80000000, // RXOWN/TXOWN
210 INTbit = 0x40000000, // RXINT/TXINT
211 CRCbit = 0x00020000, // CRCOFF/CRCEN
212 PADbit = 0x00010000, // PREADD/PADEN
214 RingEnd = 0x80000000,
216 LSEN = 0x08000000, // TSO ? -- FR
243 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
252 RxSizeMask = 0x0000ffff
254 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
255 * provide two (unused with Linux) Tx queues. No publically
256 * available documentation alas.
260 enum sis190_eeprom_access_register_bits {
261 EECS = 0x00000001, // unused
262 EECLK = 0x00000002, // unused
263 EEDO = 0x00000008, // unused
264 EEDI = 0x00000004, // unused
267 EEWOP = 0x00000100 // unused
270 /* EEPROM Addresses */
271 enum sis190_eeprom_address {
272 EEPROMSignature = 0x00,
273 EEPROMCLK = 0x01, // unused
278 struct sis190_private {
279 void __iomem *mmio_addr;
280 struct pci_dev *pci_dev;
281 struct net_device_stats stats;
290 struct RxDesc *RxDescRing;
291 struct TxDesc *TxDescRing;
292 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
293 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
294 struct work_struct phy_task;
295 struct timer_list timer;
297 struct mii_if_info mii_if;
300 const static struct {
302 u8 version; /* depend on docs */
303 u32 RxConfigMask; /* clear the bits supported by this chip */
304 } sis_chip_info[] = {
305 { DRV_NAME, 0x00, 0xff7e1880, },
308 static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
309 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
313 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
315 static int rx_copybreak = 200;
321 MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
322 module_param(rx_copybreak, int, 0);
323 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
324 module_param_named(debug, debug.msg_enable, int, 0);
325 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
326 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
327 MODULE_VERSION(DRV_VERSION);
328 MODULE_LICENSE("GPL");
330 static const u32 sis190_intr_mask =
331 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt;
334 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
335 * The chips use a 64 element hash table based on the Ethernet CRC.
337 static int multicast_filter_limit = 32;
/* MII/GMII management helpers.
 * NOTE(review): this chunk is line-elided; braces, local declarations and
 * delay statements of these helpers are not visible here. Comments below
 * describe only what the visible lines establish.
 */
/* Post a command word to GMIIControl, then poll (up to 100 iterations)
 * for the EhnMIInotDone bit to clear; logs an error on timeout. */
339 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
343 	SIS_W32(GMIIControl, ctl);
347 	for (i = 0; i < 100; i++) {
348 		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
354 	printk(KERN_ERR PFX "PHY command failed !\n");
/* Write @val to PHY register @reg of PHY @phy_id via the enhanced
 * MII interface (fields packed into one GMIIControl command word). */
357 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
359 	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
360 		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
361 		(((u32) val) << EhnMIIdataShift));
/* Read PHY register @reg of PHY @phy_id; the 16-bit result is returned
 * in the upper half of GMIIControl after the command completes. */
364 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
366 	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
367 		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
369 	return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
/* net_device-based wrappers with the signature expected by mii_if_info
 * (tp->mii_if.mdio_write / .mdio_read). */
372 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
374 	struct sis190_private *tp = netdev_priv(dev);
376 	mdio_write(tp->mmio_addr, phy_id, reg, val);
379 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
381 	struct sis190_private *tp = netdev_priv(dev);
383 	return mdio_read(tp->mmio_addr, phy_id, reg);
/* Read one 16-bit word at EEPROM address @reg.
 * Bails out early when ROMControl bit 1 indicates no EEPROM is present,
 * then issues an EEPROM read op and polls (up to 200 iterations) for the
 * EEREQ bit to drop; the data word comes back in ROMInterface[31:16].
 * NOTE(review): return paths and the presence-check value are elided in
 * this chunk -- confirm against the full source before relying on them. */
386 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
391 	if (!(SIS_R32(ROMControl) & 0x0002))
394 	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
396 	for (i = 0; i < 200; i++) {
397 		if (!(SIS_R32(ROMInterface) & EEREQ)) {
398 			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
/* Mask all interrupt sources and acknowledge any that are pending. */
407 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
409 	SIS_W32(IntrMask, 0x00);
410 	SIS_W32(IntrStatus, 0xffffffff);
/* Quiesce the chip: halt both DMA engines, then mask/ack interrupts. */
414 static void sis190_asic_down(void __iomem *ioaddr)
416 	/* Stop the chip's Tx and Rx DMA processes. */
418 	SIS_W32(TxControl, 0x1a00);
419 	SIS_W32(RxControl, 0x1a00);
421 	sis190_irq_mask_and_ack(ioaddr);
/* Set the RingEnd bit so the asic wraps back to the first descriptor. */
424 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
426 	desc->size |= cpu_to_le32(RingEnd);
/* Hand an Rx descriptor (back) to the asic: restore its buffer size
 * (preserving the end-of-ring bit) and set OWN so hardware may fill it. */
429 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
431 	u32 eor = le32_to_cpu(desc->size) & RingEnd;
434 	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
436 	desc->status = cpu_to_le32(OWNbit | INTbit);
/* Install a fresh DMA mapping into an Rx descriptor and hand it over. */
439 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
442 	desc->addr = cpu_to_le32(mapping);
443 	sis190_give_to_asic(desc, rx_buf_sz);
/* Poison a descriptor so the asic will not use it (size cleared except
 * for the ring-end marker, address set to a recognizable sentinel). */
446 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
449 	desc->addr = 0xdeadbeef;
450 	desc->size &= cpu_to_le32(RingEnd);
/* Allocate one Rx skb, DMA-map it and attach it to @desc.
 * On allocation failure (path elided here) the descriptor is poisoned
 * via sis190_make_unusable_by_asic(). */
455 static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
456 	struct RxDesc *desc, u32 rx_buf_sz)
462 	skb = dev_alloc_skb(rx_buf_sz);
468 	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
471 	sis190_map_to_asic(desc, mapping, rx_buf_sz);
477 	sis190_make_unusable_by_asic(desc);
/* Refill ring slots in [start, end) that currently have no skb.
 * Returns the number of buffers actually allocated (return elided). */
481 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
486 	for (cur = start; cur < end; cur++) {
487 		int ret, i = cur % NUM_RX_DESC;
489 		if (tp->Rx_skbuff[i])
492 		ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
493 			tp->RxDescRing + i, tp->rx_buf_sz);
/* Copy-break: for packets smaller than rx_copybreak, copy the data into
 * a fresh small skb and immediately recycle the original big buffer back
 * to the asic, avoiding an unmap/remap of the full-size buffer. */
500 static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
501 	struct RxDesc *desc, int rx_buf_sz)
505 	if (pkt_size < rx_copybreak) {
508 		skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
510 			skb_reserve(skb, NET_IP_ALIGN);
511 			eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
513 			sis190_give_to_asic(desc, rx_buf_sz);
/* Classify an Rx descriptor status word.
 * Returns success (elided) when CRC is OK and no error bit is set;
 * otherwise bumps the matching net_device_stats error counter.
 * The checks are ordered most-specific first, so each packet is counted
 * in exactly one bucket. */
520 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
522 #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
524 	if ((status & CRCOK) && !(status & ErrMask))
527 	if (!(status & CRCOK))
528 		stats->rx_crc_errors++;
529 	else if (status & OVRUN)
530 		stats->rx_over_errors++;
531 	else if (status & (SHORT | LIMIT))
532 		stats->rx_length_errors++;
533 	else if (status & (MIIER | NIBON | COLON))
534 		stats->rx_frame_errors++;
/* Rx completion processing: walk descriptors the hardware has released
 * (OWN bit clear), deliver good packets up the stack, recycle or replace
 * buffers, and refill the ring.
 * NOTE(review): many interior lines (break statements, delivery call,
 * counters) are elided from this chunk; comments stick to what is shown. */
540 static int sis190_rx_interrupt(struct net_device *dev,
541 	struct sis190_private *tp, void __iomem *ioaddr)
543 	struct net_device_stats *stats = &tp->stats;
544 	u32 rx_left, cur_rx = tp->cur_rx;
/* Number of descriptors outstanding, capped by the NAPI quota when
 * CONFIG_SIS190_NAPI is set (sis190_rx_quota is identity otherwise). */
547 	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
548 	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
550 	for (; rx_left > 0; rx_left--, cur_rx++) {
551 		unsigned int entry = cur_rx % NUM_RX_DESC;
552 		struct RxDesc *desc = tp->RxDescRing + entry;
/* Hardware still owns this descriptor: stop scanning. */
555 		if (desc->status & OWNbit)
558 		status = le32_to_cpu(desc->PSize);
560 		// net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
/* Errored packet: counted by sis190_rx_pkt_err, buffer recycled. */
563 		if (sis190_rx_pkt_err(status, stats) < 0)
564 			sis190_give_to_asic(desc, tp->rx_buf_sz);
566 			struct sk_buff *skb = tp->Rx_skbuff[entry];
/* -4: strip the trailing FCS from the reported size. */
567 			int pkt_size = (status & RxSizeMask) - 4;
568 			void (*pci_action)(struct pci_dev *, dma_addr_t,
569 				size_t, int) = pci_dma_sync_single_for_device;
/* Oversized/fragmented packet: drop and recycle the buffer. */
571 			if (unlikely(pkt_size > tp->rx_buf_sz)) {
572 				net_intr(tp, KERN_INFO
573 					"%s: (frag) status = %08x.\n",
576 				stats->rx_length_errors++;
577 				sis190_give_to_asic(desc, tp->rx_buf_sz);
581 			pci_dma_sync_single_for_cpu(tp->pci_dev,
582 				le32_to_cpu(desc->addr), tp->rx_buf_sz,
/* If copy-break did not take the packet, detach the skb for delivery
 * and fully unmap it instead of just syncing it back to the device. */
585 			if (sis190_try_rx_copy(&skb, pkt_size, desc,
587 				pci_action = pci_unmap_single;
588 				tp->Rx_skbuff[entry] = NULL;
589 				sis190_make_unusable_by_asic(desc);
592 			pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
593 				tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
596 			skb_put(skb, pkt_size);
597 			skb->protocol = eth_type_trans(skb, dev);
601 			dev->last_rx = jiffies;
603 			stats->rx_bytes += pkt_size;
604 			if ((status & BCAST) == MCAST)
/* Replenish the slots we consumed; warn if nothing could be allocated. */
608 	count = cur_rx - tp->cur_rx;
611 	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
612 	if (!delta && count && netif_msg_intr(tp))
613 		printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
614 	tp->dirty_rx += delta;
616 	if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
617 		printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
/* Unmap a transmitted skb's DMA buffer and clear its descriptor.
 * The mapped length was rounded up to ETH_ZLEN at transmit time, so the
 * unmap length must match (see sis190_start_xmit's skb_padto path). */
622 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
627 	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
629 	pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
631 	memset(desc, 0x00, sizeof(*desc));
/* Tx completion: reclaim descriptors the hardware has finished with,
 * update counters, free skbs, and wake the queue if it was stopped
 * because the ring was full. */
634 static void sis190_tx_interrupt(struct net_device *dev,
635 	struct sis190_private *tp, void __iomem *ioaddr)
637 	u32 pending, dirty_tx = tp->dirty_tx;
639 	 * It would not be needed if queueing was allowed to be enabled
640 	 * again too early (hint: think preempt and unclocked smp systems).
642 	unsigned int queue_stopped;
/* Snapshot queue-full state before reclaiming so we only wake the queue
 * when we actually freed slots from a full ring. */
645 	pending = tp->cur_tx - dirty_tx;
646 	queue_stopped = (pending == NUM_TX_DESC);
648 	for (; pending; pending--, dirty_tx++) {
649 		unsigned int entry = dirty_tx % NUM_TX_DESC;
650 		struct TxDesc *txd = tp->TxDescRing + entry;
/* Hardware still owns this descriptor: nothing more to reclaim. */
653 		if (le32_to_cpu(txd->status) & OWNbit)
656 		skb = tp->Tx_skbuff[entry];
658 		tp->stats.tx_packets++;
659 		tp->stats.tx_bytes += skb->len;
661 		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
662 		tp->Tx_skbuff[entry] = NULL;
663 		dev_kfree_skb_irq(skb);
666 	if (tp->dirty_tx != dirty_tx) {
667 		tp->dirty_tx = dirty_tx;
670 			netif_wake_queue(dev);
675  * The interrupt handler does all of the Rx thread work and cleans up after
/* Shared interrupt handler. Reads and acks IntrStatus, then dispatches:
 * link-change events to the PHY workqueue task, Rx work to
 * sis190_rx_interrupt() and Tx completions to sis190_tx_interrupt().
 * Returns IRQ_NONE when the status word shows the interrupt was not ours
 * (0 or 0xffffffff, the latter meaning the device is gone). */
678 static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
680 	struct net_device *dev = __dev;
681 	struct sis190_private *tp = netdev_priv(dev);
682 	void __iomem *ioaddr = tp->mmio_addr;
683 	unsigned int handled = 0;
686 	status = SIS_R32(IntrStatus);
688 	if ((status == 0xffffffff) || !status)
/* Late interrupt while the interface is going down: quiesce and leave. */
693 	if (unlikely(!netif_running(dev))) {
694 		sis190_asic_down(ioaddr);
/* Acknowledge exactly the sources we observed. */
698 	SIS_W32(IntrStatus, status);
700 	// net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
702 	if (status & LinkChange) {
703 		net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
/* PHY handling needs MDIO polling; defer to process context. */
704 		schedule_work(&tp->phy_task);
708 		sis190_rx_interrupt(dev, tp, ioaddr);
710 	if (status & TxQ0Int)
711 		sis190_tx_interrupt(dev, tp, ioaddr);
713 	return IRQ_RETVAL(handled);
716 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point (netconsole etc.): invoke the interrupt handler
 * with the device IRQ disabled to avoid re-entrancy. */
717 static void sis190_netpoll(struct net_device *dev)
719 	struct sis190_private *tp = netdev_priv(dev);
720 	struct pci_dev *pdev = tp->pci_dev;
722 	disable_irq(pdev->irq);
723 	sis190_interrupt(pdev->irq, dev, NULL);
724 	enable_irq(pdev->irq);
/* Unmap and free one Rx skb, then poison its descriptor. */
728 static void sis190_free_rx_skb(struct sis190_private *tp,
729 	struct sk_buff **sk_buff, struct RxDesc *desc)
731 	struct pci_dev *pdev = tp->pci_dev;
733 	pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
735 	dev_kfree_skb(*sk_buff);
737 	sis190_make_unusable_by_asic(desc);
/* Release every populated Rx ring slot (interface-down / error path). */
740 static void sis190_rx_clear(struct sis190_private *tp)
744 	for (i = 0; i < NUM_RX_DESC; i++) {
745 		if (!tp->Rx_skbuff[i])
747 		sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
/* Reset both rings' producer/consumer indexes to empty. */
751 static void sis190_init_ring_indexes(struct sis190_private *tp)
753 	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
/* Initialize both rings: clear the skb bookkeeping arrays, populate the
 * whole Rx ring with mapped buffers, and mark its last descriptor so the
 * asic wraps. Error handling for a partial fill is elided here. */
756 static int sis190_init_ring(struct net_device *dev)
758 	struct sis190_private *tp = netdev_priv(dev);
760 	sis190_init_ring_indexes(tp);
762 	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
763 	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
765 	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
768 	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
/* Program the Rx filter from dev->flags and the multicast list:
 * promiscuous -> accept everything; too many groups or IFF_ALLMULTI ->
 * accept all multicast; otherwise build a 64-bit CRC hash filter.
 * Registers are written under tp->lock since this can race the irq path. */
777 static void sis190_set_rx_mode(struct net_device *dev)
779 	struct sis190_private *tp = netdev_priv(dev);
780 	void __iomem *ioaddr = tp->mmio_addr;
782 	u32 mc_filter[2]; /* Multicast hash filter */
785 	if (dev->flags & IFF_PROMISC) {
786 		/* Unconditionally log net taps. */
787 		net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
790 			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
792 		mc_filter[1] = mc_filter[0] = 0xffffffff;
793 	} else if ((dev->mc_count > multicast_filter_limit) ||
794 		(dev->flags & IFF_ALLMULTI)) {
795 		/* Too many to filter perfectly -- accept all multicasts. */
796 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
797 		mc_filter[1] = mc_filter[0] = 0xffffffff;
799 		struct dev_mc_list *mclist;
802 		rx_mode = AcceptBroadcast | AcceptMyPhys;
803 		mc_filter[1] = mc_filter[0] = 0;
804 		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
805 			i++, mclist = mclist->next) {
/* Top 6 bits of the Ethernet CRC select one of 64 hash buckets. */
807 				ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
808 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
809 			rx_mode |= AcceptMulticast;
813 	spin_lock_irqsave(&tp->lock, flags);
815 	SIS_W16(RxMacControl, rx_mode | 0x2);
816 	SIS_W32(RxHashTable, mc_filter[0]);
817 	SIS_W32(RxHashTable + 4, mc_filter[1]);
819 	spin_unlock_irqrestore(&tp->lock, flags);
/* Pulse the soft-reset bit in IntrControl, then bring the asic down
 * (DMA halted, interrupts masked). Delay between the writes is elided. */
822 static void sis190_soft_reset(void __iomem *ioaddr)
824 	SIS_W32(IntrControl, 0x8000);
827 	SIS_W32(IntrControl, 0x0);
828 	sis190_asic_down(ioaddr);
/* Full hardware (re)initialization: reset, program ring base addresses,
 * set MAC defaults, install the Rx filter, unmask interrupts and start
 * both DMA engines. Caller must have the rings set up already. */
832 static void sis190_hw_start(struct net_device *dev)
834 	struct sis190_private *tp = netdev_priv(dev);
835 	void __iomem *ioaddr = tp->mmio_addr;
837 	sis190_soft_reset(ioaddr);
839 	SIS_W32(TxDescStartAddr, tp->tx_dma);
840 	SIS_W32(RxDescStartAddr, tp->rx_dma);
842 	SIS_W32(IntrStatus, 0xffffffff);
843 	SIS_W32(IntrMask, 0x0);
845 	 * Default is 100Mbps.
846 	 * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09
848 	SIS_W16(StationControl, 0x1901);
849 	SIS_W32(GMIIControl, 0x0);
850 	SIS_W32(TxMacControl, 0x60);
851 	SIS_W16(RxMacControl, 0x02);
852 	SIS_W32(RxHashTable, 0x0);
854 	SIS_W32(RxWolCtrl, 0x0);
855 	SIS_W32(RxWolData, 0x0);
859 	sis190_set_rx_mode(dev);
861 	/* Enable all known interrupts by setting the interrupt mask. */
862 	SIS_W32(IntrMask, sis190_intr_mask);
864 	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
865 	SIS_W32(RxControl, 0x1a1d);
867 	netif_start_queue(dev);
/* Deferred (workqueue) PHY state machine. While a PHY reset is pending,
 * re-arm the timer to poll again; while autonegotiation has not completed,
 * reset the PHY and retry later; once the link is up, decode the link
 * partner ability word into a StationControl setting and report carrier.
 * NOTE(review): locking and the early-exit paths are elided in this chunk. */
870 static void sis190_phy_task(void * data)
872 	struct net_device *dev = data;
873 	struct sis190_private *tp = netdev_priv(dev);
874 	void __iomem *ioaddr = tp->mmio_addr;
875 	int phy_id = tp->mii_if.phy_id;
880 	val = mdio_read(ioaddr, phy_id, MII_BMCR);
881 	if (val & BMCR_RESET) {
882 		// FIXME: needlessly high ? -- FR 02/07/2005
883 		mod_timer(&tp->timer, jiffies + HZ/10);
884 	} else if (!(mdio_read(ioaddr, phy_id, MII_BMSR) & BMSR_ANEGCOMPLETE)) {
885 		net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
887 		mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
888 		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
/* Table mapping MII_LPA ability bits to a human-readable mode string and
 * the matching StationControl value; last entry is a catch-all. */
896 		{ LPA_1000XFULL | LPA_SLCT,
897 			"1000 Mbps Full Duplex",
899 		{ LPA_1000XHALF | LPA_SLCT,
900 			"1000 Mbps Half Duplex",
903 			"100 Mbps Full Duplex",
906 			"100 Mbps Half Duplex",
909 			"10 Mbps Full Duplex",
912 			"10 Mbps Half Duplex",
914 		{ 0, "unknown", 0x0000 }
917 		val = mdio_read(ioaddr, phy_id, 0x1f);
918 		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
920 		val = mdio_read(ioaddr, phy_id, MII_LPA);
921 		net_link(tp, KERN_INFO "%s: mii lpa = %04x.\n", dev->name, val);
/* First table entry whose ability bits are all present wins. */
923 		for (p = reg31; p->ctl; p++) {
924 			if ((val & p->val) == p->val)
928 		SIS_W16(StationControl, p->ctl);
929 		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
931 		netif_carrier_on(dev);
/* Timer callback: kick the PHY workqueue task while the device is up. */
937 static void sis190_phy_timer(unsigned long __opaque)
939 	struct net_device *dev = (struct net_device *)__opaque;
940 	struct sis190_private *tp = netdev_priv(dev);
942 	if (likely(netif_running(dev)))
943 		schedule_work(&tp->phy_task);
/* Cancel the PHY poll timer, waiting for a running handler to finish. */
946 static inline void sis190_delete_timer(struct net_device *dev)
948 	struct sis190_private *tp = netdev_priv(dev);
950 	del_timer_sync(&tp->timer);
/* Arm the PHY poll timer with its callback and first expiry. */
953 static inline void sis190_request_timer(struct net_device *dev)
955 	struct sis190_private *tp = netdev_priv(dev);
956 	struct timer_list *timer = &tp->timer;
959 	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
960 	timer->data = (unsigned long)dev;
961 	timer->function = sis190_phy_timer;
/* Derive the Rx buffer size from the MTU; the asic ignores the low three
 * bits of RxDesc->size, so the size must be 8-byte aligned (RX_BUF_MASK). */
965 static void sis190_set_rxbufsize(struct sis190_private *tp,
966 	struct net_device *dev)
968 	unsigned int mtu = dev->mtu;
970 	tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
971 	/* RxDesc->size has a licence to kill the lower bits */
972 	if (tp->rx_buf_sz & 0x07) {
974 		tp->rx_buf_sz &= RX_BUF_MASK;
/* Bring the interface up: allocate DMA-coherent descriptor rings, fill
 * the Rx ring, start the PHY timer, grab the (shared) IRQ and start the
 * hardware. Unwinds with goto labels in reverse order on failure. */
978 static int sis190_open(struct net_device *dev)
980 	struct sis190_private *tp = netdev_priv(dev);
981 	struct pci_dev *pdev = tp->pci_dev;
984 	sis190_set_rxbufsize(tp, dev);
987 	 * Rx and Tx descriptors need 256 bytes alignment.
988 	 * pci_alloc_consistent() guarantees a stronger alignment.
990 	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
994 	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
998 	rc = sis190_init_ring(dev);
1002 	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1004 	sis190_request_timer(dev);
1006 	rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
1008 		goto err_release_timer_2;
1010 	sis190_hw_start(dev);
/* Error unwind: timer, Rx buffers, then the two coherent rings. */
1014 err_release_timer_2:
1015 	sis190_delete_timer(dev);
1016 	sis190_rx_clear(tp);
1018 	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1021 	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
/* Drop every queued-but-unsent Tx skb (device reset / shutdown path)
 * and reset the Tx ring indexes; each dropped skb bumps tx_dropped. */
1026 static void sis190_tx_clear(struct sis190_private *tp)
1030 	for (i = 0; i < NUM_TX_DESC; i++) {
1031 		struct sk_buff *skb = tp->Tx_skbuff[i];
1036 		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1037 		tp->Tx_skbuff[i] = NULL;
1040 		tp->stats.tx_dropped++;
1042 	tp->cur_tx = tp->dirty_tx = 0;
/* Take the interface down: stop the timer and queue, flush the PHY work,
 * quiesce the asic under the lock, then loop until the interrupt mask
 * reads back zero (guards against a concurrent handler re-enabling it),
 * and finally release all ring buffers. */
1045 static void sis190_down(struct net_device *dev)
1047 	struct sis190_private *tp = netdev_priv(dev);
1048 	void __iomem *ioaddr = tp->mmio_addr;
1049 	unsigned int poll_locked = 0;
1051 	sis190_delete_timer(dev);
1053 	netif_stop_queue(dev);
1055 	flush_scheduled_work();
1058 	spin_lock_irq(&tp->lock);
1060 	sis190_asic_down(ioaddr);
1062 	spin_unlock_irq(&tp->lock);
1064 	synchronize_irq(dev->irq);
/* Disable NAPI polling only once per loop (poll_locked flag, elided). */
1067 		netif_poll_disable(dev);
1071 	synchronize_sched();
1073 	} while (SIS_R32(IntrMask));
1075 	sis190_tx_clear(tp);
1076 	sis190_rx_clear(tp);
/* net_device stop hook: tear everything down, free the IRQ, re-enable
 * polling for the next open, and release the coherent rings. */
1079 static int sis190_close(struct net_device *dev)
1081 	struct sis190_private *tp = netdev_priv(dev);
1082 	struct pci_dev *pdev = tp->pci_dev;
1086 	free_irq(dev->irq, dev);
1088 	netif_poll_enable(dev);
1090 	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1091 	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1093 	tp->TxDescRing = NULL;
1094 	tp->RxDescRing = NULL;
/* hard_start_xmit hook: pad runt frames to ETH_ZLEN, claim the next Tx
 * descriptor, DMA-map the payload, fill the descriptor (size, address,
 * ring-end flag, then OWN last) and kick the Tx DMA engine. Stops the
 * queue when the ring fills; the Tx completion path wakes it again. */
1099 static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1101 	struct sis190_private *tp = netdev_priv(dev);
1102 	void __iomem *ioaddr = tp->mmio_addr;
1103 	u32 len, entry, dirty_tx;
1104 	struct TxDesc *desc;
/* skb_padto may free the original skb and return NULL on failure. */
1107 	if (unlikely(skb->len < ETH_ZLEN)) {
1108 		skb = skb_padto(skb, ETH_ZLEN);
1110 			tp->stats.tx_dropped++;
1118 	entry = tp->cur_tx % NUM_TX_DESC;
1119 	desc = tp->TxDescRing + entry;
/* Hardware still owns this slot -> ring full while queue awake: bug. */
1121 	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1122 		netif_stop_queue(dev);
1123 		net_tx_err(tp, KERN_ERR PFX
1124 			"%s: BUG! Tx Ring full when queue awake!\n",
1126 		return NETDEV_TX_BUSY;
1129 	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1131 	tp->Tx_skbuff[entry] = skb;
1133 	desc->PSize = cpu_to_le32(len);
1134 	desc->addr = cpu_to_le32(mapping);
1136 	desc->size = cpu_to_le32(len);
1137 	if (entry == (NUM_TX_DESC - 1))
1138 		desc->size |= cpu_to_le32(RingEnd);
/* OWN is set last so the asic only sees a fully-formed descriptor. */
1142 	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
/* Poke TxControl to (re)start the Tx DMA engine. */
1148 	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1150 	dev->trans_start = jiffies;
/* Stop the queue on a full ring; re-check dirty_tx afterwards to close
 * the race with a completion interrupt that just freed a slot. */
1152 	dirty_tx = tp->dirty_tx;
1153 	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1154 		netif_stop_queue(dev);
1156 		if (dirty_tx != tp->dirty_tx)
1157 			netif_wake_queue(dev);
1160 	return NETDEV_TX_OK;
/* get_stats hook: returns the driver-private counters (return elided). */
1163 static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1165 	struct sis190_private *tp = netdev_priv(dev);
/* Release board resources acquired by sis190_init_board: MMIO mapping,
 * PCI regions and PCI enable state. Used by remove and error paths. */
1170 static void sis190_release_board(struct pci_dev *pdev)
1172 	struct net_device *dev = pci_get_drvdata(pdev);
1173 	struct sis190_private *tp = netdev_priv(dev);
1175 	iounmap(tp->mmio_addr);
1176 	pci_release_regions(pdev);
1177 	pci_disable_device(pdev);
/* Probe-time board bring-up: allocate the net_device, enable the PCI
 * device, validate and claim BAR 0 (MMIO), set the 32-bit DMA mask,
 * ioremap the registers, wire up the mii_if callbacks, and leave the
 * chip quiesced/reset. Unwinds with numbered goto labels on failure.
 * NOTE(review): the return statement and some error labels are elided. */
1181 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1183 	struct sis190_private *tp;
1184 	struct net_device *dev;
1185 	void __iomem *ioaddr;
1188 	dev = alloc_etherdev(sizeof(*tp));
1190 		net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1195 	SET_MODULE_OWNER(dev);
1196 	SET_NETDEV_DEV(dev, &pdev->dev);
1198 	tp = netdev_priv(dev);
1199 	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1201 	rc = pci_enable_device(pdev);
1203 		net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1204 		goto err_free_dev_1;
/* BAR 0 must be a memory resource of at least SIS190_REGS_SIZE bytes. */
1209 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1210 		net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1212 		goto err_pci_disable_2;
1214 	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1215 		net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1217 		goto err_pci_disable_2;
1220 	rc = pci_request_regions(pdev, DRV_NAME);
1222 		net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1224 		goto err_pci_disable_2;
1227 	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1229 		net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1231 		goto err_free_res_3;
1234 	pci_set_master(pdev);
1236 	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1238 		net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1241 		goto err_free_res_3;
1245 	tp->mmio_addr = ioaddr;
/* Hook up the generic MII layer to our MDIO accessors. */
1247 	tp->mii_if.dev = dev;
1248 	tp->mii_if.mdio_read = __mdio_read;
1249 	tp->mii_if.mdio_write = __mdio_write;
1250 	tp->mii_if.phy_id = 1;
1251 	tp->mii_if.phy_id_mask = 0x1f;
1252 	tp->mii_if.reg_num_mask = 0x1f;
1254 	sis190_irq_mask_and_ack(ioaddr);
1256 	sis190_soft_reset(ioaddr);
/* Error unwind labels (partially elided in this chunk). */
1261 	pci_release_regions(pdev);
1263 	pci_disable_device(pdev);
/* Watchdog tx_timeout hook: disable the Tx engine, log status, mask
 * interrupts, drop all pending Tx skbs under the lock (excludes a shared
 * IRQ handler reclaiming the same ring), then restart the hardware. */
1271 static void sis190_tx_timeout(struct net_device *dev)
1273 	struct sis190_private *tp = netdev_priv(dev);
1274 	void __iomem *ioaddr = tp->mmio_addr;
1277 	/* Disable Tx, if not already */
1278 	tmp8 = SIS_R8(TxControl);
1279 	if (tmp8 & CmdTxEnb)
1280 		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1283 	net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1284 		dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1286 	/* Disable interrupts by clearing the interrupt mask. */
1287 	SIS_W32(IntrMask, 0x0000);
1289 	/* Stop a shared interrupt from scavenging while we are. */
1290 	spin_lock_irq(&tp->lock);
1291 	sis190_tx_clear(tp);
1292 	spin_unlock_irq(&tp->lock);
1294 	/* ...and finally, reset everything. */
1295 	sis190_hw_start(dev);
1297 	netif_wake_queue(dev);
1300 static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1301 struct net_device *dev)
1303 struct sis190_private *tp = netdev_priv(dev);
1304 void __iomem *ioaddr = tp->mmio_addr;
1308 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1311 /* Check to see if there is a sane EEPROM */
1312 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1314 if ((sig == 0xffff) || (sig == 0x0000)) {
1315 net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
1316 pci_name(pdev), sig);
1320 /* Get MAC address from EEPROM */
1321 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1322 __le16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1324 ((u16 *)dev->dev_addr)[0] = le16_to_cpu(w);
1331 * sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model
1333 * @dev: network device to get address for
1335 * SiS965 model, use APC CMOS RAM to store MAC address.
1336 * APC CMOS RAM is accessed through ISA bridge.
1337 * MAC address is read into @net_dev->dev_addr.
1339 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1340 struct net_device *dev)
1342 struct sis190_private *tp = netdev_priv(dev);
1343 struct pci_dev *isa_bridge;
1347 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1350 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL);
1352 net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
1357 /* Enable port 78h & 79h to access APC Registers. */
1358 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1359 reg = (tmp8 & ~0x02);
1360 pci_write_config_byte(isa_bridge, 0x48, reg);
1362 pci_read_config_byte(isa_bridge, 0x48, ®);
1364 for (i = 0; i < MAC_ADDR_LEN; i++) {
1365 outb(0x9 + i, 0x78);
1366 dev->dev_addr[i] = inb(0x79);
1372 /* Restore the value to ISA Bridge */
1373 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1374 pci_dev_put(isa_bridge);
1380 * sis190_init_rxfilter - Initialize the Rx filter
1381 * @dev: network device to initialize
1383 * Set receive filter address to our MAC address
1384 * and enable packet filtering.
/* Program the unicast filter (RxMacAddr) with dev->dev_addr.
 * Packet filtering bits in RxMacControl are cleared while the address
 * bytes are written, then the saved control value is restored. */
1386 static inline void sis190_init_rxfilter(struct net_device *dev)
1388 	struct sis190_private *tp = netdev_priv(dev);
1389 	void __iomem *ioaddr = tp->mmio_addr;
1393 	ctl = SIS_R16(RxMacControl);
1395 	 * Disable packet filtering before setting filter.
1396 	 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1397 	 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1399 	SIS_W16(RxMacControl, ctl & ~0x0f00);
1401 	for (i = 0; i < MAC_ADDR_LEN; i++)
1402 		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1404 	SIS_W16(RxMacControl, ctl);
/* Choose the MAC address source: PCI config byte 0x73 bit 0 selects the
 * APC CMOS path (SiS965) over the on-board EEPROM. */
1408 static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1412 	pci_read_config_byte(pdev, 0x73, &from);
1414 	return (from & 0x00000001) ?
1415 		sis190_get_mac_addr_from_apc(pdev, dev) :
1416 		sis190_get_mac_addr_from_eeprom(pdev, dev);
/* Advertise 10/100 full+half and 1000 full, then reset the PHY with
 * autonegotiation enabled and restarted. */
1419 static void sis190_set_speed_auto(struct net_device *dev)
1421 	struct sis190_private *tp = netdev_priv(dev);
1422 	void __iomem *ioaddr = tp->mmio_addr;
1423 	int phy_id = tp->mii_if.phy_id;
1426 	net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1428 	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1430 	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1432 	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1433 		ADVERTISE_100FULL | ADVERTISE_10FULL |
1434 		ADVERTISE_100HALF | ADVERTISE_10HALF);
1436 	// Enable 1000 Full Mode.
1437 	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1439 	// Enable auto-negotiation and restart auto-negotiation.
1440 	mdio_write(ioaddr, phy_id, MII_BMCR,
1441 		BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
/* ethtool get/set settings: delegate to the generic MII layer. */
1444 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1446 	struct sis190_private *tp = netdev_priv(dev);
1448 	return mii_ethtool_gset(&tp->mii_if, cmd);
1451 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1453 	struct sis190_private *tp = netdev_priv(dev);
1455 	return mii_ethtool_sset(&tp->mii_if, cmd);
/* ethtool drvinfo: static driver name/version plus the PCI bus id. */
1458 static void sis190_get_drvinfo(struct net_device *dev,
1459 	struct ethtool_drvinfo *info)
1461 	struct sis190_private *tp = netdev_priv(dev);
1463 	strcpy(info->driver, DRV_NAME);
1464 	strcpy(info->version, DRV_VERSION);
1465 	strcpy(info->bus_info, pci_name(tp->pci_dev));
/* ethtool register dump: fixed-size MMIO window. */
1468 static int sis190_get_regs_len(struct net_device *dev)
1470 	return SIS190_REGS_SIZE;
/* Copy the register window under the lock for a consistent snapshot. */
1473 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1476 	struct sis190_private *tp = netdev_priv(dev);
1477 	unsigned long flags;
1479 	if (regs->len > SIS190_REGS_SIZE)
1480 		regs->len = SIS190_REGS_SIZE;
1482 	spin_lock_irqsave(&tp->lock, flags);
1483 	memcpy_fromio(p, tp->mmio_addr, regs->len);
1484 	spin_unlock_irqrestore(&tp->lock, flags);
/* ethtool nway_reset: restart autonegotiation via the MII layer. */
1487 static int sis190_nway_reset(struct net_device *dev)
1489 	struct sis190_private *tp = netdev_priv(dev);
1491 	return mii_nway_restart(&tp->mii_if);
/* Message-level accessors for the net_drv/net_probe/... log macros. */
1494 static u32 sis190_get_msglevel(struct net_device *dev)
1496 	struct sis190_private *tp = netdev_priv(dev);
1498 	return tp->msg_enable;
1501 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1503 	struct sis190_private *tp = netdev_priv(dev);
1505 	tp->msg_enable = value;
1508 static struct ethtool_ops sis190_ethtool_ops = {
1509 	.get_settings	= sis190_get_settings,
1510 	.set_settings	= sis190_set_settings,
1511 	.get_drvinfo	= sis190_get_drvinfo,
1512 	.get_regs_len	= sis190_get_regs_len,
1513 	.get_regs	= sis190_get_regs,
1514 	.get_link	= ethtool_op_get_link,
1515 	.get_msglevel	= sis190_get_msglevel,
1516 	.set_msglevel	= sis190_set_msglevel,
1517 	.nway_reset	= sis190_nway_reset,
/* MII ioctls (SIOCGMIIPHY etc.); only valid while the device is up. */
1520 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1522 	struct sis190_private *tp = netdev_priv(dev);
1524 	return !netif_running(dev) ? -EINVAL :
1525 		generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
/* PCI probe: bring up the board, read the MAC address, install the
 * net_device callbacks, register the device and kick off autoneg.
 * NOTE(review): some return statements and labels are elided here. */
1528 static int __devinit sis190_init_one(struct pci_dev *pdev,
1529 	const struct pci_device_id *ent)
1531 	static int printed_version = 0;
1532 	struct sis190_private *tp;
1533 	struct net_device *dev;
1534 	void __iomem *ioaddr;
/* Print the banner only once, whatever the number of boards probed. */
1537 	if (!printed_version) {
1538 		net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1539 		printed_version = 1;
1542 	dev = sis190_init_board(pdev);
1548 	tp = netdev_priv(dev);
1549 	ioaddr = tp->mmio_addr;
1551 	rc = sis190_get_mac_addr(pdev, dev);
1553 		goto err_release_board;
1555 	sis190_init_rxfilter(dev);
1557 	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1559 	dev->open = sis190_open;
1560 	dev->stop = sis190_close;
1561 	dev->do_ioctl = sis190_ioctl;
1562 	dev->get_stats = sis190_get_stats;
1563 	dev->tx_timeout = sis190_tx_timeout;
1564 	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1565 	dev->hard_start_xmit = sis190_start_xmit;
1566 #ifdef CONFIG_NET_POLL_CONTROLLER
1567 	dev->poll_controller = sis190_netpoll;
1569 	dev->set_multicast_list = sis190_set_rx_mode;
1570 	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1571 	dev->irq = pdev->irq;
/* MMIO-only device: base_addr is a placeholder, never dereferenced. */
1572 	dev->base_addr = (unsigned long) 0xdead;
1574 	spin_lock_init(&tp->lock);
1575 	rc = register_netdev(dev);
1577 		goto err_release_board;
1579 	pci_set_drvdata(pdev, dev);
1581 	net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1582 		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1583 		pci_name(pdev), sis_chip_info[ent->driver_data].name,
1585 		dev->dev_addr[0], dev->dev_addr[1],
1586 		dev->dev_addr[2], dev->dev_addr[3],
1587 		dev->dev_addr[4], dev->dev_addr[5]);
/* Carrier stays off until sis190_phy_task confirms a link. */
1589 	netif_carrier_off(dev);
1591 	sis190_set_speed_auto(dev);
1596 	sis190_release_board(pdev);
/* PCI remove: unregister the netdev and release board resources. */
1600 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1602 	struct net_device *dev = pci_get_drvdata(pdev);
1604 	unregister_netdev(dev);
1605 	sis190_release_board(pdev);
1606 	pci_set_drvdata(pdev, NULL);
/* PCI driver glue (name field elided in this chunk). */
1609 static struct pci_driver sis190_pci_driver = {
1611 	.id_table	= sis190_pci_tbl,
1612 	.probe		= sis190_init_one,
1613 	.remove		= __devexit_p(sis190_remove_one),
/* Module init/exit: register / unregister the PCI driver. */
1616 static int __init sis190_init_module(void)
1618 	return pci_module_init(&sis190_pci_driver);
1621 static void __exit sis190_cleanup_module(void)
1623 	pci_unregister_driver(&sis190_pci_driver);
1626 module_init(sis190_init_module);
1627 module_exit(sis190_cleanup_module);