/*
   sis190.c: Silicon Integrated Systems SiS190 ethernet driver

   Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
   Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
   Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>

   Based on r8169.c, tg3.c, 8139cp.c, skge.c and probably even epic100.c.

   This software may be used and distributed according to the terms of
   the GNU General Public License (GPL), incorporated herein by reference.
   Drivers based on or derived from this code fall under the GPL and must
   retain the authorship, copyright and license notice.  This file is not
   a complete program and may only be used when the entire operating
   system is licensed under the GPL.

   See the file COPYING in this distribution for more information.
*/
21 #include <linux/module.h>
22 #include <linux/moduleparam.h>
23 #include <linux/netdevice.h>
24 #include <linux/rtnetlink.h>
25 #include <linux/etherdevice.h>
26 #include <linux/ethtool.h>
27 #include <linux/pci.h>
28 #include <linux/mii.h>
29 #include <linux/delay.h>
30 #include <linux/crc32.h>
31 #include <linux/dma-mapping.h>
/*
 * Conditional logging helpers: each expands to a printk() gated on the
 * matching netif_msg_* level stored in the private msg_enable word.
 * (Extraction had dropped the printk(arg) continuation lines.)
 */
#define net_drv(p, arg...)	if (netif_msg_drv(p)) \
					printk(arg)
#define net_probe(p, arg...)	if (netif_msg_probe(p)) \
					printk(arg)
#define net_link(p, arg...)	if (netif_msg_link(p)) \
					printk(arg)
#define net_intr(p, arg...)	if (netif_msg_intr(p)) \
					printk(arg)
#define net_tx_err(p, arg...)	if (netif_msg_tx_err(p)) \
					printk(arg)

/* NAPI builds advertise a version suffix and deliver Rx packets through
 * netif_receive_skb with a quota; non-NAPI builds use plain netif_rx. */
#ifdef CONFIG_SIS190_NAPI
#define NAPI_SUFFIX	"-NAPI"
#else
#define NAPI_SUFFIX	""
#endif

#define DRV_VERSION		"1.2" NAPI_SUFFIX
#define DRV_NAME		"sis190"
#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
#define PFX DRV_NAME ": "

#ifdef CONFIG_SIS190_NAPI
#define sis190_rx_skb			netif_receive_skb
#define sis190_rx_quota(count, quota)	min(count, quota)
#else
#define sis190_rx_skb			netif_rx
#define sis190_rx_quota(count, quota)	count
#endif
#define MAC_ADDR_LEN		6

/* Ring geometry: both rings are fixed at 64 descriptors. */
#define NUM_TX_DESC		64
#define NUM_RX_DESC		64
#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
/* Default Rx buffer size; grown when the MTU exceeds it. */
#define RX_BUF_SIZE		1536

#define SIS190_REGS_SIZE	0x80
#define SIS190_TX_TIMEOUT	(6*HZ)
#define SIS190_PHY_TIMEOUT	(10*HZ)
/* Default msg_enable mask (extraction had truncated the last line). */
#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
				 NETIF_MSG_IFDOWN)
/* Enhanced PHY access register bit definitions */
#define EhnMIIread	0x0000
#define EhnMIIwrite	0x0020
#define EhnMIIdataShift	16
#define EhnMIIpmdShift	6	/* 7016 only */
#define EhnMIIregShift	11
#define EhnMIIreq	0x0010
#define EhnMIInotDone	0x0010

/* Write/read MMIO register (all expect a local `ioaddr` in scope). */
#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
#define SIS_R8(reg)		readb (ioaddr + (reg))
#define SIS_R16(reg)		readw (ioaddr + (reg))
#define SIS_R32(reg)		readl (ioaddr + (reg))

/* Flush posted MMIO writes by reading a register back. */
#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)
/*
 * MMIO register map (0x00-0x7f). Reconstructed around the visible
 * entries; offsets follow the 4-byte stride the fragment shows.
 */
enum sis190_registers {
	TxControl		= 0x00,
	TxDescStartAddr		= 0x04,
	rsv0			= 0x08,	// reserved
	TxSts			= 0x0c,	// unused (Control/Status)
	RxControl		= 0x10,
	RxDescStartAddr		= 0x14,
	rsv1			= 0x18,	// reserved
	RxSts			= 0x1c,	// unused
	IntrStatus		= 0x20,
	IntrMask		= 0x24,
	IntrControl		= 0x28,
	IntrTimer		= 0x2c,	// unused (Interupt Timer)
	PMControl		= 0x30,	// unused (Power Mgmt Control/Status)
	rsv2			= 0x34,	// reserved
	ROMControl		= 0x38,
	ROMInterface		= 0x3c,
	StationControl		= 0x40,
	GMIIControl		= 0x44,
	GIoCR			= 0x48, // unused (GMAC IO Compensation)
	GIoCtrl			= 0x4c, // unused (GMAC IO Control)
	TxMacControl		= 0x50,
	TxLimit			= 0x54, // unused (Tx MAC Timer/TryLimit)
	RGDelay			= 0x58, // unused (RGMII Tx Internal Delay)
	rsv3			= 0x5c, // reserved
	RxMacControl		= 0x60,
	RxMacAddr		= 0x62,
	RxHashTable		= 0x68,
	// Undocumented		= 0x6c,
	RxWolCtrl		= 0x70,
	RxWolData		= 0x74, // unused (Rx WOL Data Access)
	RxMPSControl		= 0x78,	// unused (Rx MPS Control)
	rsv4			= 0x7c, // reserved
};
// NOTE(review): the extraction dropped many members of these enums (the
// embedded line numbers jump); the surviving fragments are kept verbatim
// below. Missing entries (e.g. RxQInt, RxHalt, TxHalt, CmdReset, CmdTxEnb,
// OWNbit/INTbit/RingEnd) must be recovered from the original file.
133 enum sis190_register_content {
// IntrStatus register bits.
135 SoftInt = 0x40000000, // unused
136 Timeup = 0x20000000, // unused
137 PauseFrame = 0x00080000, // unused
138 MagicPacket = 0x00040000, // unused
139 WakeupFrame = 0x00020000, // unused
140 LinkChange = 0x00010000,
141 RxQEmpty = 0x00000080,
143 TxQ1Empty = 0x00000020, // unused
144 TxQ1Int = 0x00000010,
145 TxQ0Empty = 0x00000008, // unused
146 TxQ0Int = 0x00000004,
// Rx status descriptor error bits.
151 RxRES = 0x00200000, // unused
153 RxRUNT = 0x00100000, // unused
154 RxRWT = 0x00400000, // unused
// {Rx/Tx} command register bits.
158 CmdRxEnb = 0x08, // unused
160 RxBufEmpty = 0x01, // unused
// EEPROM lock/unlock values.
163 Cfg9346_Lock = 0x00, // unused
164 Cfg9346_Unlock = 0xc0, // unused
// RxMacControl accept-filter bits (used by sis190_set_rx_mode).
167 AcceptErr = 0x20, // unused
168 AcceptRunt = 0x10, // unused
169 AcceptBroadcast = 0x0800,
170 AcceptMulticast = 0x0400,
171 AcceptMyPhys = 0x0200,
172 AcceptAllPhys = 0x0100,
// DMA burst / inter-frame gap shift amounts.
176 RxCfgDMAShift = 8, // 0x1a in RxControl ?
179 TxInterFrameGapShift = 24,
180 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
// Link status bits.
190 LinkStatus = 0x02, // unused
191 FullDup = 0x01, // unused
194 TBILinkOK = 0x02000000, // unused
// Descriptor status bits; only the size mask survived the extraction.
211 enum _DescStatusBit {
221 RxSizeMask = 0x0000ffff
224 struct sis190_private {
225 void __iomem *mmio_addr;
226 struct pci_dev *pci_dev;
227 struct net_device_stats stats;
236 struct RxDesc *RxDescRing;
237 struct TxDesc *TxDescRing;
238 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
239 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
240 struct work_struct phy_task;
241 struct timer_list timer;
243 struct mii_if_info mii_if;
246 const static struct {
248 u8 version; /* depend on docs */
249 u32 RxConfigMask; /* clear the bits supported by this chip */
250 } sis_chip_info[] = {
251 { DRV_NAME, 0x00, 0xff7e1880, },
254 static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
255 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
259 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
261 static int rx_copybreak = 200;
267 MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
268 module_param(rx_copybreak, int, 0);
269 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
270 module_param_named(debug, debug.msg_enable, int, 0);
271 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
272 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
273 MODULE_VERSION(DRV_VERSION);
274 MODULE_LICENSE("GPL");
276 static const u32 sis190_intr_mask =
277 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt;
280 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
281 * The chips use a 64 element hash table based on the Ethernet CRC.
283 static int multicast_filter_limit = 32;
285 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
289 SIS_W32(GMIIControl, ctl);
293 for (i = 0; i < 100; i++) {
294 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
300 printk(KERN_ERR PFX "PHY command failed !\n");
303 static void mdio_write(void __iomem *ioaddr, int reg, int val)
307 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
308 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift) |
309 (((u32) val) << EhnMIIdataShift));
312 static int mdio_read(void __iomem *ioaddr, int reg)
316 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
317 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift));
319 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
322 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
324 struct sis190_private *tp = netdev_priv(dev);
326 mdio_write(tp->mmio_addr, reg, val);
329 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
331 struct sis190_private *tp = netdev_priv(dev);
333 return mdio_read(tp->mmio_addr, reg);
336 static int sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
342 if (!(SIS_R32(ROMControl) & 0x0002))
345 val = (0x0080 | (0x2 << 8) | (reg << 10));
347 SIS_W32(ROMInterface, val);
349 for (i = 0; i < 200; i++) {
350 if (!(SIS_R32(ROMInterface) & 0x0080))
355 data = (u16) ((SIS_R32(ROMInterface) & 0xffff0000) >> 16);
360 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
362 SIS_W32(IntrMask, 0x00);
363 SIS_W32(IntrStatus, 0xffffffff);
367 static void sis190_asic_down(void __iomem *ioaddr)
369 /* Stop the chip's Tx and Rx DMA processes. */
371 SIS_W32(TxControl, 0x1a00);
372 SIS_W32(RxControl, 0x1a00);
374 sis190_irq_mask_and_ack(ioaddr);
377 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
379 desc->size |= cpu_to_le32(RingEnd);
382 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
384 u32 eor = le32_to_cpu(desc->size) & RingEnd;
387 desc->size = cpu_to_le32(rx_buf_sz | eor);
389 desc->status = cpu_to_le32(OWNbit | INTbit);
392 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
395 desc->addr = cpu_to_le32(mapping);
396 sis190_give_to_asic(desc, rx_buf_sz);
399 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
402 desc->addr = 0xdeadbeef;
403 desc->size &= cpu_to_le32(RingEnd);
408 static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
409 struct RxDesc *desc, u32 rx_buf_sz)
415 skb = dev_alloc_skb(rx_buf_sz);
421 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
424 sis190_map_to_asic(desc, mapping, rx_buf_sz);
430 sis190_make_unusable_by_asic(desc);
434 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
439 for (cur = start; cur < end; cur++) {
440 int ret, i = cur % NUM_RX_DESC;
442 if (tp->Rx_skbuff[i])
445 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
446 tp->RxDescRing + i, tp->rx_buf_sz);
453 static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
454 struct RxDesc *desc, int rx_buf_sz)
458 if (pkt_size < rx_copybreak) {
461 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
463 skb_reserve(skb, NET_IP_ALIGN);
464 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
466 sis190_give_to_asic(desc, rx_buf_sz);
473 static int sis190_rx_interrupt(struct net_device *dev,
474 struct sis190_private *tp, void __iomem *ioaddr)
476 struct net_device_stats *stats = &tp->stats;
477 u32 rx_left, cur_rx = tp->cur_rx;
480 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
481 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
483 for (; rx_left > 0; rx_left--, cur_rx++) {
484 unsigned int entry = cur_rx % NUM_RX_DESC;
485 struct RxDesc *desc = tp->RxDescRing + entry;
488 if (desc->status & OWNbit)
491 status = le32_to_cpu(desc->PSize);
493 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
496 if (status & RxCRC) {
497 net_intr(tp, KERN_INFO "%s: bad crc. status = %08x.\n",
500 stats->rx_crc_errors++;
501 sis190_give_to_asic(desc, tp->rx_buf_sz);
502 } else if (!(status & PADbit)) {
503 net_intr(tp, KERN_INFO "%s: bad pad. status = %08x.\n",
506 stats->rx_length_errors++;
507 sis190_give_to_asic(desc, tp->rx_buf_sz);
509 struct sk_buff *skb = tp->Rx_skbuff[entry];
510 int pkt_size = (status & RxSizeMask) - 4;
511 void (*pci_action)(struct pci_dev *, dma_addr_t,
512 size_t, int) = pci_dma_sync_single_for_device;
514 if (unlikely(pkt_size > tp->rx_buf_sz)) {
515 net_intr(tp, KERN_INFO
516 "%s: (frag) status = %08x.\n",
519 stats->rx_length_errors++;
520 sis190_give_to_asic(desc, tp->rx_buf_sz);
524 pci_dma_sync_single_for_cpu(tp->pci_dev,
525 le32_to_cpu(desc->addr), tp->rx_buf_sz,
528 if (sis190_try_rx_copy(&skb, pkt_size, desc,
530 pci_action = pci_unmap_single;
531 tp->Rx_skbuff[entry] = NULL;
532 sis190_make_unusable_by_asic(desc);
535 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
536 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
539 skb_put(skb, pkt_size);
540 skb->protocol = eth_type_trans(skb, dev);
544 dev->last_rx = jiffies;
545 stats->rx_bytes += pkt_size;
549 count = cur_rx - tp->cur_rx;
552 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
553 if (!delta && count && netif_msg_intr(tp))
554 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
555 tp->dirty_rx += delta;
557 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
558 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
563 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
568 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
570 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
572 memset(desc, 0x00, sizeof(*desc));
575 static void sis190_tx_interrupt(struct net_device *dev,
576 struct sis190_private *tp, void __iomem *ioaddr)
578 u32 pending, dirty_tx = tp->dirty_tx;
580 * It would not be needed if queueing was allowed to be enabled
581 * again too early (hint: think preempt and unclocked smp systems).
583 unsigned int queue_stopped;
586 pending = tp->cur_tx - dirty_tx;
587 queue_stopped = (pending == NUM_TX_DESC);
589 for (; pending; pending--, dirty_tx++) {
590 unsigned int entry = dirty_tx % NUM_TX_DESC;
591 struct TxDesc *txd = tp->TxDescRing + entry;
594 if (le32_to_cpu(txd->status) & OWNbit)
597 skb = tp->Tx_skbuff[entry];
599 tp->stats.tx_packets++;
600 tp->stats.tx_bytes += skb->len;
602 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
603 tp->Tx_skbuff[entry] = NULL;
604 dev_kfree_skb_irq(skb);
607 if (tp->dirty_tx != dirty_tx) {
608 tp->dirty_tx = dirty_tx;
611 netif_wake_queue(dev);
616 * The interrupt handler does all of the Rx thread work and cleans up after
619 static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
621 struct net_device *dev = __dev;
622 struct sis190_private *tp = netdev_priv(dev);
623 void __iomem *ioaddr = tp->mmio_addr;
624 unsigned int handled = 0;
627 status = SIS_R32(IntrStatus);
629 if ((status == 0xffffffff) || !status)
634 if (unlikely(!netif_running(dev))) {
635 sis190_asic_down(ioaddr);
639 SIS_W32(IntrStatus, status);
641 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
643 if (status & LinkChange) {
644 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
645 schedule_work(&tp->phy_task);
649 sis190_rx_interrupt(dev, tp, ioaddr);
651 if (status & TxQ0Int)
652 sis190_tx_interrupt(dev, tp, ioaddr);
654 return IRQ_RETVAL(handled);
657 #ifdef CONFIG_NET_POLL_CONTROLLER
658 static void sis190_netpoll(struct net_device *dev)
660 struct sis190_private *tp = netdev_priv(dev);
661 struct pci_dev *pdev = tp->pci_dev;
663 disable_irq(pdev->irq);
664 sis190_interrupt(pdev->irq, dev, NULL);
665 enable_irq(pdev->irq);
669 static void sis190_free_rx_skb(struct sis190_private *tp,
670 struct sk_buff **sk_buff, struct RxDesc *desc)
672 struct pci_dev *pdev = tp->pci_dev;
674 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
676 dev_kfree_skb(*sk_buff);
678 sis190_make_unusable_by_asic(desc);
681 static void sis190_rx_clear(struct sis190_private *tp)
685 for (i = 0; i < NUM_RX_DESC; i++) {
686 if (!tp->Rx_skbuff[i])
688 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
692 static void sis190_init_ring_indexes(struct sis190_private *tp)
694 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
697 static int sis190_init_ring(struct net_device *dev)
699 struct sis190_private *tp = netdev_priv(dev);
701 sis190_init_ring_indexes(tp);
703 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
704 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
706 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
709 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
718 static void sis190_set_rx_mode(struct net_device *dev)
720 struct sis190_private *tp = netdev_priv(dev);
721 void __iomem *ioaddr = tp->mmio_addr;
723 u32 mc_filter[2]; /* Multicast hash filter */
726 if (dev->flags & IFF_PROMISC) {
727 /* Unconditionally log net taps. */
728 net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
731 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
733 mc_filter[1] = mc_filter[0] = 0xffffffff;
734 } else if ((dev->mc_count > multicast_filter_limit) ||
735 (dev->flags & IFF_ALLMULTI)) {
736 /* Too many to filter perfectly -- accept all multicasts. */
737 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
738 mc_filter[1] = mc_filter[0] = 0xffffffff;
740 struct dev_mc_list *mclist;
743 rx_mode = AcceptBroadcast | AcceptMyPhys;
744 mc_filter[1] = mc_filter[0] = 0;
745 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
746 i++, mclist = mclist->next) {
748 ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
749 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
750 rx_mode |= AcceptMulticast;
754 spin_lock_irqsave(&tp->lock, flags);
756 SIS_W16(RxMacControl, rx_mode | 0x2);
757 SIS_W32(RxHashTable, mc_filter[0]);
758 SIS_W32(RxHashTable + 4, mc_filter[1]);
760 spin_unlock_irqrestore(&tp->lock, flags);
763 static void sis190_soft_reset(void __iomem *ioaddr)
765 SIS_W32(IntrControl, 0x8000);
768 SIS_W32(IntrControl, 0x0);
769 sis190_asic_down(ioaddr);
773 static void sis190_hw_start(struct net_device *dev)
775 struct sis190_private *tp = netdev_priv(dev);
776 void __iomem *ioaddr = tp->mmio_addr;
778 sis190_soft_reset(ioaddr);
780 SIS_W32(TxDescStartAddr, tp->tx_dma);
781 SIS_W32(RxDescStartAddr, tp->rx_dma);
783 SIS_W32(IntrStatus, 0xffffffff);
784 SIS_W32(IntrMask, 0x0);
786 * Default is 100Mbps.
787 * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09
789 SIS_W16(StationControl, 0x1901);
790 SIS_W32(GMIIControl, 0x0);
791 SIS_W32(TxMacControl, 0x60);
792 SIS_W16(RxMacControl, 0x02);
793 SIS_W32(RxHashTable, 0x0);
795 SIS_W32(RxWolCtrl, 0x0);
796 SIS_W32(RxWolData, 0x0);
800 sis190_set_rx_mode(dev);
802 /* Enable all known interrupts by setting the interrupt mask. */
803 SIS_W32(IntrMask, sis190_intr_mask);
805 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
806 SIS_W32(RxControl, 0x1a1d);
808 netif_start_queue(dev);
// Deferred PHY/link handler (scheduled from the ISR on LinkChange and by
// the periodic timer). Three cases: PHY still resetting -> re-poll soon;
// autoneg not complete -> re-reset the PHY and re-arm the long timer;
// otherwise pick the StationControl value matching the negotiated LPA
// bits from the reg31[] table and report carrier up.
// NOTE(review): the extraction dropped many lines of this function
// (locals, braces, the reg31[] ctl values); fragments kept verbatim.
811 static void sis190_phy_task(void * data)
813 struct net_device *dev = data;
814 struct sis190_private *tp = netdev_priv(dev);
815 void __iomem *ioaddr = tp->mmio_addr;
// PHY reset still in progress: poll again in ~100 ms.
820 val = mdio_read(ioaddr, MII_BMCR);
821 if (val & BMCR_RESET) {
822 // FIXME: needlessly high ? -- FR 02/07/2005
823 mod_timer(&tp->timer, jiffies + HZ/10);
// Autonegotiation not finished: kick the PHY and retry later.
824 } else if (!(mdio_read(ioaddr, MII_BMSR) & BMSR_ANEGCOMPLETE)) {
825 net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
827 mdio_write(ioaddr, MII_BMCR, val | BMCR_RESET);
828 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
// Link-partner-ability -> description / StationControl lookup table
// (the ctl values were lost by the extraction).
836 { LPA_1000XFULL | LPA_SLCT,
837 "1000 Mbps Full Duplex",
839 { LPA_1000XHALF | LPA_SLCT,
840 "1000 Mbps Half Duplex",
843 "100 Mbps Full Duplex",
846 "100 Mbps Half Duplex",
849 "10 Mbps Full Duplex",
852 "10 Mbps Half Duplex",
854 { 0, "unknown", 0x0000 }
// Dump the extended status and LPA registers for debugging.
857 val = mdio_read(ioaddr, 0x1f);
858 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
860 val = mdio_read(ioaddr, MII_LPA);
861 net_link(tp, KERN_INFO "%s: mii lpa = %04x.\n", dev->name, val);
// First table entry whose LPA bits are all present wins.
863 for (p = reg31; p->ctl; p++) {
864 if ((val & p->val) == p->val)
// Program the negotiated speed/duplex and announce the link.
868 SIS_W16(StationControl, p->ctl);
869 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
871 netif_carrier_on(dev);
877 static void sis190_phy_timer(unsigned long __opaque)
879 struct net_device *dev = (struct net_device *)__opaque;
880 struct sis190_private *tp = netdev_priv(dev);
882 if (likely(netif_running(dev)))
883 schedule_work(&tp->phy_task);
886 static inline void sis190_delete_timer(struct net_device *dev)
888 struct sis190_private *tp = netdev_priv(dev);
890 del_timer_sync(&tp->timer);
893 static inline void sis190_request_timer(struct net_device *dev)
895 struct sis190_private *tp = netdev_priv(dev);
896 struct timer_list *timer = &tp->timer;
899 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
900 timer->data = (unsigned long)dev;
901 timer->function = sis190_phy_timer;
905 static void sis190_set_rxbufsize(struct sis190_private *tp,
906 struct net_device *dev)
908 unsigned int mtu = dev->mtu;
910 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
913 static int sis190_open(struct net_device *dev)
915 struct sis190_private *tp = netdev_priv(dev);
916 struct pci_dev *pdev = tp->pci_dev;
919 sis190_set_rxbufsize(tp, dev);
922 * Rx and Tx descriptors need 256 bytes alignment.
923 * pci_alloc_consistent() guarantees a stronger alignment.
925 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
929 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
933 rc = sis190_init_ring(dev);
937 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
939 sis190_request_timer(dev);
941 rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
943 goto err_release_timer_2;
945 sis190_hw_start(dev);
950 sis190_delete_timer(dev);
953 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
956 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
961 static void sis190_tx_clear(struct sis190_private *tp)
965 for (i = 0; i < NUM_TX_DESC; i++) {
966 struct sk_buff *skb = tp->Tx_skbuff[i];
971 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
972 tp->Tx_skbuff[i] = NULL;
975 tp->stats.tx_dropped++;
977 tp->cur_tx = tp->dirty_tx = 0;
980 static void sis190_down(struct net_device *dev)
982 struct sis190_private *tp = netdev_priv(dev);
983 void __iomem *ioaddr = tp->mmio_addr;
984 unsigned int poll_locked = 0;
986 sis190_delete_timer(dev);
988 netif_stop_queue(dev);
990 flush_scheduled_work();
993 spin_lock_irq(&tp->lock);
995 sis190_asic_down(ioaddr);
997 spin_unlock_irq(&tp->lock);
999 synchronize_irq(dev->irq);
1002 netif_poll_disable(dev);
1006 synchronize_sched();
1008 } while (SIS_R32(IntrMask));
1010 sis190_tx_clear(tp);
1011 sis190_rx_clear(tp);
1014 static int sis190_close(struct net_device *dev)
1016 struct sis190_private *tp = netdev_priv(dev);
1017 struct pci_dev *pdev = tp->pci_dev;
1021 free_irq(dev->irq, dev);
1023 netif_poll_enable(dev);
1025 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1026 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1028 tp->TxDescRing = NULL;
1029 tp->RxDescRing = NULL;
1034 static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1036 struct sis190_private *tp = netdev_priv(dev);
1037 void __iomem *ioaddr = tp->mmio_addr;
1038 u32 len, entry, dirty_tx;
1039 struct TxDesc *desc;
1042 if (unlikely(skb->len < ETH_ZLEN)) {
1043 skb = skb_padto(skb, ETH_ZLEN);
1045 tp->stats.tx_dropped++;
1053 entry = tp->cur_tx % NUM_TX_DESC;
1054 desc = tp->TxDescRing + entry;
1056 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1057 netif_stop_queue(dev);
1058 net_tx_err(tp, KERN_ERR PFX
1059 "%s: BUG! Tx Ring full when queue awake!\n",
1061 return NETDEV_TX_BUSY;
1064 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1066 tp->Tx_skbuff[entry] = skb;
1068 desc->PSize = cpu_to_le32(len);
1069 desc->addr = cpu_to_le32(mapping);
1071 desc->size = cpu_to_le32(len);
1072 if (entry == (NUM_TX_DESC - 1))
1073 desc->size |= cpu_to_le32(RingEnd);
1077 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1083 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1085 dev->trans_start = jiffies;
1087 dirty_tx = tp->dirty_tx;
1088 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1089 netif_stop_queue(dev);
1091 if (dirty_tx != tp->dirty_tx)
1092 netif_wake_queue(dev);
1095 return NETDEV_TX_OK;
1098 static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1100 struct sis190_private *tp = netdev_priv(dev);
1105 static void sis190_release_board(struct pci_dev *pdev)
1107 struct net_device *dev = pci_get_drvdata(pdev);
1108 struct sis190_private *tp = netdev_priv(dev);
1110 iounmap(tp->mmio_addr);
1111 pci_release_regions(pdev);
1112 pci_disable_device(pdev);
1116 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1118 struct sis190_private *tp;
1119 struct net_device *dev;
1120 void __iomem *ioaddr;
1123 dev = alloc_etherdev(sizeof(*tp));
1125 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1130 SET_MODULE_OWNER(dev);
1131 SET_NETDEV_DEV(dev, &pdev->dev);
1133 tp = netdev_priv(dev);
1134 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1136 rc = pci_enable_device(pdev);
1138 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1139 goto err_free_dev_1;
1144 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1145 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1147 goto err_pci_disable_2;
1149 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1150 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1152 goto err_pci_disable_2;
1155 rc = pci_request_regions(pdev, DRV_NAME);
1157 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1159 goto err_pci_disable_2;
1162 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1164 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1166 goto err_free_res_3;
1169 pci_set_master(pdev);
1171 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1173 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1176 goto err_free_res_3;
1180 tp->mmio_addr = ioaddr;
1182 tp->mii_if.dev = dev;
1183 tp->mii_if.mdio_read = __mdio_read;
1184 tp->mii_if.mdio_write = __mdio_write;
1185 // tp->mii_if.phy_id = XXX;
1186 tp->mii_if.phy_id_mask = 0x1f;
1187 tp->mii_if.reg_num_mask = 0x1f;
1189 sis190_irq_mask_and_ack(ioaddr);
1191 sis190_soft_reset(ioaddr);
1196 pci_release_regions(pdev);
1198 pci_disable_device(pdev);
1206 static void sis190_tx_timeout(struct net_device *dev)
1208 struct sis190_private *tp = netdev_priv(dev);
1209 void __iomem *ioaddr = tp->mmio_addr;
1212 /* Disable Tx, if not already */
1213 tmp8 = SIS_R8(TxControl);
1214 if (tmp8 & CmdTxEnb)
1215 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1218 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1219 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1221 /* Disable interrupts by clearing the interrupt mask. */
1222 SIS_W32(IntrMask, 0x0000);
1224 /* Stop a shared interrupt from scavenging while we are. */
1225 spin_lock_irq(&tp->lock);
1226 sis190_tx_clear(tp);
1227 spin_unlock_irq(&tp->lock);
1229 /* ...and finally, reset everything. */
1230 sis190_hw_start(dev);
1232 netif_wake_queue(dev);
1235 static void sis190_set_speed_auto(struct net_device *dev)
1237 struct sis190_private *tp = netdev_priv(dev);
1238 void __iomem *ioaddr = tp->mmio_addr;
1241 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1243 val = mdio_read(ioaddr, MII_ADVERTISE);
1245 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1247 mdio_write(ioaddr, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1248 ADVERTISE_100FULL | ADVERTISE_10FULL |
1249 ADVERTISE_100HALF | ADVERTISE_10HALF);
1251 // Enable 1000 Full Mode.
1252 mdio_write(ioaddr, MII_CTRL1000, ADVERTISE_1000FULL);
1254 // Enable auto-negotiation and restart auto-negotiation.
1255 mdio_write(ioaddr, MII_BMCR,
1256 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1259 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1261 struct sis190_private *tp = netdev_priv(dev);
1263 return mii_ethtool_gset(&tp->mii_if, cmd);
1266 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1268 struct sis190_private *tp = netdev_priv(dev);
1270 return mii_ethtool_sset(&tp->mii_if, cmd);
1273 static void sis190_get_drvinfo(struct net_device *dev,
1274 struct ethtool_drvinfo *info)
1276 struct sis190_private *tp = netdev_priv(dev);
1278 strcpy(info->driver, DRV_NAME);
1279 strcpy(info->version, DRV_VERSION);
1280 strcpy(info->bus_info, pci_name(tp->pci_dev));
1283 static int sis190_get_regs_len(struct net_device *dev)
1285 return SIS190_REGS_SIZE;
1288 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1291 struct sis190_private *tp = netdev_priv(dev);
1292 unsigned long flags;
1294 if (regs->len > SIS190_REGS_SIZE)
1295 regs->len = SIS190_REGS_SIZE;
1297 spin_lock_irqsave(&tp->lock, flags);
1298 memcpy_fromio(p, tp->mmio_addr, regs->len);
1299 spin_unlock_irqrestore(&tp->lock, flags);
1302 static int sis190_nway_reset(struct net_device *dev)
1304 struct sis190_private *tp = netdev_priv(dev);
1306 return mii_nway_restart(&tp->mii_if);
1309 static u32 sis190_get_msglevel(struct net_device *dev)
1311 struct sis190_private *tp = netdev_priv(dev);
1313 return tp->msg_enable;
1316 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1318 struct sis190_private *tp = netdev_priv(dev);
1320 tp->msg_enable = value;
1323 static struct ethtool_ops sis190_ethtool_ops = {
1324 .get_settings = sis190_get_settings,
1325 .set_settings = sis190_set_settings,
1326 .get_drvinfo = sis190_get_drvinfo,
1327 .get_regs_len = sis190_get_regs_len,
1328 .get_regs = sis190_get_regs,
1329 .get_link = ethtool_op_get_link,
1330 .get_msglevel = sis190_get_msglevel,
1331 .set_msglevel = sis190_set_msglevel,
1332 .nway_reset = sis190_nway_reset,
1335 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1337 struct sis190_private *tp = netdev_priv(dev);
1339 return !netif_running(dev) ? -EINVAL :
1340 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1343 static int __devinit sis190_init_one(struct pci_dev *pdev,
1344 const struct pci_device_id *ent)
1346 static int printed_version = 0;
1347 struct sis190_private *tp;
1348 struct net_device *dev;
1349 void __iomem *ioaddr;
1352 if (!printed_version) {
1353 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1354 printed_version = 1;
1357 dev = sis190_init_board(pdev);
1363 tp = netdev_priv(dev);
1364 ioaddr = tp->mmio_addr;
1366 /* Get MAC address */
1367 /* Read node address from the EEPROM */
1369 if (SIS_R32(ROMControl) & 0x4) {
1370 for (i = 0; i < 3; i++) {
1371 SIS_W16(RxMacAddr + 2*i,
1372 sis190_read_eeprom(ioaddr, 3 + i));
1376 for (i = 0; i < MAC_ADDR_LEN; i++)
1377 dev->dev_addr[i] = SIS_R8(RxMacAddr + i);
1379 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1381 dev->open = sis190_open;
1382 dev->stop = sis190_close;
1383 dev->do_ioctl = sis190_ioctl;
1384 dev->get_stats = sis190_get_stats;
1385 dev->tx_timeout = sis190_tx_timeout;
1386 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1387 dev->hard_start_xmit = sis190_start_xmit;
1388 #ifdef CONFIG_NET_POLL_CONTROLLER
1389 dev->poll_controller = sis190_netpoll;
1391 dev->set_multicast_list = sis190_set_rx_mode;
1392 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1393 dev->irq = pdev->irq;
1394 dev->base_addr = (unsigned long) 0xdead;
1396 spin_lock_init(&tp->lock);
1397 rc = register_netdev(dev);
1399 sis190_release_board(pdev);
1403 pci_set_drvdata(pdev, dev);
1405 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1406 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1407 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1409 dev->dev_addr[0], dev->dev_addr[1],
1410 dev->dev_addr[2], dev->dev_addr[3],
1411 dev->dev_addr[4], dev->dev_addr[5]);
1413 netif_carrier_off(dev);
1415 sis190_set_speed_auto(dev);
1420 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1422 struct net_device *dev = pci_get_drvdata(pdev);
1424 unregister_netdev(dev);
1425 sis190_release_board(pdev);
1426 pci_set_drvdata(pdev, NULL);
1429 static struct pci_driver sis190_pci_driver = {
1431 .id_table = sis190_pci_tbl,
1432 .probe = sis190_init_one,
1433 .remove = __devexit_p(sis190_remove_one),
1436 static int __init sis190_init_module(void)
1438 return pci_module_init(&sis190_pci_driver);
1441 static void __exit sis190_cleanup_module(void)
1443 pci_unregister_driver(&sis190_pci_driver);
1446 module_init(sis190_init_module);
1447 module_exit(sis190_cleanup_module);