2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c and probably even epic100.c.
10 This software may be used and distributed according to the terms of
11 the GNU General Public License (GPL), incorporated herein by reference.
12 Drivers based on or derived from this code fall under the GPL and must
13 retain the authorship, copyright and license notice. This file is not
14 a complete program and may only be used when the entire operating
15 system is licensed under the GPL.
17 See the file COPYING in this distribution for more information.
21 #include <linux/module.h>
22 #include <linux/moduleparam.h>
23 #include <linux/netdevice.h>
24 #include <linux/rtnetlink.h>
25 #include <linux/etherdevice.h>
26 #include <linux/ethtool.h>
27 #include <linux/pci.h>
28 #include <linux/mii.h>
29 #include <linux/delay.h>
30 #include <linux/crc32.h>
31 #include <linux/dma-mapping.h>
34 #define net_drv(p, arg...) if (netif_msg_drv(p)) \
36 #define net_probe(p, arg...) if (netif_msg_probe(p)) \
38 #define net_link(p, arg...) if (netif_msg_link(p)) \
40 #define net_intr(p, arg...) if (netif_msg_intr(p)) \
42 #define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
45 #ifdef CONFIG_SIS190_NAPI
46 #define NAPI_SUFFIX "-NAPI"
48 #define NAPI_SUFFIX ""
51 #define DRV_VERSION "1.2" NAPI_SUFFIX
52 #define DRV_NAME "sis190"
53 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
54 #define PFX DRV_NAME ": "
56 #ifdef CONFIG_SIS190_NAPI
57 #define sis190_rx_skb netif_receive_skb
58 #define sis190_rx_quota(count, quota) min(count, quota)
60 #define sis190_rx_skb netif_rx
61 #define sis190_rx_quota(count, quota) count
64 #define MAC_ADDR_LEN 6
66 #define NUM_TX_DESC 64
67 #define NUM_RX_DESC 64
68 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
69 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
70 #define RX_BUF_SIZE 1536
72 #define SIS190_REGS_SIZE 0x80
73 #define SIS190_TX_TIMEOUT (6*HZ)
74 #define SIS190_PHY_TIMEOUT (10*HZ)
75 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
76 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
79 /* Enhanced PHY access register bit definitions */
80 #define EhnMIIread 0x0000
81 #define EhnMIIwrite 0x0020
82 #define EhnMIIdataShift 16
83 #define EhnMIIpmdShift 6 /* 7016 only */
84 #define EhnMIIregShift 11
85 #define EhnMIIreq 0x0010
86 #define EhnMIInotDone 0x0010
88 /* Write/read MMIO register */
89 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
90 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
91 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
92 #define SIS_R8(reg) readb (ioaddr + (reg))
93 #define SIS_R16(reg) readw (ioaddr + (reg))
94 #define SIS_R32(reg) readl (ioaddr + (reg))
96 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
98 enum sis190_registers {
100 TxDescStartAddr = 0x04,
101 TxNextDescAddr = 0x0c, // unused
103 RxDescStartAddr = 0x14,
104 RxNextDescAddr = 0x1c, // unused
108 IntrTimer = 0x2c, // unused
109 PMControl = 0x30, // unused
112 StationControl = 0x40,
118 // Undocumented = 0x6c,
120 // Undocumented = 0x74,
121 RxMPSControl = 0x78, // unused
124 enum sis190_register_content {
126 SoftInt = 0x40000000, // unused
127 Timeup = 0x20000000, // unused
128 PauseFrame = 0x00080000, // unused
129 MagicPacket = 0x00040000, // unused
130 WakeupFrame = 0x00020000, // unused
131 LinkChange = 0x00010000,
132 RxQEmpty = 0x00000080,
134 TxQ1Empty = 0x00000020, // unused
135 TxQ1Int = 0x00000010,
136 TxQ0Empty = 0x00000008, // unused
137 TxQ0Int = 0x00000004,
142 RxRES = 0x00200000, // unused
144 RxRUNT = 0x00100000, // unused
145 RxRWT = 0x00400000, // unused
149 CmdRxEnb = 0x08, // unused
151 RxBufEmpty = 0x01, // unused
154 Cfg9346_Lock = 0x00, // unused
155 Cfg9346_Unlock = 0xc0, // unused
158 AcceptErr = 0x20, // unused
159 AcceptRunt = 0x10, // unused
160 AcceptBroadcast = 0x0800,
161 AcceptMulticast = 0x0400,
162 AcceptMyPhys = 0x0200,
163 AcceptAllPhys = 0x0100,
167 RxCfgDMAShift = 8, // 0x1a in RxControl ?
170 TxInterFrameGapShift = 24,
171 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
181 LinkStatus = 0x02, // unused
182 FullDup = 0x01, // unused
185 TBILinkOK = 0x02000000, // unused
202 enum _DescStatusBit {
212 RxSizeMask = 0x0000ffff
215 struct sis190_private {
216 void __iomem *mmio_addr;
217 struct pci_dev *pci_dev;
218 struct net_device_stats stats;
227 struct RxDesc *RxDescRing;
228 struct TxDesc *TxDescRing;
229 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
230 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
231 struct work_struct phy_task;
232 struct timer_list timer;
234 struct mii_if_info mii_if;
237 const static struct {
239 u8 version; /* depend on docs */
240 u32 RxConfigMask; /* clear the bits supported by this chip */
241 } sis_chip_info[] = {
242 { DRV_NAME, 0x00, 0xff7e1880, },
245 static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
246 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
250 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
252 static int rx_copybreak = 200;
258 MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
259 module_param(rx_copybreak, int, 0);
260 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
261 module_param_named(debug, debug.msg_enable, int, 0);
262 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
263 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
264 MODULE_VERSION(DRV_VERSION);
265 MODULE_LICENSE("GPL");
267 static const u32 sis190_intr_mask =
268 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt;
271 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
272 * The chips use a 64 element hash table based on the Ethernet CRC.
274 static int multicast_filter_limit = 32;
/*
 * Push a raw command word into the GMII management interface and poll
 * until the controller clears the busy flag (EhnMIInotDone), giving up
 * after 100 iterations.  NOTE(review): loop body/delay lines are not
 * visible in this excerpt — confirm polling interval against full source.
 */
276 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
280 SIS_W32(GMIIControl, ctl);
284 for (i = 0; i < 100; i++) {
285 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
/* Fell out of the loop without the busy flag clearing: PHY never answered. */
291 printk(KERN_ERR PFX "PHY command failed !\n");
/*
 * Write @val to PHY register @reg via the enhanced MII command register.
 * NOTE(review): 'pmd' (PHY/port select, 7016 only per EhnMIIpmdShift) is
 * declared on a line not visible here — verify its value in full source.
 */
294 static void mdio_write(void __iomem *ioaddr, int reg, int val)
298 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
299 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift) |
300 (((u32) val) << EhnMIIdataShift));
/*
 * Read PHY register @reg via the enhanced MII command register; the
 * 16-bit result is returned in the upper half of GMIIControl.
 */
303 static int mdio_read(void __iomem *ioaddr, int reg)
307 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
308 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift));
310 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
313 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
315 struct sis190_private *tp = netdev_priv(dev);
317 mdio_write(tp->mmio_addr, reg, val);
320 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
322 struct sis190_private *tp = netdev_priv(dev);
324 return mdio_read(tp->mmio_addr, reg);
/*
 * Read one 16-bit word at EEPROM offset @reg.  Bails out early when the
 * ROMControl "EEPROM present" bit (0x0002 — presumably; confirm against
 * datasheet) is clear, otherwise issues a read op and polls up to 200
 * times for the command bit (0x0080) to clear before latching the data
 * from the upper halfword of ROMInterface.
 */
327 static int sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
333 if (!(SIS_R32(ROMControl) & 0x0002))
/* 0x0080 = operation request, 0x2<<8 = read opcode, reg<<10 = address. */
336 val = (0x0080 | (0x2 << 8) | (reg << 10));
338 SIS_W32(ROMInterface, val);
340 for (i = 0; i < 200; i++) {
341 if (!(SIS_R32(ROMInterface) & 0x0080))
346 data = (u16) ((SIS_R32(ROMInterface) & 0xffff0000) >> 16);
/*
 * Disable every interrupt source, then acknowledge anything already
 * pending so a stale status cannot re-fire once the mask is restored.
 */
351 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
353 SIS_W32(IntrMask, 0x00);
354 SIS_W32(IntrStatus, 0xffffffff);
/*
 * Quiesce the NIC: halt both DMA engines, then mask and ack interrupts.
 * 0x1a00 keeps the rings' control bits but leaves the enable bits clear.
 */
358 static void sis190_asic_down(void __iomem *ioaddr)
360 /* Stop the chip's Tx and Rx DMA processes. */
362 SIS_W32(TxControl, 0x1a00);
363 SIS_W32(RxControl, 0x1a00);
365 sis190_irq_mask_and_ack(ioaddr);
368 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
370 desc->size |= cpu_to_le32(RingEnd);
/*
 * Hand an Rx descriptor (back) to the hardware: restore the buffer size
 * while preserving the ring-wrap flag, then set OWN so the NIC may DMA
 * into it.  NOTE(review): lines between size and status writes are not
 * visible here — presumably a wmb() orders them; confirm in full source.
 */
373 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
375 u32 eor = le32_to_cpu(desc->size) & RingEnd;
378 desc->size = cpu_to_le32(rx_buf_sz | eor);
380 desc->status = cpu_to_le32(OWNbit | INTbit);
/*
 * Attach a freshly DMA-mapped buffer to @desc and give the descriptor
 * to the hardware in one step.
 */
383 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
386 desc->addr = cpu_to_le32(mapping);
387 sis190_give_to_asic(desc, rx_buf_sz);
390 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
393 desc->addr = 0xdeadbeef;
394 desc->size &= cpu_to_le32(RingEnd);
/*
 * Allocate one receive skb, DMA-map it and install it in @desc.
 * On allocation failure (path not fully visible in this excerpt) the
 * descriptor is poisoned so the NIC skips it.  Returns nonzero on
 * success per the caller in sis190_rx_fill().
 */
399 static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
400 struct RxDesc *desc, u32 rx_buf_sz)
406 skb = dev_alloc_skb(rx_buf_sz);
412 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
415 sis190_map_to_asic(desc, mapping, rx_buf_sz);
421 sis190_make_unusable_by_asic(desc);
/*
 * Populate ring slots [start, end) with fresh Rx buffers, skipping
 * slots that already hold an skb.  Returns the number of descriptors
 * actually (re)filled so the caller can advance dirty_rx.
 */
425 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
430 for (cur = start; cur < end; cur++) {
431 int ret, i = cur % NUM_RX_DESC;
433 if (tp->Rx_skbuff[i])
436 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
437 tp->RxDescRing + i, tp->rx_buf_sz);
/*
 * Copybreak optimization: for frames smaller than rx_copybreak, copy
 * the payload into a fresh small skb (IP-aligned) and immediately give
 * the original ring buffer back to the NIC, avoiding an unmap/remap of
 * the full-size buffer.  The chosen skb is returned through *sk_buff.
 */
444 static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
445 struct RxDesc *desc, int rx_buf_sz)
449 if (pkt_size < rx_copybreak) {
452 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
454 skb_reserve(skb, NET_IP_ALIGN);
455 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
457 sis190_give_to_asic(desc, rx_buf_sz);
/*
 * Receive-side interrupt work: walk descriptors owned by the CPU,
 * account errors, deliver good frames, and refill the ring.  Bounded
 * by sis190_rx_quota() (dev->quota under NAPI, unbounded otherwise).
 */
464 static int sis190_rx_interrupt(struct net_device *dev,
465 struct sis190_private *tp, void __iomem *ioaddr)
467 struct net_device_stats *stats = &tp->stats;
468 u32 rx_left, cur_rx = tp->cur_rx;
471 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
472 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
474 for (; rx_left > 0; rx_left--, cur_rx++) {
475 unsigned int entry = cur_rx % NUM_RX_DESC;
476 struct RxDesc *desc = tp->RxDescRing + entry;
/* NOTE(review): raw field tested without le32_to_cpu — OWNbit is the MSB,
 * so this works on little-endian but looks wrong for big-endian hosts. */
479 if (desc->status & OWNbit)
482 status = le32_to_cpu(desc->PSize);
484 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
/* Error paths: count the error and recycle the buffer in place. */
487 if (status & RxCRC) {
488 net_intr(tp, KERN_INFO "%s: bad crc. status = %08x.\n",
491 stats->rx_crc_errors++;
492 sis190_give_to_asic(desc, tp->rx_buf_sz);
493 } else if (!(status & PADbit)) {
494 net_intr(tp, KERN_INFO "%s: bad pad. status = %08x.\n",
497 stats->rx_length_errors++;
498 sis190_give_to_asic(desc, tp->rx_buf_sz);
500 struct sk_buff *skb = tp->Rx_skbuff[entry];
/* Strip the trailing 4-byte FCS from the reported length. */
501 int pkt_size = (status & RxSizeMask) - 4;
502 void (*pci_action)(struct pci_dev *, dma_addr_t,
503 size_t, int) = pci_dma_sync_single_for_device;
504 if (unlikely(pkt_size > tp->rx_buf_sz)) {
505 net_intr(tp, KERN_INFO
507 "%s: (frag) status = %08x.\n",
510 stats->rx_length_errors++;
511 sis190_give_to_asic(desc, tp->rx_buf_sz);
515 pci_dma_sync_single_for_cpu(tp->pci_dev,
516 le32_to_cpu(desc->addr), tp->rx_buf_sz,
/* If the copybreak path did not take the frame, the ring skb itself is
 * delivered: unmap it fully and detach it from the ring. */
519 if (sis190_try_rx_copy(&skb, pkt_size, desc,
521 pci_action = pci_unmap_single;
522 tp->Rx_skbuff[entry] = NULL;
523 sis190_make_unusable_by_asic(desc);
526 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
527 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
530 skb_put(skb, pkt_size);
531 skb->protocol = eth_type_trans(skb, dev);
535 dev->last_rx = jiffies;
536 stats->rx_bytes += pkt_size;
/* Refill whatever we consumed; warn if allocation came up empty. */
540 count = cur_rx - tp->cur_rx;
543 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
544 if (!delta && count && netif_msg_intr(tp))
545 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
546 tp->dirty_rx += delta;
548 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
549 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
/*
 * Undo the DMA mapping of a transmitted skb and scrub its descriptor.
 * The mapped length mirrors the padding applied at transmit time, so
 * short frames unmap ETH_ZLEN bytes rather than skb->len.
 */
554 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
559 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
561 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
563 memset(desc, 0x00, sizeof(*desc));
/*
 * Transmit-completion work: reclaim descriptors the NIC has released,
 * update stats, free the skbs, and wake the queue if we had stopped it
 * because the ring was full.
 */
566 static void sis190_tx_interrupt(struct net_device *dev,
567 struct sis190_private *tp, void __iomem *ioaddr)
569 u32 pending, dirty_tx = tp->dirty_tx;
571 * It would not be needed if queueing was allowed to be enabled
572 * again too early (hint: think preempt and unclocked smp systems).
574 unsigned int queue_stopped;
/* Snapshot whether the ring was completely full before reclaiming. */
577 pending = tp->cur_tx - dirty_tx;
578 queue_stopped = (pending == NUM_TX_DESC);
580 for (; pending; pending--, dirty_tx++) {
581 unsigned int entry = dirty_tx % NUM_TX_DESC;
582 struct TxDesc *txd = tp->TxDescRing + entry;
/* Still owned by the NIC: stop reclaiming at the first busy slot. */
585 if (le32_to_cpu(txd->status) & OWNbit)
588 skb = tp->Tx_skbuff[entry];
590 tp->stats.tx_packets++;
591 tp->stats.tx_bytes += skb->len;
593 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
594 tp->Tx_skbuff[entry] = NULL;
595 dev_kfree_skb_irq(skb);
598 if (tp->dirty_tx != dirty_tx) {
599 tp->dirty_tx = dirty_tx;
602 netif_wake_queue(dev);
607 * The interrupt handler does all of the Rx thread work and cleans up after
/*
 * Top-level (possibly shared) IRQ handler: read and ack the status
 * word, then dispatch to link-change, Rx and Tx service routines.
 * Returns IRQ_NONE when the status is empty or all-ones (not ours /
 * device gone).
 */
610 static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
612 struct net_device *dev = __dev;
613 struct sis190_private *tp = netdev_priv(dev);
614 void __iomem *ioaddr = tp->mmio_addr;
615 unsigned int handled = 0;
618 status = SIS_R32(IntrStatus);
620 if ((status == 0xffffffff) || !status)
/* Interface went down while an interrupt was in flight: quiesce. */
625 if (unlikely(!netif_running(dev))) {
626 sis190_asic_down(ioaddr);
630 SIS_W32(IntrStatus, status);
632 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
/* Link transitions are handled in process context via the phy task. */
634 if (status & LinkChange) {
635 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
636 schedule_work(&tp->phy_task);
640 sis190_rx_interrupt(dev, tp, ioaddr);
642 if (status & TxQ0Int)
643 sis190_tx_interrupt(dev, tp, ioaddr);
645 return IRQ_RETVAL(handled);
648 #ifdef CONFIG_NET_POLL_CONTROLLER
649 static void sis190_netpoll(struct net_device *dev)
651 struct sis190_private *tp = netdev_priv(dev);
652 struct pci_dev *pdev = tp->pci_dev;
654 disable_irq(pdev->irq);
655 sis190_interrupt(pdev->irq, dev, NULL);
656 enable_irq(pdev->irq);
660 static void sis190_free_rx_skb(struct sis190_private *tp,
661 struct sk_buff **sk_buff, struct RxDesc *desc)
663 struct pci_dev *pdev = tp->pci_dev;
665 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
667 dev_kfree_skb(*sk_buff);
669 sis190_make_unusable_by_asic(desc);
672 static void sis190_rx_clear(struct sis190_private *tp)
676 for (i = 0; i < NUM_RX_DESC; i++) {
677 if (!tp->Rx_skbuff[i])
679 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
683 static void sis190_init_ring_indexes(struct sis190_private *tp)
685 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
/*
 * Prepare both descriptor rings for use: reset the indexes, forget any
 * stale skb pointers, fully populate the Rx ring with buffers, and
 * mark the last Rx descriptor so the hardware wraps.  Fails (path not
 * fully visible here) if the Rx ring cannot be completely filled.
 */
688 static int sis190_init_ring(struct net_device *dev)
690 struct sis190_private *tp = netdev_priv(dev);
692 sis190_init_ring_indexes(tp);
694 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
695 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
697 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
700 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
/*
 * Program the receive filter: promiscuous, all-multicast (when the
 * list exceeds multicast_filter_limit), or a 64-bit CRC hash of the
 * configured multicast addresses.  Register writes are done under
 * tp->lock since the filter can change concurrently with the ISR.
 */
709 static void sis190_set_rx_mode(struct net_device *dev)
711 struct sis190_private *tp = netdev_priv(dev);
712 void __iomem *ioaddr = tp->mmio_addr;
714 u32 mc_filter[2]; /* Multicast hash filter */
717 if (dev->flags & IFF_PROMISC) {
718 /* Unconditionally log net taps. */
719 net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
722 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
724 mc_filter[1] = mc_filter[0] = 0xffffffff;
725 } else if ((dev->mc_count > multicast_filter_limit) ||
726 (dev->flags & IFF_ALLMULTI)) {
727 /* Too many to filter perfectly -- accept all multicasts. */
728 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
729 mc_filter[1] = mc_filter[0] = 0xffffffff;
731 struct dev_mc_list *mclist;
734 rx_mode = AcceptBroadcast | AcceptMyPhys;
735 mc_filter[1] = mc_filter[0] = 0;
/* Top 6 CRC bits index the 64-entry hash table, one bit per address. */
736 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
737 i++, mclist = mclist->next) {
739 ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
740 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
741 rx_mode |= AcceptMulticast;
745 spin_lock_irqsave(&tp->lock, flags);
747 SIS_W16(RxMacControl, rx_mode | 0x2);
748 SIS_W32(RxHashTable, mc_filter[0]);
749 SIS_W32(RxHashTable + 4, mc_filter[1]);
751 spin_unlock_irqrestore(&tp->lock, flags);
/*
 * Soft-reset the chip: pulse the reset bit in IntrControl, then bring
 * the ASIC down cleanly.  NOTE(review): lines between the two writes
 * are not visible in this excerpt — presumably a commit/delay sits
 * there; confirm against the full source.
 */
754 static void sis190_soft_reset(void __iomem *ioaddr)
756 SIS_W32(IntrControl, 0x8000);
759 SIS_W32(IntrControl, 0x0);
760 sis190_asic_down(ioaddr);
/*
 * Bring the hardware to an operational state: reset, program the ring
 * base addresses, clear stale interrupt state, set MAC defaults,
 * install the Rx filter, unmask interrupts and start both DMA engines.
 */
764 static void sis190_hw_start(struct net_device *dev)
766 struct sis190_private *tp = netdev_priv(dev);
767 void __iomem *ioaddr = tp->mmio_addr;
769 sis190_soft_reset(ioaddr);
771 SIS_W32(TxDescStartAddr, tp->tx_dma);
772 SIS_W32(RxDescStartAddr, tp->rx_dma);
/* Ack anything pending and keep everything masked until configured. */
774 SIS_W32(IntrStatus, 0xffffffff);
775 SIS_W32(IntrMask, 0x0);
777 * Default is 100Mbps.
778 * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09
780 SIS_W16(StationControl, 0x1901);
781 SIS_W32(GMIIControl, 0x0);
782 SIS_W32(TxMacControl, 0x60);
783 SIS_W16(RxMacControl, 0x02);
784 SIS_W32(RxHashTable, 0x0);
786 SIS_W32(RxWakeOnLan, 0x0);
791 sis190_set_rx_mode(dev);
793 /* Enable all known interrupts by setting the interrupt mask. */
794 SIS_W32(IntrMask, sis190_intr_mask);
796 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
797 SIS_W32(RxControl, 0x1a1d);
799 netif_start_queue(dev);
/*
 * Process-context PHY poll (scheduled from the ISR and the watchdog
 * timer).  While the PHY is resetting or autoneg has not completed,
 * re-arm the timer; once negotiation finishes, decode the link partner
 * ability word and program StationControl for the negotiated mode.
 */
802 static void sis190_phy_task(void * data)
804 struct net_device *dev = data;
805 struct sis190_private *tp = netdev_priv(dev);
806 void __iomem *ioaddr = tp->mmio_addr;
811 val = mdio_read(ioaddr, MII_BMCR);
812 if (val & BMCR_RESET) {
813 // FIXME: needlessly high ? -- FR 02/07/2005
814 mod_timer(&tp->timer, jiffies + HZ/10);
815 } else if (!(mdio_read(ioaddr, MII_BMSR) & BMSR_ANEGCOMPLETE)) {
816 net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
818 mdio_write(ioaddr, MII_BMCR, val | BMCR_RESET);
819 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
/* LPA-bits -> StationControl lookup table, highest speed first. */
827 { LPA_1000XFULL | LPA_SLCT,
828 "1000 Mbps Full Duplex",
830 { LPA_1000XHALF | LPA_SLCT,
831 "1000 Mbps Half Duplex",
834 "100 Mbps Full Duplex",
837 "100 Mbps Half Duplex",
840 "10 Mbps Full Duplex",
843 "10 Mbps Half Duplex",
845 { 0, "unknown", 0x0000 }
848 val = mdio_read(ioaddr, 0x1f);
849 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
851 val = mdio_read(ioaddr, MII_LPA);
852 net_link(tp, KERN_INFO "%s: mii lpa = %04x.\n", dev->name, val);
/* First table row fully contained in the LPA word wins. */
854 for (p = reg31; p->ctl; p++) {
855 if ((val & p->val) == p->val)
859 SIS_W16(StationControl, p->ctl);
860 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
862 netif_carrier_on(dev);
868 static void sis190_phy_timer(unsigned long __opaque)
870 struct net_device *dev = (struct net_device *)__opaque;
871 struct sis190_private *tp = netdev_priv(dev);
873 if (likely(netif_running(dev)))
874 schedule_work(&tp->phy_task);
877 static inline void sis190_delete_timer(struct net_device *dev)
879 struct sis190_private *tp = netdev_priv(dev);
881 del_timer_sync(&tp->timer);
884 static inline void sis190_request_timer(struct net_device *dev)
886 struct sis190_private *tp = netdev_priv(dev);
887 struct timer_list *timer = &tp->timer;
890 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
891 timer->data = (unsigned long)dev;
892 timer->function = sis190_phy_timer;
896 static void sis190_set_rxbufsize(struct sis190_private *tp,
897 struct net_device *dev)
899 unsigned int mtu = dev->mtu;
901 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
/*
 * net_device open(): allocate coherent Tx/Rx descriptor rings, fill
 * the Rx ring, start the PHY watchdog, hook the (shared) IRQ and
 * start the hardware.  Error paths unwind in reverse order; some
 * intermediate labels are not visible in this excerpt.
 */
904 static int sis190_open(struct net_device *dev)
906 struct sis190_private *tp = netdev_priv(dev);
907 struct pci_dev *pdev = tp->pci_dev;
910 sis190_set_rxbufsize(tp, dev);
913 * Rx and Tx descriptors need 256 bytes alignment.
914 * pci_alloc_consistent() guarantees a stronger alignment.
916 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
920 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
924 rc = sis190_init_ring(dev);
928 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
930 sis190_request_timer(dev);
932 rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
934 goto err_release_timer_2;
936 sis190_hw_start(dev);
941 sis190_delete_timer(dev);
944 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
947 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
/*
 * Discard every pending transmit buffer (device going down or being
 * reset): unmap, free, and count each dropped skb, then rewind the Tx
 * indexes.
 */
952 static void sis190_tx_clear(struct sis190_private *tp)
956 for (i = 0; i < NUM_TX_DESC; i++) {
957 struct sk_buff *skb = tp->Tx_skbuff[i];
962 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
963 tp->Tx_skbuff[i] = NULL;
966 tp->stats.tx_dropped++;
968 tp->cur_tx = tp->dirty_tx = 0;
/*
 * Quiesce the interface for close: stop the watchdog and the queue,
 * flush the PHY work, bring the ASIC down under the lock, wait out any
 * in-flight interrupt, then (looping until the mask register reads
 * clear) drain both rings.
 */
971 static void sis190_down(struct net_device *dev)
973 struct sis190_private *tp = netdev_priv(dev);
974 void __iomem *ioaddr = tp->mmio_addr;
975 unsigned int poll_locked = 0;
977 sis190_delete_timer(dev);
979 netif_stop_queue(dev);
981 flush_scheduled_work();
984 spin_lock_irq(&tp->lock);
986 sis190_asic_down(ioaddr);
988 spin_unlock_irq(&tp->lock);
990 synchronize_irq(dev->irq);
993 netif_poll_disable(dev);
/* Repeat until the hardware confirms all interrupt sources masked. */
999 } while (SIS_R32(IntrMask));
1001 sis190_tx_clear(tp);
1002 sis190_rx_clear(tp);
/*
 * net_device stop(): tear down in reverse order of sis190_open() —
 * quiesce, release the IRQ, re-enable polling, and free the coherent
 * descriptor rings.
 */
1005 static int sis190_close(struct net_device *dev)
1007 struct sis190_private *tp = netdev_priv(dev);
1008 struct pci_dev *pdev = tp->pci_dev;
1012 free_irq(dev->irq, dev);
1014 netif_poll_enable(dev);
1016 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1017 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1019 tp->TxDescRing = NULL;
1020 tp->RxDescRing = NULL;
/*
 * hard_start_xmit: pad runts to ETH_ZLEN, claim the next Tx slot,
 * DMA-map the frame, fill the descriptor (tagging the wrap slot with
 * RingEnd), hand ownership to the NIC and kick the Tx engine.  Stops
 * the queue when the ring fills; tx_interrupt re-wakes it.
 */
1025 static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1027 struct sis190_private *tp = netdev_priv(dev);
1028 void __iomem *ioaddr = tp->mmio_addr;
1029 u32 len, entry, dirty_tx;
1030 struct TxDesc *desc;
1033 if (unlikely(skb->len < ETH_ZLEN)) {
1034 skb = skb_padto(skb, ETH_ZLEN);
1036 tp->stats.tx_dropped++;
1044 entry = tp->cur_tx % NUM_TX_DESC;
1045 desc = tp->TxDescRing + entry;
/* Slot still owned by the NIC: the queue should already be stopped. */
1047 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1048 netif_stop_queue(dev);
1049 net_tx_err(tp, KERN_ERR PFX
1050 "%s: BUG! Tx Ring full when queue awake!\n",
1052 return NETDEV_TX_BUSY;
1055 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1057 tp->Tx_skbuff[entry] = skb;
1059 desc->PSize = cpu_to_le32(len);
1060 desc->addr = cpu_to_le32(mapping);
1062 desc->size = cpu_to_le32(len);
1063 if (entry == (NUM_TX_DESC - 1))
1064 desc->size |= cpu_to_le32(RingEnd);
1068 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1074 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1076 dev->trans_start = jiffies;
/* Re-check dirty_tx: the ISR may have freed slots between the stop and
 * here, in which case the queue can be woken immediately. */
1078 dirty_tx = tp->dirty_tx;
1079 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1080 netif_stop_queue(dev);
1082 if (dirty_tx != tp->dirty_tx)
1083 netif_wake_queue(dev);
1086 return NETDEV_TX_OK;
1089 static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1091 struct sis190_private *tp = netdev_priv(dev);
1096 static void sis190_release_board(struct pci_dev *pdev)
1098 struct net_device *dev = pci_get_drvdata(pdev);
1099 struct sis190_private *tp = netdev_priv(dev);
1101 iounmap(tp->mmio_addr);
1102 pci_release_regions(pdev);
1103 pci_disable_device(pdev);
/*
 * Allocate the net_device, enable and claim the PCI device, validate
 * BAR 0 as a sufficiently large MMIO window, set the DMA mask, map the
 * registers, wire up the generic MII accessors and soft-reset the
 * chip.  Error labels unwind in reverse; some are outside this view.
 */
1107 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1109 struct sis190_private *tp;
1110 struct net_device *dev;
1111 void __iomem *ioaddr;
1114 dev = alloc_etherdev(sizeof(*tp));
1116 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1121 SET_MODULE_OWNER(dev);
1122 SET_NETDEV_DEV(dev, &pdev->dev);
1124 tp = netdev_priv(dev);
1125 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1127 rc = pci_enable_device(pdev);
1129 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1130 goto err_free_dev_1;
/* BAR 0 must be memory-mapped and cover the full register window. */
1135 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1136 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1138 goto err_pci_disable_2;
1140 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1141 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1143 goto err_pci_disable_2;
1146 rc = pci_request_regions(pdev, DRV_NAME);
1148 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1150 goto err_pci_disable_2;
1153 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1155 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1157 goto err_free_res_3;
1160 pci_set_master(pdev);
1162 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1164 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1167 goto err_free_res_3;
1171 tp->mmio_addr = ioaddr;
/* Hook the generic MII library to this device's mdio accessors. */
1173 tp->mii_if.dev = dev;
1174 tp->mii_if.mdio_read = __mdio_read;
1175 tp->mii_if.mdio_write = __mdio_write;
1176 // tp->mii_if.phy_id = XXX;
1177 tp->mii_if.phy_id_mask = 0x1f;
1178 tp->mii_if.reg_num_mask = 0x1f;
1180 sis190_irq_mask_and_ack(ioaddr);
1182 sis190_soft_reset(ioaddr);
1187 pci_release_regions(pdev);
1189 pci_disable_device(pdev);
/*
 * Watchdog timeout: disable the Tx engine and interrupts, drop every
 * pending transmit (under the lock, against a shared-IRQ handler that
 * could otherwise reclaim concurrently), then restart the hardware.
 */
1197 static void sis190_tx_timeout(struct net_device *dev)
1199 struct sis190_private *tp = netdev_priv(dev);
1200 void __iomem *ioaddr = tp->mmio_addr;
1203 /* Disable Tx, if not already */
1204 tmp8 = SIS_R8(TxControl);
1205 if (tmp8 & CmdTxEnb)
1206 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1208 /* Disable interrupts by clearing the interrupt mask. */
1209 SIS_W32(IntrMask, 0x0000);
1211 /* Stop a shared interrupt from scavenging while we are. */
1212 spin_lock_irq(&tp->lock);
1213 sis190_tx_clear(tp);
1214 spin_unlock_irq(&tp->lock);
1216 /* ...and finally, reset everything. */
1217 sis190_hw_start(dev);
1219 netif_wake_queue(dev);
/*
 * Advertise all 10/100 modes plus 1000-full and restart
 * auto-negotiation (with a PHY reset folded into the same BMCR write).
 */
1222 static void sis190_set_speed_auto(struct net_device *dev)
1224 struct sis190_private *tp = netdev_priv(dev);
1225 void __iomem *ioaddr = tp->mmio_addr;
1228 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1230 val = mdio_read(ioaddr, MII_ADVERTISE);
1232 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1234 mdio_write(ioaddr, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1235 ADVERTISE_100FULL | ADVERTISE_10FULL |
1236 ADVERTISE_100HALF | ADVERTISE_10HALF);
1238 // Enable 1000 Full Mode.
1239 mdio_write(ioaddr, MII_CTRL1000, ADVERTISE_1000FULL);
1241 // Enable auto-negotiation and restart auto-negotiation.
1242 mdio_write(ioaddr, MII_BMCR,
1243 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1246 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1248 struct sis190_private *tp = netdev_priv(dev);
1250 return mii_ethtool_gset(&tp->mii_if, cmd);
1253 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1255 struct sis190_private *tp = netdev_priv(dev);
1257 return mii_ethtool_sset(&tp->mii_if, cmd);
1260 static void sis190_get_drvinfo(struct net_device *dev,
1261 struct ethtool_drvinfo *info)
1263 struct sis190_private *tp = netdev_priv(dev);
1265 strcpy(info->driver, DRV_NAME);
1266 strcpy(info->version, DRV_VERSION);
1267 strcpy(info->bus_info, pci_name(tp->pci_dev));
1270 static int sis190_get_regs_len(struct net_device *dev)
1272 return SIS190_REGS_SIZE;
/*
 * ethtool register dump: snapshot the MMIO window into @p, clamped to
 * SIS190_REGS_SIZE and taken under the lock for a consistent view.
 */
1275 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1278 struct sis190_private *tp = netdev_priv(dev);
1279 unsigned long flags;
1281 if (regs->len > SIS190_REGS_SIZE)
1282 regs->len = SIS190_REGS_SIZE;
1284 spin_lock_irqsave(&tp->lock, flags);
1285 memcpy_fromio(p, tp->mmio_addr, regs->len);
1286 spin_unlock_irqrestore(&tp->lock, flags);
1289 static int sis190_nway_reset(struct net_device *dev)
1291 struct sis190_private *tp = netdev_priv(dev);
1293 return mii_nway_restart(&tp->mii_if);
1296 static u32 sis190_get_msglevel(struct net_device *dev)
1298 struct sis190_private *tp = netdev_priv(dev);
1300 return tp->msg_enable;
1303 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1305 struct sis190_private *tp = netdev_priv(dev);
1307 tp->msg_enable = value;
/* ethtool operations table; unlisted hooks fall back to core defaults. */
1310 static struct ethtool_ops sis190_ethtool_ops = {
1311 .get_settings = sis190_get_settings,
1312 .set_settings = sis190_set_settings,
1313 .get_drvinfo = sis190_get_drvinfo,
1314 .get_regs_len = sis190_get_regs_len,
1315 .get_regs = sis190_get_regs,
1316 .get_link = ethtool_op_get_link,
1317 .get_msglevel = sis190_get_msglevel,
1318 .set_msglevel = sis190_set_msglevel,
1319 .nway_reset = sis190_nway_reset,
1322 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1324 struct sis190_private *tp = netdev_priv(dev);
1326 return !netif_running(dev) ? -EINVAL :
1327 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
/*
 * PCI probe: set up the board, recover the MAC address (copying it
 * from EEPROM into the MAC filter registers when the EEPROM is
 * present, then reading it back from RxMacAddr), wire the net_device
 * operations, register the device and kick off autonegotiation.
 */
1330 static int __devinit sis190_init_one(struct pci_dev *pdev,
1331 const struct pci_device_id *ent)
1333 static int printed_version = 0;
1334 struct sis190_private *tp;
1335 struct net_device *dev;
1336 void __iomem *ioaddr;
1339 if (!printed_version) {
1340 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1341 printed_version = 1;
1344 dev = sis190_init_board(pdev);
1350 tp = netdev_priv(dev);
1351 ioaddr = tp->mmio_addr;
1353 /* Get MAC address */
1354 /* Read node address from the EEPROM */
/* ROMControl bit 0x4 presumably signals "EEPROM fitted" — confirm. */
1356 if (SIS_R32(ROMControl) & 0x4) {
1357 for (i = 0; i < 3; i++) {
1358 SIS_W16(RxMacAddr + 2*i,
1359 sis190_read_eeprom(ioaddr, 3 + i));
1363 for (i = 0; i < MAC_ADDR_LEN; i++)
1364 dev->dev_addr[i] = SIS_R8(RxMacAddr + i);
1366 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1368 dev->open = sis190_open;
1369 dev->stop = sis190_close;
1370 dev->do_ioctl = sis190_ioctl;
1371 dev->get_stats = sis190_get_stats;
1372 dev->tx_timeout = sis190_tx_timeout;
1373 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1374 dev->hard_start_xmit = sis190_start_xmit;
1375 #ifdef CONFIG_NET_POLL_CONTROLLER
1376 dev->poll_controller = sis190_netpoll;
1378 dev->set_multicast_list = sis190_set_rx_mode;
1379 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1380 dev->irq = pdev->irq;
1381 dev->base_addr = (unsigned long) 0xdead;
1383 spin_lock_init(&tp->lock);
1384 rc = register_netdev(dev);
1386 sis190_release_board(pdev);
1390 pci_set_drvdata(pdev, dev);
1392 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1393 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1394 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1396 dev->dev_addr[0], dev->dev_addr[1],
1397 dev->dev_addr[2], dev->dev_addr[3],
1398 dev->dev_addr[4], dev->dev_addr[5]);
/* No carrier until the PHY task confirms a negotiated link. */
1400 netif_carrier_off(dev);
1402 sis190_set_speed_auto(dev);
1407 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1409 struct net_device *dev = pci_get_drvdata(pdev);
1411 unregister_netdev(dev);
1412 sis190_release_board(pdev);
1413 pci_set_drvdata(pdev, NULL);
/* PCI driver glue; probing happens per-device via sis190_init_one(). */
1416 static struct pci_driver sis190_pci_driver = {
1418 .id_table = sis190_pci_tbl,
1419 .probe = sis190_init_one,
1420 .remove = __devexit_p(sis190_remove_one),
1423 static int __init sis190_init_module(void)
1425 return pci_module_init(&sis190_pci_driver);
1428 static void __exit sis190_cleanup_module(void)
1430 pci_unregister_driver(&sis190_pci_driver);
1433 module_init(sis190_init_module);
1434 module_exit(sis190_cleanup_module);