2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
18 See the file COPYING in this distribution for more information.
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
/* Conditional logging macros gated on netif_msg_* level bits.
 * NOTE(review): each definition continues on a line not visible in this
 * chunk (the dump dropped the continuation lines). */
35 #define net_drv(p, arg...) if (netif_msg_drv(p)) \
37 #define net_probe(p, arg...) if (netif_msg_probe(p)) \
39 #define net_link(p, arg...) if (netif_msg_link(p)) \
41 #define net_intr(p, arg...) if (netif_msg_intr(p)) \
43 #define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
/* MII bus limits: 32 possible PHY addresses, 5-bit "any" masks. */
46 #define PHY_MAX_ADDR 32
47 #define PHY_ID_ANY 0x1f
48 #define MII_REG_ANY 0x1f
/* Driver identity; version string gains "-NAPI" when NAPI is built in. */
50 #ifdef CONFIG_SIS190_NAPI
51 #define NAPI_SUFFIX "-NAPI"
53 #define NAPI_SUFFIX ""
56 #define DRV_VERSION "1.2" NAPI_SUFFIX
57 #define DRV_NAME "sis190"
58 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
59 #define PFX DRV_NAME ": "
/* Rx delivery differs under NAPI: netif_receive_skb with a quota vs.
 * plain netif_rx with an unlimited quota. */
61 #ifdef CONFIG_SIS190_NAPI
62 #define sis190_rx_skb netif_receive_skb
63 #define sis190_rx_quota(count, quota) min(count, quota)
65 #define sis190_rx_skb netif_rx
66 #define sis190_rx_quota(count, quota) count
69 #define MAC_ADDR_LEN 6
/* Ring geometry and Rx buffer sizing. RX_BUF_MASK clears the low three
 * bits of RxDesc->size (see sis190_give_to_asic). */
71 #define NUM_TX_DESC 64 /* [8..1024] */
72 #define NUM_RX_DESC 64 /* [8..8192] */
73 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
74 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
75 #define RX_BUF_SIZE 1536
76 #define RX_BUF_MASK 0xfff8
/* MMIO window size and watchdog/PHY-poll periods (jiffies). */
78 #define SIS190_REGS_SIZE 0x80
79 #define SIS190_TX_TIMEOUT (6*HZ)
80 #define SIS190_PHY_TIMEOUT (10*HZ)
81 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
82 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
85 /* Enhanced PHY access register bit definitions */
86 #define EhnMIIread 0x0000
87 #define EhnMIIwrite 0x0020
88 #define EhnMIIdataShift 16
89 #define EhnMIIpmdShift 6 /* 7016 only */
90 #define EhnMIIregShift 11
91 #define EhnMIIreq 0x0010
92 #define EhnMIInotDone 0x0010
94 /* Write/read MMIO register */
95 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
96 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
97 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
98 #define SIS_R8(reg) readb (ioaddr + (reg))
99 #define SIS_R16(reg) readw (ioaddr + (reg))
100 #define SIS_R32(reg) readl (ioaddr + (reg))
/* Posted-write flush: read IntrControl back to force writes to the chip. */
102 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
/* MMIO register offsets (bytes from the mapped BAR 0 base).
 * Entries tagged "unused" are documented here but never touched by
 * the visible driver code. */
104 enum sis190_registers {
106 TxDescStartAddr = 0x04,
107 rsv0 = 0x08, // reserved
108 TxSts = 0x0c, // unused (Control/Status)
110 RxDescStartAddr = 0x14,
111 rsv1 = 0x18, // reserved
112 RxSts = 0x1c, // unused
116 IntrTimer = 0x2c, // unused (Interrupt Timer)
117 PMControl = 0x30, // unused (Power Mgmt Control/Status)
118 rsv2 = 0x34, // reserved
121 StationControl = 0x40,
123 GIoCR = 0x48, // unused (GMAC IO Compensation)
124 GIoCtrl = 0x4c, // unused (GMAC IO Control)
126 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
127 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
128 rsv3 = 0x5c, // reserved
132 // Undocumented = 0x6c,
134 RxWolData = 0x74, // unused (Rx WOL Data Access)
135 RxMPSControl = 0x78, // unused (Rx MPS Control)
136 rsv4 = 0x7c, // reserved
/* Register bit values: interrupt-status bits, Rx filter bits, and the
 * shifts used when programming Tx/Rx configuration. */
139 enum sis190_register_content {
141 SoftInt = 0x40000000, // unused
142 Timeup = 0x20000000, // unused
143 PauseFrame = 0x00080000, // unused
144 MagicPacket = 0x00040000, // unused
145 WakeupFrame = 0x00020000, // unused
146 LinkChange = 0x00010000,
147 RxQEmpty = 0x00000080,
149 TxQ1Empty = 0x00000020, // unused
150 TxQ1Int = 0x00000010,
151 TxQ0Empty = 0x00000008, // unused
152 TxQ0Int = 0x00000004,
158 CmdRxEnb = 0x08, // unused
160 RxBufEmpty = 0x01, // unused
163 Cfg9346_Lock = 0x00, // unused
164 Cfg9346_Unlock = 0xc0, // unused
/* RxMacControl accept bits, OR-ed together in sis190_set_rx_mode(). */
167 AcceptErr = 0x20, // unused
168 AcceptRunt = 0x10, // unused
169 AcceptBroadcast = 0x0800,
170 AcceptMulticast = 0x0400,
171 AcceptMyPhys = 0x0200,
172 AcceptAllPhys = 0x0100,
176 RxCfgDMAShift = 8, // 0x1a in RxControl ?
179 TxInterFrameGapShift = 24,
180 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
182 LinkStatus = 0x02, // unused
183 FullDup = 0x01, // unused
186 TBILinkOK = 0x02000000, // unused
/* Rx/Tx descriptor status/size bits shared by both descriptor types. */
203 enum _DescStatusBit {
205 OWNbit = 0x80000000, // RXOWN/TXOWN
206 INTbit = 0x40000000, // RXINT/TXINT
207 CRCbit = 0x00020000, // CRCOFF/CRCEN
208 PADbit = 0x00010000, // PREADD/PADEN
210 RingEnd = 0x80000000,
212 LSEN = 0x08000000, // TSO ? -- FR
239 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
248 RxSizeMask = 0x0000ffff
250 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
251 * provide two (unused with Linux) Tx queues. No publicly
252 * available documentation alas.
/* EEPROM serial-interface bit definitions (none used by the visible
 * code paths; sis190_read_eeprom uses EEREQ/EEROP instead). */
256 enum sis190_eeprom_access_register_bits {
257 EECS = 0x00000001, // unused
258 EECLK = 0x00000002, // unused
259 EEDO = 0x00000008, // unused
260 EEDI = 0x00000004, // unused
263 EEWOP = 0x00000100 // unused
266 /* EEPROM Addresses */
267 enum sis190_eeprom_address {
268 EEPROMSignature = 0x00,
269 EEPROMCLK = 0x01, // unused
/* Driver feature flags (F_HAS_RGMII, F_PHY_88E1111, ... — full list not
 * visible in this chunk). */
274 enum sis190_feature {
/* Per-device private state, stored in the net_device private area. */
279 struct sis190_private {
280 void __iomem *mmio_addr;
281 struct pci_dev *pci_dev;
282 struct net_device_stats stats;
/* Descriptor rings (DMA-coherent) and their per-slot skb shadows. */
291 struct RxDesc *RxDescRing;
292 struct TxDesc *TxDescRing;
293 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
294 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
/* Deferred PHY work and the timer that re-triggers it. */
295 struct work_struct phy_task;
296 struct timer_list timer;
298 struct mii_if_info mii_if;
299 struct list_head first_phy;
/* NOTE(review): this list head presumably belongs to struct sis190_phy,
 * whose opening lines were dropped from this chunk — confirm upstream. */
304 struct list_head list;
/* PHY transceiver classes (LAN/HOME/MIX/UNKNOWN per later usage). */
311 enum sis190_phy_type {
/* Known PHY chips, matched on (PHYSID1, PHYSID2 & 0xfff0) in
 * sis190_init_phy(). */
318 static struct mii_chip_info {
323 } mii_chip_table[] = {
324 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, 0 },
325 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
326 { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
327 { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 },
/* Board/chip description table (single entry for the SiS190). */
331 const static struct {
333 u8 version; /* depend on docs */
334 u32 RxConfigMask; /* clear the bits supported by this chip */
335 } sis_chip_info[] = {
336 { DRV_NAME, 0x00, 0xff7e1880, },
/* PCI IDs this driver binds to: SiS vendor, device 0x0190. */
339 static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
340 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
344 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
/* Frames shorter than this are copied into a fresh skb on Rx instead of
 * handing the DMA buffer up the stack (see sis190_try_rx_copy). */
346 static int rx_copybreak = 200;
352 MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
353 module_param(rx_copybreak, int, 0);
354 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
355 module_param_named(debug, debug.msg_enable, int, 0);
356 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
357 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
358 MODULE_VERSION(DRV_VERSION);
359 MODULE_LICENSE("GPL");
/* Interrupt sources unmasked by sis190_hw_start(). */
361 static const u32 sis190_intr_mask =
362 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
365 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
366 * The chips use a 64 element hash table based on the Ethernet CRC.
368 static int multicast_filter_limit = 32;
/* Issue an MII management command via GMIIControl and poll (up to 100
 * iterations) for EhnMIInotDone to clear; log an error on timeout. */
370 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
374 SIS_W32(GMIIControl, ctl);
378 for (i = 0; i < 100; i++) {
379 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
385 printk(KERN_ERR PFX "PHY command failed !\n");
/* Write one PHY register: request + write opcode + reg/phy/data fields
 * packed into the single GMIIControl command word. */
388 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
390 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
391 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
392 (((u32) val) << EhnMIIdataShift));
/* Read one PHY register; the result appears in GMIIControl's data field
 * once the command completes. */
395 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
397 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
398 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
400 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
/* mii_if_info adapters: resolve the MMIO base from netdev private data,
 * then delegate to the raw accessors above. */
403 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
405 struct sis190_private *tp = netdev_priv(dev);
407 mdio_write(tp->mmio_addr, phy_id, reg, val);
410 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
412 struct sis190_private *tp = netdev_priv(dev);
414 return mdio_read(tp->mmio_addr, phy_id, reg);
/* Read twice and return the second value: MII status bits (e.g. BMSR
 * link) are latched, so the first read clears the stale latch. */
417 static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
419 mdio_read(ioaddr, phy_id, reg);
420 return mdio_read(ioaddr, phy_id, reg);
/* Read one 16-bit word from the EEPROM: bail out if no EEPROM is
 * attached (ROMControl bit 1 clear), start a read op, then poll up to
 * 200 times for EEREQ to clear and extract the data from the high half
 * of ROMInterface. */
423 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
428 if (!(SIS_R32(ROMControl) & 0x0002))
431 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
433 for (i = 0; i < 200; i++) {
434 if (!(SIS_R32(ROMInterface) & EEREQ)) {
435 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
/* Mask all interrupt sources and acknowledge anything pending. */
444 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
446 SIS_W32(IntrMask, 0x00);
447 SIS_W32(IntrStatus, 0xffffffff);
/* Quiesce the chip: stop Tx/Rx DMA, then mask and ack interrupts. */
451 static void sis190_asic_down(void __iomem *ioaddr)
453 /* Stop the chip's Tx and Rx DMA processes. */
455 SIS_W32(TxControl, 0x1a00);
456 SIS_W32(RxControl, 0x1a00);
458 sis190_irq_mask_and_ack(ioaddr);
/* Set the RingEnd bit so the chip wraps back to the first descriptor. */
461 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
463 desc->size |= cpu_to_le32(RingEnd);
/* Hand an Rx descriptor (back) to the hardware: restore its buffer size
 * (low bits masked off, RingEnd preserved) and set OWN+INT. */
466 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
468 u32 eor = le32_to_cpu(desc->size) & RingEnd;
471 desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
473 desc->status = cpu_to_le32(OWNbit | INTbit);
/* Install a fresh DMA mapping in the descriptor, then give it to the
 * hardware. */
476 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
479 desc->addr = cpu_to_le32(mapping);
480 sis190_give_to_asic(desc, rx_buf_sz);
/* Poison the descriptor so the chip cannot DMA into it: bogus address,
 * zero size (RingEnd kept so ring geometry stays intact). */
483 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
486 desc->addr = 0xdeadbeef;
487 desc->size &= cpu_to_le32(RingEnd);
/* Allocate one Rx skb, DMA-map its data area and attach it to the given
 * descriptor; on failure the descriptor is poisoned instead. */
492 static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
493 struct RxDesc *desc, u32 rx_buf_sz)
499 skb = dev_alloc_skb(rx_buf_sz);
505 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
508 sis190_map_to_asic(desc, mapping, rx_buf_sz);
514 sis190_make_unusable_by_asic(desc);
/* Refill empty Rx ring slots in [start, end); returns the number of
 * slots successfully (re)populated. Slots that still own an skb are
 * skipped. */
518 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
523 for (cur = start; cur < end; cur++) {
524 int ret, i = cur % NUM_RX_DESC;
526 if (tp->Rx_skbuff[i])
529 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
530 tp->RxDescRing + i, tp->rx_buf_sz);
/* copybreak optimisation: for small packets, copy the payload into a
 * freshly allocated (IP-aligned) skb and recycle the DMA buffer by
 * giving the descriptor straight back to the chip. */
537 static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
538 struct RxDesc *desc, int rx_buf_sz)
542 if (pkt_size < rx_copybreak) {
545 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
547 skb_reserve(skb, NET_IP_ALIGN);
548 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
550 sis190_give_to_asic(desc, rx_buf_sz);
/* Classify an Rx error from the descriptor status bits, bumping the
 * matching statistics counter. A packet is good only when CRCOK is set
 * and no error bit is. */
557 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
559 #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
561 if ((status & CRCOK) && !(status & ErrMask))
564 if (!(status & CRCOK))
565 stats->rx_crc_errors++;
566 else if (status & OVRUN)
567 stats->rx_over_errors++;
568 else if (status & (SHORT | LIMIT))
569 stats->rx_length_errors++;
570 else if (status & (MIIER | NIBON | COLON))
571 stats->rx_frame_errors++;
/* Rx ring processing: walk descriptors from cur_rx until one is still
 * owned by the chip or the (NAPI) quota is exhausted, deliver good
 * packets up the stack, then refill the slots that were consumed. */
577 static int sis190_rx_interrupt(struct net_device *dev,
578 struct sis190_private *tp, void __iomem *ioaddr)
580 struct net_device_stats *stats = &tp->stats;
581 u32 rx_left, cur_rx = tp->cur_rx;
584 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
585 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
587 for (; rx_left > 0; rx_left--, cur_rx++) {
588 unsigned int entry = cur_rx % NUM_RX_DESC;
589 struct RxDesc *desc = tp->RxDescRing + entry;
// Chip still owns this descriptor: nothing more to reap.
592 if (desc->status & OWNbit)
595 status = le32_to_cpu(desc->PSize);
597 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
// Errored frame: count it and recycle the buffer in place.
600 if (sis190_rx_pkt_err(status, stats) < 0)
601 sis190_give_to_asic(desc, tp->rx_buf_sz);
603 struct sk_buff *skb = tp->Rx_skbuff[entry];
// -4: strip the trailing FCS from the reported size.
604 int pkt_size = (status & RxSizeMask) - 4;
605 void (*pci_action)(struct pci_dev *, dma_addr_t,
606 size_t, int) = pci_dma_sync_single_for_device;
// Frame larger than our buffer: fragment we can't handle — drop.
608 if (unlikely(pkt_size > tp->rx_buf_sz)) {
609 net_intr(tp, KERN_INFO
610 "%s: (frag) status = %08x.\n",
613 stats->rx_length_errors++;
614 sis190_give_to_asic(desc, tp->rx_buf_sz);
618 pci_dma_sync_single_for_cpu(tp->pci_dev,
619 le32_to_cpu(desc->addr), tp->rx_buf_sz,
// If the packet was not copybreak-copied, the original skb goes up
// the stack: unmap it for good and detach it from the ring.
622 if (sis190_try_rx_copy(&skb, pkt_size, desc,
624 pci_action = pci_unmap_single;
625 tp->Rx_skbuff[entry] = NULL;
626 sis190_make_unusable_by_asic(desc);
629 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
630 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
633 skb_put(skb, pkt_size);
634 skb->protocol = eth_type_trans(skb, dev);
638 dev->last_rx = jiffies;
640 stats->rx_bytes += pkt_size;
641 if ((status & BCAST) == MCAST)
645 count = cur_rx - tp->cur_rx;
// Replenish the ring; warn if allocation produced nothing at all.
648 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
649 if (!delta && count && netif_msg_intr(tp))
650 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
651 tp->dirty_rx += delta;
653 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
654 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
/* Undo the DMA mapping of a transmitted skb and wipe its descriptor.
 * The mapped length was padded up to ETH_ZLEN at xmit time, so the
 * unmap length must match. */
659 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
664 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
666 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
668 memset(desc, 0x00, sizeof(*desc));
/* Tx completion: reap descriptors the chip has released, free their
 * skbs, update stats, and wake the queue if we had stopped it with a
 * full ring. */
671 static void sis190_tx_interrupt(struct net_device *dev,
672 struct sis190_private *tp, void __iomem *ioaddr)
674 u32 pending, dirty_tx = tp->dirty_tx;
676 * It would not be needed if queueing was allowed to be enabled
677 * again too early (hint: think preempt and unclocked smp systems).
679 unsigned int queue_stopped;
682 pending = tp->cur_tx - dirty_tx;
683 queue_stopped = (pending == NUM_TX_DESC);
685 for (; pending; pending--, dirty_tx++) {
686 unsigned int entry = dirty_tx % NUM_TX_DESC;
687 struct TxDesc *txd = tp->TxDescRing + entry;
// Still owned by the chip: stop reaping here.
690 if (le32_to_cpu(txd->status) & OWNbit)
693 skb = tp->Tx_skbuff[entry];
695 tp->stats.tx_packets++;
696 tp->stats.tx_bytes += skb->len;
698 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
699 tp->Tx_skbuff[entry] = NULL;
700 dev_kfree_skb_irq(skb);
703 if (tp->dirty_tx != dirty_tx) {
704 tp->dirty_tx = dirty_tx;
707 netif_wake_queue(dev);
712 * The interrupt handler does all of the Rx thread work and cleans up after
/* Shared IRQ handler: read and ack IntrStatus, then dispatch to link
 * change handling, Rx processing and Tx completion as indicated. */
715 static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
717 struct net_device *dev = __dev;
718 struct sis190_private *tp = netdev_priv(dev);
719 void __iomem *ioaddr = tp->mmio_addr;
720 unsigned int handled = 0;
723 status = SIS_R32(IntrStatus);
// All-ones means the device is gone (hot-unplug); zero means the
// interrupt was for someone else on the shared line.
725 if ((status == 0xffffffff) || !status)
// Interface already closed: just quiesce the hardware.
730 if (unlikely(!netif_running(dev))) {
731 sis190_asic_down(ioaddr);
// Ack everything we are about to service.
735 SIS_W32(IntrStatus, status);
737 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
739 if (status & LinkChange) {
740 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
741 schedule_work(&tp->phy_task);
745 sis190_rx_interrupt(dev, tp, ioaddr);
747 if (status & TxQ0Int)
748 sis190_tx_interrupt(dev, tp, ioaddr);
750 return IRQ_RETVAL(handled);
/* Netpoll entry point: run the ISR with the device IRQ disabled. */
753 #ifdef CONFIG_NET_POLL_CONTROLLER
754 static void sis190_netpoll(struct net_device *dev)
756 struct sis190_private *tp = netdev_priv(dev);
757 struct pci_dev *pdev = tp->pci_dev;
759 disable_irq(pdev->irq);
760 sis190_interrupt(pdev->irq, dev, NULL);
761 enable_irq(pdev->irq);
/* Release one Rx slot: unmap, free the skb, poison the descriptor. */
765 static void sis190_free_rx_skb(struct sis190_private *tp,
766 struct sk_buff **sk_buff, struct RxDesc *desc)
768 struct pci_dev *pdev = tp->pci_dev;
770 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
772 dev_kfree_skb(*sk_buff);
774 sis190_make_unusable_by_asic(desc);
/* Drop every skb still attached to the Rx ring. */
777 static void sis190_rx_clear(struct sis190_private *tp)
781 for (i = 0; i < NUM_RX_DESC; i++) {
782 if (!tp->Rx_skbuff[i])
784 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
/* Reset all producer/consumer indexes for both rings. */
788 static void sis190_init_ring_indexes(struct sis190_private *tp)
790 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
/* Prepare both rings for use: clear indexes and skb shadows, fully
 * populate the Rx ring, and mark its last descriptor as ring end. */
793 static int sis190_init_ring(struct net_device *dev)
795 struct sis190_private *tp = netdev_priv(dev);
797 sis190_init_ring_indexes(tp);
799 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
800 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
// Partial fill is a hard failure during ring setup.
802 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
805 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
/* Program the Rx filter: promiscuous, all-multicast (when over the
 * filter limit), or a 64-bit multicast hash computed from the Ethernet
 * CRC of each list entry. Registers are written under tp->lock. */
814 static void sis190_set_rx_mode(struct net_device *dev)
816 struct sis190_private *tp = netdev_priv(dev);
817 void __iomem *ioaddr = tp->mmio_addr;
819 u32 mc_filter[2]; /* Multicast hash filter */
822 if (dev->flags & IFF_PROMISC) {
823 /* Unconditionally log net taps. */
824 net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
827 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
829 mc_filter[1] = mc_filter[0] = 0xffffffff;
830 } else if ((dev->mc_count > multicast_filter_limit) ||
831 (dev->flags & IFF_ALLMULTI)) {
832 /* Too many to filter perfectly -- accept all multicasts. */
833 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
834 mc_filter[1] = mc_filter[0] = 0xffffffff;
836 struct dev_mc_list *mclist;
839 rx_mode = AcceptBroadcast | AcceptMyPhys;
840 mc_filter[1] = mc_filter[0] = 0;
// Top 6 bits of the CRC select one of 64 hash buckets.
841 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
842 i++, mclist = mclist->next) {
844 ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
845 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
846 rx_mode |= AcceptMulticast;
850 spin_lock_irqsave(&tp->lock, flags);
852 SIS_W16(RxMacControl, rx_mode | 0x2);
853 SIS_W32(RxHashTable, mc_filter[0]);
854 SIS_W32(RxHashTable + 4, mc_filter[1]);
856 spin_unlock_irqrestore(&tp->lock, flags);
/* Soft-reset the MAC via IntrControl, then fully quiesce it. */
859 static void sis190_soft_reset(void __iomem *ioaddr)
861 SIS_W32(IntrControl, 0x8000);
864 SIS_W32(IntrControl, 0x0);
865 sis190_asic_down(ioaddr);
/* Bring the hardware up: reset, program ring base addresses, clear
 * pending state, set the Rx filter, unmask interrupts and enable
 * Tx/Rx DMA, then start the netdev queue. */
869 static void sis190_hw_start(struct net_device *dev)
871 struct sis190_private *tp = netdev_priv(dev);
872 void __iomem *ioaddr = tp->mmio_addr;
874 sis190_soft_reset(ioaddr);
876 SIS_W32(TxDescStartAddr, tp->tx_dma);
877 SIS_W32(RxDescStartAddr, tp->rx_dma);
879 SIS_W32(IntrStatus, 0xffffffff);
880 SIS_W32(IntrMask, 0x0);
881 SIS_W32(GMIIControl, 0x0);
882 SIS_W32(TxMacControl, 0x60);
883 SIS_W16(RxMacControl, 0x02);
884 SIS_W32(RxHashTable, 0x0);
886 SIS_W32(RxWolCtrl, 0x0);
887 SIS_W32(RxWolData, 0x0);
891 sis190_set_rx_mode(dev);
893 /* Enable all known interrupts by setting the interrupt mask. */
894 SIS_W32(IntrMask, sis190_intr_mask);
896 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
897 SIS_W32(RxControl, 0x1a1d);
899 netif_start_queue(dev);
/* Deferred PHY work: wait out a pending BMCR reset, keep kicking the
 * PHY until autoneg completes (carrier off meanwhile), then decode the
 * negotiated mode from LPA and program StationControl accordingly. */
902 static void sis190_phy_task(void * data)
904 struct net_device *dev = data;
905 struct sis190_private *tp = netdev_priv(dev);
906 void __iomem *ioaddr = tp->mmio_addr;
907 int phy_id = tp->mii_if.phy_id;
912 val = mdio_read(ioaddr, phy_id, MII_BMCR);
// PHY reset still in progress: poll again shortly.
913 if (val & BMCR_RESET) {
914 // FIXME: needlessly high ? -- FR 02/07/2005
915 mod_timer(&tp->timer, jiffies + HZ/10);
916 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
917 BMSR_ANEGCOMPLETE)) {
918 net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
920 netif_carrier_off(dev);
921 mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
922 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
/* LPA-bit -> StationControl value lookup; last entry is the
 * catch-all default ("unknown"). 0x00001000 is the full-duplex
 * bit in the register value. */
930 { LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
931 "1000 Mbps Full Duplex" },
932 { LPA_1000XHALF | LPA_SLCT, 0x07000c00,
933 "1000 Mbps Half Duplex" },
934 { LPA_100FULL, 0x04000800 | 0x00001000,
935 "100 Mbps Full Duplex" },
936 { LPA_100HALF, 0x04000800,
937 "100 Mbps Half Duplex" },
938 { LPA_10FULL, 0x04000400 | 0x00001000,
939 "10 Mbps Full Duplex" },
940 { LPA_10HALF, 0x04000400,
941 "10 Mbps Half Duplex" },
942 { 0, 0x04000400, "unknown" }
946 val = mdio_read(ioaddr, phy_id, 0x1f);
947 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
949 val = mdio_read(ioaddr, phy_id, MII_LPA);
950 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
951 net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
952 dev->name, val, adv);
// First table entry whose LPA bits are all present wins.
956 for (p = reg31; p->val; p++) {
957 if ((val & p->val) == p->val)
// Merge the speed/duplex bits with the untouched remainder of
// StationControl.
961 p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
963 SIS_W32(StationControl, p->ctl);
965 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
967 netif_carrier_on(dev);
/* Periodic timer: re-schedule the PHY work while the device is up. */
973 static void sis190_phy_timer(unsigned long __opaque)
975 struct net_device *dev = (struct net_device *)__opaque;
976 struct sis190_private *tp = netdev_priv(dev);
978 if (likely(netif_running(dev)))
979 schedule_work(&tp->phy_task);
/* Stop the PHY poll timer, waiting for a running handler to finish. */
982 static inline void sis190_delete_timer(struct net_device *dev)
984 struct sis190_private *tp = netdev_priv(dev);
986 del_timer_sync(&tp->timer);
/* Arm the PHY poll timer with the device as callback argument. */
989 static inline void sis190_request_timer(struct net_device *dev)
991 struct sis190_private *tp = netdev_priv(dev);
992 struct timer_list *timer = &tp->timer;
995 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
996 timer->data = (unsigned long)dev;
997 timer->function = sis190_phy_timer;
/* Derive the Rx buffer size from the MTU, rounding down to the 8-byte
 * granularity the hardware requires (RX_BUF_MASK). */
1001 static void sis190_set_rxbufsize(struct sis190_private *tp,
1002 struct net_device *dev)
1004 unsigned int mtu = dev->mtu;
1006 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1007 /* RxDesc->size has a licence to kill the lower bits */
1008 if (tp->rx_buf_sz & 0x07) {
1010 tp->rx_buf_sz &= RX_BUF_MASK;
/* ndo_open: allocate DMA-coherent Tx/Rx rings, populate the Rx ring,
 * start the PHY work/timer, grab the (shared) IRQ and bring the
 * hardware up. Error paths unwind in reverse order. */
1014 static int sis190_open(struct net_device *dev)
1016 struct sis190_private *tp = netdev_priv(dev);
1017 struct pci_dev *pdev = tp->pci_dev;
1020 sis190_set_rxbufsize(tp, dev);
1023 * Rx and Tx descriptors need 256 bytes alignment.
1024 * pci_alloc_consistent() guarantees a stronger alignment.
1026 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1027 if (!tp->TxDescRing)
1030 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1031 if (!tp->RxDescRing)
1034 rc = sis190_init_ring(dev);
1038 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1040 sis190_request_timer(dev);
1042 rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
1044 goto err_release_timer_2;
1046 sis190_hw_start(dev);
/* Unwind: timer, Rx skbs, then the two coherent rings. */
1050 err_release_timer_2:
1051 sis190_delete_timer(dev);
1052 sis190_rx_clear(tp);
1054 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1057 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
/* Drop every skb still queued on the Tx ring, counting each as a
 * dropped transmit, and reset the Tx indexes. */
1062 static void sis190_tx_clear(struct sis190_private *tp)
1066 for (i = 0; i < NUM_TX_DESC; i++) {
1067 struct sk_buff *skb = tp->Tx_skbuff[i];
1072 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1073 tp->Tx_skbuff[i] = NULL;
1076 tp->stats.tx_dropped++;
1078 tp->cur_tx = tp->dirty_tx = 0;
/* Shut the device down: stop the timer and queue, flush deferred PHY
 * work, quiesce the chip under the lock, wait out in-flight IRQs and
 * pollers, then release all ring skbs. */
1081 static void sis190_down(struct net_device *dev)
1083 struct sis190_private *tp = netdev_priv(dev);
1084 void __iomem *ioaddr = tp->mmio_addr;
1085 unsigned int poll_locked = 0;
1087 sis190_delete_timer(dev);
1089 netif_stop_queue(dev);
1091 flush_scheduled_work();
1094 spin_lock_irq(&tp->lock);
1096 sis190_asic_down(ioaddr);
1098 spin_unlock_irq(&tp->lock);
1100 synchronize_irq(dev->irq);
1103 netif_poll_disable(dev);
1107 synchronize_sched();
// Repeat until the mask really reads back zero (IRQ may re-enable it).
1109 } while (SIS_R32(IntrMask));
1111 sis190_tx_clear(tp);
1112 sis190_rx_clear(tp);
/* ndo_stop: tear the device down, release the IRQ, re-enable polling
 * and free both DMA-coherent descriptor rings. */
1115 static int sis190_close(struct net_device *dev)
1117 struct sis190_private *tp = netdev_priv(dev);
1118 struct pci_dev *pdev = tp->pci_dev;
1122 free_irq(dev->irq, dev);
1124 netif_poll_enable(dev);
1126 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1127 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1129 tp->TxDescRing = NULL;
1130 tp->RxDescRing = NULL;
/* ndo_start_xmit: pad runt frames, claim the next Tx descriptor, map
 * the payload, fill in the descriptor fields, hand ownership to the
 * chip and kick the Tx DMA. Stops the queue when the ring fills. */
1135 static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1137 struct sis190_private *tp = netdev_priv(dev);
1138 void __iomem *ioaddr = tp->mmio_addr;
1139 u32 len, entry, dirty_tx;
1140 struct TxDesc *desc;
// Hardware wants at least ETH_ZLEN bytes; pad short frames.
1143 if (unlikely(skb->len < ETH_ZLEN)) {
1144 skb = skb_padto(skb, ETH_ZLEN);
1146 tp->stats.tx_dropped++;
1154 entry = tp->cur_tx % NUM_TX_DESC;
1155 desc = tp->TxDescRing + entry;
// Descriptor still owned by the chip: the queue should have been
// stopped before we got here — log and refuse.
1157 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1158 netif_stop_queue(dev);
1159 net_tx_err(tp, KERN_ERR PFX
1160 "%s: BUG! Tx Ring full when queue awake!\n",
1162 return NETDEV_TX_BUSY;
1165 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1167 tp->Tx_skbuff[entry] = skb;
1169 desc->PSize = cpu_to_le32(len);
1170 desc->addr = cpu_to_le32(mapping);
1172 desc->size = cpu_to_le32(len);
1173 if (entry == (NUM_TX_DESC - 1))
1174 desc->size |= cpu_to_le32(RingEnd);
// Ownership transfer must be the last descriptor write.
1178 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1184 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1186 dev->trans_start = jiffies;
// Ring now full: stop the queue, then re-check in case the Tx IRQ
// freed slots in the meantime (avoids a lost wakeup).
1188 dirty_tx = tp->dirty_tx;
1189 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1190 netif_stop_queue(dev);
1192 if (dirty_tx != tp->dirty_tx)
1193 netif_wake_queue(dev);
1196 return NETDEV_TX_OK;
/* ndo_get_stats: hand back the counters accumulated in tp->stats. */
1199 static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1201 struct sis190_private *tp = netdev_priv(dev);
/* Free every sis190_phy node discovered during probe. */
1206 static void sis190_free_phy(struct list_head *first_phy)
1208 struct sis190_phy *cur, *next;
1210 list_for_each_entry_safe(cur, next, first_phy, list) {
1216 * sis190_default_phy - Select default PHY for sis190 mac.
1217 * @dev: the net device to probe for
1219 * Select first detected PHY with link as default.
1220 * If no one is link on, select PHY whose types is HOME as default.
1221 * If HOME doesn't exist, select LAN.
1223 static u16 sis190_default_phy(struct net_device *dev)
1225 struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1226 struct sis190_private *tp = netdev_priv(dev);
1227 struct mii_if_info *mii_if = &tp->mii_if;
1228 void __iomem *ioaddr = tp->mmio_addr;
1231 phy_home = phy_default = phy_lan = NULL;
/* Pass 1: prefer a PHY with link up; isolate the others and remember
 * the first HOME and LAN candidates as fallbacks. */
1233 list_for_each_entry(phy, &tp->first_phy, list) {
1234 status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1236 // Link ON & Not select default PHY & not ghost PHY.
1237 if ((status & BMSR_LSTATUS) &&
1239 (phy->type != UNKNOWN)) {
// Isolate non-default PHYs so they leave the MII bus alone.
1242 status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1243 mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1244 status | BMCR_ANENABLE | BMCR_ISOLATE);
1245 if (phy->type == HOME)
1247 else if (phy->type == LAN)
/* Fallback order: HOME, then LAN, then simply the first list entry. */
1254 phy_default = phy_home;
1256 phy_default = phy_lan;
1258 phy_default = list_entry(&tp->first_phy,
1259 struct sis190_phy, list);
1262 if (mii_if->phy_id != phy_default->phy_id) {
1263 mii_if->phy_id = phy_default->phy_id;
1264 net_probe(tp, KERN_INFO
1265 "%s: Using transceiver at address %d as default.\n",
1266 pci_name(tp->pci_dev), mii_if->phy_id);
/* Un-isolate the chosen PHY and return its (latched) status. */
1269 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1270 status &= (~BMCR_ISOLATE);
1272 mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1273 status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
/* Record a discovered PHY: read its ID registers, match it against
 * mii_chip_table to classify it (and pick up feature flags), defaulting
 * to UNKNOWN when no entry matches. */
1278 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1279 struct sis190_phy *phy, unsigned int phy_id,
1282 void __iomem *ioaddr = tp->mmio_addr;
1283 struct mii_chip_info *p;
1285 INIT_LIST_HEAD(&phy->list);
1286 phy->status = mii_status;
1287 phy->phy_id = phy_id;
1289 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1290 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
// Low 4 bits of PHYSID2 are the silicon revision: ignore them.
1292 for (p = mii_chip_table; p->type; p++) {
1293 if ((p->id[0] == phy->id[0]) &&
1294 (p->id[1] == (phy->id[1] & 0xfff0))) {
// MIX entries are resolved to LAN or HOME from the 100Mb status bits.
1300 phy->type = (p->type == MIX) ?
1301 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1302 LAN : HOME) : p->type;
1303 tp->features |= p->feature;
1305 phy->type = UNKNOWN;
1307 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1308 pci_name(tp->pci_dev),
1309 (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
/* Marvell 88E1111 quirk: program vendor registers 0x1b/0x14 with
 * values that depend on whether the MAC side is RGMII. */
1312 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1314 if (tp->features & F_PHY_88E1111) {
1315 void __iomem *ioaddr = tp->mmio_addr;
1316 int phy_id = tp->mii_if.phy_id;
1322 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1324 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1326 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1332 * sis190_mii_probe - Probe MII PHY for sis190
1333 * @dev: the net device to probe for
1335 * Search for total of 32 possible mii phy addresses.
1336 * Identify and set current phy if found one,
1337 * return error if it failed to found.
1339 static int __devinit sis190_mii_probe(struct net_device *dev)
1341 struct sis190_private *tp = netdev_priv(dev);
1342 struct mii_if_info *mii_if = &tp->mii_if;
1343 void __iomem *ioaddr = tp->mmio_addr;
1347 INIT_LIST_HEAD(&tp->first_phy);
1349 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1350 struct sis190_phy *phy;
1353 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1355 // Try next mii if the current one is not accessible.
1356 if (status == 0xffff || status == 0x0000)
1359 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
// Allocation failure: release everything found so far.
1361 sis190_free_phy(&tp->first_phy);
1366 sis190_init_phy(dev, tp, phy, phy_id, status);
1368 list_add(&tp->first_phy, &phy->list);
1371 if (list_empty(&tp->first_phy)) {
1372 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1373 pci_name(tp->pci_dev));
1378 /* Select default PHY for mac */
1379 sis190_default_phy(dev);
1381 sis190_mii_probe_88e1111_fixup(tp);
/* Hook the generic MII library up to our accessors. */
1384 mii_if->mdio_read = __mdio_read;
1385 mii_if->mdio_write = __mdio_write;
1386 mii_if->phy_id_mask = PHY_ID_ANY;
1387 mii_if->reg_num_mask = MII_REG_ANY;
/* Probe teardown counterpart: free the discovered PHY list. */
1392 static void __devexit sis190_mii_remove(struct net_device *dev)
1394 struct sis190_private *tp = netdev_priv(dev);
1396 sis190_free_phy(&tp->first_phy);
/* Undo sis190_init_board: unmap MMIO, release BARs, disable the PCI
 * device. */
1399 static void sis190_release_board(struct pci_dev *pdev)
1401 struct net_device *dev = pci_get_drvdata(pdev);
1402 struct sis190_private *tp = netdev_priv(dev);
1404 iounmap(tp->mmio_addr);
1405 pci_release_regions(pdev);
1406 pci_disable_device(pdev);
/* Probe-time board bring-up: allocate the netdev, enable and validate
 * the PCI device (MMIO BAR 0, 32-bit DMA), map the register window and
 * leave the chip soft-reset. Error paths unwind in reverse. */
1410 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1412 struct sis190_private *tp;
1413 struct net_device *dev;
1414 void __iomem *ioaddr;
1417 dev = alloc_etherdev(sizeof(*tp));
1419 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1424 SET_MODULE_OWNER(dev);
1425 SET_NETDEV_DEV(dev, &pdev->dev);
1427 tp = netdev_priv(dev);
1428 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1430 rc = pci_enable_device(pdev);
1432 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1433 goto err_free_dev_1;
/* BAR 0 must be an MMIO resource large enough for our register map. */
1438 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1439 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1441 goto err_pci_disable_2;
1443 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1444 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1446 goto err_pci_disable_2;
1449 rc = pci_request_regions(pdev, DRV_NAME);
1451 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1453 goto err_pci_disable_2;
1456 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1458 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1460 goto err_free_res_3;
1463 pci_set_master(pdev);
1465 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1467 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1470 goto err_free_res_3;
1474 tp->mmio_addr = ioaddr;
1476 sis190_irq_mask_and_ack(ioaddr);
1478 sis190_soft_reset(ioaddr);
/* Error unwind labels (bodies partly outside this view). */
1483 pci_release_regions(pdev);
1485 pci_disable_device(pdev);
/* ndo_tx_timeout: stop the transmitter, mask interrupts, flush the Tx
 * ring under the lock (shared IRQ may still fire), then restart the
 * hardware from scratch and wake the queue. */
1493 static void sis190_tx_timeout(struct net_device *dev)
1495 struct sis190_private *tp = netdev_priv(dev);
1496 void __iomem *ioaddr = tp->mmio_addr;
1499 /* Disable Tx, if not already */
1500 tmp8 = SIS_R8(TxControl);
1501 if (tmp8 & CmdTxEnb)
1502 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1505 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1506 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1508 /* Disable interrupts by clearing the interrupt mask. */
1509 SIS_W32(IntrMask, 0x0000);
1511 /* Stop a shared interrupt from scavenging while we are. */
1512 spin_lock_irq(&tp->lock);
1513 sis190_tx_clear(tp);
1514 spin_unlock_irq(&tp->lock);
1516 /* ...and finally, reset everything. */
1517 sis190_hw_start(dev);
1519 netif_wake_queue(dev);
/* Record RGMII capability from bit 7 of the EEPROM/APC info byte. */
1522 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1524 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1527 static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1528 struct net_device *dev)
1530 struct sis190_private *tp = netdev_priv(dev);
1531 void __iomem *ioaddr = tp->mmio_addr;
1535 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1538 /* Check to see if there is a sane EEPROM */
1539 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1541 if ((sig == 0xffff) || (sig == 0x0000)) {
1542 net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
1543 pci_name(pdev), sig);
1547 /* Get MAC address from EEPROM */
1548 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1549 __le16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1551 ((u16 *)dev->dev_addr)[0] = le16_to_cpu(w);
1554 sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
/**
 *	sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model
 *	@pdev: PCI device
 *	@dev: network device to get address for
 *
 *	SiS965 model, use APC CMOS RAM to store MAC address.
 *	APC CMOS RAM is accessed through ISA bridge.
 *	MAC address is read into @net_dev->dev_addr.
 */
1568 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1569 struct net_device *dev)
1571 struct sis190_private *tp = netdev_priv(dev);
1572 struct pci_dev *isa_bridge;
1576 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1579 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL);
1581 net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
1586 /* Enable port 78h & 79h to access APC Registers. */
1587 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1588 reg = (tmp8 & ~0x02);
1589 pci_write_config_byte(isa_bridge, 0x48, reg);
1591 pci_read_config_byte(isa_bridge, 0x48, ®);
1593 for (i = 0; i < MAC_ADDR_LEN; i++) {
1594 outb(0x9 + i, 0x78);
1595 dev->dev_addr[i] = inb(0x79);
1601 sis190_set_rgmii(tp, reg);
1603 /* Restore the value to ISA Bridge */
1604 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1605 pci_dev_put(isa_bridge);
/**
 *	sis190_init_rxfilter - Initialize the Rx filter
 *	@dev: network device to initialize
 *
 *	Set receive filter address to our MAC address
 *	and enable packet filtering.
 */
1617 static inline void sis190_init_rxfilter(struct net_device *dev)
1619 struct sis190_private *tp = netdev_priv(dev);
1620 void __iomem *ioaddr = tp->mmio_addr;
1624 ctl = SIS_R16(RxMacControl);
1626 * Disable packet filtering before setting filter.
1627 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1628 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1630 SIS_W16(RxMacControl, ctl & ~0x0f00);
1632 for (i = 0; i < MAC_ADDR_LEN; i++)
1633 SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1635 SIS_W16(RxMacControl, ctl);
1639 static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1643 pci_read_config_byte(pdev, 0x73, &from);
1645 return (from & 0x00000001) ?
1646 sis190_get_mac_addr_from_apc(pdev, dev) :
1647 sis190_get_mac_addr_from_eeprom(pdev, dev);
1650 static void sis190_set_speed_auto(struct net_device *dev)
1652 struct sis190_private *tp = netdev_priv(dev);
1653 void __iomem *ioaddr = tp->mmio_addr;
1654 int phy_id = tp->mii_if.phy_id;
1657 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1659 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1661 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1663 mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1664 ADVERTISE_100FULL | ADVERTISE_10FULL |
1665 ADVERTISE_100HALF | ADVERTISE_10HALF);
1667 // Enable 1000 Full Mode.
1668 mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1670 // Enable auto-negotiation and restart auto-negotiation.
1671 mdio_write(ioaddr, phy_id, MII_BMCR,
1672 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1675 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1677 struct sis190_private *tp = netdev_priv(dev);
1679 return mii_ethtool_gset(&tp->mii_if, cmd);
1682 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1684 struct sis190_private *tp = netdev_priv(dev);
1686 return mii_ethtool_sset(&tp->mii_if, cmd);
1689 static void sis190_get_drvinfo(struct net_device *dev,
1690 struct ethtool_drvinfo *info)
1692 struct sis190_private *tp = netdev_priv(dev);
1694 strcpy(info->driver, DRV_NAME);
1695 strcpy(info->version, DRV_VERSION);
1696 strcpy(info->bus_info, pci_name(tp->pci_dev));
1699 static int sis190_get_regs_len(struct net_device *dev)
1701 return SIS190_REGS_SIZE;
1704 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1707 struct sis190_private *tp = netdev_priv(dev);
1708 unsigned long flags;
1710 if (regs->len > SIS190_REGS_SIZE)
1711 regs->len = SIS190_REGS_SIZE;
1713 spin_lock_irqsave(&tp->lock, flags);
1714 memcpy_fromio(p, tp->mmio_addr, regs->len);
1715 spin_unlock_irqrestore(&tp->lock, flags);
1718 static int sis190_nway_reset(struct net_device *dev)
1720 struct sis190_private *tp = netdev_priv(dev);
1722 return mii_nway_restart(&tp->mii_if);
1725 static u32 sis190_get_msglevel(struct net_device *dev)
1727 struct sis190_private *tp = netdev_priv(dev);
1729 return tp->msg_enable;
1732 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1734 struct sis190_private *tp = netdev_priv(dev);
1736 tp->msg_enable = value;
1739 static struct ethtool_ops sis190_ethtool_ops = {
1740 .get_settings = sis190_get_settings,
1741 .set_settings = sis190_set_settings,
1742 .get_drvinfo = sis190_get_drvinfo,
1743 .get_regs_len = sis190_get_regs_len,
1744 .get_regs = sis190_get_regs,
1745 .get_link = ethtool_op_get_link,
1746 .get_msglevel = sis190_get_msglevel,
1747 .set_msglevel = sis190_set_msglevel,
1748 .nway_reset = sis190_nway_reset,
1751 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1753 struct sis190_private *tp = netdev_priv(dev);
1755 return !netif_running(dev) ? -EINVAL :
1756 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
/*
 * sis190_init_one - PCI probe callback for one SiS190/191 adapter.
 * @pdev: PCI device being probed.
 * @ent: matching entry of sis190_pci_tbl (driver_data indexes
 *       sis_chip_info for the chip name).
 *
 * Boards the device (sis190_init_board), reads the MAC address, wires up
 * the net_device operations, probes the phy and registers the netdev.
 *
 * NOTE(review): this listing is missing interior lines (the `rc`
 * declaration, `if (rc < 0)` guards before the gotos, the
 * CONFIG_NET_POLL_CONTROLLER `#endif`, returns and the error labels
 * err_remove_mii/err_release_board) -- reconcile with the full source.
 */
1759 static int __devinit sis190_init_one(struct pci_dev *pdev,
1760 const struct pci_device_id *ent)
/* Print the driver banner exactly once, on the first probe. */
1762 static int printed_version = 0;
1763 struct sis190_private *tp;
1764 struct net_device *dev;
1765 void __iomem *ioaddr;
1768 if (!printed_version) {
1769 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1770 printed_version = 1;
1773 dev = sis190_init_board(pdev);
1779 tp = netdev_priv(dev);
1780 ioaddr = tp->mmio_addr;
1782 rc = sis190_get_mac_addr(pdev, dev);
1784 goto err_release_board;
1786 sis190_init_rxfilter(dev);
/* Deferred phy work, run outside interrupt context. */
1788 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
/* Pre-ndo-ops era: handlers are assigned directly on net_device. */
1790 dev->open = sis190_open;
1791 dev->stop = sis190_close;
1792 dev->do_ioctl = sis190_ioctl;
1793 dev->get_stats = sis190_get_stats;
1794 dev->tx_timeout = sis190_tx_timeout;
1795 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1796 dev->hard_start_xmit = sis190_start_xmit;
1797 #ifdef CONFIG_NET_POLL_CONTROLLER
1798 dev->poll_controller = sis190_netpoll;
1800 dev->set_multicast_list = sis190_set_rx_mode;
1801 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1802 dev->irq = pdev->irq;
/* Dummy base_addr: the device is MMIO-only. */
1803 dev->base_addr = (unsigned long) 0xdead;
1805 spin_lock_init(&tp->lock);
1807 rc = sis190_mii_probe(dev);
1809 goto err_release_board;
1811 rc = register_netdev(dev);
1813 goto err_remove_mii;
1815 pci_set_drvdata(pdev, dev);
1817 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1818 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1819 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1821 dev->dev_addr[0], dev->dev_addr[1],
1822 dev->dev_addr[2], dev->dev_addr[3],
1823 dev->dev_addr[4], dev->dev_addr[5]);
1825 net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
1826 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
/* No link yet; autoneg will raise carrier from the phy task. */
1828 netif_carrier_off(dev);
1830 sis190_set_speed_auto(dev);
/* Error unwind: remove the phy, then release board resources. */
1835 sis190_mii_remove(dev);
1837 sis190_release_board(pdev);
1841 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1843 struct net_device *dev = pci_get_drvdata(pdev);
1845 sis190_mii_remove(dev);
1846 unregister_netdev(dev);
1847 sis190_release_board(pdev);
1848 pci_set_drvdata(pdev, NULL);
1851 static struct pci_driver sis190_pci_driver = {
1853 .id_table = sis190_pci_tbl,
1854 .probe = sis190_init_one,
1855 .remove = __devexit_p(sis190_remove_one),
1858 static int __init sis190_init_module(void)
1860 return pci_module_init(&sis190_pci_driver);
1863 static void __exit sis190_cleanup_module(void)
1865 pci_unregister_driver(&sis190_pci_driver);
1868 module_init(sis190_init_module);
1869 module_exit(sis190_cleanup_module);