2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
18 See the file COPYING in this distribution for more information.
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
35 #define net_drv(p, arg...) if (netif_msg_drv(p)) \
37 #define net_probe(p, arg...) if (netif_msg_probe(p)) \
39 #define net_link(p, arg...) if (netif_msg_link(p)) \
41 #define net_intr(p, arg...) if (netif_msg_intr(p)) \
43 #define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
46 #define PHY_MAX_ADDR 32
47 #define PHY_ID_ANY 0x1f
48 #define MII_REG_ANY 0x1f
50 #define DRV_VERSION "1.2"
51 #define DRV_NAME "sis190"
52 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
53 #define PFX DRV_NAME ": "
55 #define sis190_rx_skb netif_rx
56 #define sis190_rx_quota(count, quota) count
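/*
 * These two wrappers presumably leave room for a NAPI build: with plain
 * interrupt-driven Rx they collapse to netif_rx() and an unbounded quota.
 */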
58 #define MAC_ADDR_LEN 6
60 #define NUM_TX_DESC 64 /* [8..1024] */
61 #define NUM_RX_DESC 64 /* [8..8192] */
62 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
63 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
64 #define RX_BUF_SIZE 1536
65 #define RX_BUF_MASK 0xfff8
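/*
 * RX_BUF_MASK keeps Rx buffer sizes a multiple of 8: judging by the check
 * in sis190_set_rxbufsize(), the low bits of RxDesc->size are not usable
 * for the buffer length, so sizes are rounded down with this mask.
 */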
67 #define SIS190_REGS_SIZE 0x80
68 #define SIS190_TX_TIMEOUT (6*HZ)
69 #define SIS190_PHY_TIMEOUT (10*HZ)
70 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
71 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
74 /* Enhanced PHY access register bit definitions */
75 #define EhnMIIread 0x0000
76 #define EhnMIIwrite 0x0020
77 #define EhnMIIdataShift 16
78 #define EhnMIIpmdShift 6 /* 7016 only */
79 #define EhnMIIregShift 11
80 #define EhnMIIreq 0x0010
81 #define EhnMIInotDone 0x0010
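/*
 * Layout of a GMIIControl command word, as implied by the shifts above
 * (no public datasheet, so treat this as inferred): bits 31:16 carry the
 * data, bits 15:11 the PHY register, the PHY address starts at bit 6,
 * bit 5 selects write vs. read and bit 4 doubles as the request/busy flag.
 */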
83 /* Write/read MMIO register */
84 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
85 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
86 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
87 #define SIS_R8(reg) readb (ioaddr + (reg))
88 #define SIS_R16(reg) readw (ioaddr + (reg))
89 #define SIS_R32(reg) readl (ioaddr + (reg))
91 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
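/*
 * Reading IntrControl back forces any posted MMIO writes out to the chip
 * before the driver proceeds.
 */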
93 enum sis190_registers {
95 TxDescStartAddr = 0x04,
96 rsv0 = 0x08, // reserved
97 TxSts = 0x0c, // unused (Control/Status)
99 RxDescStartAddr = 0x14,
100 rsv1 = 0x18, // reserved
101 RxSts = 0x1c, // unused
105 IntrTimer = 0x2c, // unused (Interrupt Timer)
106 PMControl = 0x30, // unused (Power Mgmt Control/Status)
107 rsv2 = 0x34, // reserved
110 StationControl = 0x40,
112 GIoCR = 0x48, // unused (GMAC IO Compensation)
113 GIoCtrl = 0x4c, // unused (GMAC IO Control)
115 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
116 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
117 rsv3 = 0x5c, // reserved
121 // Undocumented = 0x6c,
123 RxWolData = 0x74, // unused (Rx WOL Data Access)
124 RxMPSControl = 0x78, // unused (Rx MPS Control)
125 rsv4 = 0x7c, // reserved
128 enum sis190_register_content {
130 SoftInt = 0x40000000, // unused
131 Timeup = 0x20000000, // unused
132 PauseFrame = 0x00080000, // unused
133 MagicPacket = 0x00040000, // unused
134 WakeupFrame = 0x00020000, // unused
135 LinkChange = 0x00010000,
136 RxQEmpty = 0x00000080,
138 TxQ1Empty = 0x00000020, // unused
139 TxQ1Int = 0x00000010,
140 TxQ0Empty = 0x00000008, // unused
141 TxQ0Int = 0x00000004,
147 CmdRxEnb = 0x08, // unused
149 RxBufEmpty = 0x01, // unused
152 Cfg9346_Lock = 0x00, // unused
153 Cfg9346_Unlock = 0xc0, // unused
156 AcceptErr = 0x20, // unused
157 AcceptRunt = 0x10, // unused
158 AcceptBroadcast = 0x0800,
159 AcceptMulticast = 0x0400,
160 AcceptMyPhys = 0x0200,
161 AcceptAllPhys = 0x0100,
165 RxCfgDMAShift = 8, // 0x1a in RxControl ?
168 TxInterFrameGapShift = 24,
169 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
171 LinkStatus = 0x02, // unused
172 FullDup = 0x01, // unused
175 TBILinkOK = 0x02000000, // unused
192 enum _DescStatusBit {
194 OWNbit = 0x80000000, // RXOWN/TXOWN
195 INTbit = 0x40000000, // RXINT/TXINT
196 CRCbit = 0x00020000, // CRCOFF/CRCEN
197 PADbit = 0x00010000, // PREADD/PADEN
199 RingEnd = 0x80000000,
201 LSEN = 0x08000000, // TSO ? -- FR
228 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
237 RxSizeMask = 0x0000ffff
239 * The ASIC could apparently do VLAN, TSO, jumbo frames (sis191 only)
240 * and provide two (unused with Linux) Tx queues. No publicly
241 * available documentation, alas.
245 enum sis190_eeprom_access_register_bits {
246 EECS = 0x00000001, // unused
247 EECLK = 0x00000002, // unused
248 EEDO = 0x00000008, // unused
249 EEDI = 0x00000004, // unused
252 EEWOP = 0x00000100 // unused
255 /* EEPROM Addresses */
256 enum sis190_eeprom_address {
257 EEPROMSignature = 0x00,
258 EEPROMCLK = 0x01, // unused
263 enum sis190_feature {
269 struct sis190_private {
270 void __iomem *mmio_addr;
271 struct pci_dev *pci_dev;
272 struct net_device *dev;
281 struct RxDesc *RxDescRing;
282 struct TxDesc *TxDescRing;
283 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
284 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
285 struct work_struct phy_task;
286 struct timer_list timer;
288 struct mii_if_info mii_if;
289 struct list_head first_phy;
294 struct list_head list;
301 enum sis190_phy_type {
308 static struct mii_chip_info {
313 } mii_chip_table[] = {
314 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
315 { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 },
316 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
317 { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
318 { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 },
322 static const struct {
324 } sis_chip_info[] = {
325 { "SiS 190 PCI Fast Ethernet adapter" },
326 { "SiS 191 PCI Gigabit Ethernet adapter" },
329 static struct pci_device_id sis190_pci_tbl[] = {
330 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
331 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
335 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
337 static int rx_copybreak = 200;
343 MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
344 module_param(rx_copybreak, int, 0);
345 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
346 module_param_named(debug, debug.msg_enable, int, 0);
347 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
348 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
349 MODULE_VERSION(DRV_VERSION);
350 MODULE_LICENSE("GPL");
352 static const u32 sis190_intr_mask =
353 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
356 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
357 * The chips use a 64-element hash table based on the Ethernet CRC.
359 static const int multicast_filter_limit = 32;
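/*
 * Sketch of that hash scheme, as used by sis190_set_rx_mode() below: the
 * low six bits of the Ethernet CRC of the address pick one of 64 filter
 * bits spread across the two 32-bit RxHashTable registers:
 *
 *	bit_nr = ether_crc(ETH_ALEN, addr) & 0x3f;
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */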
361 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
365 SIS_W32(GMIIControl, ctl);
369 for (i = 0; i < 100; i++) {
370 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
376 printk(KERN_ERR PFX "PHY command failed!\n");
379 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
381 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
382 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
383 (((u32) val) << EhnMIIdataShift));
386 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
388 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
389 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
391 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
394 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
396 struct sis190_private *tp = netdev_priv(dev);
398 mdio_write(tp->mmio_addr, phy_id, reg, val);
401 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
403 struct sis190_private *tp = netdev_priv(dev);
405 return mdio_read(tp->mmio_addr, phy_id, reg);
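/*
 * Several MII status bits (BMSR link status in particular) are latched:
 * the first read returns the latched value, so read the register twice
 * and report the second, current, value.
 */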
408 static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
410 mdio_read(ioaddr, phy_id, reg);
411 return mdio_read(ioaddr, phy_id, reg);
414 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
419 if (!(SIS_R32(ROMControl) & 0x0002))
422 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
424 for (i = 0; i < 200; i++) {
425 if (!(SIS_R32(ROMInterface) & EEREQ)) {
426 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
435 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
437 SIS_W32(IntrMask, 0x00);
438 SIS_W32(IntrStatus, 0xffffffff);
442 static void sis190_asic_down(void __iomem *ioaddr)
444 /* Stop the chip's Tx and Rx DMA processes. */
446 SIS_W32(TxControl, 0x1a00);
447 SIS_W32(RxControl, 0x1a00);
449 sis190_irq_mask_and_ack(ioaddr);
452 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
454 desc->size |= cpu_to_le32(RingEnd);
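/*
 * Hand a descriptor (back) to the chip: restore the buffer size, keep the
 * ring-end marker and set OWN so the hardware may use it again; INT
 * presumably requests an interrupt once the descriptor completes.
 */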
457 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
459 u32 eor = le32_to_cpu(desc->size) & RingEnd;
462 desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
464 desc->status = cpu_to_le32(OWNbit | INTbit);
467 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
470 desc->addr = cpu_to_le32(mapping);
471 sis190_give_to_asic(desc, rx_buf_sz);
474 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
477 desc->addr = cpu_to_le32(0xdeadbeef);
478 desc->size &= cpu_to_le32(RingEnd);
483 static struct sk_buff *sis190_alloc_rx_skb(struct pci_dev *pdev,
484 struct RxDesc *desc, u32 rx_buf_sz)
488 skb = dev_alloc_skb(rx_buf_sz);
492 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
494 sis190_map_to_asic(desc, mapping, rx_buf_sz);
496 sis190_make_unusable_by_asic(desc);
501 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
506 for (cur = start; cur < end; cur++) {
507 unsigned int i = cur % NUM_RX_DESC;
509 if (tp->Rx_skbuff[i])
512 tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp->pci_dev,
515 if (!tp->Rx_skbuff[i])
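/*
 * Copybreak path: frames shorter than rx_copybreak are copied into a
 * freshly allocated skb so that the original full-sized Rx buffer can be
 * handed straight back to the chip instead of being unmapped and replaced.
 */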
521 static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
522 struct RxDesc *desc, int rx_buf_sz)
526 if (pkt_size < rx_copybreak) {
529 skb = dev_alloc_skb(pkt_size + 2);
532 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
534 sis190_give_to_asic(desc, rx_buf_sz);
541 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
543 #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
545 if ((status & CRCOK) && !(status & ErrMask))
548 if (!(status & CRCOK))
549 stats->rx_crc_errors++;
550 else if (status & OVRUN)
551 stats->rx_over_errors++;
552 else if (status & (SHORT | LIMIT))
553 stats->rx_length_errors++;
554 else if (status & (MIIER | NIBON | COLON))
555 stats->rx_frame_errors++;
561 static int sis190_rx_interrupt(struct net_device *dev,
562 struct sis190_private *tp, void __iomem *ioaddr)
564 struct net_device_stats *stats = &dev->stats;
565 u32 rx_left, cur_rx = tp->cur_rx;
568 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
569 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
571 for (; rx_left > 0; rx_left--, cur_rx++) {
572 unsigned int entry = cur_rx % NUM_RX_DESC;
573 struct RxDesc *desc = tp->RxDescRing + entry;
576 if (le32_to_cpu(desc->status) & OWNbit)
579 status = le32_to_cpu(desc->PSize);
581 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
584 if (sis190_rx_pkt_err(status, stats) < 0)
585 sis190_give_to_asic(desc, tp->rx_buf_sz);
587 struct sk_buff *skb = tp->Rx_skbuff[entry];
588 int pkt_size = (status & RxSizeMask) - 4;
589 void (*pci_action)(struct pci_dev *, dma_addr_t,
590 size_t, int) = pci_dma_sync_single_for_device;
592 if (unlikely(pkt_size > tp->rx_buf_sz)) {
593 net_intr(tp, KERN_INFO
594 "%s: (frag) status = %08x.\n",
597 stats->rx_length_errors++;
598 sis190_give_to_asic(desc, tp->rx_buf_sz);
602 pci_dma_sync_single_for_cpu(tp->pci_dev,
603 le32_to_cpu(desc->addr), tp->rx_buf_sz,
606 if (sis190_try_rx_copy(&skb, pkt_size, desc,
608 pci_action = pci_unmap_single;
609 tp->Rx_skbuff[entry] = NULL;
610 sis190_make_unusable_by_asic(desc);
613 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
614 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
616 skb_put(skb, pkt_size);
617 skb->protocol = eth_type_trans(skb, dev);
621 dev->last_rx = jiffies;
623 stats->rx_bytes += pkt_size;
624 if ((status & BCAST) == MCAST)
628 count = cur_rx - tp->cur_rx;
631 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
632 if (!delta && count && netif_msg_intr(tp))
633 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
634 tp->dirty_rx += delta;
636 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
637 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
642 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
647 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
649 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
651 memset(desc, 0x00, sizeof(*desc));
654 static void sis190_tx_interrupt(struct net_device *dev,
655 struct sis190_private *tp, void __iomem *ioaddr)
657 u32 pending, dirty_tx = tp->dirty_tx;
659 * It would not be needed if it were harmless to re-enable queueing
660 * too early (hint: think preempt and unclocked smp systems).
662 unsigned int queue_stopped;
665 pending = tp->cur_tx - dirty_tx;
666 queue_stopped = (pending == NUM_TX_DESC);
668 for (; pending; pending--, dirty_tx++) {
669 unsigned int entry = dirty_tx % NUM_TX_DESC;
670 struct TxDesc *txd = tp->TxDescRing + entry;
673 if (le32_to_cpu(txd->status) & OWNbit)
676 skb = tp->Tx_skbuff[entry];
678 dev->stats.tx_packets++;
679 dev->stats.tx_bytes += skb->len;
681 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
682 tp->Tx_skbuff[entry] = NULL;
683 dev_kfree_skb_irq(skb);
686 if (tp->dirty_tx != dirty_tx) {
687 tp->dirty_tx = dirty_tx;
690 netif_wake_queue(dev);
695 * The interrupt handler does all of the Rx thread work and cleans up after
698 static irqreturn_t sis190_interrupt(int irq, void *__dev)
700 struct net_device *dev = __dev;
701 struct sis190_private *tp = netdev_priv(dev);
702 void __iomem *ioaddr = tp->mmio_addr;
703 unsigned int handled = 0;
706 status = SIS_R32(IntrStatus);
708 if ((status == 0xffffffff) || !status)
713 if (unlikely(!netif_running(dev))) {
714 sis190_asic_down(ioaddr);
718 SIS_W32(IntrStatus, status);
720 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
722 if (status & LinkChange) {
723 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
724 schedule_work(&tp->phy_task);
728 sis190_rx_interrupt(dev, tp, ioaddr);
730 if (status & TxQ0Int)
731 sis190_tx_interrupt(dev, tp, ioaddr);
733 return IRQ_RETVAL(handled);
736 #ifdef CONFIG_NET_POLL_CONTROLLER
737 static void sis190_netpoll(struct net_device *dev)
739 struct sis190_private *tp = netdev_priv(dev);
740 struct pci_dev *pdev = tp->pci_dev;
742 disable_irq(pdev->irq);
743 sis190_interrupt(pdev->irq, dev);
744 enable_irq(pdev->irq);
748 static void sis190_free_rx_skb(struct sis190_private *tp,
749 struct sk_buff **sk_buff, struct RxDesc *desc)
751 struct pci_dev *pdev = tp->pci_dev;
753 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
755 dev_kfree_skb(*sk_buff);
757 sis190_make_unusable_by_asic(desc);
760 static void sis190_rx_clear(struct sis190_private *tp)
764 for (i = 0; i < NUM_RX_DESC; i++) {
765 if (!tp->Rx_skbuff[i])
767 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
771 static void sis190_init_ring_indexes(struct sis190_private *tp)
773 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
776 static int sis190_init_ring(struct net_device *dev)
778 struct sis190_private *tp = netdev_priv(dev);
780 sis190_init_ring_indexes(tp);
782 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
783 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
785 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
788 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
797 static void sis190_set_rx_mode(struct net_device *dev)
799 struct sis190_private *tp = netdev_priv(dev);
800 void __iomem *ioaddr = tp->mmio_addr;
802 u32 mc_filter[2]; /* Multicast hash filter */
805 if (dev->flags & IFF_PROMISC) {
807 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
809 mc_filter[1] = mc_filter[0] = 0xffffffff;
810 } else if ((dev->mc_count > multicast_filter_limit) ||
811 (dev->flags & IFF_ALLMULTI)) {
812 /* Too many to filter perfectly -- accept all multicasts. */
813 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
814 mc_filter[1] = mc_filter[0] = 0xffffffff;
816 struct dev_mc_list *mclist;
819 rx_mode = AcceptBroadcast | AcceptMyPhys;
820 mc_filter[1] = mc_filter[0] = 0;
821 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
822 i++, mclist = mclist->next) {
824 ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
825 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
826 rx_mode |= AcceptMulticast;
830 spin_lock_irqsave(&tp->lock, flags);
832 SIS_W16(RxMacControl, rx_mode | 0x2);
833 SIS_W32(RxHashTable, mc_filter[0]);
834 SIS_W32(RxHashTable + 4, mc_filter[1]);
836 spin_unlock_irqrestore(&tp->lock, flags);
839 static void sis190_soft_reset(void __iomem *ioaddr)
841 SIS_W32(IntrControl, 0x8000);
843 SIS_W32(IntrControl, 0x0);
844 sis190_asic_down(ioaddr);
847 static void sis190_hw_start(struct net_device *dev)
849 struct sis190_private *tp = netdev_priv(dev);
850 void __iomem *ioaddr = tp->mmio_addr;
852 sis190_soft_reset(ioaddr);
854 SIS_W32(TxDescStartAddr, tp->tx_dma);
855 SIS_W32(RxDescStartAddr, tp->rx_dma);
857 SIS_W32(IntrStatus, 0xffffffff);
858 SIS_W32(IntrMask, 0x0);
859 SIS_W32(GMIIControl, 0x0);
860 SIS_W32(TxMacControl, 0x60);
861 SIS_W16(RxMacControl, 0x02);
862 SIS_W32(RxHashTable, 0x0);
864 SIS_W32(RxWolCtrl, 0x0);
865 SIS_W32(RxWolData, 0x0);
869 sis190_set_rx_mode(dev);
871 /* Enable all known interrupts by setting the interrupt mask. */
872 SIS_W32(IntrMask, sis190_intr_mask);
874 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
875 SIS_W32(RxControl, 0x1a1d);
877 netif_start_queue(dev);
880 static void sis190_phy_task(struct work_struct *work)
882 struct sis190_private *tp =
883 container_of(work, struct sis190_private, phy_task);
884 struct net_device *dev = tp->dev;
885 void __iomem *ioaddr = tp->mmio_addr;
886 int phy_id = tp->mii_if.phy_id;
891 if (!netif_running(dev))
894 val = mdio_read(ioaddr, phy_id, MII_BMCR);
895 if (val & BMCR_RESET) {
896 // FIXME: needlessly high ? -- FR 02/07/2005
897 mod_timer(&tp->timer, jiffies + HZ/10);
898 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
899 BMSR_ANEGCOMPLETE)) {
900 net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
902 netif_carrier_off(dev);
903 mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
904 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
912 { LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
913 "1000 Mbps Full Duplex" },
914 { LPA_1000XHALF | LPA_SLCT, 0x07000c00,
915 "1000 Mbps Half Duplex" },
916 { LPA_100FULL, 0x04000800 | 0x00001000,
917 "100 Mbps Full Duplex" },
918 { LPA_100HALF, 0x04000800,
919 "100 Mbps Half Duplex" },
920 { LPA_10FULL, 0x04000400 | 0x00001000,
921 "10 Mbps Full Duplex" },
922 { LPA_10HALF, 0x04000400,
923 "10 Mbps Half Duplex" },
924 { 0, 0x04000400, "unknown" }
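/*
 * The table above maps negotiated link-partner bits to StationControl
 * speed/duplex settings; judging by the entries, 0x00001000 looks like
 * the full-duplex bit and the last row is the catch-all default.
 */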
928 val = mdio_read(ioaddr, phy_id, 0x1f);
929 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
931 val = mdio_read(ioaddr, phy_id, MII_LPA);
932 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
933 net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
934 dev->name, val, adv);
938 for (p = reg31; p->val; p++) {
939 if ((val & p->val) == p->val)
943 p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
945 if ((tp->features & F_HAS_RGMII) &&
946 (tp->features & F_PHY_BCM5461)) {
947 // Set Tx Delay in RGMII mode.
948 mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
950 mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
951 p->ctl |= 0x03000000;
954 SIS_W32(StationControl, p->ctl);
956 if (tp->features & F_HAS_RGMII) {
957 SIS_W32(RGDelay, 0x0441);
958 SIS_W32(RGDelay, 0x0440);
961 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
963 netif_carrier_on(dev);
970 static void sis190_phy_timer(unsigned long __opaque)
972 struct net_device *dev = (struct net_device *)__opaque;
973 struct sis190_private *tp = netdev_priv(dev);
975 if (likely(netif_running(dev)))
976 schedule_work(&tp->phy_task);
979 static inline void sis190_delete_timer(struct net_device *dev)
981 struct sis190_private *tp = netdev_priv(dev);
983 del_timer_sync(&tp->timer);
986 static inline void sis190_request_timer(struct net_device *dev)
988 struct sis190_private *tp = netdev_priv(dev);
989 struct timer_list *timer = &tp->timer;
992 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
993 timer->data = (unsigned long)dev;
994 timer->function = sis190_phy_timer;
998 static void sis190_set_rxbufsize(struct sis190_private *tp,
999 struct net_device *dev)
1001 unsigned int mtu = dev->mtu;
1003 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1004 /* RxDesc->size has a licence to kill the lower bits */
1005 if (tp->rx_buf_sz & 0x07) {
1007 tp->rx_buf_sz &= RX_BUF_MASK;
1011 static int sis190_open(struct net_device *dev)
1013 struct sis190_private *tp = netdev_priv(dev);
1014 struct pci_dev *pdev = tp->pci_dev;
1017 sis190_set_rxbufsize(tp, dev);
1020 * Rx and Tx descriptors need 256-byte alignment.
1021 * pci_alloc_consistent() guarantees a stronger alignment.
1023 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1024 if (!tp->TxDescRing)
1027 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1028 if (!tp->RxDescRing)
1031 rc = sis190_init_ring(dev);
1035 sis190_request_timer(dev);
1037 rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
1039 goto err_release_timer_2;
1041 sis190_hw_start(dev);
1045 err_release_timer_2:
1046 sis190_delete_timer(dev);
1047 sis190_rx_clear(tp);
1049 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1052 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
1057 static void sis190_tx_clear(struct sis190_private *tp)
1061 for (i = 0; i < NUM_TX_DESC; i++) {
1062 struct sk_buff *skb = tp->Tx_skbuff[i];
1067 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1068 tp->Tx_skbuff[i] = NULL;
1071 tp->dev->stats.tx_dropped++;
1073 tp->cur_tx = tp->dirty_tx = 0;
1076 static void sis190_down(struct net_device *dev)
1078 struct sis190_private *tp = netdev_priv(dev);
1079 void __iomem *ioaddr = tp->mmio_addr;
1080 unsigned int poll_locked = 0;
1082 sis190_delete_timer(dev);
1084 netif_stop_queue(dev);
1087 spin_lock_irq(&tp->lock);
1089 sis190_asic_down(ioaddr);
1091 spin_unlock_irq(&tp->lock);
1093 synchronize_irq(dev->irq);
1098 synchronize_sched();
1100 } while (SIS_R32(IntrMask));
1102 sis190_tx_clear(tp);
1103 sis190_rx_clear(tp);
1106 static int sis190_close(struct net_device *dev)
1108 struct sis190_private *tp = netdev_priv(dev);
1109 struct pci_dev *pdev = tp->pci_dev;
1113 free_irq(dev->irq, dev);
1115 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1116 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1118 tp->TxDescRing = NULL;
1119 tp->RxDescRing = NULL;
1124 static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1126 struct sis190_private *tp = netdev_priv(dev);
1127 void __iomem *ioaddr = tp->mmio_addr;
1128 u32 len, entry, dirty_tx;
1129 struct TxDesc *desc;
1132 if (unlikely(skb->len < ETH_ZLEN)) {
1133 if (skb_padto(skb, ETH_ZLEN)) {
1134 dev->stats.tx_dropped++;
1142 entry = tp->cur_tx % NUM_TX_DESC;
1143 desc = tp->TxDescRing + entry;
1145 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1146 netif_stop_queue(dev);
1147 net_tx_err(tp, KERN_ERR PFX
1148 "%s: BUG! Tx Ring full when queue awake!\n",
1150 return NETDEV_TX_BUSY;
1153 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1155 tp->Tx_skbuff[entry] = skb;
1157 desc->PSize = cpu_to_le32(len);
1158 desc->addr = cpu_to_le32(mapping);
1160 desc->size = cpu_to_le32(len);
1161 if (entry == (NUM_TX_DESC - 1))
1162 desc->size |= cpu_to_le32(RingEnd);
1166 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1172 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1174 dev->trans_start = jiffies;
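/*
 * If this submission filled the ring, stop the queue, then re-check
 * dirty_tx: if the irq handler freed descriptors in the meantime it may
 * have skipped the wake-up, so wake the queue here instead.
 */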
1176 dirty_tx = tp->dirty_tx;
1177 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1178 netif_stop_queue(dev);
1180 if (dirty_tx != tp->dirty_tx)
1181 netif_wake_queue(dev);
1184 return NETDEV_TX_OK;
1187 static void sis190_free_phy(struct list_head *first_phy)
1189 struct sis190_phy *cur, *next;
1191 list_for_each_entry_safe(cur, next, first_phy, list) {
1197 * sis190_default_phy - Select default PHY for sis190 mac.
1198 * @dev: the net device to probe for
1200 * Select the first detected PHY with link as the default.
1201 * If none has link, select the PHY whose type is HOME as the default.
1202 * If no HOME PHY exists, select LAN.
1204 static u16 sis190_default_phy(struct net_device *dev)
1206 struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1207 struct sis190_private *tp = netdev_priv(dev);
1208 struct mii_if_info *mii_if = &tp->mii_if;
1209 void __iomem *ioaddr = tp->mmio_addr;
1212 phy_home = phy_default = phy_lan = NULL;
1214 list_for_each_entry(phy, &tp->first_phy, list) {
1215 status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1217 // Link up, not the currently selected default PHY, and not a ghost PHY.
1218 if ((status & BMSR_LSTATUS) &&
1220 (phy->type != UNKNOWN)) {
1223 status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1224 mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1225 status | BMCR_ANENABLE | BMCR_ISOLATE);
1226 if (phy->type == HOME)
1228 else if (phy->type == LAN)
1235 phy_default = phy_home;
1237 phy_default = phy_lan;
1239 phy_default = list_entry(&tp->first_phy,
1240 struct sis190_phy, list);
1243 if (mii_if->phy_id != phy_default->phy_id) {
1244 mii_if->phy_id = phy_default->phy_id;
1245 net_probe(tp, KERN_INFO
1246 "%s: Using transceiver at address %d as default.\n",
1247 pci_name(tp->pci_dev), mii_if->phy_id);
1250 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1251 status &= (~BMCR_ISOLATE);
1253 mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1254 status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1259 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1260 struct sis190_phy *phy, unsigned int phy_id,
1263 void __iomem *ioaddr = tp->mmio_addr;
1264 struct mii_chip_info *p;
1266 INIT_LIST_HEAD(&phy->list);
1267 phy->status = mii_status;
1268 phy->phy_id = phy_id;
1270 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1271 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1273 for (p = mii_chip_table; p->type; p++) {
1274 if ((p->id[0] == phy->id[0]) &&
1275 (p->id[1] == (phy->id[1] & 0xfff0))) {
1281 phy->type = (p->type == MIX) ?
1282 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1283 LAN : HOME) : p->type;
1284 tp->features |= p->feature;
1286 phy->type = UNKNOWN;
1288 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1289 pci_name(tp->pci_dev),
1290 (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
1293 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1295 if (tp->features & F_PHY_88E1111) {
1296 void __iomem *ioaddr = tp->mmio_addr;
1297 int phy_id = tp->mii_if.phy_id;
1303 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1305 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1307 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1313 * sis190_mii_probe - Probe MII PHY for sis190
1314 * @dev: the net device to probe for
1316 * Search through all 32 possible MII PHY addresses.
1317 * Identify and set the current PHY if one is found,
1318 * return an error if none is found.
1320 static int __devinit sis190_mii_probe(struct net_device *dev)
1322 struct sis190_private *tp = netdev_priv(dev);
1323 struct mii_if_info *mii_if = &tp->mii_if;
1324 void __iomem *ioaddr = tp->mmio_addr;
1328 INIT_LIST_HEAD(&tp->first_phy);
1330 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1331 struct sis190_phy *phy;
1334 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1336 // Try next mii if the current one is not accessible.
1337 if (status == 0xffff || status == 0x0000)
1340 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1342 sis190_free_phy(&tp->first_phy);
1347 sis190_init_phy(dev, tp, phy, phy_id, status);
1349 list_add(&tp->first_phy, &phy->list);
1352 if (list_empty(&tp->first_phy)) {
1353 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1354 pci_name(tp->pci_dev));
1359 /* Select default PHY for mac */
1360 sis190_default_phy(dev);
1362 sis190_mii_probe_88e1111_fixup(tp);
1365 mii_if->mdio_read = __mdio_read;
1366 mii_if->mdio_write = __mdio_write;
1367 mii_if->phy_id_mask = PHY_ID_ANY;
1368 mii_if->reg_num_mask = MII_REG_ANY;
1373 static void sis190_mii_remove(struct net_device *dev)
1375 struct sis190_private *tp = netdev_priv(dev);
1377 sis190_free_phy(&tp->first_phy);
1380 static void sis190_release_board(struct pci_dev *pdev)
1382 struct net_device *dev = pci_get_drvdata(pdev);
1383 struct sis190_private *tp = netdev_priv(dev);
1385 iounmap(tp->mmio_addr);
1386 pci_release_regions(pdev);
1387 pci_disable_device(pdev);
1391 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1393 struct sis190_private *tp;
1394 struct net_device *dev;
1395 void __iomem *ioaddr;
1398 dev = alloc_etherdev(sizeof(*tp));
1400 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1405 SET_NETDEV_DEV(dev, &pdev->dev);
1407 tp = netdev_priv(dev);
1409 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1411 rc = pci_enable_device(pdev);
1413 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1414 goto err_free_dev_1;
1419 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1420 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1422 goto err_pci_disable_2;
1424 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1425 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1427 goto err_pci_disable_2;
1430 rc = pci_request_regions(pdev, DRV_NAME);
1432 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1434 goto err_pci_disable_2;
1437 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1439 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1441 goto err_free_res_3;
1444 pci_set_master(pdev);
1446 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1448 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1451 goto err_free_res_3;
1455 tp->mmio_addr = ioaddr;
1457 sis190_irq_mask_and_ack(ioaddr);
1459 sis190_soft_reset(ioaddr);
1464 pci_release_regions(pdev);
1466 pci_disable_device(pdev);
1474 static void sis190_tx_timeout(struct net_device *dev)
1476 struct sis190_private *tp = netdev_priv(dev);
1477 void __iomem *ioaddr = tp->mmio_addr;
1480 /* Disable Tx, if not already */
1481 tmp8 = SIS_R8(TxControl);
1482 if (tmp8 & CmdTxEnb)
1483 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1486 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1487 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1489 /* Disable interrupts by clearing the interrupt mask. */
1490 SIS_W32(IntrMask, 0x0000);
1492 /* Stop a shared interrupt from scavenging while we are. */
1493 spin_lock_irq(&tp->lock);
1494 sis190_tx_clear(tp);
1495 spin_unlock_irq(&tp->lock);
1497 /* ...and finally, reset everything. */
1498 sis190_hw_start(dev);
1500 netif_wake_queue(dev);
1503 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1505 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1508 static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1509 struct net_device *dev)
1511 struct sis190_private *tp = netdev_priv(dev);
1512 void __iomem *ioaddr = tp->mmio_addr;
1516 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1519 /* Check to see if there is a sane EEPROM */
1520 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1522 if ((sig == 0xffff) || (sig == 0x0000)) {
1523 net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
1524 pci_name(pdev), sig);
1528 /* Get MAC address from EEPROM */
1529 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1530 u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1532 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
1535 sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
1541 * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
1543 * @dev: network device to get address for
1545 * SiS96x models store the MAC address in APC CMOS RAM.
1546 * The APC CMOS RAM is accessed through the ISA bridge.
1547 * The MAC address is read into @dev->dev_addr.
1549 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1550 struct net_device *dev)
1552 static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
1553 struct sis190_private *tp = netdev_priv(dev);
1554 struct pci_dev *isa_bridge;
1558 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1561 for (i = 0; i < ARRAY_SIZE(ids); i++) {
1562 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
1568 net_probe(tp, KERN_INFO "%s: Cannot find ISA bridge.\n",
1573 /* Enable port 78h & 79h to access APC Registers. */
1574 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1575 reg = (tmp8 & ~0x02);
1576 pci_write_config_byte(isa_bridge, 0x48, reg);
1578 pci_read_config_byte(isa_bridge, 0x48, &reg);
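/*
 * The APC registers are reached through index port 0x78 and data port
 * 0x79; per the loop below, the MAC address sits at APC offsets 9..14.
 */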
1580 for (i = 0; i < MAC_ADDR_LEN; i++) {
1581 outb(0x9 + i, 0x78);
1582 dev->dev_addr[i] = inb(0x79);
1588 sis190_set_rgmii(tp, reg);
1590 /* Restore the value to ISA Bridge */
1591 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1592 pci_dev_put(isa_bridge);
1598 * sis190_init_rxfilter - Initialize the Rx filter
1599 * @dev: network device to initialize
1601 * Set receive filter address to our MAC address
1602 * and enable packet filtering.
1604 static inline void sis190_init_rxfilter(struct net_device *dev)
1606 struct sis190_private *tp = netdev_priv(dev);
1607 void __iomem *ioaddr = tp->mmio_addr;
1611 ctl = SIS_R16(RxMacControl);
1613 * Disable packet filtering before setting filter.
1614 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1615 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1617 SIS_W16(RxMacControl, ctl & ~0x0f00);
1619 for (i = 0; i < MAC_ADDR_LEN; i++)
1620 SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1622 SIS_W16(RxMacControl, ctl);
1626 static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
1627 struct net_device *dev)
1631 rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1635 pci_read_config_byte(pdev, 0x73, &reg);
1637 if (reg & 0x00000001)
1638 rc = sis190_get_mac_addr_from_apc(pdev, dev);
1643 static void sis190_set_speed_auto(struct net_device *dev)
1645 struct sis190_private *tp = netdev_priv(dev);
1646 void __iomem *ioaddr = tp->mmio_addr;
1647 int phy_id = tp->mii_if.phy_id;
1650 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1652 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1654 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1656 mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1657 ADVERTISE_100FULL | ADVERTISE_10FULL |
1658 ADVERTISE_100HALF | ADVERTISE_10HALF);
1660 // Enable 1000 Full Mode.
1661 mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1663 // Enable auto-negotiation and restart auto-negotiation.
1664 mdio_write(ioaddr, phy_id, MII_BMCR,
1665 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1668 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1670 struct sis190_private *tp = netdev_priv(dev);
1672 return mii_ethtool_gset(&tp->mii_if, cmd);
1675 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1677 struct sis190_private *tp = netdev_priv(dev);
1679 return mii_ethtool_sset(&tp->mii_if, cmd);
1682 static void sis190_get_drvinfo(struct net_device *dev,
1683 struct ethtool_drvinfo *info)
1685 struct sis190_private *tp = netdev_priv(dev);
1687 strcpy(info->driver, DRV_NAME);
1688 strcpy(info->version, DRV_VERSION);
1689 strcpy(info->bus_info, pci_name(tp->pci_dev));
1692 static int sis190_get_regs_len(struct net_device *dev)
1694 return SIS190_REGS_SIZE;
1697 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1700 struct sis190_private *tp = netdev_priv(dev);
1701 unsigned long flags;
1703 if (regs->len > SIS190_REGS_SIZE)
1704 regs->len = SIS190_REGS_SIZE;
1706 spin_lock_irqsave(&tp->lock, flags);
1707 memcpy_fromio(p, tp->mmio_addr, regs->len);
1708 spin_unlock_irqrestore(&tp->lock, flags);
1711 static int sis190_nway_reset(struct net_device *dev)
1713 struct sis190_private *tp = netdev_priv(dev);
1715 return mii_nway_restart(&tp->mii_if);
1718 static u32 sis190_get_msglevel(struct net_device *dev)
1720 struct sis190_private *tp = netdev_priv(dev);
1722 return tp->msg_enable;
1725 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1727 struct sis190_private *tp = netdev_priv(dev);
1729 tp->msg_enable = value;
1732 static const struct ethtool_ops sis190_ethtool_ops = {
1733 .get_settings = sis190_get_settings,
1734 .set_settings = sis190_set_settings,
1735 .get_drvinfo = sis190_get_drvinfo,
1736 .get_regs_len = sis190_get_regs_len,
1737 .get_regs = sis190_get_regs,
1738 .get_link = ethtool_op_get_link,
1739 .get_msglevel = sis190_get_msglevel,
1740 .set_msglevel = sis190_set_msglevel,
1741 .nway_reset = sis190_nway_reset,
1744 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1746 struct sis190_private *tp = netdev_priv(dev);
1748 return !netif_running(dev) ? -EINVAL :
1749 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1752 static int __devinit sis190_init_one(struct pci_dev *pdev,
1753 const struct pci_device_id *ent)
1755 static int printed_version = 0;
1756 struct sis190_private *tp;
1757 struct net_device *dev;
1758 void __iomem *ioaddr;
1760 DECLARE_MAC_BUF(mac);
1762 if (!printed_version) {
1763 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1764 printed_version = 1;
1767 dev = sis190_init_board(pdev);
1773 pci_set_drvdata(pdev, dev);
1775 tp = netdev_priv(dev);
1776 ioaddr = tp->mmio_addr;
1778 rc = sis190_get_mac_addr(pdev, dev);
1780 goto err_release_board;
1782 sis190_init_rxfilter(dev);
1784 INIT_WORK(&tp->phy_task, sis190_phy_task);
1786 dev->open = sis190_open;
1787 dev->stop = sis190_close;
1788 dev->do_ioctl = sis190_ioctl;
1789 dev->tx_timeout = sis190_tx_timeout;
1790 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1791 dev->hard_start_xmit = sis190_start_xmit;
1792 #ifdef CONFIG_NET_POLL_CONTROLLER
1793 dev->poll_controller = sis190_netpoll;
1795 dev->set_multicast_list = sis190_set_rx_mode;
1796 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1797 dev->irq = pdev->irq;
1798 dev->base_addr = (unsigned long) 0xdead;
1800 spin_lock_init(&tp->lock);
1802 rc = sis190_mii_probe(dev);
1804 goto err_release_board;
1806 rc = register_netdev(dev);
1808 goto err_remove_mii;
1810 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1812 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1813 ioaddr, dev->irq, print_mac(mac, dev->dev_addr));
1815 net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
1816 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1818 netif_carrier_off(dev);
1820 sis190_set_speed_auto(dev);
1825 sis190_mii_remove(dev);
1827 sis190_release_board(pdev);
1831 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1833 struct net_device *dev = pci_get_drvdata(pdev);
1835 sis190_mii_remove(dev);
1836 flush_scheduled_work();
1837 unregister_netdev(dev);
1838 sis190_release_board(pdev);
1839 pci_set_drvdata(pdev, NULL);
1842 static struct pci_driver sis190_pci_driver = {
1844 .id_table = sis190_pci_tbl,
1845 .probe = sis190_init_one,
1846 .remove = __devexit_p(sis190_remove_one),
1849 static int __init sis190_init_module(void)
1851 return pci_register_driver(&sis190_pci_driver);
1854 static void __exit sis190_cleanup_module(void)
1856 pci_unregister_driver(&sis190_pci_driver);
1859 module_init(sis190_init_module);
1860 module_exit(sis190_cleanup_module);