err.no Git - linux-2.6/commitdiff
Merge refs/heads/upstream from master.kernel.org:/pub/scm/linux/kernel/git/jgarzik...
author Linus Torvalds <torvalds@g5.osdl.org>
Tue, 30 Aug 2005 18:16:30 +0000 (11:16 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
Tue, 30 Aug 2005 18:16:30 +0000 (11:16 -0700)
30 files changed:
Documentation/networking/cxgb.txt [new file with mode: 0644]
MAINTAINERS
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/chelsio/Makefile [new file with mode: 0644]
drivers/net/chelsio/common.h [new file with mode: 0644]
drivers/net/chelsio/cphy.h [new file with mode: 0644]
drivers/net/chelsio/cpl5_cmd.h [new file with mode: 0644]
drivers/net/chelsio/cxgb2.c [new file with mode: 0644]
drivers/net/chelsio/elmer0.h [new file with mode: 0644]
drivers/net/chelsio/espi.c [new file with mode: 0644]
drivers/net/chelsio/espi.h [new file with mode: 0644]
drivers/net/chelsio/gmac.h [new file with mode: 0644]
drivers/net/chelsio/mv88x201x.c [new file with mode: 0644]
drivers/net/chelsio/pm3393.c [new file with mode: 0644]
drivers/net/chelsio/regs.h [new file with mode: 0644]
drivers/net/chelsio/sge.c [new file with mode: 0644]
drivers/net/chelsio/sge.h [new file with mode: 0644]
drivers/net/chelsio/subr.c [new file with mode: 0644]
drivers/net/chelsio/suni1x10gexp_regs.h [new file with mode: 0644]
drivers/net/e100.c
drivers/net/sis190.c [new file with mode: 0644]
drivers/net/tulip/Kconfig
drivers/net/tulip/Makefile
drivers/net/tulip/media.c
drivers/net/tulip/timer.c
drivers/net/tulip/tulip.h
drivers/net/tulip/tulip_core.c
drivers/net/tulip/uli526x.c [new file with mode: 0644]
include/linux/pci_ids.h

diff --git a/Documentation/networking/cxgb.txt b/Documentation/networking/cxgb.txt
new file mode 100644 (file)
index 0000000..7632463
--- /dev/null
@@ -0,0 +1,352 @@
+                 Chelsio N210 10Gb Ethernet Network Controller
+
+                         Driver Release Notes for Linux
+
+                                 Version 2.1.1
+
+                                 June 20, 2005
+
+CONTENTS
+========
+ INTRODUCTION
+ FEATURES
+ PERFORMANCE
+ DRIVER MESSAGES
+ KNOWN ISSUES
+ SUPPORT
+
+
+INTRODUCTION
+============
+
+ This document describes the Linux driver for the Chelsio 10Gb Ethernet
+ Network Controller. The driver supports the Chelsio N210 NIC and is backward
+ compatible with the Chelsio N110 model 10Gb NIC.
+
+
+FEATURES
+========
+
+ Adaptive Interrupts (adaptive-rx)
+ ---------------------------------
+
+  This feature provides an adaptive algorithm that adjusts the interrupt
+  coalescing parameters, allowing the driver to dynamically adapt the latency
+  settings to achieve the highest performance during various types of network
+  load.
+
+  The interface used to control this feature is ethtool. Please see the
+  ethtool manpage for additional usage information.
+
+  By default, adaptive-rx is disabled.
+  To enable adaptive-rx:
+
+      ethtool -C <interface> adaptive-rx on
+
+  To disable adaptive-rx, use ethtool:
+
+      ethtool -C <interface> adaptive-rx off
+
+  After disabling adaptive-rx, the timer latency value will be set to 50us.
+  You may set the timer latency after disabling adaptive-rx:
+
+      ethtool -C <interface> rx-usecs <microseconds>
+
+  An example to set the timer latency value to 100us on eth0:
+
+      ethtool -C eth0 rx-usecs 100
+
+  You may also provide a timer latency value while disabling adaptive-rx:
+
+      ethtool -C <interface> adaptive-rx off rx-usecs <microseconds>
+
+  If adaptive-rx is disabled and a timer latency value is specified, the timer
+  will be set to the specified value until changed by the user or until
+  adaptive-rx is enabled.
+
+  To view the status of the adaptive-rx and timer latency values:
+
+      ethtool -c <interface>
+
+
+ TCP Segmentation Offloading (TSO) Support
+ -----------------------------------------
+
+  This feature, also known as "large send", enables a system's protocol stack
+  to offload portions of outbound TCP processing to a network interface card,
+  thereby reducing system CPU utilization and enhancing performance.
+
+  The interface used to control this feature is ethtool version 1.8 or higher.
+  Please see the ethtool manpage for additional usage information.
+
+  By default, TSO is enabled.
+  To disable TSO:
+
+      ethtool -K <interface> tso off
+
+  To enable TSO:
+
+      ethtool -K <interface> tso on
+
+  To view the status of TSO:
+
+      ethtool -k <interface>
+
+
+PERFORMANCE
+===========
+
+ The following information is provided as an example of how to change system
+ parameters for "performance tuning" an what value to use. You may or may not
+ want to change these system parameters, depending on your server/workstation
+ application. Doing so is not warranted in any way by Chelsio Communications,
+ and is done at "YOUR OWN RISK". Chelsio will not be held responsible for loss
+ of data or damage to equipment.
+
+ Your distribution may have a different way of doing things, or you may prefer
+ a different method. These commands are shown only to provide an example of
+ what to do and are by no means definitive.
+
+ Any of the following system changes will only last until you reboot
+ your system. You may want to write a script that runs at boot-up which
+ includes the optimal settings for your system.
+
+  Setting PCI Latency Timer:
+      setpci -d 1425:* 0x0c.l=0x0000F800
+
+  Disabling TCP timestamp:
+      sysctl -w net.ipv4.tcp_timestamps=0
+
+  Disabling SACK:
+      sysctl -w net.ipv4.tcp_sack=0
+
+  Setting large number of incoming connection requests:
+      sysctl -w net.ipv4.tcp_max_syn_backlog=3000
+
+  Setting maximum receive socket buffer size:
+      sysctl -w net.core.rmem_max=1024000
+
+  Setting maximum send socket buffer size:
+      sysctl -w net.core.wmem_max=1024000
+
+  Set smp_affinity (on a multiprocessor system) to a single CPU:
+      echo 1 > /proc/irq/<interrupt_number>/smp_affinity
+
+  Setting default receive socket buffer size:
+      sysctl -w net.core.rmem_default=524287
+
+  Setting default send socket buffer size:
+      sysctl -w net.core.wmem_default=524287
+
+  Setting maximum option memory buffers:
+      sysctl -w net.core.optmem_max=524287
+
+  Setting maximum backlog (# of unprocessed packets before kernel drops):
+      sysctl -w net.core.netdev_max_backlog=300000
+
+  Setting TCP read buffers (min/default/max):
+      sysctl -w net.ipv4.tcp_rmem="10000000 10000000 10000000"
+
+  Setting TCP write buffers (min/pressure/max):
+      sysctl -w net.ipv4.tcp_wmem="10000000 10000000 10000000"
+
+  Setting TCP buffer space (min/pressure/max):
+      sysctl -w net.ipv4.tcp_mem="10000000 10000000 10000000"
+
+  TCP window size for single connections:
+   The receive buffer (RX_WINDOW) size must be at least as large as the
+   Bandwidth-Delay Product of the communication link between the sender and
+   receiver. Due to variations in RTT, you may want to increase the buffer
+   size up to 2 times the Bandwidth-Delay Product. Reference page 289 of
+   "TCP/IP Illustrated, Volume 1, The Protocols" by W. Richard Stevens.
+   At 10Gb speeds, use the following formula:
+       RX_WINDOW >= 1.25MBytes * RTT(in milliseconds)
+       Example for RTT with 100us: RX_WINDOW = (1,250,000 * 0.1) = 125,000
+   RX_WINDOW sizes of 256KB - 512KB should be sufficient.
+   Setting the min, max, and default receive buffer (RX_WINDOW) size:
+       sysctl -w net.ipv4.tcp_rmem="<min> <default> <max>"
+
+  TCP window size for multiple connections:
+   The receive buffer (RX_WINDOW) size may be calculated the same way as for
+   single connections, but should be divided by the number of connections. The
+   smaller window prevents congestion and facilitates better pacing,
+   especially if/when MAC-level flow control does not work well or when it is
+   not supported on the machine. Experimentation may be necessary to attain
+   the correct value. This method is provided as a starting point for the
+   correct receive buffer size (see the illustrative calculation below).
+   Setting the min, max, and default receive buffer (RX_WINDOW) size is
+   performed in the same manner as for a single connection.
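+
+   As an illustrative calculation (not part of the original notes): reusing the
+   single-connection example above, a 100us RTT gives an aggregate RX_WINDOW of
+   about 125,000 bytes; spread over 10 concurrent connections, that works out
+   to roughly 125,000 / 10 = 12,500 bytes per connection as a starting point,
+   to be tuned from there.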
+
+
+DRIVER MESSAGES
+===============
+
+ The following messages are the most common messages the driver logs via
+ syslog. They may be found in /var/log/messages.
+
+  Driver up:
+     Chelsio Network Driver - version 2.1.1
+
+  NIC detected:
+     eth#: Chelsio N210 1x10GBaseX NIC (rev #), PCIX 133MHz/64-bit
+
+  Link up:
+     eth#: link is up at 10 Gbps, full duplex
+
+  Link down:
+     eth#: link is down
+
+
+KNOWN ISSUES
+============
+
+ These issues have been identified during testing. The following information
+ is provided as workarounds for the problems. In some cases, a problem is
+ inherent to Linux or to a particular Linux distribution and/or hardware
+ platform.
+
+  1. Large number of TCP retransmits on a multiprocessor (SMP) system.
+
+      On a system with multiple CPUs, the interrupt (IRQ) for the network
+      controller may be bound to more than one CPU. This will cause TCP
+      retransmits if packet data are split across different CPUs
+      and reassembled in a different order than expected.
+
+      To eliminate the TCP retransmits, set smp_affinity on the particular
+      interrupt to a single CPU. You can locate the interrupt (IRQ) used on
+      the N110/N210 by using ifconfig:
+          ifconfig <dev_name> | grep Interrupt
+      Set the smp_affinity to a single CPU:
+          echo 1 > /proc/irq/<interrupt_number>/smp_affinity
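+      The value written is a hexadecimal CPU bitmask: "1" selects CPU0, "2"
+      selects CPU1, "4" selects CPU2, and so on. As an illustrative example,
+      to bind the interrupt to CPU1 instead:
+          echo 2 > /proc/irq/<interrupt_number>/smp_affinity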
+
+      It is highly suggested that you do not run the irqbalance daemon on your
+      system, as this will change any smp_affinity setting you have applied.
+      The irqbalance daemon runs at a 10-second interval and binds interrupts
+      to the least-loaded CPU as determined by the daemon. To disable this daemon:
+          chkconfig --level 2345 irqbalance off
+
+      By default, some Linux distributions enable the kernel irqbalance
+      feature, which performs the same function as the daemon. To disable
+      this feature, add the following option to your kernel boot line:
+          noirqbalance
+
+          Example using the Grub bootloader:
+              title Red Hat Enterprise Linux AS (2.4.21-27.ELsmp)
+              root (hd0,0)
+              kernel /vmlinuz-2.4.21-27.ELsmp ro root=/dev/hda3 noirqbalance
+              initrd /initrd-2.4.21-27.ELsmp.img
+
+  2. After running insmod, the driver is loaded and the incorrect network
+     interface is brought up without running ifup.
+
+      When using 2.4.x kernels, including RHEL kernels, the Linux kernel
+      invokes a script named "hotplug". This script is primarily used to
+      automatically bring up USB devices when they are plugged in; however,
+      the script also attempts to automatically bring up a network interface
+      after loading the kernel module. The hotplug script does this by scanning
+      the ifcfg-eth# config files in /etc/sysconfig/network-scripts, looking
+      for HWADDR=<mac_address>.
+
+      If the hotplug script does not find the HWADDR within any of the
+      ifcfg-eth# files, it will bring up the device with the next available
+      interface name. If this interface is already configured for a different
+      network card, your new interface will have an incorrect IP address and
+      network settings.
+
+      To solve this issue, you can add the HWADDR=<mac_address> key to the
+      interface config file of your network controller.
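+
+      For example (an illustrative config, not from the original notes), the
+      file /etc/sysconfig/network-scripts/ifcfg-eth0 might contain:
+          DEVICE=eth0
+          HWADDR=<mac_address of the N110/N210>
+          BOOTPROTO=static
+          IPADDR=192.168.1.100
+          NETMASK=255.255.255.0
+          ONBOOT=yes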
+
+      To disable this "hotplug" feature, you may add the driver (module name)
+      to the "blacklist" file located in /etc/hotplug. It has been noted that
+      this does not work for network devices because the net.agent script
+      does not use the blacklist file. Simply remove, or rename, the net.agent
+      script located in /etc/hotplug to disable this feature.
+
+  3. Transport Protocol (TP) hangs when running heavy multi-connection traffic
+     on an AMD Opteron system with HyperTransport PCI-X Tunnel chipset.
+
+      If your AMD Opteron system uses the AMD-8131 HyperTransport PCI-X Tunnel
+      chipset, you may experience the "133-MHz Mode Split Completion Data
+      Corruption" bug identified by AMD while using a 133MHz PCI-X card on the
+      PCI-X bus.
+
+      AMD states, "Under highly specific conditions, the AMD-8131 PCI-X Tunnel
+      can provide stale data via split completion cycles to a PCI-X card that
+      is operating at 133 Mhz", causing data corruption.
+
+      AMD provides three workarounds for this problem; however, Chelsio
+      recommends the first option for the best performance with this bug:
+
+        For 133Mhz secondary bus operation, limit the transaction length and
+        the number of outstanding transactions, via BIOS configuration
+        programming of the PCI-X card, to the following:
+
+           Data Length (bytes): 1k
+           Total allowed outstanding transactions: 2
+
+      Please refer to AMD 8131-HT/PCI-X Errata 26310 Rev 3.08 August 2004,
+      section 56, "133-MHz Mode Split Completion Data Corruption" for more
+      details on this bug and the workarounds suggested by AMD.
+
+      It may be possible to work outside AMD's recommended PCI-X settings; try
+      increasing the Data Length to 2k bytes for increased performance. If you
+      have issues with these settings, please revert to the "safe" settings
+      and reproduce the problem before submitting a bug report or asking for
+      support.
+
+      NOTE: The default setting on most systems is 8 outstanding transactions
+            and 2k bytes data length.
+
+  4. On multiprocessor systems, it has been noted that an application which
+     is handling 10Gb networking can be migrated between CPUs, causing degraded
+     and/or unstable performance.
+
+      If running on an SMP system and taking performance measurements, it
+      is suggested you either run the latest netperf-2.4.0+ or use a binding
+      tool such as Tim Hockin's procstate utilities (runon)
+      <http://www.hockin.org/~thockin/procstate/>.
+
+      Binding netserver and netperf (or other applications) to particular
+      CPUs can make a significant difference in performance measurements.
+      You may need to experiment with which CPU to bind the application to
+      in order to achieve the best performance for your system.
+
+      If you are developing an application designed for 10Gb networking,
+      please keep in mind that you may want to use the sched_setaffinity and
+      sched_getaffinity system calls to bind your application to specific
+      CPUs (a minimal sketch appears at the end of this section).
+
+      If you are just running user-space applications such as ftp, telnet,
+      etc., you may want to try the runon tool provided by Tim Hockin's
+      procstate utility. You could also try binding the interface to a
+      particular CPU: runon 0 ifup eth0
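+
+      The fragment below is a minimal user-space sketch (illustrative only,
+      not part of the driver or of these release notes) showing how an
+      application might bind itself to a single CPU with sched_setaffinity();
+      the file name and error handling are assumptions made for the example:
+
+          /* pin_self.c - bind the calling process to one CPU (sketch) */
+          #define _GNU_SOURCE
+          #include <sched.h>
+          #include <stdio.h>
+          #include <stdlib.h>
+
+          int main(int argc, char **argv)
+          {
+                  int cpu = (argc > 1) ? atoi(argv[1]) : 0;  /* CPU index */
+                  cpu_set_t mask;
+
+                  CPU_ZERO(&mask);
+                  CPU_SET(cpu, &mask);
+
+                  /* pid 0 means "the calling process" */
+                  if (sched_setaffinity(0, sizeof(mask), &mask) != 0) {
+                          perror("sched_setaffinity");
+                          return 1;
+                  }
+                  printf("bound to CPU %d\n", cpu);
+                  /* ... the 10Gb networking workload would start here ... */
+                  return 0;
+          }
+
+      Compile with "gcc -o pin_self pin_self.c"; running "./pin_self 0" binds
+      the process to CPU0 before the real work begins.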
+
+
+SUPPORT
+=======
+
+ If you have problems with the software or hardware, please contact our
+ customer support team via email at support@chelsio.com or check our website
+ at http://www.chelsio.com
+
+===============================================================================
+
+ Chelsio Communications
+ 370 San Aleso Ave.
+ Suite 100
+ Sunnyvale, CA 94085
+ http://www.chelsio.com
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as
+published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+
+ Copyright (c) 2003-2005 Chelsio Communications. All rights reserved.
+
+===============================================================================
diff --git a/MAINTAINERS b/MAINTAINERS
index 564a03e61a0c4b2322136ab9d96b3f121391ee83..e8214fe53a5e919feb0e640b189d3b6126a991e3 100644 (file)
@@ -2092,6 +2092,12 @@ M:       support@simtec.co.uk
 W:     http://www.simtec.co.uk/products/EB2410ITX/
 S:     Supported
 
+SIS 190 ETHERNET DRIVER
+P:     Francois Romieu
+M:     romieu@fr.zoreil.com
+L:     netdev@vger.kernel.org
+S:     Maintained
+
 SIS 5513 IDE CONTROLLER DRIVER
 P:     Lionel Bouton
 M:     Lionel.Bouton@inet6.fr
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 79e8aa6f2b9edfc215e43b1a1b0a934274a680f4..e0239a10d3250ff44f61b91c21f6d68584d1a9fd 100644 (file)
@@ -1923,6 +1923,17 @@ config R8169_VLAN
          
          If in doubt, say Y.
 
+config SIS190
+       tristate "SiS190 gigabit ethernet support"
+       depends on PCI
+       select CRC32
+       select MII
+       ---help---
+         Say Y here if you have a SiS 190 PCI Gigabit Ethernet adapter.
+
+         To compile this driver as a module, choose M here: the module
+         will be called sis190.  This is recommended.
+
 config SKGE
        tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)"
        depends on PCI && EXPERIMENTAL
@@ -2093,6 +2104,25 @@ endmenu
 menu "Ethernet (10000 Mbit)"
        depends on !UML
 
+config CHELSIO_T1
+        tristate "Chelsio 10Gb Ethernet support"
+        depends on PCI
+        help
+          This driver supports the Chelsio N110 and N210 model 10Gb Ethernet
+          cards. More information about adapter features and performance
+          tuning is in <file:Documentation/networking/cxgb.txt>.
+
+          For general information about Chelsio and our products, visit
+          our website at <http://www.chelsio.com>.
+
+          For customer support, please visit our customer support page at
+          <http://www.chelsio.com/support.htm>.
+
+          Please send feedback to <linux-bugs@chelsio.com>.
+
+          To compile this driver as a module, choose M here: the module
+          will be called cxgb.
+
 config IXGB
        tristate "Intel(R) PRO/10GbE support"
        depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a369ae284a9a23cd5aaa7590ea584573df6dbe78..5baafcd5561086de53a6b7c8dd54850ea5be32b4 100644 (file)
@@ -9,6 +9,7 @@ endif
 obj-$(CONFIG_E1000) += e1000/
 obj-$(CONFIG_IBM_EMAC) += ibm_emac/
 obj-$(CONFIG_IXGB) += ixgb/
+obj-$(CONFIG_CHELSIO_T1) += chelsio/
 obj-$(CONFIG_BONDING) += bonding/
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 
@@ -42,6 +43,7 @@ obj-$(CONFIG_EEPRO100) += eepro100.o
 obj-$(CONFIG_E100) += e100.o
 obj-$(CONFIG_TLAN) += tlan.o
 obj-$(CONFIG_EPIC100) += epic100.o
+obj-$(CONFIG_SIS190) += sis190.o
 obj-$(CONFIG_SIS900) += sis900.o
 obj-$(CONFIG_YELLOWFIN) += yellowfin.o
 obj-$(CONFIG_ACENIC) += acenic.o
diff --git a/drivers/net/chelsio/Makefile b/drivers/net/chelsio/Makefile
new file mode 100644 (file)
index 0000000..91e9278
--- /dev/null
@@ -0,0 +1,11 @@
+#
+# Chelsio 10Gb NIC driver for Linux.
+#
+
+obj-$(CONFIG_CHELSIO_T1) += cxgb.o
+
+EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/chelsio $(DEBUG_FLAGS)
+
+
+cxgb-objs := cxgb2.o espi.o pm3393.o sge.o subr.o mv88x201x.o
+
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
new file mode 100644 (file)
index 0000000..f093488
--- /dev/null
@@ -0,0 +1,314 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: common.h                                                            *
+ * $Revision: 1.21 $                                                         *
+ * $Date: 2005/06/22 00:43:25 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_COMMON_H_
+#define _CXGB_COMMON_H_
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/pci_ids.h>
+
+#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
+#define DRV_NAME "cxgb"
+#define DRV_VERSION "2.1.1"
+#define PFX      DRV_NAME ": "
+
+#define CH_ERR(fmt, ...)   printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
+#define CH_WARN(fmt, ...)  printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
+#define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
+
+#define CH_DEVICE(devid, ssid, idx) \
+       { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
+
+#define SUPPORTED_PAUSE       (1 << 13)
+#define SUPPORTED_LOOPBACK    (1 << 15)
+
+#define ADVERTISED_PAUSE      (1 << 13)
+#define ADVERTISED_ASYM_PAUSE (1 << 14)
+
+typedef struct adapter adapter_t;
+
+void t1_elmer0_ext_intr(adapter_t *adapter);
+void t1_link_changed(adapter_t *adapter, int port_id, int link_status,
+                       int speed, int duplex, int fc);
+
+struct t1_rx_mode {
+       struct net_device *dev;
+       u32 idx;
+       struct dev_mc_list *list;
+};
+
+#define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC)
+#define t1_rx_mode_allmulti(rm)        (rm->dev->flags & IFF_ALLMULTI)
+#define t1_rx_mode_mc_cnt(rm)  (rm->dev->mc_count)
+
+static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
+{
+       u8 *addr = 0;
+
+       if (rm->idx++ < rm->dev->mc_count) {
+               addr = rm->list->dmi_addr;
+               rm->list = rm->list->next;
+       }
+       return addr;
+}
+
+#define        MAX_NPORTS 4
+
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
+
+enum {
+       CHBT_BOARD_N110,
+       CHBT_BOARD_N210
+};
+
+enum {
+       CHBT_TERM_T1,
+       CHBT_TERM_T2
+};
+
+enum {
+       CHBT_MAC_PM3393,
+};
+
+enum {
+       CHBT_PHY_88X2010,
+};
+
+enum {
+       PAUSE_RX      = 1 << 0,
+       PAUSE_TX      = 1 << 1,
+       PAUSE_AUTONEG = 1 << 2
+};
+
+/* Revisions of T1 chip */
+enum {
+       TERM_T1A   = 0,
+       TERM_T1B   = 1,
+       TERM_T2    = 3
+};
+
+struct sge_params {
+       unsigned int cmdQ_size[2];
+       unsigned int freelQ_size[2];
+       unsigned int large_buf_capacity;
+       unsigned int rx_coalesce_usecs;
+       unsigned int last_rx_coalesce_raw;
+       unsigned int default_rx_coalesce_usecs;
+       unsigned int sample_interval_usecs;
+       unsigned int coalesce_enable;
+       unsigned int polling;
+};
+
+struct chelsio_pci_params {
+       unsigned short speed;
+       unsigned char  width;
+       unsigned char  is_pcix;
+};
+
+struct adapter_params {
+       struct sge_params sge;
+       struct chelsio_pci_params pci;
+
+       const struct board_info *brd_info;
+
+       unsigned int   nports;          /* # of ethernet ports */
+       unsigned int   stats_update_period;
+       unsigned short chip_revision;
+       unsigned char  chip_version;
+};
+
+struct link_config {
+       unsigned int   supported;        /* link capabilities */
+       unsigned int   advertising;      /* advertised capabilities */
+       unsigned short requested_speed;  /* speed user has requested */
+       unsigned short speed;            /* actual link speed */
+       unsigned char  requested_duplex; /* duplex user has requested */
+       unsigned char  duplex;           /* actual link duplex */
+       unsigned char  requested_fc;     /* flow control user has requested */
+       unsigned char  fc;               /* actual link flow control */
+       unsigned char  autoneg;          /* autonegotiating? */
+};
+
+struct cmac;
+struct cphy;
+
+struct port_info {
+       struct net_device *dev;
+       struct cmac *mac;
+       struct cphy *phy;
+       struct link_config link_config;
+       struct net_device_stats netstats;
+};
+
+struct sge;
+struct peespi;
+
+struct adapter {
+       u8 *regs;
+       struct pci_dev *pdev;
+       unsigned long registered_device_map;
+       unsigned long open_device_map;
+       unsigned long flags;
+
+       const char *name;
+       int msg_enable;
+       u32 mmio_len;
+
+       struct work_struct ext_intr_handler_task;
+       struct adapter_params params;
+
+       struct vlan_group *vlan_grp;
+
+       /* Terminator modules. */
+       struct sge    *sge;
+       struct peespi *espi;
+
+       struct port_info port[MAX_NPORTS];
+       struct work_struct stats_update_task;
+       struct timer_list stats_update_timer;
+
+       struct semaphore mib_mutex;
+       spinlock_t tpi_lock;
+       spinlock_t work_lock;
+       /* guards async operations */
+       spinlock_t async_lock ____cacheline_aligned;
+       u32 slow_intr_mask;
+};
+
+enum {                                           /* adapter flags */
+       FULL_INIT_DONE        = 1 << 0,
+       TSO_CAPABLE           = 1 << 2,
+       TCP_CSUM_CAPABLE      = 1 << 3,
+       UDP_CSUM_CAPABLE      = 1 << 4,
+       VLAN_ACCEL_CAPABLE    = 1 << 5,
+       RX_CSUM_ENABLED       = 1 << 6,
+};
+
+struct mdio_ops;
+struct gmac;
+struct gphy;
+
+struct board_info {
+       unsigned char           board;
+       unsigned char           port_number;
+       unsigned long           caps;
+       unsigned char           chip_term;
+       unsigned char           chip_mac;
+       unsigned char           chip_phy;
+       unsigned int            clock_core;
+       unsigned int            clock_mc3;
+       unsigned int            clock_mc4;
+       unsigned int            espi_nports;
+       unsigned int            clock_cspi;
+       unsigned int            clock_elmer0;
+       unsigned char           mdio_mdien;
+       unsigned char           mdio_mdiinv;
+       unsigned char           mdio_mdc;
+       unsigned char           mdio_phybaseaddr;
+       struct gmac            *gmac;
+       struct gphy            *gphy;
+       struct mdio_ops        *mdio_ops;
+       const char             *desc;
+};
+
+extern struct pci_device_id t1_pci_tbl[];
+
+static inline int adapter_matches_type(const adapter_t *adapter,
+                                      int version, int revision)
+{
+       return adapter->params.chip_version == version &&
+              adapter->params.chip_revision == revision;
+}
+
+#define t1_is_T1B(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1B)
+#define is_T2(adap)     adapter_matches_type(adap, CHBT_TERM_T2, TERM_T2)
+
+/* Returns true if an adapter supports VLAN acceleration and TSO */
+static inline int vlan_tso_capable(const adapter_t *adapter)
+{
+       return !t1_is_T1B(adapter);
+}
+
+#define for_each_port(adapter, iter) \
+       for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
+#define board_info(adapter) ((adapter)->params.brd_info)
+#define is_10G(adapter) (board_info(adapter)->caps & SUPPORTED_10000baseT_Full)
+
+static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
+{
+       return board_info(adap)->clock_core / 1000000;
+}
+
+extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
+
+extern void t1_interrupts_enable(adapter_t *adapter);
+extern void t1_interrupts_disable(adapter_t *adapter);
+extern void t1_interrupts_clear(adapter_t *adapter);
+extern int elmer0_ext_intr_handler(adapter_t *adapter);
+extern int t1_slow_intr_handler(adapter_t *adapter);
+
+extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+extern const struct board_info *t1_get_board_info(unsigned int board_id);
+extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
+                                                   unsigned short ssid);
+extern int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data);
+extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+                    struct adapter_params *p);
+extern int t1_init_hw_modules(adapter_t *adapter);
+extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
+extern void t1_free_sw_modules(adapter_t *adapter);
+extern void t1_fatal_err(adapter_t *adapter);
+
+extern void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable);
+extern void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable);
+extern void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable);
+
+#endif /* _CXGB_COMMON_H_ */
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h
new file mode 100644 (file)
index 0000000..3412342
--- /dev/null
@@ -0,0 +1,148 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: cphy.h                                                              *
+ * $Revision: 1.7 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_CPHY_H_
+#define _CXGB_CPHY_H_
+
+#include "common.h"
+
+struct mdio_ops {
+       void (*init)(adapter_t *adapter, const struct board_info *bi);
+       int  (*read)(adapter_t *adapter, int phy_addr, int mmd_addr,
+                    int reg_addr, unsigned int *val);
+       int  (*write)(adapter_t *adapter, int phy_addr, int mmd_addr,
+                     int reg_addr, unsigned int val);
+};
+
+/* PHY interrupt types */
+enum {
+       cphy_cause_link_change = 0x1,
+       cphy_cause_error = 0x2
+};
+
+struct cphy;
+
+/* PHY operations */
+struct cphy_ops {
+       void (*destroy)(struct cphy *);
+       int (*reset)(struct cphy *, int wait);
+
+       int (*interrupt_enable)(struct cphy *);
+       int (*interrupt_disable)(struct cphy *);
+       int (*interrupt_clear)(struct cphy *);
+       int (*interrupt_handler)(struct cphy *);
+
+       int (*autoneg_enable)(struct cphy *);
+       int (*autoneg_disable)(struct cphy *);
+       int (*autoneg_restart)(struct cphy *);
+
+       int (*advertise)(struct cphy *phy, unsigned int advertise_map);
+       int (*set_loopback)(struct cphy *, int on);
+       int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
+       int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
+                              int *duplex, int *fc);
+};
+
+/* A PHY instance */
+struct cphy {
+       int addr;                            /* PHY address */
+       adapter_t *adapter;                  /* associated adapter */
+       struct cphy_ops *ops;                /* PHY operations */
+       int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr,
+                        int reg_addr, unsigned int *val);
+       int (*mdio_write)(adapter_t *adapter, int phy_addr, int mmd_addr,
+                         int reg_addr, unsigned int val);
+       struct cphy_instance *instance;
+};
+
+/* Convenience MDIO read/write wrappers */
+static inline int mdio_read(struct cphy *cphy, int mmd, int reg,
+                           unsigned int *valp)
+{
+       return cphy->mdio_read(cphy->adapter, cphy->addr, mmd, reg, valp);
+}
+
+static inline int mdio_write(struct cphy *cphy, int mmd, int reg,
+                            unsigned int val)
+{
+       return cphy->mdio_write(cphy->adapter, cphy->addr, mmd, reg, val);
+}
+
+static inline int simple_mdio_read(struct cphy *cphy, int reg,
+                                  unsigned int *valp)
+{
+       return mdio_read(cphy, 0, reg, valp);
+}
+
+static inline int simple_mdio_write(struct cphy *cphy, int reg,
+                                   unsigned int val)
+{
+       return mdio_write(cphy, 0, reg, val);
+}
+
+/* Convenience initializer */
+static inline void cphy_init(struct cphy *phy, adapter_t *adapter,
+                            int phy_addr, struct cphy_ops *phy_ops,
+                            struct mdio_ops *mdio_ops)
+{
+       phy->adapter = adapter;
+       phy->addr    = phy_addr;
+       phy->ops     = phy_ops;
+       if (mdio_ops) {
+               phy->mdio_read  = mdio_ops->read;
+               phy->mdio_write = mdio_ops->write;
+       }
+}
+
+/* Operations of the PHY-instance factory */
+struct gphy {
+       /* Construct a PHY instance with the given PHY address */
+       struct cphy *(*create)(adapter_t *adapter, int phy_addr,
+                              struct mdio_ops *mdio_ops);
+
+       /*
+        * Reset the PHY chip.  This resets the whole PHY chip, not individual
+        * ports.
+        */
+       int (*reset)(adapter_t *adapter);
+};
+
+extern struct gphy t1_mv88x201x_ops;
+extern struct gphy t1_dummy_phy_ops;
+
+#endif /* _CXGB_CPHY_H_ */
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h
new file mode 100644 (file)
index 0000000..27925e4
--- /dev/null
@@ -0,0 +1,145 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: cpl5_cmd.h                                                          *
+ * $Revision: 1.6 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_CPL5_CMD_H_
+#define _CXGB_CPL5_CMD_H_
+
+#include <asm/byteorder.h>
+
+#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
+#error "Adjust your <asm/byteorder.h> defines"
+#endif
+
+enum CPL_opcode {
+       CPL_RX_PKT            = 0xAD,
+       CPL_TX_PKT            = 0xB2,
+       CPL_TX_PKT_LSO        = 0xB6,
+};
+
+enum {                /* TX_PKT_LSO ethernet types */
+       CPL_ETH_II,
+       CPL_ETH_II_VLAN,
+       CPL_ETH_802_3,
+       CPL_ETH_802_3_VLAN
+};
+
+struct cpl_rx_data {
+       u32 rsvd0;
+       u32 len;
+       u32 seq;
+       u16 urg;
+       u8  rsvd1;
+       u8  status;
+};
+
+/*
+ * We want this header's alignment to be no more stringent than 2-byte aligned.
+ * All fields are u8 or u16 except for the length.  However that field is not
+ * used so we break it into 2 16-bit parts to easily meet our alignment needs.
+ */
+struct cpl_tx_pkt {
+       u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       u8 iff:4;
+       u8 ip_csum_dis:1;
+       u8 l4_csum_dis:1;
+       u8 vlan_valid:1;
+       u8 rsvd:1;
+#else
+       u8 rsvd:1;
+       u8 vlan_valid:1;
+       u8 l4_csum_dis:1;
+       u8 ip_csum_dis:1;
+       u8 iff:4;
+#endif
+       u16 vlan;
+       u16 len_hi;
+       u16 len_lo;
+};
+
+struct cpl_tx_pkt_lso {
+       u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       u8 iff:4;
+       u8 ip_csum_dis:1;
+       u8 l4_csum_dis:1;
+       u8 vlan_valid:1;
+       u8 rsvd:1;
+#else
+       u8 rsvd:1;
+       u8 vlan_valid:1;
+       u8 l4_csum_dis:1;
+       u8 ip_csum_dis:1;
+       u8 iff:4;
+#endif
+       u16 vlan;
+       u32 len;
+
+       u32 rsvd2;
+       u8 rsvd3;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       u8 tcp_hdr_words:4;
+       u8 ip_hdr_words:4;
+#else
+       u8 ip_hdr_words:4;
+       u8 tcp_hdr_words:4;
+#endif
+       u16 eth_type_mss;
+};
+
+struct cpl_rx_pkt {
+       u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       u8 iff:4;
+       u8 csum_valid:1;
+       u8 bad_pkt:1;
+       u8 vlan_valid:1;
+       u8 rsvd:1;
+#else
+       u8 rsvd:1;
+       u8 vlan_valid:1;
+       u8 bad_pkt:1;
+       u8 csum_valid:1;
+       u8 iff:4;
+#endif
+       u16 csum;
+       u16 vlan;
+       u16 len;
+};
+
+#endif /* _CXGB_CPL5_CMD_H_ */
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
new file mode 100644 (file)
index 0000000..28ae478
--- /dev/null
@@ -0,0 +1,1256 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: cxgb2.c                                                             *
+ * $Revision: 1.25 $                                                         *
+ * $Date: 2005/06/22 00:43:25 $                                              *
+ * Description:                                                              *
+ *  Chelsio 10Gb Ethernet Driver.                                            *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/mii.h>
+#include <linux/sockios.h>
+#include <linux/proc_fs.h>
+#include <linux/dma-mapping.h>
+#include <asm/uaccess.h>
+
+#include "cpl5_cmd.h"
+#include "regs.h"
+#include "gmac.h"
+#include "cphy.h"
+#include "sge.h"
+#include "espi.h"
+
+#ifdef work_struct
+#include <linux/tqueue.h>
+#define INIT_WORK INIT_TQUEUE
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+
+static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
+{
+       mod_timer(&ap->stats_update_timer, jiffies + secs * HZ);
+}
+
+static inline void cancel_mac_stats_update(struct adapter *ap)
+{
+       del_timer_sync(&ap->stats_update_timer);
+       flush_scheduled_tasks();
+}
+
+/*
+ * Stats update timer for 2.4.  It schedules a task to do the actual update as
+ * we need to access MAC statistics in process context.
+ */
+static void mac_stats_timer(unsigned long data)
+{
+       struct adapter *ap = (struct adapter *)data;
+
+       schedule_task(&ap->stats_update_task);
+}
+#else
+#include <linux/workqueue.h>
+
+static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
+{
+       schedule_delayed_work(&ap->stats_update_task, secs * HZ);
+}
+
+static inline void cancel_mac_stats_update(struct adapter *ap)
+{
+       cancel_delayed_work(&ap->stats_update_task);
+}
+#endif
+
+#define MAX_CMDQ_ENTRIES 16384
+#define MAX_CMDQ1_ENTRIES 1024
+#define MAX_RX_BUFFERS 16384
+#define MAX_RX_JUMBO_BUFFERS 16384
+#define MAX_TX_BUFFERS_HIGH    16384U
+#define MAX_TX_BUFFERS_LOW     1536U
+#define MIN_FL_ENTRIES 32
+
+#define PORT_MASK ((1 << MAX_NPORTS) - 1)
+
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+                        NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+                        NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+/*
+ * The EEPROM is actually bigger but only the first few bytes are used so we
+ * only report those.
+ */
+#define EEPROM_SIZE 32
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("GPL");
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+MODULE_PARM(dflt_msg_enable, "i");
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");
+
+
+static const char pci_speed[][4] = {
+       "33", "66", "100", "133"
+};
+
+/*
+ * Setup MAC to receive the types of packets we want.
+ */
+static void t1_set_rxmode(struct net_device *dev)
+{
+       struct adapter *adapter = dev->priv;
+       struct cmac *mac = adapter->port[dev->if_port].mac;
+       struct t1_rx_mode rm;
+
+       rm.dev = dev;
+       rm.idx = 0;
+       rm.list = dev->mc_list;
+       mac->ops->set_rx_mode(mac, &rm);
+}
+
+static void link_report(struct port_info *p)
+{
+       if (!netif_carrier_ok(p->dev))
+               printk(KERN_INFO "%s: link down\n", p->dev->name);
+       else {
+               const char *s = "10Mbps";
+
+               switch (p->link_config.speed) {
+                       case SPEED_10000: s = "10Gbps"; break;
+                       case SPEED_1000:  s = "1000Mbps"; break;
+                       case SPEED_100:   s = "100Mbps"; break;
+               }
+
+               printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
+                      p->dev->name, s,
+                      p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
+       }
+}
+
+void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
+                       int speed, int duplex, int pause)
+{
+       struct port_info *p = &adapter->port[port_id];
+
+       if (link_stat != netif_carrier_ok(p->dev)) {
+               if (link_stat)
+                       netif_carrier_on(p->dev);
+               else
+                       netif_carrier_off(p->dev);
+               link_report(p);
+
+       }
+}
+
+static void link_start(struct port_info *p)
+{
+       struct cmac *mac = p->mac;
+
+       mac->ops->reset(mac);
+       if (mac->ops->macaddress_set)
+               mac->ops->macaddress_set(mac, p->dev->dev_addr);
+       t1_set_rxmode(p->dev);
+       t1_link_start(p->phy, mac, &p->link_config);
+       mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+}
+
+static void enable_hw_csum(struct adapter *adapter)
+{
+       if (adapter->flags & TSO_CAPABLE)
+               t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */
+       t1_tp_set_tcp_checksum_offload(adapter, 1);
+}
+
+/*
+ * Things to do upon first use of a card.
+ * This must run with the rtnl lock held.
+ */
+static int cxgb_up(struct adapter *adapter)
+{
+       int err = 0;
+
+       if (!(adapter->flags & FULL_INIT_DONE)) {
+               err = t1_init_hw_modules(adapter);
+               if (err)
+                       goto out_err;
+
+               enable_hw_csum(adapter);
+               adapter->flags |= FULL_INIT_DONE;
+       }
+
+       t1_interrupts_clear(adapter);
+       if ((err = request_irq(adapter->pdev->irq,
+                              t1_select_intr_handler(adapter), SA_SHIRQ,
+                              adapter->name, adapter))) {
+               goto out_err;
+       }
+       t1_sge_start(adapter->sge);
+       t1_interrupts_enable(adapter);
+ out_err:
+       return err;
+}
+
+/*
+ * Release resources when all the ports have been stopped.
+ */
+static void cxgb_down(struct adapter *adapter)
+{
+       t1_sge_stop(adapter->sge);
+       t1_interrupts_disable(adapter);
+       free_irq(adapter->pdev->irq, adapter);
+}
+
+static int cxgb_open(struct net_device *dev)
+{
+       int err;
+       struct adapter *adapter = dev->priv;
+       int other_ports = adapter->open_device_map & PORT_MASK;
+
+       if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
+               return err;
+
+       __set_bit(dev->if_port, &adapter->open_device_map);
+       link_start(&adapter->port[dev->if_port]);
+       netif_start_queue(dev);
+       if (!other_ports && adapter->params.stats_update_period)
+               schedule_mac_stats_update(adapter,
+                                         adapter->params.stats_update_period);
+       return 0;
+}
+
+static int cxgb_close(struct net_device *dev)
+{
+       struct adapter *adapter = dev->priv;
+       struct port_info *p = &adapter->port[dev->if_port];
+       struct cmac *mac = p->mac;
+
+       netif_stop_queue(dev);
+       mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
+       netif_carrier_off(dev);
+
+       clear_bit(dev->if_port, &adapter->open_device_map);
+       if (adapter->params.stats_update_period &&
+           !(adapter->open_device_map & PORT_MASK)) {
+               /* Stop statistics accumulation. */
+               smp_mb__after_clear_bit();
+               spin_lock(&adapter->work_lock);   /* sync with update task */
+               spin_unlock(&adapter->work_lock);
+               cancel_mac_stats_update(adapter);
+       }
+
+       if (!adapter->open_device_map)
+               cxgb_down(adapter);
+       return 0;
+}
+
+static struct net_device_stats *t1_get_stats(struct net_device *dev)
+{
+       struct adapter *adapter = dev->priv;
+       struct port_info *p = &adapter->port[dev->if_port];
+       struct net_device_stats *ns = &p->netstats;
+       const struct cmac_statistics *pstats;
+
+       /* Do a full update of the MAC stats */
+       pstats = p->mac->ops->statistics_update(p->mac,
+                                                     MAC_STATS_UPDATE_FULL);
+
+       ns->tx_packets = pstats->TxUnicastFramesOK +
+               pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
+
+       ns->rx_packets = pstats->RxUnicastFramesOK +
+               pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
+
+       ns->tx_bytes = pstats->TxOctetsOK;
+       ns->rx_bytes = pstats->RxOctetsOK;
+
+       ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
+               pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
+       ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
+               pstats->RxFCSErrors + pstats->RxAlignErrors +
+               pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
+               pstats->RxSymbolErrors + pstats->RxRuntErrors;
+
+       ns->multicast  = pstats->RxMulticastFramesOK;
+       ns->collisions = pstats->TxTotalCollisions;
+
+       /* detailed rx_errors */
+       ns->rx_length_errors = pstats->RxFrameTooLongErrors +
+               pstats->RxJabberErrors;
+       ns->rx_over_errors   = 0;
+       ns->rx_crc_errors    = pstats->RxFCSErrors;
+       ns->rx_frame_errors  = pstats->RxAlignErrors;
+       ns->rx_fifo_errors   = 0;
+       ns->rx_missed_errors = 0;
+
+       /* detailed tx_errors */
+       ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
+       ns->tx_carrier_errors   = 0;
+       ns->tx_fifo_errors      = pstats->TxUnderrun;
+       ns->tx_heartbeat_errors = 0;
+       ns->tx_window_errors    = pstats->TxLateCollisions;
+       return ns;
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+       struct adapter *adapter = dev->priv;
+
+       return adapter->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+       struct adapter *adapter = dev->priv;
+
+       adapter->msg_enable = val;
+}
+
+static char stats_strings[][ETH_GSTRING_LEN] = {
+        "TxOctetsOK",
+        "TxOctetsBad",
+        "TxUnicastFramesOK",
+        "TxMulticastFramesOK",
+        "TxBroadcastFramesOK",
+        "TxPauseFrames",
+        "TxFramesWithDeferredXmissions",
+        "TxLateCollisions",
+        "TxTotalCollisions",
+        "TxFramesAbortedDueToXSCollisions",
+        "TxUnderrun",
+        "TxLengthErrors",
+        "TxInternalMACXmitError",
+        "TxFramesWithExcessiveDeferral",
+        "TxFCSErrors",
+
+        "RxOctetsOK",
+        "RxOctetsBad",
+        "RxUnicastFramesOK",
+        "RxMulticastFramesOK",
+        "RxBroadcastFramesOK",
+        "RxPauseFrames",
+        "RxFCSErrors",
+        "RxAlignErrors",
+        "RxSymbolErrors",
+        "RxDataErrors",
+        "RxSequenceErrors",
+        "RxRuntErrors",
+        "RxJabberErrors",
+        "RxInternalMACRcvError",
+        "RxInRangeLengthErrors",
+        "RxOutOfRangeLengthField",
+        "RxFrameTooLongErrors",
+
+       "TSO",
+       "VLANextractions",
+       "VLANinsertions",
+       "RxCsumGood",
+       "TxCsumOffload",
+       "RxDrops"
+
+       "respQ_empty",
+       "respQ_overflow",
+       "freelistQ_empty",
+       "pkt_too_big",
+       "pkt_mismatch",
+       "cmdQ_full0",
+       "cmdQ_full1",
+       "tx_ipfrags",
+       "tx_reg_pkts",
+       "tx_lso_pkts",
+       "tx_do_cksum",
+
+       "espi_DIP2ParityErr",
+       "espi_DIP4Err",
+       "espi_RxDrops",
+       "espi_TxDrops",
+       "espi_RxOvfl",
+       "espi_ParityErr"
+};
+
+#define T2_REGMAP_SIZE (3 * 1024)
+
+static int get_regs_len(struct net_device *dev)
+{
+       return T2_REGMAP_SIZE;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+       struct adapter *adapter = dev->priv;
+
+       strcpy(info->driver, DRV_NAME);
+       strcpy(info->version, DRV_VERSION);
+       strcpy(info->fw_version, "N/A");
+       strcpy(info->bus_info, pci_name(adapter->pdev));
+}
+
+static int get_stats_count(struct net_device *dev)
+{
+       return ARRAY_SIZE(stats_strings);
+}
+
+static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+       if (stringset == ETH_SS_STATS)
+               memcpy(data, stats_strings, sizeof(stats_strings));
+}
+
+static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+                     u64 *data)
+{
+       struct adapter *adapter = dev->priv;
+       struct cmac *mac = adapter->port[dev->if_port].mac;
+       const struct cmac_statistics *s;
+       const struct sge_port_stats *ss;
+       const struct sge_intr_counts *t;
+
+       s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
+       ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
+       t = t1_sge_get_intr_counts(adapter->sge);
+
+        *data++ = s->TxOctetsOK;
+        *data++ = s->TxOctetsBad;
+        *data++ = s->TxUnicastFramesOK;
+        *data++ = s->TxMulticastFramesOK;
+        *data++ = s->TxBroadcastFramesOK;
+        *data++ = s->TxPauseFrames;
+        *data++ = s->TxFramesWithDeferredXmissions;
+        *data++ = s->TxLateCollisions;
+        *data++ = s->TxTotalCollisions;
+        *data++ = s->TxFramesAbortedDueToXSCollisions;
+        *data++ = s->TxUnderrun;
+        *data++ = s->TxLengthErrors;
+        *data++ = s->TxInternalMACXmitError;
+        *data++ = s->TxFramesWithExcessiveDeferral;
+        *data++ = s->TxFCSErrors;
+
+        *data++ = s->RxOctetsOK;
+        *data++ = s->RxOctetsBad;
+        *data++ = s->RxUnicastFramesOK;
+        *data++ = s->RxMulticastFramesOK;
+        *data++ = s->RxBroadcastFramesOK;
+        *data++ = s->RxPauseFrames;
+        *data++ = s->RxFCSErrors;
+        *data++ = s->RxAlignErrors;
+        *data++ = s->RxSymbolErrors;
+        *data++ = s->RxDataErrors;
+        *data++ = s->RxSequenceErrors;
+        *data++ = s->RxRuntErrors;
+        *data++ = s->RxJabberErrors;
+        *data++ = s->RxInternalMACRcvError;
+        *data++ = s->RxInRangeLengthErrors;
+        *data++ = s->RxOutOfRangeLengthField;
+        *data++ = s->RxFrameTooLongErrors;
+
+       *data++ = ss->tso;
+       *data++ = ss->vlan_xtract;
+       *data++ = ss->vlan_insert;
+       *data++ = ss->rx_cso_good;
+       *data++ = ss->tx_cso;
+       *data++ = ss->rx_drops;
+
+       *data++ = (u64)t->respQ_empty;
+       *data++ = (u64)t->respQ_overflow;
+       *data++ = (u64)t->freelistQ_empty;
+       *data++ = (u64)t->pkt_too_big;
+       *data++ = (u64)t->pkt_mismatch;
+       *data++ = (u64)t->cmdQ_full[0];
+       *data++ = (u64)t->cmdQ_full[1];
+       *data++ = (u64)t->tx_ipfrags;
+       *data++ = (u64)t->tx_reg_pkts;
+       *data++ = (u64)t->tx_lso_pkts;
+       *data++ = (u64)t->tx_do_cksum;
+}
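+
+/*
+ * The espi_* labels at the end of stats_strings[] are not filled in above,
+ * so ethtool would pair them with unwritten slots.  A minimal sketch of how
+ * they could be reported, assuming this file can see struct espi_intr_counts
+ * from espi.h and that adapter->espi is set up by the time ethtool calls in
+ * (it is used the same way elsewhere in the driver):
+ *
+ *	const struct espi_intr_counts *e =
+ *		t1_espi_get_intr_counts(adapter->espi);
+ *
+ *	*data++ = e->DIP2_parity_err;
+ *	*data++ = e->DIP4_err;
+ *	*data++ = e->rx_drops;
+ *	*data++ = e->tx_drops;
+ *	*data++ = e->rx_ovflw;
+ *	*data++ = e->parity_err;
+ */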
+
+static inline void reg_block_dump(struct adapter *ap, void *buf,
+                                 unsigned int start, unsigned int end)
+{
+       u32 *p = buf + start;
+
+       for ( ; start <= end; start += sizeof(u32))
+               *p++ = readl(ap->regs + start);
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+                    void *buf)
+{
+       struct adapter *ap = dev->priv;
+
+       /*
+        * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
+        */
+       regs->version = 2;
+
+       memset(buf, 0, T2_REGMAP_SIZE);
+       reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
+}
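+
+/*
+ * Given the version scheme noted in get_regs() above, a reader of the dump
+ * can recover the two fields like this (a sketch, not part of the driver):
+ *
+ *	chip_version  = regs->version & 0x3ff;		bits 0..9
+ *	chip_revision = (regs->version >> 10) & 0x3f;	bits 10..15
+ */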
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct adapter *adapter = dev->priv;
+       struct port_info *p = &adapter->port[dev->if_port];
+
+       cmd->supported = p->link_config.supported;
+       cmd->advertising = p->link_config.advertising;
+
+       if (netif_carrier_ok(dev)) {
+               cmd->speed = p->link_config.speed;
+               cmd->duplex = p->link_config.duplex;
+       } else {
+               cmd->speed = -1;
+               cmd->duplex = -1;
+       }
+
+	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+	cmd->phy_address = p->phy->addr;
+	cmd->transceiver = XCVR_EXTERNAL;
+	cmd->autoneg = p->link_config.autoneg;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+       return 0;
+}
+
+static int speed_duplex_to_caps(int speed, int duplex)
+{
+       int cap = 0;
+
+       switch (speed) {
+       case SPEED_10:
+               if (duplex == DUPLEX_FULL)
+                       cap = SUPPORTED_10baseT_Full;
+               else
+                       cap = SUPPORTED_10baseT_Half;
+               break;
+       case SPEED_100:
+               if (duplex == DUPLEX_FULL)
+                       cap = SUPPORTED_100baseT_Full;
+               else
+                       cap = SUPPORTED_100baseT_Half;
+               break;
+       case SPEED_1000:
+               if (duplex == DUPLEX_FULL)
+                       cap = SUPPORTED_1000baseT_Full;
+               else
+                       cap = SUPPORTED_1000baseT_Half;
+               break;
+       case SPEED_10000:
+               if (duplex == DUPLEX_FULL)
+                       cap = SUPPORTED_10000baseT_Full;
+       }
+       return cap;
+}
+
+#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
+                     ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
+                     ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
+                     ADVERTISED_10000baseT_Full)
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct adapter *adapter = dev->priv;
+       struct port_info *p = &adapter->port[dev->if_port];
+       struct link_config *lc = &p->link_config;
+
+       if (!(lc->supported & SUPPORTED_Autoneg))
+               return -EOPNOTSUPP;             /* can't change speed/duplex */
+
+       if (cmd->autoneg == AUTONEG_DISABLE) {
+               int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
+
+               if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
+                       return -EINVAL;
+               lc->requested_speed = cmd->speed;
+               lc->requested_duplex = cmd->duplex;
+               lc->advertising = 0;
+       } else {
+               cmd->advertising &= ADVERTISED_MASK;
+               if (cmd->advertising & (cmd->advertising - 1))
+                       cmd->advertising = lc->supported;
+               cmd->advertising &= lc->supported;
+               if (!cmd->advertising)
+                       return -EINVAL;
+               lc->requested_speed = SPEED_INVALID;
+               lc->requested_duplex = DUPLEX_INVALID;
+               lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
+       }
+       lc->autoneg = cmd->autoneg;
+       if (netif_running(dev))
+               t1_link_start(p->phy, p->mac, lc);
+       return 0;
+}
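+
+/*
+ * The "cmd->advertising & (cmd->advertising - 1)" test above is the usual
+ * more-than-one-bit-set idiom: clearing the lowest set bit leaves zero only
+ * for a power of two.  For example, advertising only 10000baseT_Full keeps
+ * that single mode, while advertising both 1000baseT_Full and
+ * 10000baseT_Full makes the test true and the request falls back to
+ * everything the port supports before being masked against lc->supported.
+ */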
+
+static void get_pauseparam(struct net_device *dev,
+                          struct ethtool_pauseparam *epause)
+{
+       struct adapter *adapter = dev->priv;
+       struct port_info *p = &adapter->port[dev->if_port];
+
+       epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
+       epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
+       epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
+}
+
+static int set_pauseparam(struct net_device *dev,
+                         struct ethtool_pauseparam *epause)
+{
+       struct adapter *adapter = dev->priv;
+       struct port_info *p = &adapter->port[dev->if_port];
+       struct link_config *lc = &p->link_config;
+
+       if (epause->autoneg == AUTONEG_DISABLE)
+               lc->requested_fc = 0;
+       else if (lc->supported & SUPPORTED_Autoneg)
+               lc->requested_fc = PAUSE_AUTONEG;
+       else
+               return -EINVAL;
+
+       if (epause->rx_pause)
+               lc->requested_fc |= PAUSE_RX;
+       if (epause->tx_pause)
+               lc->requested_fc |= PAUSE_TX;
+       if (lc->autoneg == AUTONEG_ENABLE) {
+               if (netif_running(dev))
+                       t1_link_start(p->phy, p->mac, lc);
+       } else {
+               lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+               if (netif_running(dev))
+                       p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
+                                                        lc->fc);
+       }
+       return 0;
+}
+
+static u32 get_rx_csum(struct net_device *dev)
+{
+       struct adapter *adapter = dev->priv;
+
+       return (adapter->flags & RX_CSUM_ENABLED) != 0;
+}
+
+static int set_rx_csum(struct net_device *dev, u32 data)
+{
+       struct adapter *adapter = dev->priv;
+
+       if (data)
+               adapter->flags |= RX_CSUM_ENABLED;
+       else
+               adapter->flags &= ~RX_CSUM_ENABLED;
+       return 0;
+}
+
+static int set_tso(struct net_device *dev, u32 value)
+{
+       struct adapter *adapter = dev->priv;
+
+       if (!(adapter->flags & TSO_CAPABLE))
+               return value ? -EOPNOTSUPP : 0;
+       return ethtool_op_set_tso(dev, value);
+}
+
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+       struct adapter *adapter = dev->priv;
+       int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+       e->rx_max_pending = MAX_RX_BUFFERS;
+       e->rx_mini_max_pending = 0;
+       e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
+       e->tx_max_pending = MAX_CMDQ_ENTRIES;
+
+       e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
+       e->rx_mini_pending = 0;
+       e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
+       e->tx_pending = adapter->params.sge.cmdQ_size[0];
+}
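+
+/*
+ * freelQ_size[] holds two free lists and jumbo_fl picks which index is the
+ * jumbo one: on T1B parts jumbo_fl is 1, so freelQ_size[0] is the normal
+ * ring and freelQ_size[1] the jumbo ring; on other chips the two indices
+ * are swapped.  set_sge_param() below relies on the same convention.
+ */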
+
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+       struct adapter *adapter = dev->priv;
+       int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+       if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
+           e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
+           e->tx_pending > MAX_CMDQ_ENTRIES ||
+           e->rx_pending < MIN_FL_ENTRIES ||
+           e->rx_jumbo_pending < MIN_FL_ENTRIES ||
+           e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
+               return -EINVAL;
+
+       if (adapter->flags & FULL_INIT_DONE)
+		return -EBUSY;
+
+       adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
+       adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
+       adapter->params.sge.cmdQ_size[0] = e->tx_pending;
+       adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
+               MAX_CMDQ1_ENTRIES : e->tx_pending;
+       return 0;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+       struct adapter *adapter = dev->priv;
+
+       /*
+        * If RX coalescing is requested we use NAPI, otherwise interrupts.
+        * This choice can be made only when all ports and the TOE are off.
+        */
+       if (adapter->open_device_map == 0)
+               adapter->params.sge.polling = c->use_adaptive_rx_coalesce;
+
+       if (adapter->params.sge.polling) {
+               adapter->params.sge.rx_coalesce_usecs = 0;
+       } else {
+               adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
+       }
+       adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
+       adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
+       t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
+       return 0;
+}
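+
+/*
+ * Worked example of the mapping above (values only, not driver code): with
+ * all ports closed (open_device_map == 0), a request with
+ * use_adaptive_rx_coalesce set switches the SGE to polling and forces
+ * rx_coalesce_usecs to 0; with it clear, rx_coalesce_usecs is taken as
+ * given, e.g. 100 programs a 100us coalescing timer.  rate_sample_interval
+ * is copied into sample_interval_usecs in both cases.
+ */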
+
+static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+       struct adapter *adapter = dev->priv;
+
+       c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
+       c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
+       c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
+       return 0;
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+	return EEPROM_SIZE;
+}
+
+#define EEPROM_MAGIC(ap) \
+       (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
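+
+/*
+ * Example of the magic computed above, assuming PCI_VENDOR_ID_CHELSIO is
+ * 0x1425: a chip_version of 2 yields 0x00021425, i.e. the vendor ID in the
+ * low 16 bits and the chip version in the bits above it.
+ */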
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+                     u8 *data)
+{
+       int i;
+       u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
+       struct adapter *adapter = dev->priv;
+
+       e->magic = EEPROM_MAGIC(adapter);
+       for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
+               t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
+       memcpy(data, buf + e->offset, e->len);
+       return 0;
+}
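+
+/*
+ * get_eeprom() reads whole 32-bit words starting at the requested offset
+ * rounded down to a multiple of 4.  For example, a request with offset 6
+ * and len 4 reads the words at offsets 4 and 8 into buf[] and then copies
+ * the four bytes starting at buf[6] back to the caller.
+ */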
+
+static struct ethtool_ops t1_ethtool_ops = {
+       .get_settings      = get_settings,
+       .set_settings      = set_settings,
+       .get_drvinfo       = get_drvinfo,
+       .get_msglevel      = get_msglevel,
+       .set_msglevel      = set_msglevel,
+       .get_ringparam     = get_sge_param,
+       .set_ringparam     = set_sge_param,
+       .get_coalesce      = get_coalesce,
+       .set_coalesce      = set_coalesce,
+       .get_eeprom_len    = get_eeprom_len,
+       .get_eeprom        = get_eeprom,
+       .get_pauseparam    = get_pauseparam,
+       .set_pauseparam    = set_pauseparam,
+       .get_rx_csum       = get_rx_csum,
+       .set_rx_csum       = set_rx_csum,
+       .get_tx_csum       = ethtool_op_get_tx_csum,
+       .set_tx_csum       = ethtool_op_set_tx_csum,
+       .get_sg            = ethtool_op_get_sg,
+       .set_sg            = ethtool_op_set_sg,
+       .get_link          = ethtool_op_get_link,
+       .get_strings       = get_strings,
+       .get_stats_count   = get_stats_count,
+       .get_ethtool_stats = get_stats,
+       .get_regs_len      = get_regs_len,
+       .get_regs          = get_regs,
+       .get_tso           = ethtool_op_get_tso,
+       .set_tso           = set_tso,
+};
+
+static void cxgb_proc_cleanup(struct adapter *adapter,
+                                       struct proc_dir_entry *dir)
+{
+       const char *name;
+       name = adapter->name;
+       remove_proc_entry(name, dir);
+}
+//#define chtoe_setup_toedev(adapter) NULL
+#define update_mtu_tab(adapter)
+#define write_smt_entry(adapter, idx)
+
+static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+	struct adapter *adapter = dev->priv;
+	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = adapter->port[dev->if_port].phy->addr;
+		/* FALLTHRU */
+	case SIOCGMIIREG: {
+		struct cphy *phy = adapter->port[dev->if_port].phy;
+		u32 val;
+
+		if (!phy->mdio_read)
+			return -EOPNOTSUPP;
+		phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
+			       &val);
+		data->val_out = val;
+		break;
+	}
+	case SIOCSMIIREG: {
+		struct cphy *phy = adapter->port[dev->if_port].phy;
+
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (!phy->mdio_write)
+			return -EOPNOTSUPP;
+		phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
+				data->val_in);
+		break;
+	}
+
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static int t1_change_mtu(struct net_device *dev, int new_mtu)
+{
+       int ret;
+       struct adapter *adapter = dev->priv;
+       struct cmac *mac = adapter->port[dev->if_port].mac;
+
+	if (!mac->ops->set_mtu)
+		return -EOPNOTSUPP;
+	if (new_mtu < 68)
+		return -EINVAL;
+       if ((ret = mac->ops->set_mtu(mac, new_mtu)))
+               return ret;
+       dev->mtu = new_mtu;
+       return 0;
+}
+
+static int t1_set_mac_addr(struct net_device *dev, void *p)
+{
+       struct adapter *adapter = dev->priv;
+       struct cmac *mac = adapter->port[dev->if_port].mac;
+       struct sockaddr *addr = p;
+
+       if (!mac->ops->macaddress_set)
+               return -EOPNOTSUPP;
+
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+       mac->ops->macaddress_set(mac, dev->dev_addr);
+       return 0;
+}
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+static void vlan_rx_register(struct net_device *dev,
+                                  struct vlan_group *grp)
+{
+       struct adapter *adapter = dev->priv;
+
+       spin_lock_irq(&adapter->async_lock);
+       adapter->vlan_grp = grp;
+       t1_set_vlan_accel(adapter, grp != NULL);
+       spin_unlock_irq(&adapter->async_lock);
+}
+
+static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+       struct adapter *adapter = dev->priv;
+
+       spin_lock_irq(&adapter->async_lock);
+       if (adapter->vlan_grp)
+               adapter->vlan_grp->vlan_devices[vid] = NULL;
+       spin_unlock_irq(&adapter->async_lock);
+}
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void t1_netpoll(struct net_device *dev)
+{
+       unsigned long flags;
+       struct adapter *adapter = dev->priv;
+
+       local_irq_save(flags);
+	t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter, NULL);
+       local_irq_restore(flags);
+}
+#endif
+
+/*
+ * Periodic accumulation of MAC statistics.  This is used only if the MAC
+ * does not have any other way to prevent stats counter overflow.
+ */
+static void mac_stats_task(void *data)
+{
+       int i;
+       struct adapter *adapter = data;
+
+       for_each_port(adapter, i) {
+               struct port_info *p = &adapter->port[i];
+
+               if (netif_running(p->dev))
+                       p->mac->ops->statistics_update(p->mac,
+                                                      MAC_STATS_UPDATE_FAST);
+       }
+
+       /* Schedule the next statistics update if any port is active. */
+       spin_lock(&adapter->work_lock);
+       if (adapter->open_device_map & PORT_MASK)
+               schedule_mac_stats_update(adapter,
+                                         adapter->params.stats_update_period);
+       spin_unlock(&adapter->work_lock);
+}
+
+/*
+ * Processes elmer0 external interrupts in process context.
+ */
+static void ext_intr_task(void *data)
+{
+       struct adapter *adapter = data;
+
+       elmer0_ext_intr_handler(adapter);
+
+       /* Now reenable external interrupts */
+       spin_lock_irq(&adapter->async_lock);
+       adapter->slow_intr_mask |= F_PL_INTR_EXT;
+       writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
+       writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+                   adapter->regs + A_PL_ENABLE);
+       spin_unlock_irq(&adapter->async_lock);
+}
+
+/*
+ * Interrupt-context handler for elmer0 external interrupts.
+ */
+void t1_elmer0_ext_intr(struct adapter *adapter)
+{
+       /*
+        * Schedule a task to handle external interrupts as we require
+        * a process context.  We disable EXT interrupts in the interim
+        * and let the task reenable them when it's done.
+        */
+       adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
+       writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+                   adapter->regs + A_PL_ENABLE);
+       schedule_work(&adapter->ext_intr_handler_task);
+}
+
+void t1_fatal_err(struct adapter *adapter)
+{
+       if (adapter->flags & FULL_INIT_DONE) {
+               t1_sge_stop(adapter->sge);
+               t1_interrupts_disable(adapter);
+       }
+       CH_ALERT("%s: encountered fatal error, operation suspended\n",
+                adapter->name);
+}
+
+static int __devinit init_one(struct pci_dev *pdev,
+                             const struct pci_device_id *ent)
+{
+       static int version_printed;
+
+       int i, err, pci_using_dac = 0;
+       unsigned long mmio_start, mmio_len;
+       const struct board_info *bi;
+       struct adapter *adapter = NULL;
+       struct port_info *pi;
+
+       if (!version_printed) {
+               printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
+                      DRV_VERSION);
+               ++version_printed;
+       }
+
+       err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+               CH_ERR("%s: cannot find PCI device memory base address\n",
+                      pci_name(pdev));
+               err = -ENODEV;
+               goto out_disable_pdev;
+       }
+
+       if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+               pci_using_dac = 1;
+
+               if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
+			CH_ERR("%s: unable to obtain 64-bit DMA for "
+			       "consistent allocations\n", pci_name(pdev));
+                       err = -ENODEV;
+                       goto out_disable_pdev;
+               }
+
+       } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
+               CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
+               goto out_disable_pdev;
+       }
+
+       err = pci_request_regions(pdev, DRV_NAME);
+       if (err) {
+               CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
+               goto out_disable_pdev;
+       }
+
+       pci_set_master(pdev);
+
+	mmio_start = pci_resource_start(pdev, 0);
+       mmio_len = pci_resource_len(pdev, 0);
+       bi = t1_get_board_info(ent->driver_data);
+
+       for (i = 0; i < bi->port_number; ++i) {
+               struct net_device *netdev;
+
+               netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
+               if (!netdev) {
+                       err = -ENOMEM;
+                       goto out_free_dev;
+               }
+
+               SET_MODULE_OWNER(netdev);
+               SET_NETDEV_DEV(netdev, &pdev->dev);
+
+               if (!adapter) {
+                       adapter = netdev->priv;
+                       adapter->pdev = pdev;
+                       adapter->port[0].dev = netdev;  /* so we don't leak it */
+
+                       adapter->regs = ioremap(mmio_start, mmio_len);
+                       if (!adapter->regs) {
+                               CH_ERR("%s: cannot map device registers\n",
+                                      pci_name(pdev));
+                               err = -ENOMEM;
+                               goto out_free_dev;
+                       }
+
+                       if (t1_get_board_rev(adapter, bi, &adapter->params)) {
+                               err = -ENODEV;    /* Can't handle this chip rev */
+                               goto out_free_dev;
+                       }
+
+                       adapter->name = pci_name(pdev);
+                       adapter->msg_enable = dflt_msg_enable;
+                       adapter->mmio_len = mmio_len;
+
+                       init_MUTEX(&adapter->mib_mutex);
+                       spin_lock_init(&adapter->tpi_lock);
+                       spin_lock_init(&adapter->work_lock);
+                       spin_lock_init(&adapter->async_lock);
+
+                       INIT_WORK(&adapter->ext_intr_handler_task,
+                                 ext_intr_task, adapter);
+                       INIT_WORK(&adapter->stats_update_task, mac_stats_task,
+                                 adapter);
+#ifdef work_struct
+                       init_timer(&adapter->stats_update_timer);
+                       adapter->stats_update_timer.function = mac_stats_timer;
+                       adapter->stats_update_timer.data =
+                               (unsigned long)adapter;
+#endif
+
+                       pci_set_drvdata(pdev, netdev);
+               }
+
+               pi = &adapter->port[i];
+               pi->dev = netdev;
+               netif_carrier_off(netdev);
+               netdev->irq = pdev->irq;
+               netdev->if_port = i;
+               netdev->mem_start = mmio_start;
+               netdev->mem_end = mmio_start + mmio_len - 1;
+               netdev->priv = adapter;
+               netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+               netdev->features |= NETIF_F_LLTX;
+
+               adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
+               if (pci_using_dac)
+                       netdev->features |= NETIF_F_HIGHDMA;
+               if (vlan_tso_capable(adapter)) {
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+                       adapter->flags |= VLAN_ACCEL_CAPABLE;
+                       netdev->features |=
+                               NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+                       netdev->vlan_rx_register = vlan_rx_register;
+                       netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
+#endif
+                       adapter->flags |= TSO_CAPABLE;
+                       netdev->features |= NETIF_F_TSO;
+               }
+
+               netdev->open = cxgb_open;
+               netdev->stop = cxgb_close;
+               netdev->hard_start_xmit = t1_start_xmit;
+               netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
+                       sizeof(struct cpl_tx_pkt_lso) :
+                       sizeof(struct cpl_tx_pkt);
+               netdev->get_stats = t1_get_stats;
+               netdev->set_multicast_list = t1_set_rxmode;
+               netdev->do_ioctl = t1_ioctl;
+               netdev->change_mtu = t1_change_mtu;
+               netdev->set_mac_address = t1_set_mac_addr;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+               netdev->poll_controller = t1_netpoll;
+#endif
+               netdev->weight = 64;
+
+		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
+       }
+
+       if (t1_init_sw_modules(adapter, bi) < 0) {
+               err = -ENODEV;
+               goto out_free_dev;
+       }
+
+       /*
+        * The card is now ready to go.  If any errors occur during device
+        * registration we do not fail the whole card but rather proceed only
+        * with the ports we manage to register successfully.  However we must
+        * register at least one net device.
+        */
+       for (i = 0; i < bi->port_number; ++i) {
+               err = register_netdev(adapter->port[i].dev);
+               if (err)
+                       CH_WARN("%s: cannot register net device %s, skipping\n",
+                               pci_name(pdev), adapter->port[i].dev->name);
+               else {
+                       /*
+                        * Change the name we use for messages to the name of
+                        * the first successfully registered interface.
+                        */
+                       if (!adapter->registered_device_map)
+                               adapter->name = adapter->port[i].dev->name;
+
+			__set_bit(i, &adapter->registered_device_map);
+               }
+       }
+       if (!adapter->registered_device_map) {
+               CH_ERR("%s: could not register any net devices\n",
+                      pci_name(pdev));
+               goto out_release_adapter_res;
+       }
+
+       printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
+              bi->desc, adapter->params.chip_revision,
+              adapter->params.pci.is_pcix ? "PCIX" : "PCI",
+              adapter->params.pci.speed, adapter->params.pci.width);
+       return 0;
+
+ out_release_adapter_res:
+       t1_free_sw_modules(adapter);
+ out_free_dev:
+	if (adapter) {
+		if (adapter->regs)
+			iounmap(adapter->regs);
+		for (i = bi->port_number - 1; i >= 0; --i)
+			if (adapter->port[i].dev) {
+				cxgb_proc_cleanup(adapter, proc_root_driver);
+				free_netdev(adapter->port[i].dev);
+			}
+	}
+       pci_release_regions(pdev);
+ out_disable_pdev:
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+       return err;
+}
+
+static inline void t1_sw_reset(struct pci_dev *pdev)
+{
+       pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
+       pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
+}
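+
+/*
+ * t1_sw_reset() presumably relies on PCI power management to reset the
+ * chip: writing 3 to the PM control/status register requests the D3hot
+ * state and writing 0 returns the function to D0, and on this hardware
+ * that round trip acts as a software reset.
+ */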
+
+static void __devexit remove_one(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+
+       if (dev) {
+               int i;
+               struct adapter *adapter = dev->priv;
+
+               for_each_port(adapter, i)
+                       if (test_bit(i, &adapter->registered_device_map))
+                               unregister_netdev(adapter->port[i].dev);
+
+               t1_free_sw_modules(adapter);
+               iounmap(adapter->regs);
+               while (--i >= 0)
+                       if (adapter->port[i].dev) {
+                               cxgb_proc_cleanup(adapter, proc_root_driver);
+				free_netdev(adapter->port[i].dev);
+                       }
+               pci_release_regions(pdev);
+               pci_disable_device(pdev);
+               pci_set_drvdata(pdev, NULL);
+               t1_sw_reset(pdev);
+       }
+}
+
+static struct pci_driver driver = {
+       .name     = DRV_NAME,
+       .id_table = t1_pci_tbl,
+       .probe    = init_one,
+       .remove   = __devexit_p(remove_one),
+};
+
+static int __init t1_init_module(void)
+{
+       return pci_module_init(&driver);
+}
+
+static void __exit t1_cleanup_module(void)
+{
+       pci_unregister_driver(&driver);
+}
+
+module_init(t1_init_module);
+module_exit(t1_cleanup_module);
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h
new file mode 100644 (file)
index 0000000..5590cb2
--- /dev/null
@@ -0,0 +1,151 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: elmer0.h                                                            *
+ * $Revision: 1.6 $                                                          *
+ * $Date: 2005/06/21 22:49:43 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_ELMER0_H_
+#define _CXGB_ELMER0_H_
+
+/* ELMER0 registers */
+#define A_ELMER0_VERSION 0x100000
+#define A_ELMER0_PHY_CFG 0x100004
+#define A_ELMER0_INT_ENABLE 0x100008
+#define A_ELMER0_INT_CAUSE 0x10000c
+#define A_ELMER0_GPI_CFG 0x100010
+#define A_ELMER0_GPI_STAT 0x100014
+#define A_ELMER0_GPO 0x100018
+#define A_ELMER0_PORT0_MI1_CFG 0x400000
+
+#define S_MI1_MDI_ENABLE    0
+#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE)
+#define F_MI1_MDI_ENABLE    V_MI1_MDI_ENABLE(1U)
+
+#define S_MI1_MDI_INVERT    1
+#define V_MI1_MDI_INVERT(x) ((x) << S_MI1_MDI_INVERT)
+#define F_MI1_MDI_INVERT    V_MI1_MDI_INVERT(1U)
+
+#define S_MI1_PREAMBLE_ENABLE    2
+#define V_MI1_PREAMBLE_ENABLE(x) ((x) << S_MI1_PREAMBLE_ENABLE)
+#define F_MI1_PREAMBLE_ENABLE    V_MI1_PREAMBLE_ENABLE(1U)
+
+#define S_MI1_SOF    3
+#define M_MI1_SOF    0x3
+#define V_MI1_SOF(x) ((x) << S_MI1_SOF)
+#define G_MI1_SOF(x) (((x) >> S_MI1_SOF) & M_MI1_SOF)
+
+#define S_MI1_CLK_DIV    5
+#define M_MI1_CLK_DIV    0xff
+#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV)
+#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV)
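+
+/*
+ * The S_/M_/V_/F_/G_ prefixes follow the register-field convention used
+ * throughout these headers: S_ is the bit shift, M_ the field mask, V_(x)
+ * places a value, F_ is a single-bit flag and G_(x) extracts a field.
+ * For example, a MI1 configuration word could be built and decoded as:
+ *
+ *	cfg = V_MI1_CLK_DIV(0x30) | F_MI1_PREAMBLE_ENABLE;
+ *	div = G_MI1_CLK_DIV(cfg);	(yields 0x30 again)
+ */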
+
+#define A_ELMER0_PORT0_MI1_ADDR 0x400004
+
+#define S_MI1_REG_ADDR    0
+#define M_MI1_REG_ADDR    0x1f
+#define V_MI1_REG_ADDR(x) ((x) << S_MI1_REG_ADDR)
+#define G_MI1_REG_ADDR(x) (((x) >> S_MI1_REG_ADDR) & M_MI1_REG_ADDR)
+
+#define S_MI1_PHY_ADDR    5
+#define M_MI1_PHY_ADDR    0x1f
+#define V_MI1_PHY_ADDR(x) ((x) << S_MI1_PHY_ADDR)
+#define G_MI1_PHY_ADDR(x) (((x) >> S_MI1_PHY_ADDR) & M_MI1_PHY_ADDR)
+
+#define A_ELMER0_PORT0_MI1_DATA 0x400008
+
+#define S_MI1_DATA    0
+#define M_MI1_DATA    0xffff
+#define V_MI1_DATA(x) ((x) << S_MI1_DATA)
+#define G_MI1_DATA(x) (((x) >> S_MI1_DATA) & M_MI1_DATA)
+
+#define A_ELMER0_PORT0_MI1_OP 0x40000c
+
+#define S_MI1_OP    0
+#define M_MI1_OP    0x3
+#define V_MI1_OP(x) ((x) << S_MI1_OP)
+#define G_MI1_OP(x) (((x) >> S_MI1_OP) & M_MI1_OP)
+
+#define S_MI1_ADDR_AUTOINC    2
+#define V_MI1_ADDR_AUTOINC(x) ((x) << S_MI1_ADDR_AUTOINC)
+#define F_MI1_ADDR_AUTOINC    V_MI1_ADDR_AUTOINC(1U)
+
+#define S_MI1_OP_BUSY    31
+#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY)
+#define F_MI1_OP_BUSY    V_MI1_OP_BUSY(1U)
+
+#define A_ELMER0_PORT1_MI1_CFG 0x500000
+#define A_ELMER0_PORT1_MI1_ADDR 0x500004
+#define A_ELMER0_PORT1_MI1_DATA 0x500008
+#define A_ELMER0_PORT1_MI1_OP 0x50000c
+#define A_ELMER0_PORT2_MI1_CFG 0x600000
+#define A_ELMER0_PORT2_MI1_ADDR 0x600004
+#define A_ELMER0_PORT2_MI1_DATA 0x600008
+#define A_ELMER0_PORT2_MI1_OP 0x60000c
+#define A_ELMER0_PORT3_MI1_CFG 0x700000
+#define A_ELMER0_PORT3_MI1_ADDR 0x700004
+#define A_ELMER0_PORT3_MI1_DATA 0x700008
+#define A_ELMER0_PORT3_MI1_OP 0x70000c
+
+/* Simple bit definition for GPI and GP0 registers. */
+#define     ELMER0_GP_BIT0              0x0001
+#define     ELMER0_GP_BIT1              0x0002
+#define     ELMER0_GP_BIT2              0x0004
+#define     ELMER0_GP_BIT3              0x0008
+#define     ELMER0_GP_BIT4              0x0010
+#define     ELMER0_GP_BIT5              0x0020
+#define     ELMER0_GP_BIT6              0x0040
+#define     ELMER0_GP_BIT7              0x0080
+#define     ELMER0_GP_BIT8              0x0100
+#define     ELMER0_GP_BIT9              0x0200
+#define     ELMER0_GP_BIT10             0x0400
+#define     ELMER0_GP_BIT11             0x0800
+#define     ELMER0_GP_BIT12             0x1000
+#define     ELMER0_GP_BIT13             0x2000
+#define     ELMER0_GP_BIT14             0x4000
+#define     ELMER0_GP_BIT15             0x8000
+#define     ELMER0_GP_BIT16             0x10000
+#define     ELMER0_GP_BIT17             0x20000
+#define     ELMER0_GP_BIT18             0x40000
+#define     ELMER0_GP_BIT19             0x80000
+
+#define MI1_OP_DIRECT_WRITE 1
+#define MI1_OP_DIRECT_READ  2
+
+#define MI1_OP_INDIRECT_ADDRESS  0
+#define MI1_OP_INDIRECT_WRITE    1
+#define MI1_OP_INDIRECT_READ_INC 2
+#define MI1_OP_INDIRECT_READ     3
+
+#endif /* _CXGB_ELMER0_H_ */
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
new file mode 100644 (file)
index 0000000..2306425
--- /dev/null
@@ -0,0 +1,346 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: espi.c                                                              *
+ * $Revision: 1.14 $                                                         *
+ * $Date: 2005/05/14 00:59:32 $                                              *
+ * Description:                                                              *
+ *  Ethernet SPI functionality.                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+#include "regs.h"
+#include "espi.h"
+
+struct peespi {
+       adapter_t *adapter;
+       struct espi_intr_counts intr_cnt;
+       u32 misc_ctrl;
+       spinlock_t lock;
+};
+
+#define ESPI_INTR_MASK (F_DIP4ERR | F_RXDROP | F_TXDROP | F_RXOVERFLOW | \
+                       F_RAMPARITYERR | F_DIP2PARITYERR)
+#define MON_MASK  (V_MONITORED_PORT_NUM(3) | F_MONITORED_DIRECTION \
+                  | F_MONITORED_INTERFACE)
+
+#define TRICN_CNFG 14
+#define TRICN_CMD_READ  0x11
+#define TRICN_CMD_WRITE 0x21
+#define TRICN_CMD_ATTEMPTS 10
+
+static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
+                      int ch_addr, int reg_offset, u32 wr_data)
+{
+       int busy, attempts = TRICN_CMD_ATTEMPTS;
+
+       writel(V_WRITE_DATA(wr_data) |
+              V_REGISTER_OFFSET(reg_offset) |
+              V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) |
+              V_BUNDLE_ADDR(bundle_addr) |
+              V_SPI4_COMMAND(TRICN_CMD_WRITE),
+              adapter->regs + A_ESPI_CMD_ADDR);
+       writel(0, adapter->regs + A_ESPI_GOSTAT);
+
+       do {
+               busy = readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY;
+       } while (busy && --attempts);
+
+       if (busy)
+               CH_ERR("%s: TRICN write timed out\n", adapter->name);
+
+       return busy;
+}
+
+/*
+ * 1. Deassert rx_reset_core.
+ * 2. Program TRICN_CNFG registers.
+ * 3. Deassert rx_reset_link.
+ */
+static int tricn_init(adapter_t *adapter)
+{
+       int     i               = 0;
+       int     sme             = 1;
+       int     stat            = 0;
+       int     timeout         = 0;
+       int     is_ready        = 0;
+       int     dynamic_deskew  = 0;
+
+       if (dynamic_deskew)
+               sme = 0;
+
+	/* 1 */
+	timeout = 1000;
+	do {
+		stat = readl(adapter->regs + A_ESPI_RX_RESET);
+		is_ready = (stat & 0x4);
+		timeout--;
+		udelay(5);
+	} while (!is_ready && timeout > 0);
+	writel(0x2, adapter->regs + A_ESPI_RX_RESET);
+	if (timeout == 0) {
+		CH_ERR("ESPI: timeout waiting for rx_reset_core in tricn_init()\n");
+		t1_fatal_err(adapter);
+	}
+
+       /* 2 */
+       if (sme) {
+               tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
+               tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
+               tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
+       }
+	for (i = 1; i <= 8; i++)
+		tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
+	for (i = 1; i <= 2; i++)
+		tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
+	for (i = 1; i <= 3; i++)
+		tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
+	for (i = 4; i <= 4; i++)
+		tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
+	for (i = 5; i <= 5; i++)
+		tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
+	for (i = 6; i <= 6; i++)
+		tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
+	for (i = 7; i <= 7; i++)
+		tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0x80);
+	for (i = 8; i <= 8; i++)
+		tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
+
+       /* 3 */
+       writel(0x3, adapter->regs + A_ESPI_RX_RESET);
+
+       return 0;
+}
+
+void t1_espi_intr_enable(struct peespi *espi)
+{
+       u32 enable, pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
+
+       /*
+        * Cannot enable ESPI interrupts on T1B because HW asserts the
+        * interrupt incorrectly, namely the driver gets ESPI interrupts
+        * but no data is actually dropped (can verify this reading the ESPI
+        * drop registers).  Also, once the ESPI interrupt is asserted it
+        * cannot be cleared (HW bug).
+        */
+       enable = t1_is_T1B(espi->adapter) ? 0 : ESPI_INTR_MASK;
+       writel(enable, espi->adapter->regs + A_ESPI_INTR_ENABLE);
+       writel(pl_intr | F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
+}
+
+void t1_espi_intr_clear(struct peespi *espi)
+{
+       writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS);
+       writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE);
+}
+
+void t1_espi_intr_disable(struct peespi *espi)
+{
+       u32 pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
+
+       writel(0, espi->adapter->regs + A_ESPI_INTR_ENABLE);
+       writel(pl_intr & ~F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
+}
+
+int t1_espi_intr_handler(struct peespi *espi)
+{
+       u32 cnt;
+       u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
+
+       if (status & F_DIP4ERR)
+               espi->intr_cnt.DIP4_err++;
+       if (status & F_RXDROP)
+               espi->intr_cnt.rx_drops++;
+       if (status & F_TXDROP)
+               espi->intr_cnt.tx_drops++;
+       if (status & F_RXOVERFLOW)
+               espi->intr_cnt.rx_ovflw++;
+       if (status & F_RAMPARITYERR)
+               espi->intr_cnt.parity_err++;
+       if (status & F_DIP2PARITYERR) {
+               espi->intr_cnt.DIP2_parity_err++;
+
+               /*
+                * Must read the error count to clear the interrupt
+                * that it causes.
+                */
+               cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
+       }
+
+       /*
+        * For T1B we need to write 1 to clear ESPI interrupts.  For T2+ we
+        * write the status as is.
+        */
+       if (status && t1_is_T1B(espi->adapter))
+               status = 1;
+       writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
+       return 0;
+}
+
+const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi)
+{
+	return &espi->intr_cnt;
+}
+
+static void espi_setup_for_pm3393(adapter_t *adapter)
+{
+       u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200;
+
+       writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
+       writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN1);
+       writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
+       writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN3);
+       writel(0x100, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+       writel(wmark, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+       writel(3, adapter->regs + A_ESPI_CALENDAR_LENGTH);
+       writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
+       writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG);
+}
+
+/*
+ * T2 initialization:
+ *  1. Set T_ESPI_MISCCTRL_ADDR.
+ *  2. Init ESPI registers.
+ *  3. Init TriCN hard macro.
+ */
+int t1_espi_init(struct peespi *espi, int mac_type, int nports)
+{
+       u32 cnt;
+
+       u32 status_enable_extra = 0;
+       adapter_t *adapter = espi->adapter;
+       u32 status, burstval = 0x800100;
+
+       /* Disable ESPI training.  MACs that can handle it enable it below. */
+       writel(0, adapter->regs + A_ESPI_TRAIN);
+
+       if (is_T2(adapter)) {
+               writel(V_OUT_OF_SYNC_COUNT(4) |
+                      V_DIP2_PARITY_ERR_THRES(3) |
+                      V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
+               if (nports == 4) {
+                       /* T204: maxburst1 = 0x40, maxburst2 = 0x20 */
+                       burstval = 0x200040;
+               }
+       }
+       writel(burstval, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
+
+       switch (mac_type) {
+       case CHBT_MAC_PM3393:
+               espi_setup_for_pm3393(adapter);
+               break;
+       default:
+               return -1;
+       }
+
+	/*
+	 * Make sure any pending interrupts from the SPI are
+	 * cleared before enabling the interrupt.
+	 */
+       writel(ESPI_INTR_MASK, espi->adapter->regs + A_ESPI_INTR_ENABLE);
+       status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
+       if (status & F_DIP2PARITYERR) {
+               cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
+       }
+
+       /*
+        * For T1B we need to write 1 to clear ESPI interrupts.  For T2+ we
+        * write the status as is.
+        */
+       if (status && t1_is_T1B(espi->adapter))
+               status = 1;
+       writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
+
+       writel(status_enable_extra | F_RXSTATUSENABLE,
+              adapter->regs + A_ESPI_FIFO_STATUS_ENABLE);
+
+       if (is_T2(adapter)) {
+               tricn_init(adapter);
+               /*
+                * Always position the control at the 1st port egress IN
+                * (sop,eop) counter to reduce PIOs for T/N210 workaround.
+                */
+               espi->misc_ctrl = (readl(adapter->regs + A_ESPI_MISC_CONTROL)
+                                  & ~MON_MASK) | (F_MONITORED_DIRECTION
+                                  | F_MONITORED_INTERFACE);
+               writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+               spin_lock_init(&espi->lock);
+       }
+
+       return 0;
+}
+
+void t1_espi_destroy(struct peespi *espi)
+{
+       kfree(espi);
+}
+
+struct peespi *t1_espi_create(adapter_t *adapter)
+{
+	struct peespi *espi = kmalloc(sizeof(*espi), GFP_KERNEL);
+
+	if (espi) {
+		memset(espi, 0, sizeof(*espi));
+		espi->adapter = adapter;
+	}
+	return espi;
+}
+
+void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
+{
+       struct peespi *espi = adapter->espi;
+
+       if (!is_T2(adapter))
+               return;
+       spin_lock(&espi->lock);
+       espi->misc_ctrl = (val & ~MON_MASK) |
+                         (espi->misc_ctrl & MON_MASK);
+       writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+       spin_unlock(&espi->lock);
+}
+
+u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
+{
+       u32 sel;
+
+       struct peespi *espi = adapter->espi;
+
+       if (!is_T2(adapter))
+               return 0;
+       sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2);
+	if (!wait) {
+		if (!spin_trylock(&espi->lock))
+			return 0;
+	} else
+		spin_lock(&espi->lock);
+
+	if (sel != (espi->misc_ctrl & MON_MASK)) {
+		writel((espi->misc_ctrl & ~MON_MASK) | sel,
+		       adapter->regs + A_ESPI_MISC_CONTROL);
+		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
+		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+	} else
+		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
+       spin_unlock(&espi->lock);
+       return sel;
+}
diff --git a/drivers/net/chelsio/espi.h b/drivers/net/chelsio/espi.h
new file mode 100644 (file)
index 0000000..c90e37f
--- /dev/null
@@ -0,0 +1,68 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: espi.h                                                              *
+ * $Revision: 1.7 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_ESPI_H_
+#define _CXGB_ESPI_H_
+
+#include "common.h"
+
+struct espi_intr_counts {
+       unsigned int DIP4_err;
+       unsigned int rx_drops;
+       unsigned int tx_drops;
+       unsigned int rx_ovflw;
+       unsigned int parity_err;
+       unsigned int DIP2_parity_err;
+};
+
+struct peespi;
+
+struct peespi *t1_espi_create(adapter_t *adapter);
+void t1_espi_destroy(struct peespi *espi);
+int t1_espi_init(struct peespi *espi, int mac_type, int nports);
+
+void t1_espi_intr_enable(struct peespi *);
+void t1_espi_intr_clear(struct peespi *);
+void t1_espi_intr_disable(struct peespi *);
+int t1_espi_intr_handler(struct peespi *);
+const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi);
+
+void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val);
+u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait);
+
+#endif /* _CXGB_ESPI_H_ */
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h
new file mode 100644 (file)
index 0000000..746b0ee
--- /dev/null
@@ -0,0 +1,134 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: gmac.h                                                              *
+ * $Revision: 1.6 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
+ * Description:                                                              *
+ *  Generic MAC functionality.                                               *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_GMAC_H_
+#define _CXGB_GMAC_H_
+
+#include "common.h"
+
+enum { MAC_STATS_UPDATE_FAST, MAC_STATS_UPDATE_FULL };
+enum { MAC_DIRECTION_RX = 1, MAC_DIRECTION_TX = 2 };
+
+struct cmac_statistics {
+       /* Transmit */
+       u64 TxOctetsOK;
+       u64 TxOctetsBad;
+       u64 TxUnicastFramesOK;
+       u64 TxMulticastFramesOK;
+       u64 TxBroadcastFramesOK;
+       u64 TxPauseFrames;
+       u64 TxFramesWithDeferredXmissions;
+       u64 TxLateCollisions;
+       u64 TxTotalCollisions;
+       u64 TxFramesAbortedDueToXSCollisions;
+       u64 TxUnderrun;
+       u64 TxLengthErrors;
+       u64 TxInternalMACXmitError;
+       u64 TxFramesWithExcessiveDeferral;
+       u64 TxFCSErrors;
+
+       /* Receive */
+       u64 RxOctetsOK;
+       u64 RxOctetsBad;
+       u64 RxUnicastFramesOK;
+       u64 RxMulticastFramesOK;
+       u64 RxBroadcastFramesOK;
+       u64 RxPauseFrames;
+       u64 RxFCSErrors;
+       u64 RxAlignErrors;
+       u64 RxSymbolErrors;
+       u64 RxDataErrors;
+       u64 RxSequenceErrors;
+       u64 RxRuntErrors;
+       u64 RxJabberErrors;
+       u64 RxInternalMACRcvError;
+       u64 RxInRangeLengthErrors;
+       u64 RxOutOfRangeLengthField;
+       u64 RxFrameTooLongErrors;
+};
+
+struct cmac_ops {
+       void (*destroy)(struct cmac *);
+       int (*reset)(struct cmac *);
+       int (*interrupt_enable)(struct cmac *);
+       int (*interrupt_disable)(struct cmac *);
+       int (*interrupt_clear)(struct cmac *);
+       int (*interrupt_handler)(struct cmac *);
+
+       int (*enable)(struct cmac *, int);
+       int (*disable)(struct cmac *, int);
+
+       int (*loopback_enable)(struct cmac *);
+       int (*loopback_disable)(struct cmac *);
+
+       int (*set_mtu)(struct cmac *, int mtu);
+       int (*set_rx_mode)(struct cmac *, struct t1_rx_mode *rm);
+
+       int (*set_speed_duplex_fc)(struct cmac *, int speed, int duplex, int fc);
+       int (*get_speed_duplex_fc)(struct cmac *, int *speed, int *duplex,
+                                  int *fc);
+
+       const struct cmac_statistics *(*statistics_update)(struct cmac *, int);
+
+       int (*macaddress_get)(struct cmac *, u8 mac_addr[6]);
+       int (*macaddress_set)(struct cmac *, u8 mac_addr[6]);
+};
+
+typedef struct _cmac_instance cmac_instance;
+
+struct cmac {
+       struct cmac_statistics stats;
+       adapter_t *adapter;
+       struct cmac_ops *ops;
+       cmac_instance *instance;
+};
+
+struct gmac {
+       unsigned int stats_update_period;
+       struct cmac *(*create)(adapter_t *adapter, int index);
+       int (*reset)(adapter_t *);
+};
+
+extern struct gmac t1_pm3393_ops;
+extern struct gmac t1_chelsio_mac_ops;
+extern struct gmac t1_vsc7321_ops;
+extern struct gmac t1_ixf1010_ops;
+extern struct gmac t1_dummy_mac_ops;
+
+#endif /* _CXGB_GMAC_H_ */
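Editorial note: everything MAC-specific is reached through the cmac_ops vtable and the per-chip struct gmac table declared above. A hedged sketch of how probe code might bring a PM3393 port up through these hooks (example_bring_up_mac is hypothetical; adapter/port plumbing and error handling are assumed):

    static int example_bring_up_mac(adapter_t *adapter)
    {
            struct cmac *mac = t1_pm3393_ops.create(adapter, 0);

            if (!mac)
                    return -ENOMEM;
            mac->ops->set_mtu(mac, 1500);
            /* MAC_DIRECTION_RX | MAC_DIRECTION_TX enables both directions */
            mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
            return 0;
    }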
diff --git a/drivers/net/chelsio/mv88x201x.c b/drivers/net/chelsio/mv88x201x.c
new file mode 100644 (file)
index 0000000..db50342
--- /dev/null
@@ -0,0 +1,252 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: mv88x201x.c                                                         *
+ * $Revision: 1.12 $                                                         *
+ * $Date: 2005/04/15 19:27:14 $                                              *
+ * Description:                                                              *
+ *  Marvell PHY (mv88x201x) functionality.                                   *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "cphy.h"
+#include "elmer0.h"
+
+/*
+ * The 88x2010 Rev C. requires some link status registers to be read
+ * twice in order to get the right values. Future revisions will fix
+ * this problem and then this macro can disappear.
+ */
+#define MV88x2010_LINK_STATUS_BUGS    1
+
+static int led_init(struct cphy *cphy)
+{
+       /* Set up the LED registers so we can turn the LEDs on/off.
+        * Writing these bits maps LED control to another
+        * register: mmd(0x1) addr(0x7).
+        */
+       mdio_write(cphy, 0x3, 0x8304, 0xdddd);
+       return 0;
+}
+
+static int led_link(struct cphy *cphy, u32 do_enable)
+{
+       u32 led = 0;
+#define LINK_ENABLE_BIT 0x1
+
+       mdio_read(cphy, 0x1, 0x7, &led);
+
+       if (do_enable & LINK_ENABLE_BIT) {
+               led |= LINK_ENABLE_BIT;
+               mdio_write(cphy, 0x1, 0x7, led);
+       } else {
+               led &= ~LINK_ENABLE_BIT;
+               mdio_write(cphy, 0x1, 0x7, led);
+       }
+       return 0;
+}
+
+/* Port Reset */
+static int mv88x201x_reset(struct cphy *cphy, int wait)
+{
+       /* This can be done through registers.  It is not required since
+        * a full chip reset is used.
+        */
+       return 0;
+}
+
+static int mv88x201x_interrupt_enable(struct cphy *cphy)
+{
+       u32 elmer;
+
+       /* Enable PHY LASI interrupts. */
+       mdio_write(cphy, 0x1, 0x9002, 0x1);
+
+       /* Enable Marvell interrupts through Elmer0. */
+       t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+       elmer |= ELMER0_GP_BIT6;
+       t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+       return 0;
+}
+
+static int mv88x201x_interrupt_disable(struct cphy *cphy)
+{
+       u32 elmer;
+
+       /* Disable PHY LASI interrupts. */
+       mdio_write(cphy, 0x1, 0x9002, 0x0);
+
+       /* Disable Marvell interrupts through Elmer0. */
+       t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+       elmer &= ~ELMER0_GP_BIT6;
+       t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+       return 0;
+}
+
+static int mv88x201x_interrupt_clear(struct cphy *cphy)
+{
+       u32 elmer;
+       u32 val;
+
+#ifdef MV88x2010_LINK_STATUS_BUGS
+       /* Required to read twice before the clear takes effect. */
+       mdio_read(cphy, 0x1, 0x9003, &val);
+       mdio_read(cphy, 0x1, 0x9004, &val);
+       mdio_read(cphy, 0x1, 0x9005, &val);
+
+       /* Read this register after the others above it, otherwise
+        * it doesn't clear correctly.
+        */
+       mdio_read(cphy, 0x1, 0x1, &val);
+#endif
+
+       /* Clear link status. */
+       mdio_read(cphy, 0x1, 0x1, &val);
+       /* Clear PHY LASI interrupts. */
+       mdio_read(cphy, 0x1, 0x9005, &val);
+
+#ifdef MV88x2010_LINK_STATUS_BUGS
+       /* Do it again. */
+       mdio_read(cphy, 0x1, 0x9003, &val);
+       mdio_read(cphy, 0x1, 0x9004, &val);
+#endif
+
+       /* Clear Marvell interrupts through Elmer0. */
+       t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
+       elmer |= ELMER0_GP_BIT6;
+       t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
+       return 0;
+}
+
+static int mv88x201x_interrupt_handler(struct cphy *cphy)
+{
+       /* Clear interrupts */
+       mv88x201x_interrupt_clear(cphy);
+
+       /* We have only enabled link change interrupts and so
+        * cphy_cause must be a link change interrupt.
+        */
+       return cphy_cause_link_change;
+}
+
+static int mv88x201x_set_loopback(struct cphy *cphy, int on)
+{
+       return 0;
+}
+
+static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok,
+                                    int *speed, int *duplex, int *fc)
+{
+       u32 val = 0;
+#define LINK_STATUS_BIT 0x4
+
+       if (link_ok) {
+               /* Read link status. */
+               mdio_read(cphy, 0x1, 0x1, &val);
+               val &= LINK_STATUS_BIT;
+               *link_ok = (val == LINK_STATUS_BIT);
+               /* Turn on/off Link LED */
+               led_link(cphy, *link_ok);
+       }
+       if (speed)
+               *speed = SPEED_10000;
+       if (duplex)
+               *duplex = DUPLEX_FULL;
+       if (fc)
+               *fc = PAUSE_RX | PAUSE_TX;
+       return 0;
+}
+
+static void mv88x201x_destroy(struct cphy *cphy)
+{
+       kfree(cphy);
+}
+
+static struct cphy_ops mv88x201x_ops = {
+       .destroy           = mv88x201x_destroy,
+       .reset             = mv88x201x_reset,
+       .interrupt_enable  = mv88x201x_interrupt_enable,
+       .interrupt_disable = mv88x201x_interrupt_disable,
+       .interrupt_clear   = mv88x201x_interrupt_clear,
+       .interrupt_handler = mv88x201x_interrupt_handler,
+       .get_link_status   = mv88x201x_get_link_status,
+       .set_loopback      = mv88x201x_set_loopback,
+};
+
+static struct cphy *mv88x201x_phy_create(adapter_t *adapter, int phy_addr,
+                                        struct mdio_ops *mdio_ops)
+{
+       u32 val;
+       struct cphy *cphy = kmalloc(sizeof(*cphy), GFP_KERNEL);
+
+       if (!cphy)
+               return NULL;
+       memset(cphy, 0, sizeof(*cphy));
+       cphy_init(cphy, adapter, phy_addr, &mv88x201x_ops, mdio_ops);
+
+       /* Commands the PHY to enable XFP's clock. */
+       mdio_read(cphy, 0x3, 0x8300, &val);
+       mdio_write(cphy, 0x3, 0x8300, val | 1);
+
+       /* Clear link status. Required because of a bug in the PHY.  */
+       mdio_read(cphy, 0x1, 0x8, &val);
+       mdio_read(cphy, 0x3, 0x8, &val);
+
+       /* Allows the Link and Ack LEDs to be turned on/off */
+       led_init(cphy);
+       return cphy;
+}
+
+/* Chip Reset */
+static int mv88x201x_phy_reset(adapter_t *adapter)
+{
+       u32 val;
+
+       t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+       val &= ~4;
+       t1_tpi_write(adapter, A_ELMER0_GPO, val);
+       msleep(100);
+
+       t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
+       msleep(1000);
+
+       /* Now let's enable the laser. Delay 100us */
+       t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+       val |= 0x8000;
+       t1_tpi_write(adapter, A_ELMER0_GPO, val);
+       udelay(100);
+       return 0;
+}
+
+struct gphy t1_mv88x201x_ops = {
+       mv88x201x_phy_create,
+       mv88x201x_phy_reset
+};
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
new file mode 100644 (file)
index 0000000..04a1404
--- /dev/null
@@ -0,0 +1,826 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: pm3393.c                                                            *
+ * $Revision: 1.16 $                                                         *
+ * $Date: 2005/05/14 00:59:32 $                                              *
+ * Description:                                                              *
+ *  PMC/SIERRA (pm3393) MAC-PHY functionality.                               *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+#include "regs.h"
+#include "gmac.h"
+#include "elmer0.h"
+#include "suni1x10gexp_regs.h"
+
+/* 802.3ae 10Gb/s MDIO Manageable Device (MMD) */
+enum {
+    MMD_RESERVED,
+    MMD_PMAPMD,
+    MMD_WIS,
+    MMD_PCS,
+    MMD_PHY_XGXS,      /* XGMII Extender Sublayer */
+    MMD_DTE_XGXS,
+};
+
+enum {
+    PHY_XGXS_CTRL_1,
+    PHY_XGXS_STATUS_1
+};
+
+#define OFFSET(REG_ADDR)    ((REG_ADDR) << 2)
+
+/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
+#define MAX_FRAME_SIZE  9600
+
+#define IPG 12
+#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
+       SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
+       SUNI1x10GEXP_BITMSK_TXXG_PADEN)
+#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \
+       SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP)
+
+/* Update statistics every 15 minutes */
+#define STATS_TICK_SECS (15 * 60)
+
+enum {                     /* RMON registers */
+       RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW,
+       RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW,
+       RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW,
+       RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW,
+       RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW,
+       RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW,
+       RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW,
+       RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW,
+       RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW,
+       RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW,
+       RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
+       RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
+       RxUndersizedFrames =  SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
+
+       TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
+       TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
+       TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW,
+       TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
+       TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
+       TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
+       TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW
+};
+
+struct _cmac_instance {
+       u8 enabled;
+       u8 fc;
+       u8 mac_addr[6];
+};
+
+static int pmread(struct cmac *cmac, u32 reg, u32 * data32)
+{
+       t1_tpi_read(cmac->adapter, OFFSET(reg), data32);
+       return 0;
+}
+
+static int pmwrite(struct cmac *cmac, u32 reg, u32 data32)
+{
+       t1_tpi_write(cmac->adapter, OFFSET(reg), data32);
+       return 0;
+}
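Editorial note: pmread()/pmwrite() only translate a PM3393 register index into a TPI address via OFFSET(), i.e. index << 2. A worked value, assuming that mapping:

    u32 v;

    pmread(cmac, 0x2040, &v);   /* RXXG config: TPI address 0x2040 << 2 == 0x8100 */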
+
+/* Port reset. */
+static int pm3393_reset(struct cmac *cmac)
+{
+       return 0;
+}
+
+/*
+ * Enable interrupts for the PM3393:
+ *
+ *     1. Enable PM3393 BLOCK interrupts.
+ *     2. Enable the PM3393 master interrupt bit (INTE).
+ *     3. Enable ELMER's PM3393 bit.
+ *     4. Enable the Terminator external interrupt.
+ */
+static int pm3393_interrupt_enable(struct cmac *cmac)
+{
+       u32 pl_intr;
+
+       /* PM3393 - Enabling all hardware block interrupts.
+        */
+       pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff);
+       pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff);
+       pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff);
+       pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff);
+
+       /* Don't interrupt on statistics overflow, we are polling */
+       pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
+
+       pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff);
+       pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff);
+       pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff);
+       pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff);
+       pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff);
+       pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff);
+       pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff);
+       pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff);
+       pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff);
+
+       /* PM3393 - Global interrupt enable
+        */
+       /* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */
+       pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
+               0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
+
+       /* TERMINATOR - PL_INTERUPTS_EXT */
+       pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
+       pl_intr |= F_PL_INTR_EXT;
+       writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
+       return 0;
+}
+
+static int pm3393_interrupt_disable(struct cmac *cmac)
+{
+       u32 elmer;
+
+       /* PM3393 - Disabling HW interrupt blocks. */
+       pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0);
+       pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0);
+
+       /* PM3393 - Global interrupt enable */
+       pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0);
+
+       /* ELMER - External chip interrupts. */
+       t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer);
+       elmer &= ~ELMER0_GP_BIT1;
+       t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
+
+       /* TERMINATOR - PL_INTERUPTS_EXT */
+       /* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
+        * COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
+        */
+
+       return 0;
+}
+
+static int pm3393_interrupt_clear(struct cmac *cmac)
+{
+       u32 elmer;
+       u32 pl_intr;
+       u32 val32;
+
+       /* PM3393 - Clearing HW interrupt blocks. Note, this assumes
+        *          bit WCIMODE=0 for a clear-on-read.
+        */
+       pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32);
+       pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32);
+       pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32);
+       pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32);
+       pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32);
+       pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32);
+       pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32);
+       pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32);
+       pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32);
+       pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32);
+       pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32);
+       pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION,
+              &val32);
+       pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32);
+       pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32);
+
+       /* PM3393 - Global interrupt status
+        */
+       pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32);
+
+       /* ELMER - External chip interrupts.
+        */
+       t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer);
+       elmer |= ELMER0_GP_BIT1;
+       t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
+
+       /* TERMINATOR - PL_INTERUPTS_EXT
+        */
+       pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
+       pl_intr |= F_PL_INTR_EXT;
+       writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE);
+
+       return 0;
+}
+
+/* Interrupt handler */
+static int pm3393_interrupt_handler(struct cmac *cmac)
+{
+       u32 master_intr_status;
+       /*
+        * 1. Read the master interrupt register.
+        * 2. Read the BLOCK interrupt status registers.
+        * 3. Handle the BLOCK interrupts.
+        */
+       /* Read the master interrupt status register. */
+       pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
+              &master_intr_status);
+
+       /* TBD XXX Let's just clear everything for now */
+       pm3393_interrupt_clear(cmac);
+
+       return 0;
+}
+
+static int pm3393_enable(struct cmac *cmac, int which)
+{
+       if (which & MAC_DIRECTION_RX)
+               pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1,
+                       (RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN));
+
+       if (which & MAC_DIRECTION_TX) {
+               u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0;
+
+               if (cmac->instance->fc & PAUSE_RX)
+                       val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX;
+               if (cmac->instance->fc & PAUSE_TX)
+                       val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX;
+               pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val);
+       }
+
+       cmac->instance->enabled |= which;
+       return 0;
+}
+
+static int pm3393_enable_port(struct cmac *cmac, int which)
+{
+       /* Clear port statistics */
+       pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
+               SUNI1x10GEXP_BITMSK_MSTAT_CLEAR);
+       udelay(2);
+       memset(&cmac->stats, 0, sizeof(struct cmac_statistics));
+
+       pm3393_enable(cmac, which);
+
+       /*
+        * XXX This should be done by the PHY and preferably not at all.
+        * The PHY doesn't give us link status indication on its own so have
+        * the link management code query it instead.
+        */
+       {
+               extern void link_changed(adapter_t *adapter, int port_id);
+
+               link_changed(cmac->adapter, 0);
+       }
+       return 0;
+}
+
+static int pm3393_disable(struct cmac *cmac, int which)
+{
+       if (which & MAC_DIRECTION_RX)
+               pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL);
+       if (which & MAC_DIRECTION_TX)
+               pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL);
+
+       /*
+        * The disable is graceful. Give the PM3393 time.  Can't wait very
+        * long here, we may be holding locks.
+        */
+       udelay(20);
+
+       cmac->instance->enabled &= ~which;
+       return 0;
+}
+
+static int pm3393_loopback_enable(struct cmac *cmac)
+{
+       return 0;
+}
+
+static int pm3393_loopback_disable(struct cmac *cmac)
+{
+       return 0;
+}
+
+static int pm3393_set_mtu(struct cmac *cmac, int mtu)
+{
+       int enabled = cmac->instance->enabled;
+
+       /* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
+       mtu += 14 + 4;
+       if (mtu > MAX_FRAME_SIZE)
+               return -EINVAL;
+
+       /* Disable Rx/Tx MAC before configuring it. */
+       if (enabled)
+               pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+
+       pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu);
+       pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu);
+
+       if (enabled)
+               pm3393_enable(cmac, enabled);
+       return 0;
+}
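Editorial arithmetic: since 14 bytes of Ethernet header and 4 bytes of FCS are added before the MAX_FRAME_SIZE check, the largest MTU pm3393_set_mtu() accepts is:

    int max_mtu = MAX_FRAME_SIZE - 14 - 4;      /* 9600 - 18 == 9582 */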
+
+static u32 calc_crc(u8 *b, int len)
+{
+       int i;
+       u32 crc = (u32)~0;
+
+       /* calculate crc one bit at a time */
+       while (len--) {
+               crc ^= *b++;
+               for (i = 0; i < 8; i++) {
+                       if (crc & 0x1)
+                               crc = (crc >> 1) ^ 0xedb88320;
+                       else
+                               crc = (crc >> 1);
+               }
+       }
+
+       /* reverse bits */
+       crc = ((crc >> 4) & 0x0f0f0f0f) | ((crc << 4) & 0xf0f0f0f0);
+       crc = ((crc >> 2) & 0x33333333) | ((crc << 2) & 0xcccccccc);
+       crc = ((crc >> 1) & 0x55555555) | ((crc << 1) & 0xaaaaaaaa);
+       /* swap bytes */
+       crc = (crc >> 16) | (crc << 16);
+       crc = (crc >> 8 & 0x00ff00ff) | (crc << 8 & 0xff00ff00);
+
+       return crc;
+}
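Editorial note: calc_crc() is a bit-reflected CRC-32 over the polynomial 0xedb88320, and pm3393_set_rx_mode() below uses six bits of the result to select one of 64 multicast hash bits spread across four 16-bit registers. A sketch of that bucket computation for a 6-byte address addr:

    u32 crc  = calc_crc(addr, 6);
    int bit  = (crc >> 23) & 0x3f;      /* bucket index, 0..63 */
    int reg  = bit >> 4;                /* which MULTICAST_HASH register */
    u16 mask = 1 << (bit & 0xf);        /* bit within that register */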
+
+static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
+{
+       int enabled = cmac->instance->enabled & MAC_DIRECTION_RX;
+       u32 rx_mode;
+
+       /* Disable MAC RX before reconfiguring it */
+       if (enabled)
+               pm3393_disable(cmac, MAC_DIRECTION_RX);
+
+       pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode);
+       rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE |
+                    SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN);
+       pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2,
+               (u16)rx_mode);
+
+       if (t1_rx_mode_promisc(rm)) {
+               /* Promiscuous mode. */
+               rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE;
+       }
+       if (t1_rx_mode_allmulti(rm)) {
+               /* Accept all multicast. */
+               pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff);
+               pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff);
+               pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff);
+               pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff);
+               rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
+       } else if (t1_rx_mode_mc_cnt(rm)) {
+               /* Accept one or more multicast(s). */
+               u8 *addr;
+               int bit;
+               u16 mc_filter[4] = { 0, };
+
+               while ((addr = t1_get_next_mcaddr(rm))) {
+                       bit = (calc_crc(addr, ETH_ALEN) >> 23) & 0x3f;  /* bit[23:28] */
+                       mc_filter[bit >> 4] |= 1 << (bit & 0xf);
+               }
+               pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
+               pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]);
+               pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]);
+               pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]);
+               rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
+       }
+
+       pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode);
+
+       if (enabled)
+               pm3393_enable(cmac, MAC_DIRECTION_RX);
+
+       return 0;
+}
+
+static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed,
+                                     int *duplex, int *fc)
+{
+       if (speed)
+               *speed = SPEED_10000;
+       if (duplex)
+               *duplex = DUPLEX_FULL;
+       if (fc)
+               *fc = cmac->instance->fc;
+       return 0;
+}
+
+static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
+                                     int fc)
+{
+       if (speed >= 0 && speed != SPEED_10000)
+               return -1;
+       if (duplex >= 0 && duplex != DUPLEX_FULL)
+               return -1;
+       if (fc & ~(PAUSE_TX | PAUSE_RX))
+               return -1;
+
+       if (fc != cmac->instance->fc) {
+               cmac->instance->fc = (u8) fc;
+               if (cmac->instance->enabled & MAC_DIRECTION_TX)
+                       pm3393_enable(cmac, MAC_DIRECTION_TX);
+       }
+       return 0;
+}
+
+#define RMON_UPDATE(mac, name, stat_name) \
+       { \
+               t1_tpi_read((mac)->adapter, OFFSET(name), &val0);       \
+               t1_tpi_read((mac)->adapter, OFFSET(((name)+1)), &val1); \
+               t1_tpi_read((mac)->adapter, OFFSET(((name)+2)), &val2); \
+               (mac)->stats.stat_name = ((u64)val0 & 0xffff) | \
+                                               (((u64)val1 & 0xffff) << 16) | \
+                                               (((u64)val2 & 0xff) << 32) | \
+                                               ((mac)->stats.stat_name & \
+                                                       (~(u64)0 << 40)); \
+               if (ro &        \
+                       ((name -  SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2)) \
+                       (mac)->stats.stat_name += ((u64)1 << 40); \
+       }
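Editorial illustration of what RMON_UPDATE assembles: each hardware counter is read as three consecutive 16-bit registers supplying bits [15:0], [31:16] and [39:32], while everything from bit 40 up stays software-maintained (and is bumped by 1 << 40 when the rollover bit is set). With old_counter standing in for the previous software value:

    u64 hw40 = ((u64)val0 & 0xffff) |
               (((u64)val1 & 0xffff) << 16) |
               (((u64)val2 & 0xff) << 32);
    u64 counter = hw40 | (old_counter & (~(u64)0 << 40));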
+
+static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
+                                                             int flag)
+{
+       u64     ro;
+       u32     val0, val1, val2, val3;
+
+       /* Snap the counters */
+       pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
+               SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
+
+       /* Counter rollover, clear on read */
+       pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0);
+       pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1);
+       pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2);
+       pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3);
+       ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
+               (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
+
+       /* Rx stats */
+       RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
+       RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
+       RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
+       RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
+       RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
+       RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
+       RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
+                               RxInternalMACRcvError);
+       RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
+       RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
+       RMON_UPDATE(mac, RxFramesTooLongErrors, RxFrameTooLongErrors);
+       RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
+       RMON_UPDATE(mac, RxFragments, RxRuntErrors);
+       RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
+
+       /* Tx stats */
+       RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
+       RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
+                               TxInternalMACXmitError);
+       RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
+       RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
+       RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
+       RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
+       RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
+
+       return &mac->stats;
+}
+
+static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
+{
+       memcpy(mac_addr, cmac->instance->mac_addr, 6);
+       return 0;
+}
+
+static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
+{
+       u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
+
+       /*
+        * MAC addr: 00:07:43:00:13:09
+        *
+        * ma[5] = 0x09
+        * ma[4] = 0x13
+        * ma[3] = 0x00
+        * ma[2] = 0x43
+        * ma[1] = 0x07
+        * ma[0] = 0x00
+        *
+        * The PM3393 requires byte swapping and reverse order entry
+        * when programming MAC addresses:
+        *
+        * low_bits[15:0]    = ma[1]:ma[0]
+        * mid_bits[31:16]   = ma[3]:ma[2]
+        * high_bits[47:32]  = ma[5]:ma[4]
+        */
+
+       /* Store local copy */
+       memcpy(cmac->instance->mac_addr, ma, 6);
+
+       lo = ((u32) ma[1] << 8) | (u32) ma[0];
+       mid = ((u32) ma[3] << 8) | (u32) ma[2];
+       hi = ((u32) ma[5] << 8) | (u32) ma[4];
+
+       /* Disable Rx/Tx MAC before configuring it. */
+       if (enabled)
+               pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+
+       /* Set RXXG Station Address */
+       pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo);
+       pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid);
+       pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi);
+
+       /* Set TXXG Station Address */
+       pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo);
+       pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid);
+       pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi);
+
+       /* Setup Exact Match Filter 1 with our MAC address
+        *
+        * Must disable exact match filter before configuring it.
+        */
+       pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val);
+       val &= 0xff0f;
+       pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
+
+       pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo);
+       pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid);
+       pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi);
+
+       val |= 0x0090;
+       pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
+
+       if (enabled)
+               pm3393_enable(cmac, enabled);
+       return 0;
+}
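Editorial check of the packing above, applied to the example address 00:07:43:00:13:09:

    lo  = 0x07 << 8 | 0x00;     /* 0x0700 -> SA_15_0  */
    mid = 0x00 << 8 | 0x43;     /* 0x0043 -> SA_31_16 */
    hi  = 0x09 << 8 | 0x13;     /* 0x0913 -> SA_47_32 */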
+
+static void pm3393_destroy(struct cmac *cmac)
+{
+       kfree(cmac);
+}
+
+static struct cmac_ops pm3393_ops = {
+       .destroy                 = pm3393_destroy,
+       .reset                   = pm3393_reset,
+       .interrupt_enable        = pm3393_interrupt_enable,
+       .interrupt_disable       = pm3393_interrupt_disable,
+       .interrupt_clear         = pm3393_interrupt_clear,
+       .interrupt_handler       = pm3393_interrupt_handler,
+       .enable                  = pm3393_enable_port,
+       .disable                 = pm3393_disable,
+       .loopback_enable         = pm3393_loopback_enable,
+       .loopback_disable        = pm3393_loopback_disable,
+       .set_mtu                 = pm3393_set_mtu,
+       .set_rx_mode             = pm3393_set_rx_mode,
+       .get_speed_duplex_fc     = pm3393_get_speed_duplex_fc,
+       .set_speed_duplex_fc     = pm3393_set_speed_duplex_fc,
+       .statistics_update       = pm3393_update_statistics,
+       .macaddress_get          = pm3393_macaddress_get,
+       .macaddress_set          = pm3393_macaddress_set
+};
+
+static struct cmac *pm3393_mac_create(adapter_t *adapter, int index)
+{
+       struct cmac *cmac;
+
+       cmac = kmalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
+       if (!cmac)
+               return NULL;
+       memset(cmac, 0, sizeof(*cmac));
+
+       cmac->ops = &pm3393_ops;
+       cmac->instance = (cmac_instance *) (cmac + 1);
+       cmac->adapter = adapter;
+       cmac->instance->fc = PAUSE_TX | PAUSE_RX;
+
+       t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000);
+       t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000);
+       t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800);
+       t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001);   /* PL4IO Enable */
+       t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800);
+       t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800);
+       t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800);
+       t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800);
+       t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800);
+       t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800);
+       t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800);
+       t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800);
+       t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800);
+       t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800);
+       t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800);
+       t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800);
+       t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800);
+       t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800);
+       t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800);
+       t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800);
+       t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00);
+       t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202);      /* PL4IO Calendar Repetitions */
+
+       t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080);      /* EFLX Enable */
+       t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000);      /* EFLX Channel Deprovision */
+       t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000);      /* EFLX Low Limit */
+       t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040);      /* EFLX High Limit */
+       t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc);      /* EFLX Almost Full */
+       t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199);      /* EFLX Almost Empty */
+       t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240);      /* EFLX Cut Through Threshold */
+       t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000);      /* EFLX Indirect Register Update */
+       t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001);      /* EFLX Channel Provision */
+       t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff);      /* EFLX Undocumented */
+       t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff);      /* EFLX Undocumented */
+       t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff);      /* EFLX enable overflow interrupt. The other bits are undocumented */
+       t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff);      /* EFLX Undocumented */
+
+       t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000);      /* IFLX Configuration - enable */
+       t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000);      /* IFLX Channel Deprovision */
+       t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000);      /* IFLX Low Limit */
+       t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100);      /* IFLX High Limit */
+       t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00);      /* IFLX Almost Full Limit */
+       t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599);      /* IFLX Almost Empty Limit */
+       t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000);      /* IFLX Indirect Register Update */
+       t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001);      /* IFLX Channel Provision */
+       t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff);      /* IFLX Undocumented */
+       t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff);      /* IFLX Undocumented */
+       t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff);      /* IFLX Enable overflow interrupt. The other bits are undocumented */
+
+       t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe);      /* PL4MOS Undocumented */
+       t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff);      /* PL4MOS Undocumented */
+       t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008);      /* PL4MOS Starving Burst Size */
+       t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008);      /* PL4MOS Hungry Burst Size */
+       t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008);      /* PL4MOS Transfer Size */
+       t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005);      /* PL4MOS Disable */
+
+       t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103);      /* PL4ODP Training Repeat and SOP rule */
+       t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000);      /* PL4ODP MAX_T setting */
+
+       t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087);      /* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */
+       t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f);      /* PL4IDU Enable Dip4 check error interrupts */
+
+       t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32);  /* # TXXG Config */
+       /* For T1 use timer based Mac flow control. */
+       t1_tpi_write(adapter, OFFSET(0x304d), 0x8000);
+       t1_tpi_write(adapter, OFFSET(0x2040), 0x059c);  /* # RXXG Config */
+       t1_tpi_write(adapter, OFFSET(0x2049), 0x0001);  /* # RXXG Cut Through */
+       t1_tpi_write(adapter, OFFSET(0x2070), 0x0000);  /* # Disable promiscuous mode */
+
+       /* Setup Exact Match Filter 0 to allow broadcast packets.
+        */
+       t1_tpi_write(adapter, OFFSET(0x206e), 0x0000);  /* # Disable Match Enable bit */
+       t1_tpi_write(adapter, OFFSET(0x204a), 0xffff);  /* # low addr */
+       t1_tpi_write(adapter, OFFSET(0x204b), 0xffff);  /* # mid addr */
+       t1_tpi_write(adapter, OFFSET(0x204c), 0xffff);  /* # high addr */
+       t1_tpi_write(adapter, OFFSET(0x206e), 0x0009);  /* # Enable Match Enable bit */
+
+       t1_tpi_write(adapter, OFFSET(0x0003), 0x0000);  /* # NO SOP/ PAD_EN setup */
+       t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0);  /* # RXEQB disabled */
+       t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f);  /* # No Preemphasis */
+
+       return cmac;
+}
+
+static int pm3393_mac_reset(adapter_t * adapter)
+{
+       u32 val;
+       u32 x;
+       u32 is_pl4_reset_finished;
+       u32 is_pl4_outof_lock;
+       u32 is_xaui_mabc_pll_locked;
+       u32 successful_reset;
+       int i;
+
+       /* The following steps are required to properly reset
+        * the PM3393. This information is provided in the
+        * PM3393 datasheet (Issue 2: November 2002)
+        * section 13.1 -- Device Reset.
+        *
+        * The PM3393 has three types of components that are
+        * individually reset:
+        *
+        * DRESETB      - Digital circuitry
+        * PL4_ARESETB  - PL4 analog circuitry
+        * XAUI_ARESETB - XAUI bus analog circuitry
+        *
+        * Steps to reset PM3393 using RSTB pin:
+        *
+        * 1. Assert RSTB pin low ( write 0 )
+        * 2. Wait at least 1ms to initiate a complete initialization of device.
+        * 3. Wait until all external clocks and REFSEL are stable.
+        * 4. Wait a minimum of 1ms (after external clocks and REFSEL are stable).
+        * 5. De-assert RSTB ( write 1 )
+        * 6. Wait until the internal timers expire, after ~14ms.
+        *    - Allows the analog clock synthesizer (PL4CSU) to stabilize to the
+        *      selected reference frequency before allowing the digital
+        *      portion of the device to operate.
+        * 7. Wait at least 200us for XAUI interface to stabilize.
+        * 8. Verify the PM3393 came out of reset successfully.
+        *    Set the successful-reset flag if everything worked, otherwise
+        *    try again a few more times.
+        */
+
+       successful_reset = 0;
+       for (i = 0; i < 3 && !successful_reset; i++) {
+               /* 1 */
+               t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+               val &= ~1;
+               t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+               /* 2 */
+               msleep(1);
+
+               /* 3 */
+               msleep(1);
+
+               /* 4 */
+               msleep(2 /*1 extra ms for safety */ );
+
+               /* 5 */
+               val |= 1;
+               t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+               /* 6 */
+               msleep(15 /*1 extra ms for safety */ );
+
+               /* 7 */
+               msleep(1);
+
+               /* 8 */
+
+               /* Has PL4 analog block come out of reset correctly? */
+               t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val);
+               is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED);
+
+               /* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL gets locked later
+                * in the init sequence; figure out why. */
+
+               /* Have all PL4 block clocks locked? */
+               x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL
+                    /*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */  |
+                    SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL |
+                    SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL |
+                    SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL);
+               is_pl4_outof_lock = (val & x);
+
+               /* ??? If this fails, we might be able to software-reset the
+                *     XAUI part and try to recover, saving us another HW reset. */
+               /* Has the XAUI MABC PLL circuitry stabilized? */
+               is_xaui_mabc_pll_locked =
+                   (val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
+
+               successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
+                                   && is_xaui_mabc_pll_locked);
+       }
+       return successful_reset ? 0 : 1;
+}
+
+struct gmac t1_pm3393_ops = {
+       STATS_TICK_SECS,
+       pm3393_mac_create,
+       pm3393_mac_reset
+};
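Editorial note: t1_pm3393_ops publishes a 15-minute stats_update_period alongside its create/reset entry points. A hedged sketch of the periodic refresh the core presumably performs so the 40-bit hardware counters stitched by RMON_UPDATE cannot wrap unnoticed (example_stats_tick is hypothetical):

    static void example_stats_tick(struct cmac *mac)
    {
            const struct cmac_statistics *st =
                    mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);

            (void)st;           /* e.g. fold into the netdev statistics */
    }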
diff --git a/drivers/net/chelsio/regs.h b/drivers/net/chelsio/regs.h
new file mode 100644 (file)
index 0000000..b90e11f
--- /dev/null
@@ -0,0 +1,468 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: regs.h                                                              *
+ * $Revision: 1.8 $                                                          *
+ * $Date: 2005/06/21 18:29:48 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_REGS_H_
+#define _CXGB_REGS_H_
+
+/* SGE registers */
+#define A_SG_CONTROL 0x0
+
+#define S_CMDQ0_ENABLE    0
+#define V_CMDQ0_ENABLE(x) ((x) << S_CMDQ0_ENABLE)
+#define F_CMDQ0_ENABLE    V_CMDQ0_ENABLE(1U)
+
+#define S_CMDQ1_ENABLE    1
+#define V_CMDQ1_ENABLE(x) ((x) << S_CMDQ1_ENABLE)
+#define F_CMDQ1_ENABLE    V_CMDQ1_ENABLE(1U)
+
+#define S_FL0_ENABLE    2
+#define V_FL0_ENABLE(x) ((x) << S_FL0_ENABLE)
+#define F_FL0_ENABLE    V_FL0_ENABLE(1U)
+
+#define S_FL1_ENABLE    3
+#define V_FL1_ENABLE(x) ((x) << S_FL1_ENABLE)
+#define F_FL1_ENABLE    V_FL1_ENABLE(1U)
+
+#define S_CPL_ENABLE    4
+#define V_CPL_ENABLE(x) ((x) << S_CPL_ENABLE)
+#define F_CPL_ENABLE    V_CPL_ENABLE(1U)
+
+#define S_RESPONSE_QUEUE_ENABLE    5
+#define V_RESPONSE_QUEUE_ENABLE(x) ((x) << S_RESPONSE_QUEUE_ENABLE)
+#define F_RESPONSE_QUEUE_ENABLE    V_RESPONSE_QUEUE_ENABLE(1U)
+
+#define S_CMDQ_PRIORITY    6
+#define M_CMDQ_PRIORITY    0x3
+#define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY)
+#define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY)
+
+#define S_DISABLE_CMDQ1_GTS    9
+#define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS)
+#define F_DISABLE_CMDQ1_GTS    V_DISABLE_CMDQ1_GTS(1U)
+
+#define S_DISABLE_FL0_GTS    10
+#define V_DISABLE_FL0_GTS(x) ((x) << S_DISABLE_FL0_GTS)
+#define F_DISABLE_FL0_GTS    V_DISABLE_FL0_GTS(1U)
+
+#define S_DISABLE_FL1_GTS    11
+#define V_DISABLE_FL1_GTS(x) ((x) << S_DISABLE_FL1_GTS)
+#define F_DISABLE_FL1_GTS    V_DISABLE_FL1_GTS(1U)
+
+#define S_ENABLE_BIG_ENDIAN    12
+#define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN)
+#define F_ENABLE_BIG_ENDIAN    V_ENABLE_BIG_ENDIAN(1U)
+
+#define S_ISCSI_COALESCE    14
+#define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE)
+#define F_ISCSI_COALESCE    V_ISCSI_COALESCE(1U)
+
+#define S_RX_PKT_OFFSET    15
+#define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET)
+
+#define S_VLAN_XTRACT    18
+#define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT)
+#define F_VLAN_XTRACT    V_VLAN_XTRACT(1U)
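Editorial note on the naming convention used throughout this header: S_ is a field's bit offset, M_ its mask, V_(x) places a value at that offset, F_ is V_(1U) for single-bit flags, and G_(reg) extracts the field from a register value. For example, with the SG control fields above:

    u32 ctrl = F_CMDQ0_ENABLE | V_CMDQ_PRIORITY(2);
    unsigned int prio = G_CMDQ_PRIORITY(ctrl);      /* == 2 */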
+
+#define A_SG_DOORBELL 0x4
+#define A_SG_CMD0BASELWR 0x8
+#define A_SG_CMD0BASEUPR 0xc
+#define A_SG_CMD1BASELWR 0x10
+#define A_SG_CMD1BASEUPR 0x14
+#define A_SG_FL0BASELWR 0x18
+#define A_SG_FL0BASEUPR 0x1c
+#define A_SG_FL1BASELWR 0x20
+#define A_SG_FL1BASEUPR 0x24
+#define A_SG_CMD0SIZE 0x28
+#define A_SG_FL0SIZE 0x2c
+#define A_SG_RSPSIZE 0x30
+#define A_SG_RSPBASELWR 0x34
+#define A_SG_RSPBASEUPR 0x38
+#define A_SG_FLTHRESHOLD 0x3c
+#define A_SG_RSPQUEUECREDIT 0x40
+#define A_SG_SLEEPING 0x48
+#define A_SG_INTRTIMER 0x4c
+#define A_SG_CMD1SIZE 0xb0
+#define A_SG_FL1SIZE 0xb4
+#define A_SG_INT_ENABLE 0xb8
+
+#define S_RESPQ_EXHAUSTED    0
+#define V_RESPQ_EXHAUSTED(x) ((x) << S_RESPQ_EXHAUSTED)
+#define F_RESPQ_EXHAUSTED    V_RESPQ_EXHAUSTED(1U)
+
+#define S_RESPQ_OVERFLOW    1
+#define V_RESPQ_OVERFLOW(x) ((x) << S_RESPQ_OVERFLOW)
+#define F_RESPQ_OVERFLOW    V_RESPQ_OVERFLOW(1U)
+
+#define S_FL_EXHAUSTED    2
+#define V_FL_EXHAUSTED(x) ((x) << S_FL_EXHAUSTED)
+#define F_FL_EXHAUSTED    V_FL_EXHAUSTED(1U)
+
+#define S_PACKET_TOO_BIG    3
+#define V_PACKET_TOO_BIG(x) ((x) << S_PACKET_TOO_BIG)
+#define F_PACKET_TOO_BIG    V_PACKET_TOO_BIG(1U)
+
+#define S_PACKET_MISMATCH    4
+#define V_PACKET_MISMATCH(x) ((x) << S_PACKET_MISMATCH)
+#define F_PACKET_MISMATCH    V_PACKET_MISMATCH(1U)
+
+#define A_SG_INT_CAUSE 0xbc
+#define A_SG_RESPACCUTIMER 0xc0
+
+/* MC3 registers */
+
+#define S_READY    1
+#define V_READY(x) ((x) << S_READY)
+#define F_READY    V_READY(1U)
+
+/* MC4 registers */
+
+#define A_MC4_CFG 0x180
+#define S_MC4_SLOW    25
+#define V_MC4_SLOW(x) ((x) << S_MC4_SLOW)
+#define F_MC4_SLOW    V_MC4_SLOW(1U)
+
+/* TPI registers */
+
+#define A_TPI_ADDR 0x280
+#define A_TPI_WR_DATA 0x284
+#define A_TPI_RD_DATA 0x288
+#define A_TPI_CSR 0x28c
+
+#define S_TPIWR    0
+#define V_TPIWR(x) ((x) << S_TPIWR)
+#define F_TPIWR    V_TPIWR(1U)
+
+#define S_TPIRDY    1
+#define V_TPIRDY(x) ((x) << S_TPIRDY)
+#define F_TPIRDY    V_TPIRDY(1U)
+
+#define A_TPI_PAR 0x29c
+
+#define S_TPIPAR    0
+#define M_TPIPAR    0x7f
+#define V_TPIPAR(x) ((x) << S_TPIPAR)
+#define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR)
+
+/* TP registers */
+
+#define A_TP_IN_CONFIG 0x300
+
+#define S_TP_IN_CSPI_CPL    3
+#define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL)
+#define F_TP_IN_CSPI_CPL    V_TP_IN_CSPI_CPL(1U)
+
+#define S_TP_IN_CSPI_CHECK_IP_CSUM    5
+#define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM)
+#define F_TP_IN_CSPI_CHECK_IP_CSUM    V_TP_IN_CSPI_CHECK_IP_CSUM(1U)
+
+#define S_TP_IN_CSPI_CHECK_TCP_CSUM    6
+#define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM)
+#define F_TP_IN_CSPI_CHECK_TCP_CSUM    V_TP_IN_CSPI_CHECK_TCP_CSUM(1U)
+
+#define S_TP_IN_ESPI_ETHERNET    8
+#define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET)
+#define F_TP_IN_ESPI_ETHERNET    V_TP_IN_ESPI_ETHERNET(1U)
+
+#define S_TP_IN_ESPI_CHECK_IP_CSUM    12
+#define V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM)
+#define F_TP_IN_ESPI_CHECK_IP_CSUM    V_TP_IN_ESPI_CHECK_IP_CSUM(1U)
+
+#define S_TP_IN_ESPI_CHECK_TCP_CSUM    13
+#define V_TP_IN_ESPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_TCP_CSUM)
+#define F_TP_IN_ESPI_CHECK_TCP_CSUM    V_TP_IN_ESPI_CHECK_TCP_CSUM(1U)
+
+#define S_OFFLOAD_DISABLE    14
+#define V_OFFLOAD_DISABLE(x) ((x) << S_OFFLOAD_DISABLE)
+#define F_OFFLOAD_DISABLE    V_OFFLOAD_DISABLE(1U)
+
+#define A_TP_OUT_CONFIG 0x304
+
+#define S_TP_OUT_CSPI_CPL    2
+#define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL)
+#define F_TP_OUT_CSPI_CPL    V_TP_OUT_CSPI_CPL(1U)
+
+#define S_TP_OUT_ESPI_ETHERNET    6
+#define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET)
+#define F_TP_OUT_ESPI_ETHERNET    V_TP_OUT_ESPI_ETHERNET(1U)
+
+#define S_TP_OUT_ESPI_GENERATE_IP_CSUM    10
+#define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM)
+#define F_TP_OUT_ESPI_GENERATE_IP_CSUM    V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U)
+
+#define S_TP_OUT_ESPI_GENERATE_TCP_CSUM    11
+#define V_TP_OUT_ESPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_TCP_CSUM)
+#define F_TP_OUT_ESPI_GENERATE_TCP_CSUM    V_TP_OUT_ESPI_GENERATE_TCP_CSUM(1U)
+
+#define A_TP_GLOBAL_CONFIG 0x308
+
+#define S_IP_TTL    0
+#define M_IP_TTL    0xff
+#define V_IP_TTL(x) ((x) << S_IP_TTL)
+
+#define S_TCP_CSUM    11
+#define V_TCP_CSUM(x) ((x) << S_TCP_CSUM)
+#define F_TCP_CSUM    V_TCP_CSUM(1U)
+
+#define S_UDP_CSUM    12
+#define V_UDP_CSUM(x) ((x) << S_UDP_CSUM)
+#define F_UDP_CSUM    V_UDP_CSUM(1U)
+
+#define S_IP_CSUM    13
+#define V_IP_CSUM(x) ((x) << S_IP_CSUM)
+#define F_IP_CSUM    V_IP_CSUM(1U)
+
+#define S_PATH_MTU    15
+#define V_PATH_MTU(x) ((x) << S_PATH_MTU)
+#define F_PATH_MTU    V_PATH_MTU(1U)
+
+#define S_5TUPLE_LOOKUP    17
+#define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP)
+
+#define S_SYN_COOKIE_PARAMETER    26
+#define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER)
+
+#define A_TP_PC_CONFIG 0x348
+#define S_DIS_TX_FILL_WIN_PUSH    12
+#define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH)
+#define F_DIS_TX_FILL_WIN_PUSH    V_DIS_TX_FILL_WIN_PUSH(1U)
+
+#define S_TP_PC_REV    30
+#define M_TP_PC_REV    0x3
+#define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV)
+#define A_TP_RESET 0x44c
+#define S_TP_RESET    0
+#define V_TP_RESET(x) ((x) << S_TP_RESET)
+#define F_TP_RESET    V_TP_RESET(1U)
+
+#define A_TP_INT_ENABLE 0x470
+#define A_TP_INT_CAUSE 0x474
+#define A_TP_TX_DROP_CONFIG 0x4b8
+
+#define S_ENABLE_TX_DROP    31
+#define V_ENABLE_TX_DROP(x) ((x) << S_ENABLE_TX_DROP)
+#define F_ENABLE_TX_DROP    V_ENABLE_TX_DROP(1U)
+
+#define S_ENABLE_TX_ERROR    30
+#define V_ENABLE_TX_ERROR(x) ((x) << S_ENABLE_TX_ERROR)
+#define F_ENABLE_TX_ERROR    V_ENABLE_TX_ERROR(1U)
+
+#define S_DROP_TICKS_CNT    4
+#define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT)
+
+#define S_NUM_PKTS_DROPPED    0
+#define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED)
+
+/* CSPI registers */
+
+#define S_DIP4ERR    0
+#define V_DIP4ERR(x) ((x) << S_DIP4ERR)
+#define F_DIP4ERR    V_DIP4ERR(1U)
+
+#define S_RXDROP    1
+#define V_RXDROP(x) ((x) << S_RXDROP)
+#define F_RXDROP    V_RXDROP(1U)
+
+#define S_TXDROP    2
+#define V_TXDROP(x) ((x) << S_TXDROP)
+#define F_TXDROP    V_TXDROP(1U)
+
+#define S_RXOVERFLOW    3
+#define V_RXOVERFLOW(x) ((x) << S_RXOVERFLOW)
+#define F_RXOVERFLOW    V_RXOVERFLOW(1U)
+
+#define S_RAMPARITYERR    4
+#define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR)
+#define F_RAMPARITYERR    V_RAMPARITYERR(1U)
+
+/* ESPI registers */
+
+#define A_ESPI_SCH_TOKEN0 0x880
+#define A_ESPI_SCH_TOKEN1 0x884
+#define A_ESPI_SCH_TOKEN2 0x888
+#define A_ESPI_SCH_TOKEN3 0x88c
+#define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890
+#define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894
+#define A_ESPI_CALENDAR_LENGTH 0x898
+#define A_PORT_CONFIG 0x89c
+
+#define S_RX_NPORTS    0
+#define V_RX_NPORTS(x) ((x) << S_RX_NPORTS)
+
+#define S_TX_NPORTS    8
+#define V_TX_NPORTS(x) ((x) << S_TX_NPORTS)
+
+#define A_ESPI_FIFO_STATUS_ENABLE 0x8a0
+
+#define S_RXSTATUSENABLE    0
+#define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE)
+#define F_RXSTATUSENABLE    V_RXSTATUSENABLE(1U)
+
+#define S_INTEL1010MODE    4
+#define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE)
+#define F_INTEL1010MODE    V_INTEL1010MODE(1U)
+
+#define A_ESPI_MAXBURST1_MAXBURST2 0x8a8
+#define A_ESPI_TRAIN 0x8ac
+#define A_ESPI_INTR_STATUS 0x8c8
+
+#define S_DIP2PARITYERR    5
+#define V_DIP2PARITYERR(x) ((x) << S_DIP2PARITYERR)
+#define F_DIP2PARITYERR    V_DIP2PARITYERR(1U)
+
+#define A_ESPI_INTR_ENABLE 0x8cc
+#define A_RX_DROP_THRESHOLD 0x8d0
+#define A_ESPI_RX_RESET 0x8ec
+#define A_ESPI_MISC_CONTROL 0x8f0
+
+#define S_OUT_OF_SYNC_COUNT    0
+#define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT)
+
+#define S_DIP2_PARITY_ERR_THRES    5
+#define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES)
+
+#define S_DIP4_THRES    9
+#define V_DIP4_THRES(x) ((x) << S_DIP4_THRES)
+
+#define S_MONITORED_PORT_NUM    25
+#define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM)
+
+#define S_MONITORED_DIRECTION    27
+#define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION)
+#define F_MONITORED_DIRECTION    V_MONITORED_DIRECTION(1U)
+
+#define S_MONITORED_INTERFACE    28
+#define V_MONITORED_INTERFACE(x) ((x) << S_MONITORED_INTERFACE)
+#define F_MONITORED_INTERFACE    V_MONITORED_INTERFACE(1U)
+
+#define A_ESPI_DIP2_ERR_COUNT 0x8f4
+#define A_ESPI_CMD_ADDR 0x8f8
+
+#define S_WRITE_DATA    0
+#define V_WRITE_DATA(x) ((x) << S_WRITE_DATA)
+
+#define S_REGISTER_OFFSET    8
+#define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET)
+
+#define S_CHANNEL_ADDR    12
+#define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR)
+
+#define S_MODULE_ADDR    16
+#define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR)
+
+#define S_BUNDLE_ADDR    20
+#define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR)
+
+#define S_SPI4_COMMAND    24
+#define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND)
+
+#define A_ESPI_GOSTAT 0x8fc
+#define S_ESPI_CMD_BUSY    8
+#define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY)
+#define F_ESPI_CMD_BUSY    V_ESPI_CMD_BUSY(1U)
+
+/* PL registers */
+
+#define A_PL_ENABLE 0xa00
+
+#define S_PL_INTR_SGE_ERR    0
+#define V_PL_INTR_SGE_ERR(x) ((x) << S_PL_INTR_SGE_ERR)
+#define F_PL_INTR_SGE_ERR    V_PL_INTR_SGE_ERR(1U)
+
+#define S_PL_INTR_SGE_DATA    1
+#define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA)
+#define F_PL_INTR_SGE_DATA    V_PL_INTR_SGE_DATA(1U)
+
+#define S_PL_INTR_TP    6
+#define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP)
+#define F_PL_INTR_TP    V_PL_INTR_TP(1U)
+
+#define S_PL_INTR_ESPI    8
+#define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI)
+#define F_PL_INTR_ESPI    V_PL_INTR_ESPI(1U)
+
+#define S_PL_INTR_PCIX    10
+#define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX)
+#define F_PL_INTR_PCIX    V_PL_INTR_PCIX(1U)
+
+#define S_PL_INTR_EXT    11
+#define V_PL_INTR_EXT(x) ((x) << S_PL_INTR_EXT)
+#define F_PL_INTR_EXT    V_PL_INTR_EXT(1U)
+
+#define A_PL_CAUSE 0xa04
+
+/* MC5 registers */
+
+#define A_MC5_CONFIG 0xc04
+
+#define S_TCAM_RESET    1
+#define V_TCAM_RESET(x) ((x) << S_TCAM_RESET)
+#define F_TCAM_RESET    V_TCAM_RESET(1U)
+
+#define S_M_BUS_ENABLE    5
+#define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE)
+#define F_M_BUS_ENABLE    V_M_BUS_ENABLE(1U)
+
+/* PCICFG registers */
+
+#define A_PCICFG_PM_CSR 0x44
+#define A_PCICFG_VPD_ADDR 0x4a
+
+#define S_VPD_OP_FLAG    15
+#define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG)
+#define F_VPD_OP_FLAG    V_VPD_OP_FLAG(1U)
+
+#define A_PCICFG_VPD_DATA 0x4c
+
+#define A_PCICFG_INTR_ENABLE 0xf4
+#define A_PCICFG_INTR_CAUSE 0xf8
+
+#define A_PCICFG_MODE 0xfc
+
+#define S_PCI_MODE_64BIT    0
+#define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT)
+#define F_PCI_MODE_64BIT    V_PCI_MODE_64BIT(1U)
+
+#define S_PCI_MODE_PCIX    5
+#define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX)
+#define F_PCI_MODE_PCIX    V_PCI_MODE_PCIX(1U)
+
+#define S_PCI_MODE_CLK    6
+#define M_PCI_MODE_CLK    0x3
+#define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK)
+
+#endif /* _CXGB_REGS_H_ */
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
new file mode 100644 (file)
index 0000000..53b41d9
--- /dev/null
@@ -0,0 +1,1684 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: sge.c                                                               *
+ * $Revision: 1.26 $                                                         *
+ * $Date: 2005/06/21 18:29:48 $                                              *
+ * Description:                                                              *
+ *  DMA engine.                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+
+#include "cpl5_cmd.h"
+#include "sge.h"
+#include "regs.h"
+#include "espi.h"
+
+
+#ifdef NETIF_F_TSO
+#include <linux/tcp.h>
+#endif
+
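+/*
+ * Ring geometry and RX tuning constants.  SGE_CMDQ_N and SGE_FREELQ_N are
+ * the number of command and free-list rings, SGE_RX_COPY_THRES is the
+ * copy-break threshold below which received frames are copied into a fresh
+ * skb and the original buffer recycled (see get_packet()), and
+ * SGE_RESPQ_REPLENISH_THRES controls how often response-queue credits are
+ * returned to the hardware.
+ */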
+#define SGE_CMDQ_N             2
+#define SGE_FREELQ_N           2
+#define SGE_CMDQ0_E_N          1024
+#define SGE_CMDQ1_E_N          128
+#define SGE_FREEL_SIZE         4096
+#define SGE_JUMBO_FREEL_SIZE   512
+#define SGE_FREEL_REFILL_THRESH        16
+#define SGE_RESPQ_E_N          1024
+#define SGE_INTRTIMER_NRES     1000
+#define SGE_RX_COPY_THRES      256
+#define SGE_RX_SM_BUF_SIZE     1536
+
+# define SGE_RX_DROP_THRES 2
+
+#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
+
+/*
+ * Period of the TX buffer reclaim timer.  This timer does not need to run
+ * frequently as TX buffers are usually reclaimed by new TX packets.
+ */
+#define TX_RECLAIM_PERIOD (HZ / 4)
+
+#ifndef NET_IP_ALIGN
+# define NET_IP_ALIGN 2
+#endif
+
+#define M_CMD_LEN       0x7fffffff
+#define V_CMD_LEN(v)    (v)
+#define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
+#define V_CMD_GEN1(v)   ((v) << 31)
+#define V_CMD_GEN2(v)   (v)
+#define F_CMD_DATAVALID (1 << 1)
+#define F_CMD_SOP       (1 << 2)
+#define V_CMD_EOP(v)    ((v) << 3)
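+
+/*
+ * The macros above build the generation-tagged words of the command and
+ * free-list descriptors defined below: len_gen packs the buffer length with
+ * the first copy of the generation bit (V_CMD_GEN1), while the flags word
+ * (command queues) or gen2 word (free lists) carries the second copy
+ * (V_CMD_GEN2), plus DATAVALID/SOP/EOP for command descriptors, and is
+ * written last in write_tx_descs() and refill_free_list().
+ */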
+
+/*
+ * Command queue, receive buffer list, and response queue descriptors.
+ */
+#if defined(__BIG_ENDIAN_BITFIELD)
+struct cmdQ_e {
+       u32 addr_lo;
+       u32 len_gen;
+       u32 flags;
+       u32 addr_hi;
+};
+
+struct freelQ_e {
+       u32 addr_lo;
+       u32 len_gen;
+       u32 gen2;
+       u32 addr_hi;
+};
+
+struct respQ_e {
+       u32 Qsleeping           : 4;
+       u32 Cmdq1CreditReturn   : 5;
+       u32 Cmdq1DmaComplete    : 5;
+       u32 Cmdq0CreditReturn   : 5;
+       u32 Cmdq0DmaComplete    : 5;
+       u32 FreelistQid         : 2;
+       u32 CreditValid         : 1;
+       u32 DataValid           : 1;
+       u32 Offload             : 1;
+       u32 Eop                 : 1;
+       u32 Sop                 : 1;
+       u32 GenerationBit       : 1;
+       u32 BufferLength;
+};
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+struct cmdQ_e {
+       u32 len_gen;
+       u32 addr_lo;
+       u32 addr_hi;
+       u32 flags;
+};
+
+struct freelQ_e {
+       u32 len_gen;
+       u32 addr_lo;
+       u32 addr_hi;
+       u32 gen2;
+};
+
+struct respQ_e {
+       u32 BufferLength;
+       u32 GenerationBit       : 1;
+       u32 Sop                 : 1;
+       u32 Eop                 : 1;
+       u32 Offload             : 1;
+       u32 DataValid           : 1;
+       u32 CreditValid         : 1;
+       u32 FreelistQid         : 2;
+       u32 Cmdq0DmaComplete    : 5;
+       u32 Cmdq0CreditReturn   : 5;
+       u32 Cmdq1DmaComplete    : 5;
+       u32 Cmdq1CreditReturn   : 5;
+       u32 Qsleeping           : 4;
+};
+#endif
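+
+/*
+ * All of these rings use a generation ("valid") bit scheme: software keeps a
+ * genbit per ring, toggled each time the ring wraps, and an entry belongs to
+ * the current pass only while its generation field matches that genbit (e.g.
+ * the  e->GenerationBit == q->genbit  test in process_responses()).
+ */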
+
+/*
+ * SW Context Command and Freelist Queue Descriptors
+ */
+struct cmdQ_ce {
+       struct sk_buff *skb;
+       DECLARE_PCI_UNMAP_ADDR(dma_addr);
+       DECLARE_PCI_UNMAP_LEN(dma_len);
+};
+
+struct freelQ_ce {
+       struct sk_buff *skb;
+       DECLARE_PCI_UNMAP_ADDR(dma_addr);
+       DECLARE_PCI_UNMAP_LEN(dma_len);
+};
+
+/*
+ * SW command, freelist and response rings
+ */
+struct cmdQ {
+       unsigned long   status;         /* HW DMA fetch status */
+       unsigned int    in_use;         /* # of in-use command descriptors */
+       unsigned int    size;           /* # of descriptors */
+       unsigned int    processed;      /* total # of descs HW has processed */
+       unsigned int    cleaned;        /* total # of descs SW has reclaimed */
+       unsigned int    stop_thres;     /* SW TX queue suspend threshold */
+       u16             pidx;           /* producer index (SW) */
+       u16             cidx;           /* consumer index (HW) */
+       u8              genbit;         /* current generation (=valid) bit */
+       u8              sop;            /* is next entry start of packet? */
+       struct cmdQ_e  *entries;        /* HW command descriptor Q */
+       struct cmdQ_ce *centries;       /* SW command context descriptor Q */
+       spinlock_t      lock;           /* Lock to protect cmdQ enqueuing */
+       dma_addr_t      dma_addr;       /* DMA addr HW command descriptor Q */
+};
+
+struct freelQ {
+       unsigned int    credits;        /* # of available RX buffers */
+       unsigned int    size;           /* free list capacity */
+       u16             pidx;           /* producer index (SW) */
+       u16             cidx;           /* consumer index (HW) */
+       u16             rx_buffer_size; /* Buffer size on this free list */
+       u16             dma_offset;     /* DMA offset to align IP headers */
+       u16             recycleq_idx;   /* skb recycle q to use */
+       u8              genbit;         /* current generation (=valid) bit */
+       struct freelQ_e *entries;       /* HW freelist descriptor Q */
+       struct freelQ_ce *centries;     /* SW freelist context descriptor Q */
+       dma_addr_t      dma_addr;       /* DMA addr HW freelist descriptor Q */
+};
+
+struct respQ {
+       unsigned int    credits;        /* credits to be returned to SGE */
+       unsigned int    size;           /* # of response Q descriptors */
+       u16             cidx;           /* consumer index (SW) */
+       u8              genbit;         /* current generation(=valid) bit */
+       struct respQ_e *entries;        /* HW response descriptor Q */
+       dma_addr_t      dma_addr;       /* DMA addr HW response descriptor Q */
+};
+
+/* Bit flags for cmdQ.status */
+enum {
+       CMDQ_STAT_RUNNING = 1,          /* fetch engine is running */
+       CMDQ_STAT_LAST_PKT_DB = 2       /* last packet rung the doorbell */
+};
+
+/*
+ * Main SGE data structure
+ *
+ * Interrupts are handled by a single CPU and it is likely that on an MP
+ * system the application is migrated to another CPU. In that scenario, we
+ * try to separate the RX (in irq context) and TX state in order to decrease
+ * memory contention.
+ */
+struct sge {
+       struct adapter *adapter;        /* adapter backpointer */
+       struct net_device *netdev;      /* netdevice backpointer */
+       struct freelQ   freelQ[SGE_FREELQ_N]; /* buffer free lists */
+       struct respQ    respQ;          /* response Q */
+       unsigned long   stopped_tx_queues; /* bitmap of suspended Tx queues */
+       unsigned int    rx_pkt_pad;     /* RX padding for L2 packets */
+       unsigned int    jumbo_fl;       /* jumbo freelist Q index */
+       unsigned int    intrtimer_nres; /* no-resource interrupt timer */
+       unsigned int    fixed_intrtimer;/* non-adaptive interrupt timer */
+       struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
+       struct timer_list espibug_timer;
+       unsigned int    espibug_timeout;
+       struct sk_buff  *espibug_skb;
+       u32             sge_control;    /* shadow value of sge control reg */
+       struct sge_intr_counts stats;
+       struct sge_port_stats port_stats[MAX_NPORTS];
+       struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
+};
+
+/*
+ * PIO to indicate that memory mapped Q contains valid descriptor(s).
+ */
+static inline void doorbell_pio(struct adapter *adapter, u32 val)
+{
+       wmb();
+       writel(val, adapter->regs + A_SG_DOORBELL);
+}
+
+/*
+ * Frees all RX buffers on the freelist Q. The caller must make sure that
+ * the SGE is turned off before calling this function.
+ */
+static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
+{
+       unsigned int cidx = q->cidx;
+
+       while (q->credits--) {
+               struct freelQ_ce *ce = &q->centries[cidx];
+
+               pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
+                                pci_unmap_len(ce, dma_len),
+                                PCI_DMA_FROMDEVICE);
+               dev_kfree_skb(ce->skb);
+               ce->skb = NULL;
+               if (++cidx == q->size)
+                       cidx = 0;
+       }
+}
+
+/*
+ * Free RX free list and response queue resources.
+ */
+static void free_rx_resources(struct sge *sge)
+{
+       struct pci_dev *pdev = sge->adapter->pdev;
+       unsigned int size, i;
+
+       if (sge->respQ.entries) {
+               size = sizeof(struct respQ_e) * sge->respQ.size;
+               pci_free_consistent(pdev, size, sge->respQ.entries,
+                                   sge->respQ.dma_addr);
+       }
+
+       for (i = 0; i < SGE_FREELQ_N; i++) {
+               struct freelQ *q = &sge->freelQ[i];
+
+               if (q->centries) {
+                       free_freelQ_buffers(pdev, q);
+                       kfree(q->centries);
+               }
+               if (q->entries) {
+                       size = sizeof(struct freelQ_e) * q->size;
+                       pci_free_consistent(pdev, size, q->entries,
+                                           q->dma_addr);
+               }
+       }
+}
+
+/*
+ * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
+ * response queue.
+ */
+static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
+{
+       struct pci_dev *pdev = sge->adapter->pdev;
+       unsigned int size, i;
+
+       for (i = 0; i < SGE_FREELQ_N; i++) {
+               struct freelQ *q = &sge->freelQ[i];
+
+               q->genbit = 1;
+               q->size = p->freelQ_size[i];
+               q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
+               size = sizeof(struct freelQ_e) * q->size;
+               q->entries = (struct freelQ_e *)
+                             pci_alloc_consistent(pdev, size, &q->dma_addr);
+               if (!q->entries)
+                       goto err_no_mem;
+               memset(q->entries, 0, size);
+               size = sizeof(struct freelQ_ce) * q->size;
+               q->centries = kmalloc(size, GFP_KERNEL);
+               if (!q->centries)
+                       goto err_no_mem;
+               memset(q->centries, 0, size);
+       }
+
+       /*
+        * Calculate the buffer sizes for the two free lists.  FL0 accommodates
+        * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
+        * including all the sk_buff overhead.
+        *
+        * Note: For T2 FL0 and FL1 are reversed.
+        */
+       sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
+               sizeof(struct cpl_rx_data) +
+               sge->freelQ[!sge->jumbo_fl].dma_offset;
+       sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
+               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+       /*
+        * Setup which skb recycle Q should be used when recycling buffers from
+        * each free list.
+        */
+       sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
+       sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
+
+       sge->respQ.genbit = 1;
+       sge->respQ.size = SGE_RESPQ_E_N;
+       sge->respQ.credits = 0;
+       size = sizeof(struct respQ_e) * sge->respQ.size;
+       sge->respQ.entries = (struct respQ_e *)
+               pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
+       if (!sge->respQ.entries)
+               goto err_no_mem;
+       memset(sge->respQ.entries, 0, size);
+       return 0;
+
+err_no_mem:
+       free_rx_resources(sge);
+       return -ENOMEM;
+}
+
+/*
+ * Reclaims n TX descriptors and frees the buffers associated with them.
+ */
+static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
+{
+       struct cmdQ_ce *ce;
+       struct pci_dev *pdev = sge->adapter->pdev;
+       unsigned int cidx = q->cidx;
+
+       q->in_use -= n;
+       ce = &q->centries[cidx];
+       while (n--) {
+               if (q->sop)
+                       pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
+                                        pci_unmap_len(ce, dma_len),
+                                        PCI_DMA_TODEVICE);
+               else
+                       pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
+                                      pci_unmap_len(ce, dma_len),
+                                      PCI_DMA_TODEVICE);
+               q->sop = 0;
+               if (ce->skb) {
+                       dev_kfree_skb(ce->skb);
+                       q->sop = 1;
+               }
+               ce++;
+               if (++cidx == q->size) {
+                       cidx = 0;
+                       ce = q->centries;
+               }
+       }
+       q->cidx = cidx;
+}
+
+/*
+ * Free TX resources.
+ *
+ * Assumes that SGE is stopped and all interrupts are disabled.
+ */
+static void free_tx_resources(struct sge *sge)
+{
+       struct pci_dev *pdev = sge->adapter->pdev;
+       unsigned int size, i;
+
+       for (i = 0; i < SGE_CMDQ_N; i++) {
+               struct cmdQ *q = &sge->cmdQ[i];
+
+               if (q->centries) {
+                       if (q->in_use)
+                               free_cmdQ_buffers(sge, q, q->in_use);
+                       kfree(q->centries);
+               }
+               if (q->entries) {
+                       size = sizeof(struct cmdQ_e) * q->size;
+                       pci_free_consistent(pdev, size, q->entries,
+                                           q->dma_addr);
+               }
+       }
+}
+
+/*
+ * Allocates basic TX resources, consisting of memory mapped command Qs.
+ */
+static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
+{
+       struct pci_dev *pdev = sge->adapter->pdev;
+       unsigned int size, i;
+
+       for (i = 0; i < SGE_CMDQ_N; i++) {
+               struct cmdQ *q = &sge->cmdQ[i];
+
+               q->genbit = 1;
+               q->sop = 1;
+               q->size = p->cmdQ_size[i];
+               q->in_use = 0;
+               q->status = 0;
+               q->processed = q->cleaned = 0;
+               q->stop_thres = 0;
+               spin_lock_init(&q->lock);
+               size = sizeof(struct cmdQ_e) * q->size;
+               q->entries = (struct cmdQ_e *)
+                             pci_alloc_consistent(pdev, size, &q->dma_addr);
+               if (!q->entries)
+                       goto err_no_mem;
+               memset(q->entries, 0, size);
+               size = sizeof(struct cmdQ_ce) * q->size;
+               q->centries = kmalloc(size, GFP_KERNEL);
+               if (!q->centries)
+                       goto err_no_mem;
+               memset(q->centries, 0, size);
+       }
+
+       /*
+        * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
+        * only.  For queue 0 set the stop threshold so we can handle one more
+        * packet from each port, plus reserve an additional 24 entries for
+        * Ethernet packets only.  Queue 1 never suspends nor do we reserve
+        * space for Ethernet packets.
+        */
+       sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
+               (MAX_SKB_FRAGS + 1);
+       return 0;
+
+err_no_mem:
+       free_tx_resources(sge);
+       return -ENOMEM;
+}
+
+static inline void setup_ring_params(struct adapter *adapter, u64 addr,
+                                    u32 size, int base_reg_lo,
+                                    int base_reg_hi, int size_reg)
+{
+       writel((u32)addr, adapter->regs + base_reg_lo);
+       writel(addr >> 32, adapter->regs + base_reg_hi);
+       writel(size, adapter->regs + size_reg);
+}
+
+/*
+ * Enable/disable VLAN acceleration.
+ */
+void t1_set_vlan_accel(struct adapter *adapter, int on_off)
+{
+       struct sge *sge = adapter->sge;
+
+       sge->sge_control &= ~F_VLAN_XTRACT;
+       if (on_off)
+               sge->sge_control |= F_VLAN_XTRACT;
+       if (adapter->open_device_map) {
+               writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
+               readl(adapter->regs + A_SG_CONTROL); /* flush */
+       }
+}
+
+/*
+ * Programs the various SGE registers.  The engine is not enabled yet, but
+ * sge->sge_control is set up and ready to go.
+ */
+static void configure_sge(struct sge *sge, struct sge_params *p)
+{
+       struct adapter *ap = sge->adapter;
+       
+       writel(0, ap->regs + A_SG_CONTROL);
+       setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
+                         A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
+       setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
+                         A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
+       setup_ring_params(ap, sge->freelQ[0].dma_addr,
+                         sge->freelQ[0].size, A_SG_FL0BASELWR,
+                         A_SG_FL0BASEUPR, A_SG_FL0SIZE);
+       setup_ring_params(ap, sge->freelQ[1].dma_addr,
+                         sge->freelQ[1].size, A_SG_FL1BASELWR,
+                         A_SG_FL1BASEUPR, A_SG_FL1SIZE);
+
+       /* The threshold comparison uses <. */
+       writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
+
+       setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
+                         A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
+       writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
+
+       sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
+               F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
+               V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
+               F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS |
+               V_RX_PKT_OFFSET(sge->rx_pkt_pad);
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+       sge->sge_control |= F_ENABLE_BIG_ENDIAN;
+#endif
+
+       /* Initialize no-resource timer */
+       sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
+
+       t1_sge_set_coalesce_params(sge, p);
+}
+
+/*
+ * Return the payload capacity of the jumbo free-list buffers.
+ */
+static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
+{
+       return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
+               sge->freelQ[sge->jumbo_fl].dma_offset -
+               sizeof(struct cpl_rx_data);
+}
+
+/*
+ * Frees all SGE related resources and the sge structure itself
+ */
+void t1_sge_destroy(struct sge *sge)
+{
+       if (sge->espibug_skb)
+               kfree_skb(sge->espibug_skb);
+
+       free_tx_resources(sge);
+       free_rx_resources(sge);
+       kfree(sge);
+}
+
+/*
+ * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
+ * context Q) until the Q is full or alloc_skb fails.
+ *
+ * It is possible that the generation bits already match, indicating that the
+ * buffer is already valid and nothing needs to be done. This happens when we
+ * copied a received buffer into a new sk_buff during the interrupt processing.
+ *
+ * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
+ * we specify an RX_OFFSET in order to make sure that the IP header is 4B
+ * aligned.
+ */
+static void refill_free_list(struct sge *sge, struct freelQ *q)
+{
+       struct pci_dev *pdev = sge->adapter->pdev;
+       struct freelQ_ce *ce = &q->centries[q->pidx];
+       struct freelQ_e *e = &q->entries[q->pidx];
+       unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
+
+
+       while (q->credits < q->size) {
+               struct sk_buff *skb;
+               dma_addr_t mapping;
+
+               skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
+               if (!skb)
+                       break;
+
+               skb_reserve(skb, q->dma_offset);
+               mapping = pci_map_single(pdev, skb->data, dma_len,
+                                        PCI_DMA_FROMDEVICE);
+               ce->skb = skb;
+               pci_unmap_addr_set(ce, dma_addr, mapping);
+               pci_unmap_len_set(ce, dma_len, dma_len);
+               e->addr_lo = (u32)mapping;
+               e->addr_hi = (u64)mapping >> 32;
+               e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
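+               /*
+                * The wmb() below orders the address/length writes ahead of
+                * gen2, which carries the second copy of the generation
+                * (valid) bit for this descriptor.
+                */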
+               wmb();
+               e->gen2 = V_CMD_GEN2(q->genbit);
+
+               e++;
+               ce++;
+               if (++q->pidx == q->size) {
+                       q->pidx = 0;
+                       q->genbit ^= 1;
+                       ce = q->centries;
+                       e = q->entries;
+               }
+               q->credits++;
+       }
+
+}
+
+/*
+ * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
+ * of both rings, we go into 'few interrupt mode' in order to give the system
+ * time to free up resources.
+ */
+static void freelQs_empty(struct sge *sge)
+{
+       struct adapter *adapter = sge->adapter;
+       u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
+       u32 irqholdoff_reg;
+
+       refill_free_list(sge, &sge->freelQ[0]);
+       refill_free_list(sge, &sge->freelQ[1]);
+
+       if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
+           sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
+               irq_reg |= F_FL_EXHAUSTED;
+               irqholdoff_reg = sge->fixed_intrtimer;
+       } else {
+               /* Clear the F_FL_EXHAUSTED interrupts for now */
+               irq_reg &= ~F_FL_EXHAUSTED;
+               irqholdoff_reg = sge->intrtimer_nres;
+       }
+       writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
+       writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
+
+       /* We reenable the Qs to force a freelist GTS interrupt later */
+       doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
+}
+
+#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
+#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
+#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
+                       F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
+
+/*
+ * Disable SGE Interrupts
+ */
+void t1_sge_intr_disable(struct sge *sge)
+{
+       u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
+
+       writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
+       writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
+}
+
+/*
+ * Enable SGE interrupts.
+ */
+void t1_sge_intr_enable(struct sge *sge)
+{
+       u32 en = SGE_INT_ENABLE;
+       u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
+
+       if (sge->adapter->flags & TSO_CAPABLE)
+               en &= ~F_PACKET_TOO_BIG;
+       writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
+       writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
+}
+
+/*
+ * Clear SGE interrupts.
+ */
+void t1_sge_intr_clear(struct sge *sge)
+{
+       writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
+       writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
+}
+
+/*
+ * SGE 'Error' interrupt handler
+ */
+int t1_sge_intr_error_handler(struct sge *sge)
+{
+       struct adapter *adapter = sge->adapter;
+       u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
+
+       if (adapter->flags & TSO_CAPABLE)
+               cause &= ~F_PACKET_TOO_BIG;
+       if (cause & F_RESPQ_EXHAUSTED)
+               sge->stats.respQ_empty++;
+       if (cause & F_RESPQ_OVERFLOW) {
+               sge->stats.respQ_overflow++;
+               CH_ALERT("%s: SGE response queue overflow\n",
+                        adapter->name);
+       }
+       if (cause & F_FL_EXHAUSTED) {
+               sge->stats.freelistQ_empty++;
+               freelQs_empty(sge);
+       }
+       if (cause & F_PACKET_TOO_BIG) {
+               sge->stats.pkt_too_big++;
+               CH_ALERT("%s: SGE max packet size exceeded\n",
+                        adapter->name);
+       }
+       if (cause & F_PACKET_MISMATCH) {
+               sge->stats.pkt_mismatch++;
+               CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
+       }
+       if (cause & SGE_INT_FATAL)
+               t1_fatal_err(adapter);
+
+       writel(cause, adapter->regs + A_SG_INT_CAUSE);
+       return 0;
+}
+
+const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge)
+{
+       return &sge->stats;
+}
+
+const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port)
+{
+       return &sge->port_stats[port];
+}
+
+/**
+ *     recycle_fl_buf - recycle a free list buffer
+ *     @fl: the free list
+ *     @idx: index of buffer to recycle
+ *
+ *     Recycles the specified buffer on the given free list by adding it at
+ *     the next available slot on the list.
+ */
+static void recycle_fl_buf(struct freelQ *fl, int idx)
+{
+       struct freelQ_e *from = &fl->entries[idx];
+       struct freelQ_e *to = &fl->entries[fl->pidx];
+
+       fl->centries[fl->pidx] = fl->centries[idx];
+       to->addr_lo = from->addr_lo;
+       to->addr_hi = from->addr_hi;
+       to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
+       wmb();
+       to->gen2 = V_CMD_GEN2(fl->genbit);
+       fl->credits++;
+
+       if (++fl->pidx == fl->size) {
+               fl->pidx = 0;
+               fl->genbit ^= 1;
+       }
+}
+
+/**
+ *     get_packet - return the next ingress packet buffer
+ *     @pdev: the PCI device that received the packet
+ *     @fl: the SGE free list holding the packet
+ *     @len: the actual packet length, excluding any SGE padding
+ *     @dma_pad: padding at beginning of buffer left by SGE DMA
+ *     @skb_pad: padding to be used if the packet is copied
+ *     @copy_thres: length threshold under which a packet should be copied
+ *     @drop_thres: # of remaining buffers before we start dropping packets
+ *
+ *     Get the next packet from a free list and complete setup of the
+ *     sk_buff.  If the packet is small we make a copy and recycle the
+ *     original buffer, otherwise we use the original buffer itself.  If a
+ *     positive drop threshold is supplied packets are dropped and their
+ *     buffers recycled if (a) the number of remaining buffers is under the
+ *     threshold and the packet is too big to copy, or (b) the packet should
+ *     be copied but there is no memory for the copy.
+ */
+static inline struct sk_buff *get_packet(struct pci_dev *pdev,
+                                        struct freelQ *fl, unsigned int len,
+                                        int dma_pad, int skb_pad,
+                                        unsigned int copy_thres,
+                                        unsigned int drop_thres)
+{
+       struct sk_buff *skb;
+       struct freelQ_ce *ce = &fl->centries[fl->cidx];
+
+       if (len < copy_thres) {
+               skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
+               if (likely(skb != NULL)) {
+                       skb_reserve(skb, skb_pad);
+                       skb_put(skb, len);
+                       pci_dma_sync_single_for_cpu(pdev,
+                                           pci_unmap_addr(ce, dma_addr),
+                                           pci_unmap_len(ce, dma_len),
+                                           PCI_DMA_FROMDEVICE);
+                       memcpy(skb->data, ce->skb->data + dma_pad, len);
+                       pci_dma_sync_single_for_device(pdev,
+                                           pci_unmap_addr(ce, dma_addr),
+                                           pci_unmap_len(ce, dma_len),
+                                           PCI_DMA_FROMDEVICE);
+               } else if (!drop_thres)
+                       goto use_orig_buf;
+
+               recycle_fl_buf(fl, fl->cidx);
+               return skb;
+       }
+
+       if (fl->credits < drop_thres) {
+               recycle_fl_buf(fl, fl->cidx);
+               return NULL;
+       }
+
+use_orig_buf:
+       pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
+                        pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+       skb = ce->skb;
+       skb_reserve(skb, dma_pad);
+       skb_put(skb, len);
+       return skb;
+}
+
+/**
+ *     unexpected_offload - handle an unexpected offload packet
+ *     @adapter: the adapter
+ *     @fl: the free list that received the packet
+ *
+ *     Called when we receive an unexpected offload packet (e.g., the TOE
+ *     function is disabled or the card is a NIC).  Prints a message and
+ *     recycles the buffer.
+ */
+static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
+{
+       struct freelQ_ce *ce = &fl->centries[fl->cidx];
+       struct sk_buff *skb = ce->skb;
+
+       pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
+                           pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+       CH_ERR("%s: unexpected offload packet, cmd %u\n",
+              adapter->name, *skb->data);
+       recycle_fl_buf(fl, fl->cidx);
+}
+
+/*
+ * Write the command descriptors to transmit the given skb starting at
+ * descriptor pidx with the given generation.
+ */
+static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
+                                 unsigned int pidx, unsigned int gen,
+                                 struct cmdQ *q)
+{
+       dma_addr_t mapping;
+       struct cmdQ_e *e, *e1;
+       struct cmdQ_ce *ce;
+       unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags;
+
+       mapping = pci_map_single(adapter->pdev, skb->data,
+                                skb->len - skb->data_len, PCI_DMA_TODEVICE);
+       ce = &q->centries[pidx];
+       ce->skb = NULL;
+       pci_unmap_addr_set(ce, dma_addr, mapping);
+       pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
+
+       flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) |
+               V_CMD_GEN2(gen);
+       e = &q->entries[pidx];
+       e->addr_lo = (u32)mapping;
+       e->addr_hi = (u64)mapping >> 32;
+       e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen);
+       for (e1 = e, i = 0; nfrags--; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               ce++;
+               e1++;
+               if (++pidx == q->size) {
+                       pidx = 0;
+                       gen ^= 1;
+                       ce = q->centries;
+                       e1 = q->entries;
+               }
+
+               mapping = pci_map_page(adapter->pdev, frag->page,
+                                      frag->page_offset, frag->size,
+                                      PCI_DMA_TODEVICE);
+               ce->skb = NULL;
+               pci_unmap_addr_set(ce, dma_addr, mapping);
+               pci_unmap_len_set(ce, dma_len, frag->size);
+
+               e1->addr_lo = (u32)mapping;
+               e1->addr_hi = (u64)mapping >> 32;
+               e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen);
+               e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) |
+                           V_CMD_GEN2(gen);
+       }
+
+       ce->skb = skb;
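+       /*
+        * The flags word of the first (SOP) descriptor is written last, after
+        * the barrier, so the rest of the descriptor chain is in memory before
+        * the SOP descriptor's generation/DATAVALID bits are updated.
+        */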
+       wmb();
+       e->flags = flags;
+}
+
+/*
+ * Clean up completed Tx buffers.
+ */
+static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
+{
+       unsigned int reclaim = q->processed - q->cleaned;
+
+       if (reclaim) {
+               free_cmdQ_buffers(sge, q, reclaim);
+               q->cleaned += reclaim;
+       }
+}
+
+#ifndef SET_ETHTOOL_OPS
+# define __netif_rx_complete(dev) netif_rx_complete(dev)
+#endif
+
+/*
+ * We cannot use the standard netif_rx_schedule_prep() because we have multiple
+ * ports plus the TOE all multiplexing onto a single response queue, therefore
+ * accepting new responses cannot depend on the state of any particular port.
+ * So define our own equivalent that omits the netif_running() test.
+ */
+static inline int napi_schedule_prep(struct net_device *dev)
+{
+       return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
+}
+
+
+/**
+ *     sge_rx - process an ingress ethernet packet
+ *     @sge: the sge structure
+ *     @fl: the free list that contains the packet buffer
+ *     @len: the packet length
+ *
+ *     Process an ingress ethernet packet and deliver it to the stack.
+ */
+static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
+{
+       struct sk_buff *skb;
+       struct cpl_rx_pkt *p;
+       struct adapter *adapter = sge->adapter;
+
+       sge->stats.ethernet_pkts++;
+       skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
+                        sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
+                        SGE_RX_DROP_THRES);
+       if (!skb) {
+               sge->port_stats[0].rx_drops++; /* charge only port 0 for now */
+               return 0;
+       }
+
+       p = (struct cpl_rx_pkt *)skb->data;
+       skb_pull(skb, sizeof(*p));
+       skb->dev = adapter->port[p->iff].dev;
+       skb->dev->last_rx = jiffies;
+       skb->protocol = eth_type_trans(skb, skb->dev);
+       if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
+           skb->protocol == htons(ETH_P_IP) &&
+           (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
+               sge->port_stats[p->iff].rx_cso_good++;
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       } else
+               skb->ip_summed = CHECKSUM_NONE;
+
+       if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
+               sge->port_stats[p->iff].vlan_xtract++;
+               if (adapter->params.sge.polling)
+                       vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
+                                                ntohs(p->vlan));
+               else
+                       vlan_hwaccel_rx(skb, adapter->vlan_grp,
+                                       ntohs(p->vlan));
+       } else if (adapter->params.sge.polling)
+               netif_receive_skb(skb);
+       else
+               netif_rx(skb);
+       return 0;
+}
+
+/*
+ * Returns true if a command queue has enough available descriptors that
+ * we can resume Tx operation after temporarily disabling its packet queue.
+ */
+static inline int enough_free_Tx_descs(const struct cmdQ *q)
+{
+       unsigned int r = q->processed - q->cleaned;
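+       /*
+        * r is the number of descriptors the hardware has completed but we
+        * have not reclaimed yet, so in_use - r is what is still outstanding;
+        * resume Tx once less than half of the ring is outstanding.
+        */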
+
+       return q->in_use - r < (q->size >> 1);
+}
+
+/*
+ * Called when sufficient space has become available in the SGE command queues
+ * after the Tx packet schedulers have been suspended to restart the Tx path.
+ */
+static void restart_tx_queues(struct sge *sge)
+{
+       struct adapter *adap = sge->adapter;
+
+       if (enough_free_Tx_descs(&sge->cmdQ[0])) {
+               int i;
+
+               for_each_port(adap, i) {
+                       struct net_device *nd = adap->port[i].dev;
+
+                       if (test_and_clear_bit(nd->if_port,
+                                              &sge->stopped_tx_queues) &&
+                           netif_running(nd)) {
+                               sge->stats.cmdQ_restarted[3]++;
+                               netif_wake_queue(nd);
+                       }
+               }
+       }
+}
+
+/*
+ * update_tx_info is called from the interrupt handler/NAPI to process the
+ * cmdQ0 credits returned in the response entries, ring the cmdQ0 doorbell
+ * again if needed, and wake any suspended Tx queues.
+ */
+static unsigned int update_tx_info(struct adapter *adapter,
+                                   unsigned int flags,
+                                   unsigned int pr0)
+{
+       struct sge *sge = adapter->sge;
+       struct cmdQ *cmdq = &sge->cmdQ[0];
+
+       cmdq->processed += pr0;
+
+       if (flags & F_CMDQ0_ENABLE) {
+               clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
+       
+               if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
+                   !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
+                       set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
+                       writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+               }
+               flags &= ~F_CMDQ0_ENABLE;
+       }
+       
+       if (unlikely(sge->stopped_tx_queues != 0))
+               restart_tx_queues(sge);
+
+       return flags;
+}
+
+/*
+ * Process SGE responses, up to the supplied budget.  Returns the number of
+ * responses processed.  A negative budget is effectively unlimited.
+ */
+static int process_responses(struct adapter *adapter, int budget)
+{
+       struct sge *sge = adapter->sge;
+       struct respQ *q = &sge->respQ;
+       struct respQ_e *e = &q->entries[q->cidx];
+       int budget_left = budget;
+       unsigned int flags = 0;
+       unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
+       
+
+       while (likely(budget_left && e->GenerationBit == q->genbit)) {
+               flags |= e->Qsleeping;
+               
+               cmdq_processed[0] += e->Cmdq0CreditReturn;
+               cmdq_processed[1] += e->Cmdq1CreditReturn;
+               
+               /* We batch updates to the TX side to avoid cacheline
+                * ping-pong of TX state information on MP systems, where the
+                * sender might run on a different CPU than this function.
+                */
+               if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) {
+                       flags = update_tx_info(adapter, flags, cmdq_processed[0]);
+                       cmdq_processed[0] = 0;
+               }
+               if (unlikely(cmdq_processed[1] > 16)) {
+                       sge->cmdQ[1].processed += cmdq_processed[1];
+                       cmdq_processed[1] = 0;
+               }
+               if (likely(e->DataValid)) {
+                       struct freelQ *fl = &sge->freelQ[e->FreelistQid];
+
+                       if (unlikely(!e->Sop || !e->Eop))
+                               BUG();
+                       if (unlikely(e->Offload))
+                               unexpected_offload(adapter, fl);
+                       else
+                               sge_rx(sge, fl, e->BufferLength);
+
+                       /*
+                        * Note: this depends on each packet consuming a
+                        * single free-list buffer; cf. the BUG above.
+                        */
+                       if (++fl->cidx == fl->size)
+                               fl->cidx = 0;
+                       if (unlikely(--fl->credits <
+                                    fl->size - SGE_FREEL_REFILL_THRESH))
+                               refill_free_list(sge, fl);
+               } else
+                       sge->stats.pure_rsps++;
+
+               e++;
+               if (unlikely(++q->cidx == q->size)) {
+                       q->cidx = 0;
+                       q->genbit ^= 1;
+                       e = q->entries;
+               }
+               prefetch(e);
+
+               if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
+                       writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
+                       q->credits = 0;
+               }
+               --budget_left;
+       }
+
+       flags = update_tx_info(adapter, flags, cmdq_processed[0]); 
+       sge->cmdQ[1].processed += cmdq_processed[1];
+
+       budget -= budget_left;
+       return budget;
+}
+
+/*
+ * A simpler version of process_responses() that handles only pure (i.e.,
+ * non data-carrying) responses.  Such responses are too lightweight to justify
+ * calling a softirq when using NAPI, so we handle them specially in hard
+ * interrupt context.  The function is called with a pointer to a response,
+ * which the caller must ensure is a valid pure response.  Returns 1 if it
+ * encounters a valid data-carrying response, 0 otherwise.
+ */
+static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
+{
+       struct sge *sge = adapter->sge;
+       struct respQ *q = &sge->respQ;
+       unsigned int flags = 0;
+       unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
+
+       do {
+               flags |= e->Qsleeping;
+
+               cmdq_processed[0] += e->Cmdq0CreditReturn;
+               cmdq_processed[1] += e->Cmdq1CreditReturn;
+               
+               e++;
+               if (unlikely(++q->cidx == q->size)) {
+                       q->cidx = 0;
+                       q->genbit ^= 1;
+                       e = q->entries;
+               }
+               prefetch(e);
+
+               if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
+                       writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
+                       q->credits = 0;
+               }
+               sge->stats.pure_rsps++;
+       } while (e->GenerationBit == q->genbit && !e->DataValid);
+
+       flags = update_tx_info(adapter, flags, cmdq_processed[0]); 
+       sge->cmdQ[1].processed += cmdq_processed[1];
+
+       return e->GenerationBit == q->genbit;
+}
+
+/*
+ * Handler for new data events when using NAPI.  This does not need any locking
+ * or protection from interrupts as data interrupts are off at this point and
+ * other adapter interrupts do not interfere.
+ */
+static int t1_poll(struct net_device *dev, int *budget)
+{
+       struct adapter *adapter = dev->priv;
+       int effective_budget = min(*budget, dev->quota);
+
+       int work_done = process_responses(adapter, effective_budget);
+       *budget -= work_done;
+       dev->quota -= work_done;
+
+       if (work_done >= effective_budget)
+               return 1;
+
+       __netif_rx_complete(dev);
+
+       /*
+        * Because we don't atomically flush the following write it is
+        * possible that in very rare cases it can reach the device in a way
+        * that races with a new response being written plus an error interrupt
+        * causing the NAPI interrupt handler below to return unhandled status
+        * to the OS.  Protecting against this would require flushing the write
+        * and doing both the write and the flush with interrupts off, which is
+        * way too expensive and unjustifiable given the rarity of the race.
+        */
+       writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
+       return 0;
+}
+
+/*
+ * Returns true if the device is already scheduled for polling.
+ */
+static inline int napi_is_scheduled(struct net_device *dev)
+{
+       return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
+}
+
+/*
+ * NAPI version of the main interrupt handler.
+ */
+static irqreturn_t t1_interrupt_napi(int irq, void *data, struct pt_regs *regs)
+{
+       int handled;
+       struct adapter *adapter = data;
+       struct sge *sge = adapter->sge;
+       struct respQ *q = &adapter->sge->respQ;
+
+       /*
+        * Clear the SGE_DATA interrupt first thing.  Normally the NAPI
+        * handler has control of the response queue and the interrupt handler
+        * can look at the queue reliably only once it knows NAPI is off.
+        * We can't wait that long to clear the SGE_DATA interrupt because we
+        * could race with t1_poll rearming the SGE interrupt, so we need to
+        * clear the interrupt speculatively and really early on.
+        */
+       writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+
+       spin_lock(&adapter->async_lock);
+       if (!napi_is_scheduled(sge->netdev)) {
+               struct respQ_e *e = &q->entries[q->cidx];
+
+               if (e->GenerationBit == q->genbit) {
+                       if (e->DataValid ||
+                           process_pure_responses(adapter, e)) {
+                               if (likely(napi_schedule_prep(sge->netdev)))
+                                       __netif_rx_schedule(sge->netdev);
+                               else
+                                       printk(KERN_CRIT
+                                              "NAPI schedule failure!\n");
+                       } else
+                               writel(q->cidx, adapter->regs + A_SG_SLEEPING);
+                       handled = 1;
+                       goto unlock;
+               } else
+                       writel(q->cidx, adapter->regs + A_SG_SLEEPING);
+       } else if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA)
+               printk(KERN_ERR "data interrupt while NAPI running\n");
+       
+       handled = t1_slow_intr_handler(adapter);
+       if (!handled)
+               sge->stats.unhandled_irqs++;
+ unlock:
+       spin_unlock(&adapter->async_lock);
+       return IRQ_RETVAL(handled != 0);
+}
+
+/*
+ * Main interrupt handler, optimized assuming that we took a 'DATA'
+ * interrupt.
+ *
+ * 1. Clear the interrupt
+ * 2. Loop while we find valid descriptors and process them; accumulate
+ *      information that can be processed after the loop
+ * 3. Tell the SGE at which index we stopped processing descriptors
+ * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
+ *      outstanding TX buffers waiting, replenish RX buffers, potentially
+ *      reenable upper layers if they were turned off due to lack of TX
+ *      resources which are available again.
+ * 5. If we took an interrupt but no valid respQ descriptors were found, we
+ *      let the slow_intr_handler run and do error handling.
+ */
+static irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs)
+{
+       int work_done;
+       struct respQ_e *e;
+       struct adapter *adapter = cookie;
+       struct respQ *Q = &adapter->sge->respQ;
+
+       spin_lock(&adapter->async_lock);
+       e = &Q->entries[Q->cidx];
+       prefetch(e);
+
+       writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+
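+       /* A matching generation bit means this entry holds a new response. */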
+       if (likely(e->GenerationBit == Q->genbit))
+               work_done = process_responses(adapter, -1);
+       else
+               work_done = t1_slow_intr_handler(adapter);
+
+       /*
+        * The unconditional clearing of the PL_CAUSE above may have raced
+        * with DMA completion and the corresponding generation of a response
+        * to cause us to miss the resulting data interrupt.  The next write
+        * is also unconditional to recover the missed interrupt and render
+        * this race harmless.
+        */
+       writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
+
+       if (!work_done)
+               adapter->sge->stats.unhandled_irqs++;
+       spin_unlock(&adapter->async_lock);
+       return IRQ_RETVAL(work_done != 0);
+}
+
+intr_handler_t t1_select_intr_handler(adapter_t *adapter)
+{
+       return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt;
+}
+
+/*
+ * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
+ *
+ * The code figures out how many entries the sk_buff will require in the
+ * cmdQ and updates the cmdQ data structure with the state once the enqueue
+ * has completed. Then it no longer accesses the global structure but uses
+ * the corresponding fields on the stack. In conjunction with a spinlock
+ * around that code, we can make the function reentrant without holding the
+ * lock when we actually enqueue (which might be expensive, especially on
+ * architectures with IO MMUs).
+ *
+ * This runs with softirqs disabled.
+ */
+unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
+                      unsigned int qid, struct net_device *dev)
+{
+       struct sge *sge = adapter->sge;
+       struct cmdQ *q = &sge->cmdQ[qid];
+       unsigned int credits, pidx, genbit, count;
+
+       spin_lock(&q->lock);
+       reclaim_completed_tx(sge, q);
+
+       pidx = q->pidx;
+       credits = q->size - q->in_use;
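+       /* One descriptor for the skb head plus one per page fragment. */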
+       count = 1 + skb_shinfo(skb)->nr_frags;
+
+       {       /* Ethernet packet */
+               if (unlikely(credits < count)) {
+                       netif_stop_queue(dev);
+                       set_bit(dev->if_port, &sge->stopped_tx_queues);
+                       sge->stats.cmdQ_full[qid]++;
+                       spin_unlock(&q->lock);
+                       CH_ERR("%s: Tx ring full while queue awake!\n",
+                              adapter->name);
+                       return 1;
+               }
+               if (unlikely(credits - count < q->stop_thres)) {
+                       sge->stats.cmdQ_full[qid]++;
+                       netif_stop_queue(dev);
+                       set_bit(dev->if_port, &sge->stopped_tx_queues);
+               }
+       }
+       q->in_use += count;
+       genbit = q->genbit;
+       q->pidx += count;
+       if (q->pidx >= q->size) {
+               q->pidx -= q->size;
+               q->genbit ^= 1;
+       }
+       spin_unlock(&q->lock);
+
+       write_tx_descs(adapter, skb, pidx, genbit, q);
+
+       /*
+        * We always ring the doorbell for cmdQ1.  For cmdQ0, we only ring
+        * the doorbell if the Q is asleep. There is a natural race in which
+        * the hardware goes to sleep just after we check; in that case the
+        * interrupt handler will detect the outstanding TX packet and ring
+        * the doorbell for us.
+        */
+       if (qid)
+               doorbell_pio(adapter, F_CMDQ1_ENABLE);
+       else {
+               clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+               if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+                       set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+                       writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+               }
+       }
+       return 0;
+}
+
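+/*
+ * Pack the Ethernet encapsulation type into the top two bits and the MSS
+ * into the low 14 bits of the 16-bit eth_type_mss field.
+ */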
+#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
+
+/*
+ *     eth_hdr_len - return the length of an Ethernet header
+ *     @data: pointer to the start of the Ethernet header
+ *
+ *     Returns the length of an Ethernet header, including optional VLAN tag.
+ */
+static inline int eth_hdr_len(const void *data)
+{
+       const struct ethhdr *e = data;
+
+       return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
+}
+
+/*
+ * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
+ */
+int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct adapter *adapter = dev->priv;
+       struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port];
+       struct sge *sge = adapter->sge;
+       struct cpl_tx_pkt *cpl;
+
+#ifdef NETIF_F_TSO
+       if (skb_shinfo(skb)->tso_size) {
+               int eth_type;
+               struct cpl_tx_pkt_lso *hdr;
+
+               st->tso++;
+
+               eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
+                       CPL_ETH_II : CPL_ETH_II_VLAN;
+
+               hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
+               hdr->opcode = CPL_TX_PKT_LSO;
+               hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
+               hdr->ip_hdr_words = skb->nh.iph->ihl;
+               hdr->tcp_hdr_words = skb->h.th->doff;
+               hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
+                                               skb_shinfo(skb)->tso_size));
+               hdr->len = htonl(skb->len - sizeof(*hdr));
+               cpl = (struct cpl_tx_pkt *)hdr;
+               sge->stats.tx_lso_pkts++;
+       } else
+#endif
+       {
+               /*
+                * Packets shorter than ETH_HLEN can break the MAC, so drop
+                * them early.  We may also get oversized packets because some
+                * parts of the kernel don't handle our unusual hard_header_len
+                * correctly; drop those too.
+                */
+               if (unlikely(skb->len < ETH_HLEN ||
+                            skb->len > dev->mtu + eth_hdr_len(skb->data))) {
+                       dev_kfree_skb_any(skb);
+                       return NET_XMIT_SUCCESS;
+               }
+
+               /*
+                * We are using a non-standard hard_header_len and some kernel
+                * components, such as pktgen, do not handle it right.
+                * Complain when this happens but try to fix things up.
+                */
+               if (unlikely(skb_headroom(skb) <
+                            dev->hard_header_len - ETH_HLEN)) {
+                       struct sk_buff *orig_skb = skb;
+
+                       if (net_ratelimit())
+                               printk(KERN_ERR "%s: inadequate headroom in "
+                                      "Tx packet\n", dev->name);
+                       skb = skb_realloc_headroom(skb, sizeof(*cpl));
+                       dev_kfree_skb_any(orig_skb);
+                       if (!skb)
+                               return -ENOMEM;
+               }
+
+               if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
+                   skb->ip_summed == CHECKSUM_HW &&
+                   skb->nh.iph->protocol == IPPROTO_UDP)
+                       if (unlikely(skb_checksum_help(skb, 0))) {
+                               dev_kfree_skb_any(skb);
+                               return -ENOMEM;
+                       }
+
+               /* Try to catch a gratuitous ARP here; we will reuse it later
+                * to flush out stuck ESPI packets (see espibug_workaround).
+                */
+               if (unlikely(!adapter->sge->espibug_skb)) {
+                       if (skb->protocol == htons(ETH_P_ARP) &&
+                           skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
+                               adapter->sge->espibug_skb = skb;
+                               /* We want to re-use this skb later. We
+                                * simply bump the reference count and it
+                                * will not be freed...
+                                */
+                               skb = skb_get(skb);
+                       }
+               }
+
+               cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
+               cpl->opcode = CPL_TX_PKT;
+               cpl->ip_csum_dis = 1;    /* SW calculates IP csum */
+               cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1;
+               /* the length field isn't used so don't bother setting it */
+
+               st->tx_cso += (skb->ip_summed == CHECKSUM_HW);
+               sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_HW);
+               sge->stats.tx_reg_pkts++;
+       }
+       cpl->iff = dev->if_port;
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+       if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
+               cpl->vlan_valid = 1;
+               cpl->vlan = htons(vlan_tx_tag_get(skb));
+               st->vlan_insert++;
+       } else
+#endif
+               cpl->vlan_valid = 0;
+
+       dev->trans_start = jiffies;
+       return t1_sge_tx(skb, adapter, 0, dev);
+}
+
+/*
+ * Callback for the Tx buffer reclaim timer.  Runs with softirqs disabled.
+ */
+static void sge_tx_reclaim_cb(unsigned long data)
+{
+       int i;
+       struct sge *sge = (struct sge *)data;
+
+       for (i = 0; i < SGE_CMDQ_N; ++i) {
+               struct cmdQ *q = &sge->cmdQ[i];
+
+               if (!spin_trylock(&q->lock))
+                       continue;
+
+               reclaim_completed_tx(sge, q);
+               if (i == 0 && q->in_use)   /* flush pending credits */
+                       writel(F_CMDQ0_ENABLE,
+                               sge->adapter->regs + A_SG_DOORBELL);
+
+               spin_unlock(&q->lock);
+       }
+       mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+}
+
+/*
+ * Propagate changes of the SGE coalescing parameters to the HW.
+ */
+int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
+{
+       sge->netdev->poll = t1_poll;
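+       /* Convert the coalescing delay from usecs to core clock ticks. */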
+       sge->fixed_intrtimer = p->rx_coalesce_usecs *
+               core_ticks_per_usec(sge->adapter);
+       writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
+       return 0;
+}
+
+/*
+ * Allocates both RX and TX resources and configures the SGE. However,
+ * the hardware is not enabled yet.
+ */
+int t1_sge_configure(struct sge *sge, struct sge_params *p)
+{
+       if (alloc_rx_resources(sge, p))
+               return -ENOMEM;
+       if (alloc_tx_resources(sge, p)) {
+               free_rx_resources(sge);
+               return -ENOMEM;
+       }
+       configure_sge(sge, p);
+
+       /*
+        * Now that we have sized the free lists, calculate the payload
+        * capacity of the large buffers.  Other parts of the driver use
+        * this to set the max offload coalescing size so that RX packets
+        * do not overflow our large buffers.
+        */
+       p->large_buf_capacity = jumbo_payload_capacity(sge);
+       return 0;
+}
+
+/*
+ * Disables the DMA engine.
+ */
+void t1_sge_stop(struct sge *sge)
+{
+       writel(0, sge->adapter->regs + A_SG_CONTROL);
+       (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+       if (is_T2(sge->adapter))
+               del_timer_sync(&sge->espibug_timer);
+       del_timer_sync(&sge->tx_reclaim_timer);
+}
+
+/*
+ * Enables the DMA engine.
+ */
+void t1_sge_start(struct sge *sge)
+{
+       refill_free_list(sge, &sge->freelQ[0]);
+       refill_free_list(sge, &sge->freelQ[1]);
+
+       writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
+       doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
+       (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+
+       mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+
+       if (is_T2(sge->adapter)) 
+               mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
+/*
+ * Callback for the T2 ESPI 'stuck packet feature' workaround.
+ */
+static void espibug_workaround(unsigned long data)
+{
+       struct adapter *adapter = (struct adapter *)data;
+       struct sge *sge = adapter->sge;
+
+       if (netif_running(adapter->port[0].dev)) {
+               struct sk_buff *skb = sge->espibug_skb;
+
+               u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
+
+               if ((seop & 0xfff0fff) == 0xfff && skb) {
+                       if (!skb->cb[0]) {
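+                               /*
+                                * Rewrite the stashed ARP's MAC addresses
+                                * with a fixed address; cb[0] marks the skb
+                                * as already patched.
+                                */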
+                               u8 ch_mac_addr[ETH_ALEN] =
+                                   {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
+                               memcpy(skb->data + sizeof(struct cpl_tx_pkt),
+                                   ch_mac_addr, ETH_ALEN);
+                               memcpy(skb->data + skb->len - 10, ch_mac_addr,
+                                   ETH_ALEN);
+                               skb->cb[0] = 0xff;
+                       }
+
+                       /* bump the reference count to avoid freeing of the
+                        * skb once the DMA has completed.
+                        */
+                       skb = skb_get(skb);
+                       t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
+               }
+       }
+       mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
+/*
+ * Creates a t1_sge structure and returns suggested resource parameters.
+ */
+struct sge * __devinit t1_sge_create(struct adapter *adapter,
+                                    struct sge_params *p)
+{
+       struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL);
+
+       if (!sge)
+               return NULL;
+       memset(sge, 0, sizeof(*sge));
+
+       sge->adapter = adapter;
+       sge->netdev = adapter->port[0].dev;
+       sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
+       sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+       init_timer(&sge->tx_reclaim_timer);
+       sge->tx_reclaim_timer.data = (unsigned long)sge;
+       sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
+
+       if (is_T2(sge->adapter)) {
+               init_timer(&sge->espibug_timer);
+               sge->espibug_timer.function = espibug_workaround;
+               sge->espibug_timer.data = (unsigned long)sge->adapter;
+               sge->espibug_timeout = 1;
+       }
+
+       p->cmdQ_size[0] = SGE_CMDQ0_E_N;
+       p->cmdQ_size[1] = SGE_CMDQ1_E_N;
+       p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
+       p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
+       p->rx_coalesce_usecs =  50;
+       p->coalesce_enable = 0;
+       p->sample_interval_usecs = 0;
+       p->polling = 0;
+
+       return sge;
+}
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
new file mode 100644 (file)
index 0000000..434b255
--- /dev/null
@@ -0,0 +1,105 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: sge.h                                                               *
+ * $Revision: 1.11 $                                                          *
+ * $Date: 2005/06/21 22:10:55 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_SGE_H_
+#define _CXGB_SGE_H_
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+
+#ifndef IRQ_RETVAL
+#define IRQ_RETVAL(x)
+typedef void irqreturn_t;
+#endif
+
+typedef irqreturn_t (*intr_handler_t)(int, void *, struct pt_regs *);
+
+struct sge_intr_counts {
+       unsigned int respQ_empty;      /* # times respQ empty */
+       unsigned int respQ_overflow;   /* # respQ overflow (fatal) */
+       unsigned int freelistQ_empty;  /* # times freelist empty */
+       unsigned int pkt_too_big;      /* packet too large (fatal) */
+       unsigned int pkt_mismatch;
+       unsigned int cmdQ_full[3];     /* not HW IRQ, host cmdQ[] full */
+       unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */
+       unsigned int ethernet_pkts;    /* # of Ethernet packets received */
+       unsigned int offload_pkts;     /* # of offload packets received */
+       unsigned int offload_bundles;  /* # of offload pkt bundles delivered */
+       unsigned int pure_rsps;        /* # of non-payload responses */
+       unsigned int unhandled_irqs;   /* # of unhandled interrupts */
+       unsigned int tx_ipfrags;
+       unsigned int tx_reg_pkts;
+       unsigned int tx_lso_pkts;
+       unsigned int tx_do_cksum;
+};
+
+struct sge_port_stats {
+       unsigned long rx_cso_good;     /* # of successful RX csum offloads */
+       unsigned long tx_cso;          /* # of TX checksum offloads */
+       unsigned long vlan_xtract;     /* # of VLAN tag extractions */
+       unsigned long vlan_insert;     /* # of VLAN tag insertions */
+       unsigned long tso;             /* # of TSO requests */
+       unsigned long rx_drops;        /* # of packets dropped due to no mem */
+};
+
+struct sk_buff;
+struct net_device;
+struct adapter;
+struct sge_params;
+struct sge;
+
+struct sge *t1_sge_create(struct adapter *, struct sge_params *);
+int t1_sge_configure(struct sge *, struct sge_params *);
+int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
+void t1_sge_destroy(struct sge *);
+intr_handler_t t1_select_intr_handler(adapter_t *adapter);
+unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
+                      unsigned int qid, struct net_device *netdev);
+int t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void t1_set_vlan_accel(struct adapter *adapter, int on_off);
+void t1_sge_start(struct sge *);
+void t1_sge_stop(struct sge *);
+int t1_sge_intr_error_handler(struct sge *);
+void t1_sge_intr_enable(struct sge *);
+void t1_sge_intr_disable(struct sge *);
+void t1_sge_intr_clear(struct sge *);
+const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge);
+const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port);
+
+#endif /* _CXGB_SGE_H_ */
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
new file mode 100644 (file)
index 0000000..1ebb5d1
--- /dev/null
@@ -0,0 +1,812 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: subr.c                                                              *
+ * $Revision: 1.27 $                                                         *
+ * $Date: 2005/06/22 01:08:36 $                                              *
+ * Description:                                                              *
+ *  Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+#include "elmer0.h"
+#include "regs.h"
+#include "gmac.h"
+#include "cphy.h"
+#include "sge.h"
+#include "espi.h"
+
+/**
+ *     t1_wait_op_done - wait until an operation is completed
+ *     @adapter: the adapter performing the operation
+ *     @reg: the register to check for completion
+ *     @mask: a single-bit field within @reg that indicates completion
+ *     @polarity: the value of the field when the operation is completed
+ *     @attempts: number of check iterations
+ *     @delay: delay in usecs between iterations
+ *
+ *     Wait until an operation is completed by checking a bit in a register
+ *     up to @attempts times.  Returns %0 if the operation completes and %1
+ *     otherwise.
+ */
+static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity,
+                   int attempts, int delay)
+{
+       while (1) {
+               u32 val = readl(adapter->regs + reg) & mask;
+
+               if (!!val == polarity)
+                       return 0;
+               if (--attempts == 0)
+                       return 1;
+               if (delay)
+                       udelay(delay);
+       }
+}
+
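+/* Number of polls (3us apart) of the TPI ready bit; ~150us worst case. */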
+#define TPI_ATTEMPTS 50
+
+/*
+ * Write a register over the TPI interface (unlocked and locked versions).
+ */
+static int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
+{
+       int tpi_busy;
+
+       writel(addr, adapter->regs + A_TPI_ADDR);
+       writel(value, adapter->regs + A_TPI_WR_DATA);
+       writel(F_TPIWR, adapter->regs + A_TPI_CSR);
+
+       tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
+                                  TPI_ATTEMPTS, 3);
+       if (tpi_busy)
+               CH_ALERT("%s: TPI write to 0x%x failed\n",
+                        adapter->name, addr);
+       return tpi_busy;
+}
+
+int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
+{
+       int ret;
+
+       spin_lock(&(adapter)->tpi_lock);
+       ret = __t1_tpi_write(adapter, addr, value);
+       spin_unlock(&(adapter)->tpi_lock);
+       return ret;
+}
+
+/*
+ * Read a register over the TPI interface (unlocked and locked versions).
+ */
+static int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
+{
+       int tpi_busy;
+
+       writel(addr, adapter->regs + A_TPI_ADDR);
+       writel(0, adapter->regs + A_TPI_CSR);
+
+       tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
+                                  TPI_ATTEMPTS, 3);
+       if (tpi_busy)
+               CH_ALERT("%s: TPI read from 0x%x failed\n",
+                        adapter->name, addr);
+       else
+               *valp = readl(adapter->regs + A_TPI_RD_DATA);
+       return tpi_busy;
+}
+
+int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
+{
+       int ret;
+
+       spin_lock(&(adapter)->tpi_lock);
+       ret = __t1_tpi_read(adapter, addr, valp);
+       spin_unlock(&(adapter)->tpi_lock);
+       return ret;
+}
+
+/*
+ * Called when a port's link settings change to propagate the new values to the
+ * associated PHY and MAC.  After performing the common tasks it invokes an
+ * OS-specific handler.
+ */
+/* static */ void link_changed(adapter_t *adapter, int port_id)
+{
+       int link_ok, speed, duplex, fc;
+       struct cphy *phy = adapter->port[port_id].phy;
+       struct link_config *lc = &adapter->port[port_id].link_config;
+
+       phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
+
+       lc->speed = speed < 0 ? SPEED_INVALID : speed;
+       lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
+       if (!(lc->requested_fc & PAUSE_AUTONEG))
+               fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+       if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
+               /* Set MAC speed, duplex, and flow control to match PHY. */
+               struct cmac *mac = adapter->port[port_id].mac;
+
+               mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc);
+               lc->fc = (unsigned char)fc;
+       }
+       t1_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
+}
+
+static int t1_pci_intr_handler(adapter_t *adapter)
+{
+       u32 pcix_cause;
+
+       pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause);
+
+       if (pcix_cause) {
+               pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE,
+                                        pcix_cause);
+               t1_fatal_err(adapter);    /* PCI errors are fatal */
+       }
+       return 0;
+}
+
+
+/*
+ * Wait until Elmer's MI1 interface is ready for new operations.
+ */
+static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
+{
+       int attempts = 100, busy;
+
+       do {
+               u32 val;
+
+               __t1_tpi_read(adapter, mi1_reg, &val);
+               busy = val & F_MI1_OP_BUSY;
+               if (busy)
+                       udelay(10);
+       } while (busy && --attempts);
+       if (busy)
+               CH_ALERT("%s: MDIO operation timed out\n",
+                        adapter->name);
+       return busy;
+}
+
+/*
+ * MI1 MDIO initialization.
+ */
+static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi)
+{
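+       /* clkdiv sets MDC to about clock_elmer0 / (2 * (clkdiv + 1)). */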
+       u32 clkdiv = bi->clock_elmer0 / (2 * bi->mdio_mdc) - 1;
+       u32 val = F_MI1_PREAMBLE_ENABLE | V_MI1_MDI_INVERT(bi->mdio_mdiinv) |
+               V_MI1_MDI_ENABLE(bi->mdio_mdien) | V_MI1_CLK_DIV(clkdiv);
+
+       if (!(bi->caps & SUPPORTED_10000baseT_Full))
+               val |= V_MI1_SOF(1);
+       t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
+}
+
+static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
+                            int reg_addr, unsigned int *valp)
+{
+       u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
+
+       spin_lock(&(adapter)->tpi_lock);
+
+       /* Write the address we want. */
+       __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+       __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
+       __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
+                      MI1_OP_INDIRECT_ADDRESS);
+       mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+
+       /* Write the operation we want. */
+       __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ);
+       mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+
+       /* Read the data. */
+       __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
+       spin_unlock(&(adapter)->tpi_lock);
+       return 0;
+}
+
+static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
+                             int reg_addr, unsigned int val)
+{
+       u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
+
+       spin_lock(&(adapter)->tpi_lock);
+
+       /* Write the address we want. */
+       __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+       __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
+       __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
+                      MI1_OP_INDIRECT_ADDRESS);
+       mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+
+       /* Write the data. */
+       __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
+       __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE);
+       mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+       spin_unlock(&(adapter)->tpi_lock);
+       return 0;
+}
+
+static struct mdio_ops mi1_mdio_ext_ops = {
+       mi1_mdio_init,
+       mi1_mdio_ext_read,
+       mi1_mdio_ext_write
+};
+
+enum {
+       CH_BRD_N110_1F,
+       CH_BRD_N210_1F,
+};
+
+static struct board_info t1_board[] = {
+
+{ CHBT_BOARD_N110, 1/*ports#*/,
+  SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T1,
+  CHBT_MAC_PM3393, CHBT_PHY_88X2010,
+  125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
+  1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
+  0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
+  &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
+  "Chelsio N110 1x10GBaseX NIC" },
+
+{ CHBT_BOARD_N210, 1/*ports#*/,
+  SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T2,
+  CHBT_MAC_PM3393, CHBT_PHY_88X2010,
+  125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
+  1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
+  0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
+  &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
+  "Chelsio N210 1x10GBaseX NIC" },
+
+};
+
+struct pci_device_id t1_pci_tbl[] = {
+       CH_DEVICE(7, 0, CH_BRD_N110_1F),
+       CH_DEVICE(10, 1, CH_BRD_N210_1F),
+       { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, t1_pci_tbl);
+
+/*
+ * Return the board_info structure with a given index.  Out-of-range indices
+ * return NULL.
+ */
+const struct board_info *t1_get_board_info(unsigned int board_id)
+{
+       return board_id < ARRAY_SIZE(t1_board) ? &t1_board[board_id] : NULL;
+}
+
+struct chelsio_vpd_t {
+       u32 format_version;
+       u8 serial_number[16];
+       u8 mac_base_address[6];
+       u8 pad[2];           /* make multiple-of-4 size requirement explicit */
+};
+
+#define EEPROMSIZE        (8 * 1024)
+#define EEPROM_MAX_POLL   4
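+/* t1_seeprom_read polls the VPD flag up to EEPROM_MAX_POLL times, 50us apart. */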
+
+/*
+ * Read SEEPROM. A zero is written to the flag register when the address is
+ * written to the Control register. The hardware device will set the flag to
+ * one when 4 bytes have been transferred to the Data register.
+ */
+int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
+{
+       int i = EEPROM_MAX_POLL;
+       u16 val;
+
+       if (addr >= EEPROMSIZE || (addr & 3))
+               return -EINVAL;
+
+       pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr);
+       do {
+               udelay(50);
+               pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val);
+       } while (!(val & F_VPD_OP_FLAG) && --i);
+
+       if (!(val & F_VPD_OP_FLAG)) {
+               CH_ERR("%s: reading EEPROM address 0x%x failed\n",
+                      adapter->name, addr);
+               return -EIO;
+       }
+       pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, data);
+       *data = le32_to_cpu(*data);
+       return 0;
+}
+
+static int t1_eeprom_vpd_get(adapter_t *adapter, struct chelsio_vpd_t *vpd)
+{
+       int addr, ret = 0;
+
+       for (addr = 0; !ret && addr < sizeof(*vpd); addr += sizeof(u32))
+               ret = t1_seeprom_read(adapter, addr,
+                                     (u32 *)((u8 *)vpd + addr));
+
+       return ret;
+}
+
+/*
+ * Read a port's MAC address from the VPD ROM.
+ */
+static int vpd_macaddress_get(adapter_t *adapter, int index, u8 mac_addr[])
+{
+       struct chelsio_vpd_t vpd;
+
+       if (t1_eeprom_vpd_get(adapter, &vpd))
+               return 1;
+       memcpy(mac_addr, vpd.mac_base_address, 5);
+       mac_addr[5] = vpd.mac_base_address[5] + index;
+       return 0;
+}
+
+/*
+ * Set up the MAC/PHY according to the requested link settings.
+ *
+ * If the PHY can auto-negotiate, first decide what to advertise, then
+ * enable/disable auto-negotiation as desired and reset.
+ *
+ * If the PHY does not auto-negotiate we just reset it.
+ *
+ * If auto-negotiation is off, set the MAC to the proper speed/duplex/FC;
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
+{
+       unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+       if (lc->supported & SUPPORTED_Autoneg) {
+               lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE);
+               if (fc) {
+                       lc->advertising |= ADVERTISED_ASYM_PAUSE;
+                       if (fc == (PAUSE_RX | PAUSE_TX))
+                               lc->advertising |= ADVERTISED_PAUSE;
+               }
+               phy->ops->advertise(phy, lc->advertising);
+
+               if (lc->autoneg == AUTONEG_DISABLE) {
+                       lc->speed = lc->requested_speed;
+                       lc->duplex = lc->requested_duplex;
+                       lc->fc = (unsigned char)fc;
+                       mac->ops->set_speed_duplex_fc(mac, lc->speed,
+                                                     lc->duplex, fc);
+                       /* Also disables autoneg */
+                       phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
+                       phy->ops->reset(phy, 0);
+               } else
+                       phy->ops->autoneg_enable(phy); /* also resets PHY */
+       } else {
+               mac->ops->set_speed_duplex_fc(mac, -1, -1, fc);
+               lc->fc = (unsigned char)fc;
+               phy->ops->reset(phy, 0);
+       }
+       return 0;
+}
+
+/*
+ * External interrupt handler for boards using elmer0.
+ */
+int elmer0_ext_intr_handler(adapter_t *adapter)
+{
+       struct cphy *phy;
+       int phy_cause;
+       u32 cause;
+
+       t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause);
+
+       switch (board_info(adapter)->board) {
+       case CHBT_BOARD_N210:
+       case CHBT_BOARD_N110:
+               if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */
+                       phy = adapter->port[0].phy;
+                       phy_cause = phy->ops->interrupt_handler(phy);
+                       if (phy_cause & cphy_cause_link_change)
+                               link_changed(adapter, 0);
+               }
+               break;
+       }
+       t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
+       return 0;
+}
+
+/* Enables all interrupts. */
+void t1_interrupts_enable(adapter_t *adapter)
+{
+       unsigned int i;
+       u32 pl_intr;
+
+       adapter->slow_intr_mask = F_PL_INTR_SGE_ERR;
+
+       t1_sge_intr_enable(adapter->sge);
+       if (adapter->espi) {
+               adapter->slow_intr_mask |= F_PL_INTR_ESPI;
+               t1_espi_intr_enable(adapter->espi);
+       }
+
+       /* Enable MAC/PHY interrupts for each port. */
+       for_each_port(adapter, i) {
+               adapter->port[i].mac->ops->interrupt_enable(adapter->port[i].mac);
+               adapter->port[i].phy->ops->interrupt_enable(adapter->port[i].phy);
+       }
+
+       /* Enable PCIX & external chip interrupts on ASIC boards. */
+       pl_intr = readl(adapter->regs + A_PL_ENABLE);
+
+       /* PCI-X interrupts */
+       pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE,
+                              0xffffffff);
+
+       adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
+       pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
+       writel(pl_intr, adapter->regs + A_PL_ENABLE);
+}
+
+/* Disables all interrupts. */
+void t1_interrupts_disable(adapter_t* adapter)
+{
+       unsigned int i;
+
+       t1_sge_intr_disable(adapter->sge);
+       if (adapter->espi)
+               t1_espi_intr_disable(adapter->espi);
+
+       /* Disable MAC/PHY interrupts for each port. */
+       for_each_port(adapter, i) {
+               adapter->port[i].mac->ops->interrupt_disable(adapter->port[i].mac);
+               adapter->port[i].phy->ops->interrupt_disable(adapter->port[i].phy);
+       }
+
+       /* Disable PCIX & external chip interrupts. */
+       writel(0, adapter->regs + A_PL_ENABLE);
+
+       /* PCI-X interrupts */
+       pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
+
+       adapter->slow_intr_mask = 0;
+}
+
+/* Clears all interrupts. */
+void t1_interrupts_clear(adapter_t* adapter)
+{
+       unsigned int i;
+       u32 pl_intr;
+
+       t1_sge_intr_clear(adapter->sge);
+       if (adapter->espi)
+               t1_espi_intr_clear(adapter->espi);
+
+       /* Clear MAC/PHY interrupts for each port. */
+       for_each_port(adapter, i) {
+               adapter->port[i].mac->ops->interrupt_clear(adapter->port[i].mac);
+               adapter->port[i].phy->ops->interrupt_clear(adapter->port[i].phy);
+       }
+
+       /* Clear PCIX & external chip interrupts. */
+       pl_intr = readl(adapter->regs + A_PL_CAUSE);
+
+       writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX,
+              adapter->regs + A_PL_CAUSE);
+
+       /* PCI-X interrupts */
+       pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff);
+}
+
+/*
+ * Slow path interrupt handler for ASICs.
+ */
+int t1_slow_intr_handler(adapter_t *adapter)
+{
+       u32 cause = readl(adapter->regs + A_PL_CAUSE);
+
+       cause &= adapter->slow_intr_mask;
+       if (!cause)
+               return 0;
+       if (cause & F_PL_INTR_SGE_ERR)
+               t1_sge_intr_error_handler(adapter->sge);
+       if (cause & F_PL_INTR_ESPI)
+               t1_espi_intr_handler(adapter->espi);
+       if (cause & F_PL_INTR_PCIX)
+               t1_pci_intr_handler(adapter);
+       if (cause & F_PL_INTR_EXT)
+               t1_elmer0_ext_intr(adapter);
+
+       /* Clear the interrupts just processed. */
+       writel(cause, adapter->regs + A_PL_CAUSE);
+       (void)readl(adapter->regs + A_PL_CAUSE); /* flush writes */
+       return 1;
+}
+
+/* Pause deadlock avoidance parameters */
+#define DROP_MSEC 16
+#define DROP_PKTS_CNT  1
+
+static void set_csum_offload(adapter_t *adapter, u32 csum_bit, int enable)
+{
+       u32 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
+
+       if (enable)
+               val |= csum_bit;
+       else
+               val &= ~csum_bit;
+       writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
+}
+
+void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable)
+{
+       set_csum_offload(adapter, F_IP_CSUM, enable);
+}
+
+void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable)
+{
+       set_csum_offload(adapter, F_UDP_CSUM, enable);
+}
+
+void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable)
+{
+       set_csum_offload(adapter, F_TCP_CSUM, enable);
+}
+
+static void t1_tp_reset(adapter_t *adapter, unsigned int tp_clk)
+{
+       u32 val;
+
+       val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
+             F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
+       val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
+              F_TP_IN_ESPI_CHECK_TCP_CSUM;
+       writel(val, adapter->regs + A_TP_IN_CONFIG);
+       writel(F_TP_OUT_CSPI_CPL |
+              F_TP_OUT_ESPI_ETHERNET |
+              F_TP_OUT_ESPI_GENERATE_IP_CSUM |
+              F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
+              adapter->regs + A_TP_OUT_CONFIG);
+
+       val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
+       val &= ~(F_IP_CSUM | F_UDP_CSUM | F_TCP_CSUM);
+       writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
+
+       /*
+        * Enable pause frame deadlock prevention.
+        */
+       if (is_T2(adapter)) {
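+               /* Convert DROP_MSEC from milliseconds to TP core clock ticks. */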
+               u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
+
+               writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
+                      V_DROP_TICKS_CNT(drop_ticks) |
+                      V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
+                      adapter->regs + A_TP_TX_DROP_CONFIG);
+       }
+
+       writel(F_TP_RESET, adapter->regs + A_TP_RESET);
+}
+
+int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+                              struct adapter_params *p)
+{
+       p->chip_version = bi->chip_term;
+       if (p->chip_version == CHBT_TERM_T1 ||
+           p->chip_version == CHBT_TERM_T2) {
+               u32 val = readl(adapter->regs + A_TP_PC_CONFIG);
+
+               val = G_TP_PC_REV(val);
+               if (val == 2)
+                       p->chip_revision = TERM_T1B;
+               else if (val == 3)
+                       p->chip_revision = TERM_T2;
+               else
+                       return -1;
+       } else
+               return -1;
+       return 0;
+}
+
+/*
+ * Enable board components other than the Chelsio chip, such as external MAC
+ * and PHY.
+ */
+static int board_init(adapter_t *adapter, const struct board_info *bi)
+{
+       switch (bi->board) {
+       case CHBT_BOARD_N110:
+       case CHBT_BOARD_N210:
+               writel(V_TPIPAR(0xf), adapter->regs + A_TPI_PAR);
+               t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
+               break;
+       }
+       return 0;
+}
+
+/*
+ * Initialize and configure the Terminator HW modules.  Note that external
+ * MAC and PHYs are initialized separately.
+ */
+int t1_init_hw_modules(adapter_t *adapter)
+{
+       int err = -EIO;
+       const struct board_info *bi = board_info(adapter);
+
+       if (!bi->clock_mc4) {
+               u32 val = readl(adapter->regs + A_MC4_CFG);
+
+               writel(val | F_READY | F_MC4_SLOW, adapter->regs + A_MC4_CFG);
+               writel(F_M_BUS_ENABLE | F_TCAM_RESET,
+                      adapter->regs + A_MC5_CONFIG);
+       }
+
+       if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
+                                         bi->espi_nports))
+               goto out_err;
+
+       t1_tp_reset(adapter, bi->clock_core);
+
+       err = t1_sge_configure(adapter->sge, &adapter->params.sge);
+       if (err)
+               goto out_err;
+
+       err = 0;
+ out_err:
+       return err;
+}
+
+/*
+ * Determine a card's PCI mode.
+ */
+static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
+{
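+       /* Bus clock in MHz, indexed by the clock field of A_PCICFG_MODE. */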
+       static unsigned short speed_map[] = { 33, 66, 100, 133 };
+       u32 pci_mode;
+
+       pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode);
+       p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)];
+       p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32;
+       p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0;
+}
+
+/*
+ * Release the structures holding the SW per-Terminator-HW-module state.
+ */
+void t1_free_sw_modules(adapter_t *adapter)
+{
+       unsigned int i;
+
+       for_each_port(adapter, i) {
+               struct cmac *mac = adapter->port[i].mac;
+               struct cphy *phy = adapter->port[i].phy;
+
+               if (mac)
+                       mac->ops->destroy(mac);
+               if (phy)
+                       phy->ops->destroy(phy);
+       }
+
+       if (adapter->sge)
+               t1_sge_destroy(adapter->sge);
+       if (adapter->espi)
+               t1_espi_destroy(adapter->espi);
+}
+
+static void __devinit init_link_config(struct link_config *lc,
+                                      const struct board_info *bi)
+{
+       lc->supported = bi->caps;
+       lc->requested_speed = lc->speed = SPEED_INVALID;
+       lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
+       lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+       if (lc->supported & SUPPORTED_Autoneg) {
+               lc->advertising = lc->supported;
+               lc->autoneg = AUTONEG_ENABLE;
+               lc->requested_fc |= PAUSE_AUTONEG;
+       } else {
+               lc->advertising = 0;
+               lc->autoneg = AUTONEG_DISABLE;
+       }
+}
+
+/*
+ * Allocate and initialize the data structures that hold the SW state of
+ * the Terminator HW modules.
+ */
+int __devinit t1_init_sw_modules(adapter_t *adapter,
+                                const struct board_info *bi)
+{
+       unsigned int i;
+
+       adapter->params.brd_info = bi;
+       adapter->params.nports = bi->port_number;
+       adapter->params.stats_update_period = bi->gmac->stats_update_period;
+
+       adapter->sge = t1_sge_create(adapter, &adapter->params.sge);
+       if (!adapter->sge) {
+               CH_ERR("%s: SGE initialization failed\n",
+                      adapter->name);
+               goto error;
+       }
+
+       if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) {
+               CH_ERR("%s: ESPI initialization failed\n",
+                      adapter->name);
+               goto error;
+       }
+
+       board_init(adapter, bi);
+       bi->mdio_ops->init(adapter, bi);
+       if (bi->gphy->reset)
+               bi->gphy->reset(adapter);
+       if (bi->gmac->reset)
+               bi->gmac->reset(adapter);
+
+       for_each_port(adapter, i) {
+               u8 hw_addr[6];
+               struct cmac *mac;
+               int phy_addr = bi->mdio_phybaseaddr + i;
+
+               adapter->port[i].phy = bi->gphy->create(adapter, phy_addr,
+                                                       bi->mdio_ops);
+               if (!adapter->port[i].phy) {
+                       CH_ERR("%s: PHY %d initialization failed\n",
+                              adapter->name, i);
+                       goto error;
+               }
+
+               adapter->port[i].mac = mac = bi->gmac->create(adapter, i);
+               if (!mac) {
+                       CH_ERR("%s: MAC %d initialization failed\n",
+                              adapter->name, i);
+                       goto error;
+               }
+
+               /*
+                * Get the port's MAC address from the EEPROM if one exists,
+                * otherwise use the address hardcoded in the MAC.
+                */
+               if (vpd_macaddress_get(adapter, i, hw_addr)) {
+                       CH_ERR("%s: could not read MAC address from VPD ROM\n",
+                              adapter->port[i].dev->name);
+                       goto error;
+               }
+               memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN);
+               init_link_config(&adapter->port[i].link_config, bi);
+       }
+
+       get_pci_mode(adapter, &adapter->params.pci);
+       t1_interrupts_clear(adapter);
+       return 0;
+
+ error:
+       t1_free_sw_modules(adapter);
+       return -1;
+}
diff --git a/drivers/net/chelsio/suni1x10gexp_regs.h b/drivers/net/chelsio/suni1x10gexp_regs.h
new file mode 100644 (file)
index 0000000..81816c2
--- /dev/null
@@ -0,0 +1,213 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: suni1x10gexp_regs.h                                                 *
+ * $Revision: 1.9 $                                                          *
+ * $Date: 2005/06/22 00:17:04 $                                              *
+ * Description:                                                              *
+ *  PMC/SIERRA (pm3393) MAC-PHY functionality.                               *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: PMC/SIERRA                                                       *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_SUNI1x10GEXP_REGS_H_
+#define _CXGB_SUNI1x10GEXP_REGS_H_
+
+/******************************************************************************/
+/** S/UNI-1x10GE-XP REGISTER ADDRESS MAP                                     **/
+/******************************************************************************/
+/* Refer to the Register Bit Masks below for the naming of each register and  */
+/* to the S/UNI-1x10GE-XP Data Sheet for the meaning of each bit              */
+/******************************************************************************/
+
+#define SUNI1x10GEXP_REG_DEVICE_STATUS                                   0x0004
+#define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS                         0x000D
+#define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE                         0x000E
+#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE                    0x0102
+#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS                    0x0104
+#define SUNI1x10GEXP_REG_RXXG_CONFIG_1                                   0x2040
+#define SUNI1x10GEXP_REG_RXXG_CONFIG_3                                   0x2042
+#define SUNI1x10GEXP_REG_RXXG_INTERRUPT                                  0x2043
+#define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH                           0x2045
+#define SUNI1x10GEXP_REG_RXXG_SA_15_0                                    0x2046
+#define SUNI1x10GEXP_REG_RXXG_SA_31_16                                   0x2047
+#define SUNI1x10GEXP_REG_RXXG_SA_47_32                                   0x2048
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW                     0x204D
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID                     0x204E
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH                    0x204F
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW                         0x206A
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW                      0x206B
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH                     0x206C
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH                        0x206D
+#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0                   0x206E
+#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2                   0x2070
+#define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE                            0x2088
+#define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS                            0x2089
+#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE                       0x208B
+#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS                       0x208C
+#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE                          0x20C7
+#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS                          0x20C8
+#define SUNI1x10GEXP_REG_MSTAT_CONTROL                                   0x2100
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0                        0x2101
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1                        0x2102
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2                        0x2103
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3                        0x2104
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0                          0x2105
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1                          0x2106
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2                          0x2107
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3                          0x2108
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW                             0x2110
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW                             0x2114
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW                             0x2120
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW                             0x2124
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW                             0x2128
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW                             0x2130
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW                            0x2138
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW                            0x213C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW                            0x2140
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW                            0x2144
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW                            0x214C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW                            0x2150
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW                            0x2154
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW                            0x2158
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW                            0x2194
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW                            0x219C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW                            0x21A0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW                            0x21A8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW                            0x21B0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW                            0x21B8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW                            0x21BC
+#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE                       0x2209
+#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT                    0x220A
+#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK                           0x2282
+#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT                                0x2283
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS                        0x2300
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE                        0x2301
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK                          0x2302
+#define SUNI1x10GEXP_REG_TXXG_CONFIG_1                                   0x3040
+#define SUNI1x10GEXP_REG_TXXG_CONFIG_3                                   0x3042
+#define SUNI1x10GEXP_REG_TXXG_INTERRUPT                                  0x3043
+#define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE                             0x3045
+#define SUNI1x10GEXP_REG_TXXG_SA_15_0                                    0x3047
+#define SUNI1x10GEXP_REG_TXXG_SA_31_16                                   0x3048
+#define SUNI1x10GEXP_REG_TXXG_SA_47_32                                   0x3049
+#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS                           0x3084
+#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE                           0x3085
+#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE                          0x30C6
+#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS                          0x30C7
+#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE                 0x320C
+#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION             0x320D
+#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK                           0x3282
+#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT                                0x3283
+
+/******************************************************************************/
+/*                 -- End register offset definitions --                      */
+/******************************************************************************/
+
+/******************************************************************************/
+/** SUNI-1x10GE-XP REGISTER BIT MASKS                                        **/
+/******************************************************************************/
+
+/*----------------------------------------------------------------------------
+ * Register 0x0004: S/UNI-1x10GE-XP Device Status
+ *    Bit 9 TOP_SXRA_EXPIRED
+ *    Bit 8 TOP_MDIO_BUSY
+ *    Bit 7 TOP_DTRB
+ *    Bit 6 TOP_EXPIRED
+ *    Bit 5 TOP_PAUSED
+ *    Bit 4 TOP_PL4_ID_DOOL
+ *    Bit 3 TOP_PL4_IS_DOOL
+ *    Bit 2 TOP_PL4_ID_ROOL
+ *    Bit 1 TOP_PL4_IS_ROOL
+ *    Bit 0 TOP_PL4_OUT_ROOL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED  0x0200
+#define SUNI1x10GEXP_BITMSK_TOP_EXPIRED       0x0040
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL   0x0010
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL   0x0008
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL   0x0004
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL   0x0002
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x000E: PM3393 Global Interrupt Enable
+ *    Bit 15 TOP_INTE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TOP_INTE  0x8000
+
+/*----------------------------------------------------------------------------
+ * Register 0x2040: RXXG Configuration 1
+ *    Bit 15  RXXG_RXEN
+ *    Bit 14  RXXG_ROCF
+ *    Bit 13  RXXG_PAD_STRIP
+ *    Bit 10  RXXG_PUREP
+ *    Bit 9   RXXG_LONGP
+ *    Bit 8   RXXG_PARF
+ *    Bit 7   RXXG_FLCHK
+ *    Bit 5   RXXG_PASS_CTRL
+ *    Bit 3   RXXG_CRC_STRIP
+ *    Bit 2-0 RXXG_MIFG
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_RXEN       0x8000
+#define SUNI1x10GEXP_BITMSK_RXXG_PUREP      0x0400
+#define SUNI1x10GEXP_BITMSK_RXXG_FLCHK      0x0080
+#define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP  0x0008
+
+/*----------------------------------------------------------------------------
+ * Register 0x2070: RXXG Address Filter Control 2
+ *    Bit 1 RXXG_PMODE
+ *    Bit 0 RXXG_MHASH_EN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_PMODE     0x0002
+#define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2100: MSTAT Control
+ *    Bit 2 MSTAT_WRITE
+ *    Bit 1 MSTAT_CLEAR
+ *    Bit 0 MSTAT_SNAP
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR  0x0002
+#define SUNI1x10GEXP_BITMSK_MSTAT_SNAP   0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3040: TXXG Configuration Register 1
+ *    Bit 15   TXXG_TXEN0
+ *    Bit 13   TXXG_HOSTPAUSE
+ *    Bit 12-7 TXXG_IPGT
+ *    Bit 5    TXXG_32BIT_ALIGN
+ *    Bit 4    TXXG_CRCEN
+ *    Bit 3    TXXG_FCTX
+ *    Bit 2    TXXG_FCRX
+ *    Bit 1    TXXG_PADEN
+ *    Bit 0    TXXG_SPRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_TXEN0        0x8000
+#define SUNI1x10GEXP_BITOFF_TXXG_IPGT         7
+#define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN  0x0020
+#define SUNI1x10GEXP_BITMSK_TXXG_CRCEN        0x0010
+#define SUNI1x10GEXP_BITMSK_TXXG_FCTX         0x0008
+#define SUNI1x10GEXP_BITMSK_TXXG_FCRX         0x0004
+#define SUNI1x10GEXP_BITMSK_TXXG_PADEN        0x0002
+
+#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */
+
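The bit masks above are intended to be OR-ed together when a MAC driver programs the corresponding 16-bit register. A minimal sketch for TXXG Configuration 1 (register 0x3040); pmwrite() and adapter are hypothetical placeholders rather than symbols from this patch, and the inter-packet gap value of 12 is an arbitrary example:

    u16 val = SUNI1x10GEXP_BITMSK_TXXG_TXEN0         /* enable transmit  */
            | SUNI1x10GEXP_BITMSK_TXXG_CRCEN         /* append CRC       */
            | SUNI1x10GEXP_BITMSK_TXXG_PADEN         /* pad short frames */
            | (12 << SUNI1x10GEXP_BITOFF_TXXG_IPGT); /* IPG, bits 12-7   */

    pmwrite(adapter, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val);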
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index d0fa2448761d0fdb1dcebae2b23de9b9c23d0b25..25cc20e415dae23247825fe947b4cb19725ee0bf 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
 
 #define DRV_NAME               "e100"
 #define DRV_EXT                "-NAPI"
-#define DRV_VERSION            "3.4.8-k2"DRV_EXT
+#define DRV_VERSION            "3.4.14-k2"DRV_EXT
 #define DRV_DESCRIPTION                "Intel(R) PRO/100 Network Driver"
 #define DRV_COPYRIGHT          "Copyright(c) 1999-2005 Intel Corporation"
 #define PFX                    DRV_NAME ": "
@@ -785,6 +785,7 @@ static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
 }
 
 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
+#define E100_WAIT_SCB_FAST 20       /* delay like the old code */
 static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
 {
        unsigned long flags;
@@ -798,7 +799,7 @@ static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
                if(likely(!readb(&nic->csr->scb.cmd_lo)))
                        break;
                cpu_relax();
-               if(unlikely(i > (E100_WAIT_SCB_TIMEOUT >> 1)))
+               if(unlikely(i > E100_WAIT_SCB_FAST))
                        udelay(5);
        }
        if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
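The hunk above makes udelay(5) kick in after only E100_WAIT_SCB_FAST iterations of the SCB wait loop instead of after half of E100_WAIT_SCB_TIMEOUT, i.e. the poll backs off much sooner ("delay like the old code"). A stand-alone sketch of the resulting spin-then-back-off pattern; ready() is a placeholder predicate, not a driver function:

    /* Spin quickly for a few iterations, then back off with small delays. */
    for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
            if (ready())
                    break;          /* fast path, no delay inserted   */
            cpu_relax();
            if (i > E100_WAIT_SCB_FAST)
                    udelay(5);      /* slow path, yield a little time */
    }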
@@ -902,8 +903,8 @@ static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
 
 static void e100_get_defaults(struct nic *nic)
 {
-       struct param_range rfds = { .min = 16, .max = 256, .count = 64 };
-       struct param_range cbs  = { .min = 64, .max = 256, .count = 64 };
+       struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
+       struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };
 
        pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
        /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
@@ -1006,25 +1007,213 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
                c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
 }
 
+/********************************************************/
+/*  Micro code for 8086:1229 Rev 8                      */
+/********************************************************/
+
+/*  Parameter values for the D101M B-step  */
+#define D101M_CPUSAVER_TIMER_DWORD             78
+#define D101M_CPUSAVER_BUNDLE_DWORD            65
+#define D101M_CPUSAVER_MIN_SIZE_DWORD          126
+
+#define D101M_B_RCVBUNDLE_UCODE \
+{\
+0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
+0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
+0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
+0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
+0x00380438, 0x00000000, 0x00140000, 0x00380555, \
+0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
+0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
+0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
+0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
+0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
+0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
+0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
+0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
+0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
+0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
+0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
+0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
+0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
+0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
+0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
+0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
+0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
+0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
+0x00380559, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
+0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
+0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
+}
+
+/********************************************************/
+/*  Micro code for 8086:1229 Rev 9                      */
+/********************************************************/
+
+/*  Parameter values for the D101S  */
+#define D101S_CPUSAVER_TIMER_DWORD             78
+#define D101S_CPUSAVER_BUNDLE_DWORD            67
+#define D101S_CPUSAVER_MIN_SIZE_DWORD          128
+
+#define D101S_RCVBUNDLE_UCODE \
+{\
+0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
+0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
+0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
+0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
+0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
+0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
+0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
+0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
+0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
+0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
+0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
+0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
+0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
+0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
+0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
+0x00101313, 0x00380700, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
+0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
+0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
+0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
+0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
+0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
+0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
+0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
+0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
+0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00130831, \
+0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
+0x00041000, 0x00010004, 0x00380700  \
+}
+
+/********************************************************/
+/*  Micro code for the 8086:1229 Rev F/10               */
+/********************************************************/
+
+/*  Parameter values for the D102 E-step  */
+#define D102_E_CPUSAVER_TIMER_DWORD            42
+#define D102_E_CPUSAVER_BUNDLE_DWORD           54
+#define D102_E_CPUSAVER_MIN_SIZE_DWORD         46
+
+#define     D102_E_RCVBUNDLE_UCODE \
+{\
+0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
+0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
+0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
+0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
+0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
+0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
+0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+}
+
 static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 {
-       int i;
-       static const u32 ucode[UCODE_SIZE] = {
-               /* NFS packets are misinterpreted as TCO packets and
-                * incorrectly routed to the BMC over SMBus.  This
-                * microcode patch checks the fragmented IP bit in the
-                * NFS/UDP header to distinguish between NFS and TCO. */
-               0x0EF70E36, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF,
-               0x1FFF1FFF, 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000,
-               0x00906EFD, 0x00900EFD, 0x00E00EF8,
-       };
+/* *INDENT-OFF* */
+       static struct {
+               u32 ucode[UCODE_SIZE + 1];
+               u8 mac;
+               u8 timer_dword;
+               u8 bundle_dword;
+               u8 min_size_dword;
+       } ucode_opts[] = {
+               { D101M_B_RCVBUNDLE_UCODE,
+                 mac_82559_D101M,
+                 D101M_CPUSAVER_TIMER_DWORD,
+                 D101M_CPUSAVER_BUNDLE_DWORD,
+                 D101M_CPUSAVER_MIN_SIZE_DWORD },
+               { D101S_RCVBUNDLE_UCODE,
+                 mac_82559_D101S,
+                 D101S_CPUSAVER_TIMER_DWORD,
+                 D101S_CPUSAVER_BUNDLE_DWORD,
+                 D101S_CPUSAVER_MIN_SIZE_DWORD },
+               { D102_E_RCVBUNDLE_UCODE,
+                 mac_82551_F,
+                 D102_E_CPUSAVER_TIMER_DWORD,
+                 D102_E_CPUSAVER_BUNDLE_DWORD,
+                 D102_E_CPUSAVER_MIN_SIZE_DWORD },
+               { D102_E_RCVBUNDLE_UCODE,
+                 mac_82551_10,
+                 D102_E_CPUSAVER_TIMER_DWORD,
+                 D102_E_CPUSAVER_BUNDLE_DWORD,
+                 D102_E_CPUSAVER_MIN_SIZE_DWORD },
+               { {0}, 0, 0, 0, 0}
+       }, *opts;
+/* *INDENT-ON* */
+
+#define BUNDLESMALL 1
+#define BUNDLEMAX 50
+#define INTDELAY 15000
+
+       opts = ucode_opts;
+
+       /* do not load u-code for ICH devices */
+       if (nic->flags & ich)
+               return;
+
+       /* Search for ucode match against h/w rev_id */
+       while (opts->mac) {
+               if (nic->mac == opts->mac) {
+                       int i;
+                       u32 *ucode = opts->ucode;
+
+                       /* Insert user-tunable settings */
+                       ucode[opts->timer_dword] &= 0xFFFF0000;
+                       ucode[opts->timer_dword] |=
+                               (u16) INTDELAY;
+                       ucode[opts->bundle_dword] &= 0xFFFF0000;
+                       ucode[opts->bundle_dword] |= (u16) BUNDLEMAX;
+                       ucode[opts->min_size_dword] &= 0xFFFF0000;
+                       ucode[opts->min_size_dword] |=
+                               (BUNDLESMALL) ?  0xFFFF : 0xFF80;
+
+                       for(i = 0; i < UCODE_SIZE; i++)
+                               cb->u.ucode[i] = cpu_to_le32(ucode[i]);
+                       cb->command = cpu_to_le16(cb_ucode);
+                       return;
+               }
+               opts++;
+       }
 
-       if(nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
-               for(i = 0; i < UCODE_SIZE; i++)
-                       cb->u.ucode[i] = cpu_to_le32(ucode[i]);
-               cb->command = cpu_to_le16(cb_ucode);
-       } else
-               cb->command = cpu_to_le16(cb_nop);
+       cb->command = cpu_to_le16(cb_nop);
 }
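The microcode images above reserve dedicated dwords whose low 16 bits carry the CPU-saver tunables, and e100_load_ucode() patches them in place before the download. A minimal illustration of that packing, using the D101M timer index and the INTDELAY default defined above (the upper 16 bits of the dword are microcode proper and must be preserved):

    u32 dword = ucode[D101M_CPUSAVER_TIMER_DWORD];

    dword &= 0xFFFF0000;            /* keep the instruction half          */
    dword |= (u16) INTDELAY;        /* 15000 == 0x3A98 into the low half  */
    ucode[D101M_CPUSAVER_TIMER_DWORD] = dword;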
 
 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
@@ -1307,14 +1496,15 @@ static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
 {
        cb->command = nic->tx_command;
        /* interrupt every 16 packets regardless of delay */
-       if((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cb_i;
+       if((nic->cbs_avail & ~15) == nic->cbs_avail)
+               cb->command |= cpu_to_le16(cb_i);
        cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
        cb->u.tcb.tcb_byte_count = 0;
        cb->u.tcb.threshold = nic->tx_threshold;
        cb->u.tcb.tbd_count = 1;
        cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
                skb->data, skb->len, PCI_DMA_TODEVICE));
-       // check for mapping failure?
+       /* check for mapping failure? */
        cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
 }
 
@@ -1539,7 +1729,7 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
                /* Don't indicate if hardware indicates errors */
                nic->net_stats.rx_dropped++;
                dev_kfree_skb_any(skb);
-       } else if(actual_size > nic->netdev->mtu + VLAN_ETH_HLEN) {
+       } else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
                /* Don't indicate oversized frames */
                nic->rx_over_length_errors++;
                nic->net_stats.rx_dropped++;
@@ -1706,6 +1896,7 @@ static int e100_poll(struct net_device *netdev, int *budget)
 static void e100_netpoll(struct net_device *netdev)
 {
        struct nic *nic = netdev_priv(netdev);
+
        e100_disable_irq(nic);
        e100_intr(nic->pdev->irq, netdev, NULL);
        e100_tx_clean(nic);
@@ -2108,6 +2299,8 @@ static void e100_diag_test(struct net_device *netdev,
        }
        for(i = 0; i < E100_TEST_LEN; i++)
                test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
+
+       msleep_interruptible(4 * 1000);
 }
 
 static int e100_phys_id(struct net_device *netdev, u32 data)
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
new file mode 100644 (file)
index 0000000..bf3440a
--- /dev/null
@@ -0,0 +1,1843 @@
+/*
+   sis190.c: Silicon Integrated Systems SiS190 ethernet driver
+
+   Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
+   Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
+   Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
+
+   Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
+   genuine driver.
+
+   This software may be used and distributed according to the terms of
+   the GNU General Public License (GPL), incorporated herein by reference.
+   Drivers based on or derived from this code fall under the GPL and must
+   retain the authorship, copyright and license notice.  This file is not
+   a complete program and may only be used when the entire operating
+   system is licensed under the GPL.
+
+   See the file COPYING in this distribution for more information.
+
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <asm/irq.h>
+
+#define net_drv(p, arg...)     if (netif_msg_drv(p)) \
+                                       printk(arg)
+#define net_probe(p, arg...)   if (netif_msg_probe(p)) \
+                                       printk(arg)
+#define net_link(p, arg...)    if (netif_msg_link(p)) \
+                                       printk(arg)
+#define net_intr(p, arg...)    if (netif_msg_intr(p)) \
+                                       printk(arg)
+#define net_tx_err(p, arg...)  if (netif_msg_tx_err(p)) \
+                                       printk(arg)
+
+#define PHY_MAX_ADDR           32
+#define PHY_ID_ANY             0x1f
+#define MII_REG_ANY            0x1f
+
+#ifdef CONFIG_SIS190_NAPI
+#define NAPI_SUFFIX    "-NAPI"
+#else
+#define NAPI_SUFFIX    ""
+#endif
+
+#define DRV_VERSION            "1.2" NAPI_SUFFIX
+#define DRV_NAME               "sis190"
+#define SIS190_DRIVER_NAME     DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
+#define PFX DRV_NAME ": "
+
+#ifdef CONFIG_SIS190_NAPI
+#define sis190_rx_skb                  netif_receive_skb
+#define sis190_rx_quota(count, quota)  min(count, quota)
+#else
+#define sis190_rx_skb                  netif_rx
+#define sis190_rx_quota(count, quota)  count
+#endif
+
+#define MAC_ADDR_LEN           6
+
+#define NUM_TX_DESC            64      /* [8..1024] */
+#define NUM_RX_DESC            64      /* [8..8192] */
+#define TX_RING_BYTES          (NUM_TX_DESC * sizeof(struct TxDesc))
+#define RX_RING_BYTES          (NUM_RX_DESC * sizeof(struct RxDesc))
+#define RX_BUF_SIZE            1536
+#define RX_BUF_MASK            0xfff8
+
+#define SIS190_REGS_SIZE       0x80
+#define SIS190_TX_TIMEOUT      (6*HZ)
+#define SIS190_PHY_TIMEOUT     (10*HZ)
+#define SIS190_MSG_DEFAULT     (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+                                NETIF_MSG_LINK | NETIF_MSG_IFUP | \
+                                NETIF_MSG_IFDOWN)
+
+/* Enhanced PHY access register bit definitions */
+#define EhnMIIread             0x0000
+#define EhnMIIwrite            0x0020
+#define EhnMIIdataShift                16
+#define EhnMIIpmdShift         6       /* 7016 only */
+#define EhnMIIregShift         11
+#define EhnMIIreq              0x0010
+#define EhnMIInotDone          0x0010
+
+/* Write/read MMIO register */
+#define SIS_W8(reg, val)       writeb ((val), ioaddr + (reg))
+#define SIS_W16(reg, val)      writew ((val), ioaddr + (reg))
+#define SIS_W32(reg, val)      writel ((val), ioaddr + (reg))
+#define SIS_R8(reg)            readb (ioaddr + (reg))
+#define SIS_R16(reg)           readw (ioaddr + (reg))
+#define SIS_R32(reg)           readl (ioaddr + (reg))
+
+#define SIS_PCI_COMMIT()       SIS_R32(IntrControl)
+
+enum sis190_registers {
+       TxControl               = 0x00,
+       TxDescStartAddr         = 0x04,
+       rsv0                    = 0x08, // reserved
+       TxSts                   = 0x0c, // unused (Control/Status)
+       RxControl               = 0x10,
+       RxDescStartAddr         = 0x14,
+       rsv1                    = 0x18, // reserved
+       RxSts                   = 0x1c, // unused
+       IntrStatus              = 0x20,
+       IntrMask                = 0x24,
+       IntrControl             = 0x28,
+       IntrTimer               = 0x2c, // unused (Interrupt Timer)
+       PMControl               = 0x30, // unused (Power Mgmt Control/Status)
+       rsv2                    = 0x34, // reserved
+       ROMControl              = 0x38,
+       ROMInterface            = 0x3c,
+       StationControl          = 0x40,
+       GMIIControl             = 0x44,
+       GIoCR                   = 0x48, // unused (GMAC IO Compensation)
+       GIoCtrl                 = 0x4c, // unused (GMAC IO Control)
+       TxMacControl            = 0x50,
+       TxLimit                 = 0x54, // unused (Tx MAC Timer/TryLimit)
+       RGDelay                 = 0x58, // unused (RGMII Tx Internal Delay)
+       rsv3                    = 0x5c, // reserved
+       RxMacControl            = 0x60,
+       RxMacAddr               = 0x62,
+       RxHashTable             = 0x68,
+       // Undocumented         = 0x6c,
+       RxWolCtrl               = 0x70,
+       RxWolData               = 0x74, // unused (Rx WOL Data Access)
+       RxMPSControl            = 0x78, // unused (Rx MPS Control)
+       rsv4                    = 0x7c, // reserved
+};
+
+enum sis190_register_content {
+       /* IntrStatus */
+       SoftInt                 = 0x40000000,   // unused
+       Timeup                  = 0x20000000,   // unused
+       PauseFrame              = 0x00080000,   // unused
+       MagicPacket             = 0x00040000,   // unused
+       WakeupFrame             = 0x00020000,   // unused
+       LinkChange              = 0x00010000,
+       RxQEmpty                = 0x00000080,
+       RxQInt                  = 0x00000040,
+       TxQ1Empty               = 0x00000020,   // unused
+       TxQ1Int                 = 0x00000010,
+       TxQ0Empty               = 0x00000008,   // unused
+       TxQ0Int                 = 0x00000004,
+       RxHalt                  = 0x00000002,
+       TxHalt                  = 0x00000001,
+
+       /* {Rx/Tx}CmdBits */
+       CmdReset                = 0x10,
+       CmdRxEnb                = 0x08,         // unused
+       CmdTxEnb                = 0x01,
+       RxBufEmpty              = 0x01,         // unused
+
+       /* Cfg9346Bits */
+       Cfg9346_Lock            = 0x00,         // unused
+       Cfg9346_Unlock          = 0xc0,         // unused
+
+       /* RxMacControl */
+       AcceptErr               = 0x20,         // unused
+       AcceptRunt              = 0x10,         // unused
+       AcceptBroadcast         = 0x0800,
+       AcceptMulticast         = 0x0400,
+       AcceptMyPhys            = 0x0200,
+       AcceptAllPhys           = 0x0100,
+
+       /* RxConfigBits */
+       RxCfgFIFOShift          = 13,
+       RxCfgDMAShift           = 8,            // 0x1a in RxControl ?
+
+       /* TxConfigBits */
+       TxInterFrameGapShift    = 24,
+       TxDMAShift              = 8, /* DMA burst value (0-7) is shifted by this many bits */
+
+       /* StationControl */
+       _1000bpsF               = 0x1c00,
+       _1000bpsH               = 0x0c00,
+       _100bpsF                = 0x1800,
+       _100bpsH                = 0x0800,
+       _10bpsF                 = 0x1400,
+       _10bpsH                 = 0x0400,
+
+       LinkStatus              = 0x02,         // unused
+       FullDup                 = 0x01,         // unused
+
+       /* TBICSRBit */
+       TBILinkOK               = 0x02000000,   // unused
+};
+
+struct TxDesc {
+       __le32 PSize;
+       __le32 status;
+       __le32 addr;
+       __le32 size;
+};
+
+struct RxDesc {
+       __le32 PSize;
+       __le32 status;
+       __le32 addr;
+       __le32 size;
+};
+
+enum _DescStatusBit {
+       /* _Desc.status */
+       OWNbit          = 0x80000000, // RXOWN/TXOWN
+       INTbit          = 0x40000000, // RXINT/TXINT
+       CRCbit          = 0x00020000, // CRCOFF/CRCEN
+       PADbit          = 0x00010000, // PREADD/PADEN
+       /* _Desc.size */
+       RingEnd         = 0x80000000,
+       /* TxDesc.status */
+       LSEN            = 0x08000000, // TSO ? -- FR
+       IPCS            = 0x04000000,
+       TCPCS           = 0x02000000,
+       UDPCS           = 0x01000000,
+       BSTEN           = 0x00800000,
+       EXTEN           = 0x00400000,
+       DEFEN           = 0x00200000,
+       BKFEN           = 0x00100000,
+       CRSEN           = 0x00080000,
+       COLEN           = 0x00040000,
+       THOL3           = 0x30000000,
+       THOL2           = 0x20000000,
+       THOL1           = 0x10000000,
+       THOL0           = 0x00000000,
+       /* RxDesc.status */
+       IPON            = 0x20000000,
+       TCPON           = 0x10000000,
+       UDPON           = 0x08000000,
+       Wakup           = 0x00400000,
+       Magic           = 0x00200000,
+       Pause           = 0x00100000,
+       DEFbit          = 0x00200000,
+       BCAST           = 0x000c0000,
+       MCAST           = 0x00080000,
+       UCAST           = 0x00040000,
+       /* RxDesc.PSize */
+       TAGON           = 0x80000000,
+       RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
+       ABORT           = 0x00800000,
+       SHORT           = 0x00400000,
+       LIMIT           = 0x00200000,
+       MIIER           = 0x00100000,
+       OVRUN           = 0x00080000,
+       NIBON           = 0x00040000,
+       COLON           = 0x00020000,
+       CRCOK           = 0x00010000,
+       RxSizeMask      = 0x0000ffff
+       /*
+        * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
+        * provide two (unused with Linux) Tx queues. No publicly
+        * available documentation, alas.
+        */
+};
+
+enum sis190_eeprom_access_register_bits {
+       EECS    = 0x00000001,   // unused
+       EECLK   = 0x00000002,   // unused
+       EEDO    = 0x00000008,   // unused
+       EEDI    = 0x00000004,   // unused
+       EEREQ   = 0x00000080,
+       EEROP   = 0x00000200,
+       EEWOP   = 0x00000100    // unused
+};
+
+/* EEPROM Addresses */
+enum sis190_eeprom_address {
+       EEPROMSignature = 0x00,
+       EEPROMCLK       = 0x01, // unused
+       EEPROMInfo      = 0x02,
+       EEPROMMACAddr   = 0x03
+};
+
+struct sis190_private {
+       void __iomem *mmio_addr;
+       struct pci_dev *pci_dev;
+       struct net_device_stats stats;
+       spinlock_t lock;
+       u32 rx_buf_sz;
+       u32 cur_rx;
+       u32 cur_tx;
+       u32 dirty_rx;
+       u32 dirty_tx;
+       dma_addr_t rx_dma;
+       dma_addr_t tx_dma;
+       struct RxDesc *RxDescRing;
+       struct TxDesc *TxDescRing;
+       struct sk_buff *Rx_skbuff[NUM_RX_DESC];
+       struct sk_buff *Tx_skbuff[NUM_TX_DESC];
+       struct work_struct phy_task;
+       struct timer_list timer;
+       u32 msg_enable;
+       struct mii_if_info mii_if;
+       struct list_head first_phy;
+};
+
+struct sis190_phy {
+       struct list_head list;
+       int phy_id;
+       u16 id[2];
+       u16 status;
+       u8  type;
+};
+
+enum sis190_phy_type {
+       UNKNOWN = 0x00,
+       HOME    = 0x01,
+       LAN     = 0x02,
+       MIX     = 0x03
+};
+
+static struct mii_chip_info {
+        const char *name;
+        u16 id[2];
+        unsigned int type;
+} mii_chip_table[] = {
+       { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN },
+       { "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN },
+       { "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN },
+       { "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN },
+       { NULL, }
+};
+
+static const struct {
+       const char *name;
+       u8 version;             /* depend on docs */
+       u32 RxConfigMask;       /* clear the bits supported by this chip */
+} sis_chip_info[] = {
+       { DRV_NAME, 0x00, 0xff7e1880, },
+};
+
+static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
+       { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
+       { 0, },
+};
+
+MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
+
+static int rx_copybreak = 200;
+
+static struct {
+       u32 msg_enable;
+} debug = { -1 };
+
+MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
+module_param(rx_copybreak, int, 0);
+MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
+module_param_named(debug, debug.msg_enable, int, 0);
+MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
+MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+
+static const u32 sis190_intr_mask =
+       RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt;
+
+/*
+ * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ * The chips use a 64 element hash table based on the Ethernet CRC.
+ */
+static int multicast_filter_limit = 32;
+
+static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
+{
+       unsigned int i;
+
+       SIS_W32(GMIIControl, ctl);
+
+       msleep(1);
+
+       for (i = 0; i < 100; i++) {
+               if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
+                       break;
+               msleep(1);
+       }
+
+       if (i > 99)
+               printk(KERN_ERR PFX "PHY command failed !\n");
+}
+
+static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
+{
+       __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
+               (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
+               (((u32) val) << EhnMIIdataShift));
+}
+
+static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
+{
+       __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
+               (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
+
+       return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
+}
+
+static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+
+       mdio_write(tp->mmio_addr, phy_id, reg, val);
+}
+
+static int __mdio_read(struct net_device *dev, int phy_id, int reg)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+
+       return mdio_read(tp->mmio_addr, phy_id, reg);
+}
+
+static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
+{
+       mdio_read(ioaddr, phy_id, reg);
+       return mdio_read(ioaddr, phy_id, reg);
+}
+
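The GMIIControl command word assembled by __mdio_cmd() packs the request bit, read/write selector, register index, PHY address and (for writes) data using the Ehn* shifts defined earlier; mdio_read_latched() reads twice, presumably because some MII status bits are latched. A small, hypothetical usage sketch built on these helpers, where ioaddr and phy_id are assumed to be already set up:

    /* Hypothetical probe snippet: read the two PHY ID registers. */
    u16 id1 = mdio_read_latched(ioaddr, phy_id, MII_PHYSID1);
    u16 id2 = mdio_read_latched(ioaddr, phy_id, MII_PHYSID2);

    printk(KERN_DEBUG PFX "PHY %d id %04x:%04x.\n", phy_id, id1, id2);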
+static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
+{
+       u16 data = 0xffff;
+       unsigned int i;
+
+       if (!(SIS_R32(ROMControl) & 0x0002))
+               return 0;
+
+       SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
+
+       for (i = 0; i < 200; i++) {
+               if (!(SIS_R32(ROMInterface) & EEREQ)) {
+                       data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
+                       break;
+               }
+               msleep(1);
+       }
+
+       return data;
+}
+
+static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
+{
+       SIS_W32(IntrMask, 0x00);
+       SIS_W32(IntrStatus, 0xffffffff);
+       SIS_PCI_COMMIT();
+}
+
+static void sis190_asic_down(void __iomem *ioaddr)
+{
+       /* Stop the chip's Tx and Rx DMA processes. */
+
+       SIS_W32(TxControl, 0x1a00);
+       SIS_W32(RxControl, 0x1a00);
+
+       sis190_irq_mask_and_ack(ioaddr);
+}
+
+static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
+{
+       desc->size |= cpu_to_le32(RingEnd);
+}
+
+static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
+{
+       u32 eor = le32_to_cpu(desc->size) & RingEnd;
+
+       desc->PSize = 0x0;
+       desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
+       wmb();
+       desc->status = cpu_to_le32(OWNbit | INTbit);
+}
+
+static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
+                                     u32 rx_buf_sz)
+{
+       desc->addr = cpu_to_le32(mapping);
+       sis190_give_to_asic(desc, rx_buf_sz);
+}
+
+static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
+{
+       desc->PSize = 0x0;
+       desc->addr = 0xdeadbeef;
+       desc->size &= cpu_to_le32(RingEnd);
+       wmb();
+       desc->status = 0x0;
+}
+
+static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
+                              struct RxDesc *desc, u32 rx_buf_sz)
+{
+       struct sk_buff *skb;
+       dma_addr_t mapping;
+       int ret = 0;
+
+       skb = dev_alloc_skb(rx_buf_sz);
+       if (!skb)
+               goto err_out;
+
+       *sk_buff = skb;
+
+       mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
+                                PCI_DMA_FROMDEVICE);
+
+       sis190_map_to_asic(desc, mapping, rx_buf_sz);
+out:
+       return ret;
+
+err_out:
+       ret = -ENOMEM;
+       sis190_make_unusable_by_asic(desc);
+       goto out;
+}
+
+static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
+                         u32 start, u32 end)
+{
+       u32 cur;
+
+       for (cur = start; cur < end; cur++) {
+               int ret, i = cur % NUM_RX_DESC;
+
+               if (tp->Rx_skbuff[i])
+                       continue;
+
+               ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
+                                         tp->RxDescRing + i, tp->rx_buf_sz);
+               if (ret < 0)
+                       break;
+       }
+       return cur - start;
+}
+
+static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
+                                    struct RxDesc *desc, int rx_buf_sz)
+{
+       int ret = -1;
+
+       if (pkt_size < rx_copybreak) {
+               struct sk_buff *skb;
+
+               skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
+               if (skb) {
+                       skb_reserve(skb, NET_IP_ALIGN);
+                       eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
+                       *sk_buff = skb;
+                       sis190_give_to_asic(desc, rx_buf_sz);
+                       ret = 0;
+               }
+       }
+       return ret;
+}
+
+static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
+{
+#define ErrMask        (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
+
+       if ((status & CRCOK) && !(status & ErrMask))
+               return 0;
+
+       if (!(status & CRCOK))
+               stats->rx_crc_errors++;
+       else if (status & OVRUN)
+               stats->rx_over_errors++;
+       else if (status & (SHORT | LIMIT))
+               stats->rx_length_errors++;
+       else if (status & (MIIER | NIBON | COLON))
+               stats->rx_frame_errors++;
+
+       stats->rx_errors++;
+       return -1;
+}
+
+static int sis190_rx_interrupt(struct net_device *dev,
+                              struct sis190_private *tp, void __iomem *ioaddr)
+{
+       struct net_device_stats *stats = &tp->stats;
+       u32 rx_left, cur_rx = tp->cur_rx;
+       u32 delta, count;
+
+       rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
+       rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
+
+       for (; rx_left > 0; rx_left--, cur_rx++) {
+               unsigned int entry = cur_rx % NUM_RX_DESC;
+               struct RxDesc *desc = tp->RxDescRing + entry;
+               u32 status;
+
+               if (desc->status & OWNbit)
+                       break;
+
+               status = le32_to_cpu(desc->PSize);
+
+               // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
+               //       status);
+
+               if (sis190_rx_pkt_err(status, stats) < 0)
+                       sis190_give_to_asic(desc, tp->rx_buf_sz);
+               else {
+                       struct sk_buff *skb = tp->Rx_skbuff[entry];
+                       int pkt_size = (status & RxSizeMask) - 4;
+                       void (*pci_action)(struct pci_dev *, dma_addr_t,
+                               size_t, int) = pci_dma_sync_single_for_device;
+
+                       if (unlikely(pkt_size > tp->rx_buf_sz)) {
+                               net_intr(tp, KERN_INFO
+                                        "%s: (frag) status = %08x.\n",
+                                        dev->name, status);
+                               stats->rx_dropped++;
+                               stats->rx_length_errors++;
+                               sis190_give_to_asic(desc, tp->rx_buf_sz);
+                               continue;
+                       }
+
+                       pci_dma_sync_single_for_cpu(tp->pci_dev,
+                               le32_to_cpu(desc->addr), tp->rx_buf_sz,
+                               PCI_DMA_FROMDEVICE);
+
+                       if (sis190_try_rx_copy(&skb, pkt_size, desc,
+                                              tp->rx_buf_sz)) {
+                               pci_action = pci_unmap_single;
+                               tp->Rx_skbuff[entry] = NULL;
+                               sis190_make_unusable_by_asic(desc);
+                       }
+
+                       pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
+                                  tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+
+                       skb->dev = dev;
+                       skb_put(skb, pkt_size);
+                       skb->protocol = eth_type_trans(skb, dev);
+
+                       sis190_rx_skb(skb);
+
+                       dev->last_rx = jiffies;
+                       stats->rx_packets++;
+                       stats->rx_bytes += pkt_size;
+                       if ((status & BCAST) == MCAST)
+                               stats->multicast++;
+               }
+       }
+       count = cur_rx - tp->cur_rx;
+       tp->cur_rx = cur_rx;
+
+       delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
+       if (!delta && count && netif_msg_intr(tp))
+               printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
+       tp->dirty_rx += delta;
+
+       if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
+               printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
+
+       return count;
+}
+
+static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
+                               struct TxDesc *desc)
+{
+       unsigned int len;
+
+       len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+
+       pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
+
+       memset(desc, 0x00, sizeof(*desc));
+}
+
+static void sis190_tx_interrupt(struct net_device *dev,
+                               struct sis190_private *tp, void __iomem *ioaddr)
+{
+       u32 pending, dirty_tx = tp->dirty_tx;
+       /*
+        * It would not be needed if queueing was allowed to be enabled
+        * again too early (hint: think preempt and unclocked smp systems).
+        */
+       unsigned int queue_stopped;
+
+       smp_rmb();
+       pending = tp->cur_tx - dirty_tx;
+       queue_stopped = (pending == NUM_TX_DESC);
+
+       for (; pending; pending--, dirty_tx++) {
+               unsigned int entry = dirty_tx % NUM_TX_DESC;
+               struct TxDesc *txd = tp->TxDescRing + entry;
+               struct sk_buff *skb;
+
+               if (le32_to_cpu(txd->status) & OWNbit)
+                       break;
+
+               skb = tp->Tx_skbuff[entry];
+
+               tp->stats.tx_packets++;
+               tp->stats.tx_bytes += skb->len;
+
+               sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
+               tp->Tx_skbuff[entry] = NULL;
+               dev_kfree_skb_irq(skb);
+       }
+
+       if (tp->dirty_tx != dirty_tx) {
+               tp->dirty_tx = dirty_tx;
+               smp_wmb();
+               if (queue_stopped)
+                       netif_wake_queue(dev);
+       }
+}
+
+/*
+ * The interrupt handler does all of the Rx thread work and cleans up after
+ * the Tx thread.
+ */
+static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
+{
+       struct net_device *dev = __dev;
+       struct sis190_private *tp = netdev_priv(dev);
+       void __iomem *ioaddr = tp->mmio_addr;
+       unsigned int handled = 0;
+       u32 status;
+
+       status = SIS_R32(IntrStatus);
+
+       if ((status == 0xffffffff) || !status)
+               goto out;
+
+       handled = 1;
+
+       if (unlikely(!netif_running(dev))) {
+               sis190_asic_down(ioaddr);
+               goto out;
+       }
+
+       SIS_W32(IntrStatus, status);
+
+       // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
+
+       if (status & LinkChange) {
+               net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
+               schedule_work(&tp->phy_task);
+       }
+
+       if (status & RxQInt)
+               sis190_rx_interrupt(dev, tp, ioaddr);
+
+       if (status & TxQ0Int)
+               sis190_tx_interrupt(dev, tp, ioaddr);
+out:
+       return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sis190_netpoll(struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+       struct pci_dev *pdev = tp->pci_dev;
+
+       disable_irq(pdev->irq);
+       sis190_interrupt(pdev->irq, dev, NULL);
+       enable_irq(pdev->irq);
+}
+#endif
+
+static void sis190_free_rx_skb(struct sis190_private *tp,
+                              struct sk_buff **sk_buff, struct RxDesc *desc)
+{
+       struct pci_dev *pdev = tp->pci_dev;
+
+       pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
+                        PCI_DMA_FROMDEVICE);
+       dev_kfree_skb(*sk_buff);
+       *sk_buff = NULL;
+       sis190_make_unusable_by_asic(desc);
+}
+
+static void sis190_rx_clear(struct sis190_private *tp)
+{
+       unsigned int i;
+
+       for (i = 0; i < NUM_RX_DESC; i++) {
+               if (!tp->Rx_skbuff[i])
+                       continue;
+               sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
+       }
+}
+
+static void sis190_init_ring_indexes(struct sis190_private *tp)
+{
+       tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
+}
+
+static int sis190_init_ring(struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+
+       sis190_init_ring_indexes(tp);
+
+       memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
+       memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
+
+       if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
+               goto err_rx_clear;
+
+       sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
+
+       return 0;
+
+err_rx_clear:
+       sis190_rx_clear(tp);
+       return -ENOMEM;
+}
+
+static void sis190_set_rx_mode(struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+       void __iomem *ioaddr = tp->mmio_addr;
+       unsigned long flags;
+       u32 mc_filter[2];       /* Multicast hash filter */
+       u16 rx_mode;
+
+       if (dev->flags & IFF_PROMISC) {
+               /* Unconditionally log net taps. */
+               net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
+                       dev->name);
+               rx_mode =
+                       AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+                       AcceptAllPhys;
+               mc_filter[1] = mc_filter[0] = 0xffffffff;
+       } else if ((dev->mc_count > multicast_filter_limit) ||
+                  (dev->flags & IFF_ALLMULTI)) {
+               /* Too many to filter perfectly -- accept all multicasts. */
+               rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+               mc_filter[1] = mc_filter[0] = 0xffffffff;
+       } else {
+               struct dev_mc_list *mclist;
+               unsigned int i;
+
+               rx_mode = AcceptBroadcast | AcceptMyPhys;
+               mc_filter[1] = mc_filter[0] = 0;
+               for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+                    i++, mclist = mclist->next) {
+                       int bit_nr =
+                               ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+                       mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+                       rx_mode |= AcceptMulticast;
+               }
+       }
+
+       spin_lock_irqsave(&tp->lock, flags);
+
+       SIS_W16(RxMacControl, rx_mode | 0x2);
+       SIS_W32(RxHashTable, mc_filter[0]);
+       SIS_W32(RxHashTable + 4, mc_filter[1]);
+
+       spin_unlock_irqrestore(&tp->lock, flags);
+}
+
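sis190_set_rx_mode() above maps each multicast address to one bit of the 64-bit RxHashTable by taking the top six bits of the Ethernet CRC. A stand-alone illustration of that bit selection; the address is an arbitrary example, not taken from the driver:

    u8 addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    u32 mc_filter[2] = { 0, 0 };
    int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;   /* 0..63 */

    mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);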
+static void sis190_soft_reset(void __iomem *ioaddr)
+{
+       SIS_W32(IntrControl, 0x8000);
+       SIS_PCI_COMMIT();
+       msleep(1);
+       SIS_W32(IntrControl, 0x0);
+       sis190_asic_down(ioaddr);
+       msleep(1);
+}
+
+static void sis190_hw_start(struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+       void __iomem *ioaddr = tp->mmio_addr;
+
+       sis190_soft_reset(ioaddr);
+
+       SIS_W32(TxDescStartAddr, tp->tx_dma);
+       SIS_W32(RxDescStartAddr, tp->rx_dma);
+
+       SIS_W32(IntrStatus, 0xffffffff);
+       SIS_W32(IntrMask, 0x0);
+       /*
+        * Default is 100Mbps.
+        * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09
+        */
+       SIS_W16(StationControl, 0x1901);
+       SIS_W32(GMIIControl, 0x0);
+       SIS_W32(TxMacControl, 0x60);
+       SIS_W16(RxMacControl, 0x02);
+       SIS_W32(RxHashTable, 0x0);
+       SIS_W32(0x6c, 0x0);
+       SIS_W32(RxWolCtrl, 0x0);
+       SIS_W32(RxWolData, 0x0);
+
+       SIS_PCI_COMMIT();
+
+       sis190_set_rx_mode(dev);
+
+       /* Enable all known interrupts by setting the interrupt mask. */
+       SIS_W32(IntrMask, sis190_intr_mask);
+
+       SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
+       SIS_W32(RxControl, 0x1a1d);
+
+       netif_start_queue(dev);
+}
+
+static void sis190_phy_task(void * data)
+{
+       struct net_device *dev = data;
+       struct sis190_private *tp = netdev_priv(dev);
+       void __iomem *ioaddr = tp->mmio_addr;
+       int phy_id = tp->mii_if.phy_id;
+       u16 val;
+
+       rtnl_lock();
+
+       val = mdio_read(ioaddr, phy_id, MII_BMCR);
+       if (val & BMCR_RESET) {
+               // FIXME: needlessly high ?  -- FR 02/07/2005
+               mod_timer(&tp->timer, jiffies + HZ/10);
+       } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
+                    BMSR_ANEGCOMPLETE)) {
+               net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
+                        dev->name);
+               mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
+               mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
+       } else {
+               /* Rejoice ! */
+               struct {
+                       int val;
+                       const char *msg;
+                       u16 ctl;
+               } reg31[] = {
+                       { LPA_1000XFULL | LPA_SLCT,
+                               "1000 Mbps Full Duplex",
+                               0x01 | _1000bpsF },
+                       { LPA_1000XHALF | LPA_SLCT,
+                               "1000 Mbps Half Duplex",
+                               0x01 | _1000bpsH },
+                       { LPA_100FULL,
+                               "100 Mbps Full Duplex",
+                               0x01 | _100bpsF },
+                       { LPA_100HALF,
+                               "100 Mbps Half Duplex",
+                               0x01 | _100bpsH },
+                       { LPA_10FULL,
+                               "10 Mbps Full Duplex",
+                               0x01 | _10bpsF },
+                       { LPA_10HALF,
+                               "10 Mbps Half Duplex",
+                               0x01 | _10bpsH },
+                       { 0, "unknown", 0x0000 }
+               }, *p;
+               u16 adv;
+
+               val = mdio_read(ioaddr, phy_id, 0x1f);
+               net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
+
+               val = mdio_read(ioaddr, phy_id, MII_LPA);
+               adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
+               net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
+                        dev->name, val, adv);
+
+               val &= adv;
+
+               for (p = reg31; p->ctl; p++) {
+                       if ((val & p->val) == p->val)
+                               break;
+               }
+               if (p->ctl)
+                       SIS_W16(StationControl, p->ctl);
+               net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
+                        p->msg);
+               netif_carrier_on(dev);
+       }
+
+       rtnl_unlock();
+}
+
+static void sis190_phy_timer(unsigned long __opaque)
+{
+       struct net_device *dev = (struct net_device *)__opaque;
+       struct sis190_private *tp = netdev_priv(dev);
+
+       if (likely(netif_running(dev)))
+               schedule_work(&tp->phy_task);
+}
+
+static inline void sis190_delete_timer(struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+
+       del_timer_sync(&tp->timer);
+}
+
+static inline void sis190_request_timer(struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+       struct timer_list *timer = &tp->timer;
+
+       init_timer(timer);
+       timer->expires = jiffies + SIS190_PHY_TIMEOUT;
+       timer->data = (unsigned long)dev;
+       timer->function = sis190_phy_timer;
+       add_timer(timer);
+}
+
+static void sis190_set_rxbufsize(struct sis190_private *tp,
+                                struct net_device *dev)
+{
+       unsigned int mtu = dev->mtu;
+
+       tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
+       /* RxDesc->size has a licence to kill the lower bits */
+       if (tp->rx_buf_sz & 0x07) {
+               tp->rx_buf_sz += 8;
+               tp->rx_buf_sz &= RX_BUF_MASK;
+       }
+}
+
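sis190_set_rxbufsize() rounds the receive buffer size up to a multiple of 8 because the hardware ignores the low bits of RxDesc->size. A worked example with a hypothetical MTU of 7000 (anything larger than RX_BUF_SIZE):

    7000 + ETH_HLEN + 8 = 7022      /* not a multiple of 8    */
    7022 + 8            = 7030
    7030 & RX_BUF_MASK  = 7024      /* final tp->rx_buf_sz    */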
+static int sis190_open(struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+       struct pci_dev *pdev = tp->pci_dev;
+       int rc = -ENOMEM;
+
+       sis190_set_rxbufsize(tp, dev);
+
+       /*
+        * Rx and Tx descriptors need 256 bytes alignment.
+        * pci_alloc_consistent() guarantees a stronger alignment.
+        */
+       tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
+       if (!tp->TxDescRing)
+               goto out;
+
+       tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
+       if (!tp->RxDescRing)
+               goto err_free_tx_0;
+
+       rc = sis190_init_ring(dev);
+       if (rc < 0)
+               goto err_free_rx_1;
+
+       INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+
+       sis190_request_timer(dev);
+
+       rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
+       if (rc < 0)
+               goto err_release_timer_2;
+
+       sis190_hw_start(dev);
+out:
+       return rc;
+
+err_release_timer_2:
+       sis190_delete_timer(dev);
+       sis190_rx_clear(tp);
+err_free_rx_1:
+       pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
+               tp->rx_dma);
+err_free_tx_0:
+       pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
+               tp->tx_dma);
+       goto out;
+}
+
+static void sis190_tx_clear(struct sis190_private *tp)
+{
+       unsigned int i;
+
+       for (i = 0; i < NUM_TX_DESC; i++) {
+               struct sk_buff *skb = tp->Tx_skbuff[i];
+
+               if (!skb)
+                       continue;
+
+               sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
+               tp->Tx_skbuff[i] = NULL;
+               dev_kfree_skb(skb);
+
+               tp->stats.tx_dropped++;
+       }
+       tp->cur_tx = tp->dirty_tx = 0;
+}
+
+static void sis190_down(struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+       void __iomem *ioaddr = tp->mmio_addr;
+       unsigned int poll_locked = 0;
+
+       sis190_delete_timer(dev);
+
+       netif_stop_queue(dev);
+
+       flush_scheduled_work();
+
+       do {
+               spin_lock_irq(&tp->lock);
+
+               sis190_asic_down(ioaddr);
+
+               spin_unlock_irq(&tp->lock);
+
+               synchronize_irq(dev->irq);
+
+               if (!poll_locked) {
+                       netif_poll_disable(dev);
+                       poll_locked++;
+               }
+
+               synchronize_sched();
+
+       } while (SIS_R32(IntrMask));
+
+       sis190_tx_clear(tp);
+       sis190_rx_clear(tp);
+}
+
+static int sis190_close(struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+       struct pci_dev *pdev = tp->pci_dev;
+
+       sis190_down(dev);
+
+       free_irq(dev->irq, dev);
+
+       netif_poll_enable(dev);
+
+       pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
+       pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
+
+       tp->TxDescRing = NULL;
+       tp->RxDescRing = NULL;
+
+       return 0;
+}
+
+static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+       void __iomem *ioaddr = tp->mmio_addr;
+       u32 len, entry, dirty_tx;
+       struct TxDesc *desc;
+       dma_addr_t mapping;
+
+       if (unlikely(skb->len < ETH_ZLEN)) {
+               skb = skb_padto(skb, ETH_ZLEN);
+               if (!skb) {
+                       tp->stats.tx_dropped++;
+                       goto out;
+               }
+               len = ETH_ZLEN;
+       } else {
+               len = skb->len;
+       }
+
+       entry = tp->cur_tx % NUM_TX_DESC;
+       desc = tp->TxDescRing + entry;
+
+       if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
+               netif_stop_queue(dev);
+               net_tx_err(tp, KERN_ERR PFX
+                          "%s: BUG! Tx Ring full when queue awake!\n",
+                          dev->name);
+               return NETDEV_TX_BUSY;
+       }
+
+       mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+
+       tp->Tx_skbuff[entry] = skb;
+
+       desc->PSize = cpu_to_le32(len);
+       desc->addr = cpu_to_le32(mapping);
+
+       desc->size = cpu_to_le32(len);
+       if (entry == (NUM_TX_DESC - 1))
+               desc->size |= cpu_to_le32(RingEnd);
+
+       wmb();
+
+       desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
+
+       tp->cur_tx++;
+
+       smp_wmb();
+
+       SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
+
+       dev->trans_start = jiffies;
+
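+       /*
+        * Stop the queue if the ring just became full. Re-read dirty_tx
+        * after the barrier so a completion racing with us still wakes
+        * the queue and no transmitter stall occurs.
+        */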
+       dirty_tx = tp->dirty_tx;
+       if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
+               netif_stop_queue(dev);
+               smp_rmb();
+               if (dirty_tx != tp->dirty_tx)
+                       netif_wake_queue(dev);
+       }
+out:
+       return NETDEV_TX_OK;
+}
+
+static struct net_device_stats *sis190_get_stats(struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+
+       return &tp->stats;
+}
+
+static void sis190_free_phy(struct list_head *first_phy)
+{
+       struct sis190_phy *cur, *next;
+
+       list_for_each_entry_safe(cur, next, first_phy, list) {
+               kfree(cur);
+       }
+}
+
+/**
+ *     sis190_default_phy - Select default PHY for sis190 mac.
+ *     @dev: the net device to probe for
+ *
+ *     Select the first detected PHY with link as default.
+ *     If none has link, select the PHY whose type is HOME as default.
+ *     If no HOME PHY exists, select a LAN PHY.
+ */
+static u16 sis190_default_phy(struct net_device *dev)
+{
+       struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
+       struct sis190_private *tp = netdev_priv(dev);
+       struct mii_if_info *mii_if = &tp->mii_if;
+       void __iomem *ioaddr = tp->mmio_addr;
+       u16 status;
+
+       phy_home = phy_default = phy_lan = NULL;
+
+       list_for_each_entry(phy, &tp->first_phy, list) {
+               status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
+
+               // Link is up, no default PHY selected yet, and not a ghost PHY.
+               if ((status & BMSR_LSTATUS) &&
+                   !phy_default &&
+                   (phy->type != UNKNOWN)) {
+                       phy_default = phy;
+               } else {
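+                       /* Not the default PHY: isolate it (the selected
+                        * PHY is taken out of isolation further below). */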
+                       status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
+                       mdio_write(ioaddr, phy->phy_id, MII_BMCR,
+                                  status | BMCR_ANENABLE | BMCR_ISOLATE);
+                       if (phy->type == HOME)
+                               phy_home = phy;
+                       else if (phy->type == LAN)
+                               phy_lan = phy;
+               }
+       }
+
+       if (!phy_default) {
+               if (phy_home)
+                       phy_default = phy_home;
+               else if (phy_lan)
+                       phy_default = phy_lan;
+               else
+                       phy_default = list_entry(&tp->first_phy,
+                                                struct sis190_phy, list);
+       }
+
+       if (mii_if->phy_id != phy_default->phy_id) {
+               mii_if->phy_id = phy_default->phy_id;
+               net_probe(tp, KERN_INFO
+                      "%s: Using transceiver at address %d as default.\n",
+                      pci_name(tp->pci_dev), mii_if->phy_id);
+       }
+
+       status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
+       status &= (~BMCR_ISOLATE);
+
+       mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
+       status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
+
+       return status;
+}
+
+static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
+                           struct sis190_phy *phy, unsigned int phy_id,
+                           u16 mii_status)
+{
+       void __iomem *ioaddr = tp->mmio_addr;
+       struct mii_chip_info *p;
+
+       INIT_LIST_HEAD(&phy->list);
+       phy->status = mii_status;
+       phy->phy_id = phy_id;
+
+       phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
+       phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
+
+       for (p = mii_chip_table; p->type; p++) {
+               if ((p->id[0] == phy->id[0]) &&
+                   (p->id[1] == (phy->id[1] & 0xfff0))) {
+                       break;
+               }
+       }
+
+       if (p->id[1]) {
+               phy->type = (p->type == MIX) ?
+                       ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
+                               LAN : HOME) : p->type;
+       } else
+               phy->type = UNKNOWN;
+
+       net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
+                 pci_name(tp->pci_dev),
+                 (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
+}
+
+/**
+ *     sis190_mii_probe - Probe MII PHY for sis190
+ *     @dev: the net device to probe for
+ *
+ *     Search all 32 possible MII PHY addresses.
+ *     Identify and set the current PHY if one is found,
+ *     return an error if none is found.
+ */
+static int __devinit sis190_mii_probe(struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+       struct mii_if_info *mii_if = &tp->mii_if;
+       void __iomem *ioaddr = tp->mmio_addr;
+       int phy_id;
+       int rc = 0;
+
+       INIT_LIST_HEAD(&tp->first_phy);
+
+       for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
+               struct sis190_phy *phy;
+               u16 status;
+
+               status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
+
+               // Try next mii if the current one is not accessible.
+               if (status == 0xffff || status == 0x0000)
+                       continue;
+
+               phy = kmalloc(sizeof(*phy), GFP_KERNEL);
+               if (!phy) {
+                       sis190_free_phy(&tp->first_phy);
+                       rc = -ENOMEM;
+                       goto out;
+               }
+
+               sis190_init_phy(dev, tp, phy, phy_id, status);
+
+               list_add(&tp->first_phy, &phy->list);
+       }
+
+       if (list_empty(&tp->first_phy)) {
+               net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
+                         pci_name(tp->pci_dev));
+               rc = -EIO;
+               goto out;
+       }
+
+       /* Select default PHY for mac */
+       sis190_default_phy(dev);
+
+       mii_if->dev = dev;
+       mii_if->mdio_read = __mdio_read;
+       mii_if->mdio_write = __mdio_write;
+       mii_if->phy_id_mask = PHY_ID_ANY;
+       mii_if->reg_num_mask = MII_REG_ANY;
+out:
+       return rc;
+}
+
+static void __devexit sis190_mii_remove(struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+
+       sis190_free_phy(&tp->first_phy);
+}
+
+static void sis190_release_board(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct sis190_private *tp = netdev_priv(dev);
+
+       iounmap(tp->mmio_addr);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       free_netdev(dev);
+}
+
+static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
+{
+       struct sis190_private *tp;
+       struct net_device *dev;
+       void __iomem *ioaddr;
+       int rc;
+
+       dev = alloc_etherdev(sizeof(*tp));
+       if (!dev) {
+               net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
+               rc = -ENOMEM;
+               goto err_out_0;
+       }
+
+       SET_MODULE_OWNER(dev);
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       tp = netdev_priv(dev);
+       tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
+
+       rc = pci_enable_device(pdev);
+       if (rc < 0) {
+               net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
+               goto err_free_dev_1;
+       }
+
+       rc = -ENODEV;
+
+       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+               net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
+                         pci_name(pdev));
+               goto err_pci_disable_2;
+       }
+       if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
+               net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
+                         pci_name(pdev));
+               goto err_pci_disable_2;
+       }
+
+       rc = pci_request_regions(pdev, DRV_NAME);
+       if (rc < 0) {
+               net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
+                         pci_name(pdev));
+               goto err_pci_disable_2;
+       }
+
+       rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+       if (rc < 0) {
+               net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
+                         pci_name(pdev));
+               goto err_free_res_3;
+       }
+
+       pci_set_master(pdev);
+
+       ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
+       if (!ioaddr) {
+               net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
+                         pci_name(pdev));
+               rc = -EIO;
+               goto err_free_res_3;
+       }
+
+       tp->pci_dev = pdev;
+       tp->mmio_addr = ioaddr;
+
+       sis190_irq_mask_and_ack(ioaddr);
+
+       sis190_soft_reset(ioaddr);
+out:
+       return dev;
+
+err_free_res_3:
+       pci_release_regions(pdev);
+err_pci_disable_2:
+       pci_disable_device(pdev);
+err_free_dev_1:
+       free_netdev(dev);
+err_out_0:
+       dev = ERR_PTR(rc);
+       goto out;
+}
+
+static void sis190_tx_timeout(struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+       void __iomem *ioaddr = tp->mmio_addr;
+       u8 tmp8;
+
+       /* Disable Tx, if not already */
+       tmp8 = SIS_R8(TxControl);
+       if (tmp8 & CmdTxEnb)
+               SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
+
+       net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
+                  dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
+
+       /* Disable interrupts by clearing the interrupt mask. */
+       SIS_W32(IntrMask, 0x0000);
+
+       /* Stop a shared interrupt from scavenging while we are. */
+       spin_lock_irq(&tp->lock);
+       sis190_tx_clear(tp);
+       spin_unlock_irq(&tp->lock);
+
+       /* ...and finally, reset everything. */
+       sis190_hw_start(dev);
+
+       netif_wake_queue(dev);
+}
+
+static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
+                                                    struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+       void __iomem *ioaddr = tp->mmio_addr;
+       u16 sig;
+       int i;
+
+       net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
+                 pci_name(pdev));
+
+       /* Check to see if there is a sane EEPROM */
+       sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
+
+       if ((sig == 0xffff) || (sig == 0x0000)) {
+               net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
+                         pci_name(pdev), sig);
+               return -EIO;
+       }
+
+       /* Get MAC address from EEPROM */
+       for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
+               __le16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
+
+               ((u16 *)dev->dev_addr)[i] = le16_to_cpu(w);
+       }
+
+       return 0;
+}
+
+/**
+ *     sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model
+ *     @pdev: PCI device
+ *     @dev:  network device to get address for
+ *
+ *     The SiS965 model stores the MAC address in APC CMOS RAM,
+ *     which is accessed through the ISA bridge.
+ *     The MAC address is read into @dev->dev_addr.
+ */
+static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
+                                                 struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+       struct pci_dev *isa_bridge;
+       u8 reg, tmp8;
+       int i;
+
+       net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
+                 pci_name(pdev));
+
+       isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL);
+       if (!isa_bridge) {
+               net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
+                         pci_name(pdev));
+               return -EIO;
+       }
+
+       /* Enable port 78h & 79h to access APC Registers. */
+       pci_read_config_byte(isa_bridge, 0x48, &tmp8);
+       reg = (tmp8 & ~0x02);
+       pci_write_config_byte(isa_bridge, 0x48, reg);
+       udelay(50);
+       pci_read_config_byte(isa_bridge, 0x48, &reg);
+
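+       /* The MAC address lives in APC registers 0x09..0x0e: write the
+        * register index to port 0x78 and read the byte from port 0x79. */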
+       for (i = 0; i < MAC_ADDR_LEN; i++) {
+               outb(0x9 + i, 0x78);
+               dev->dev_addr[i] = inb(0x79);
+       }
+
+       outb(0x12, 0x78);
+       reg = inb(0x79);
+
+       /* Restore the value to ISA Bridge */
+       pci_write_config_byte(isa_bridge, 0x48, tmp8);
+       pci_dev_put(isa_bridge);
+
+       return 0;
+}
+
+/**
+ *      sis190_init_rxfilter - Initialize the Rx filter
+ *      @dev: network device to initialize
+ *
+ *      Set receive filter address to our MAC address
+ *      and enable packet filtering.
+ */
+static inline void sis190_init_rxfilter(struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+       void __iomem *ioaddr = tp->mmio_addr;
+       u16 ctl;
+       int i;
+
+       ctl = SIS_R16(RxMacControl);
+       /*
+        * Disable packet filtering before setting filter.
+        * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
+        * only and followed by RxMacAddr (6 bytes). Strange. -- FR
+        */
+       SIS_W16(RxMacControl, ctl & ~0x0f00);
+
+       for (i = 0; i < MAC_ADDR_LEN; i++)
+               SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
+
+       SIS_W16(RxMacControl, ctl);
+       SIS_PCI_COMMIT();
+}
+
+static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
+{
+       u8 from;
+
+       pci_read_config_byte(pdev, 0x73, &from);
+
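+       /* Bit 0 of PCI config register 0x73 tells where the MAC address
+        * is stored: 1 = APC CMOS RAM (SiS965), 0 = EEPROM. */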
+       return (from & 0x00000001) ?
+               sis190_get_mac_addr_from_apc(pdev, dev) :
+               sis190_get_mac_addr_from_eeprom(pdev, dev);
+}
+
+static void sis190_set_speed_auto(struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+       void __iomem *ioaddr = tp->mmio_addr;
+       int phy_id = tp->mii_if.phy_id;
+       int val;
+
+       net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
+
+       val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
+
+       // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
+       // unchanged.
+       mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
+                  ADVERTISE_100FULL | ADVERTISE_10FULL |
+                  ADVERTISE_100HALF | ADVERTISE_10HALF);
+
+       // Enable 1000 Full Mode.
+       mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
+
+       // Enable auto-negotiation and restart auto-negotiation.
+       mdio_write(ioaddr, phy_id, MII_BMCR,
+                  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
+}
+
+static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+
+       return mii_ethtool_gset(&tp->mii_if, cmd);
+}
+
+static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+
+       return mii_ethtool_sset(&tp->mii_if, cmd);
+}
+
+static void sis190_get_drvinfo(struct net_device *dev,
+                              struct ethtool_drvinfo *info)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+
+       strcpy(info->driver, DRV_NAME);
+       strcpy(info->version, DRV_VERSION);
+       strcpy(info->bus_info, pci_name(tp->pci_dev));
+}
+
+static int sis190_get_regs_len(struct net_device *dev)
+{
+       return SIS190_REGS_SIZE;
+}
+
+static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+                           void *p)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+       unsigned long flags;
+
+       if (regs->len > SIS190_REGS_SIZE)
+               regs->len = SIS190_REGS_SIZE;
+
+       spin_lock_irqsave(&tp->lock, flags);
+       memcpy_fromio(p, tp->mmio_addr, regs->len);
+       spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+static int sis190_nway_reset(struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+
+       return mii_nway_restart(&tp->mii_if);
+}
+
+static u32 sis190_get_msglevel(struct net_device *dev)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+
+       return tp->msg_enable;
+}
+
+static void sis190_set_msglevel(struct net_device *dev, u32 value)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+
+       tp->msg_enable = value;
+}
+
+static struct ethtool_ops sis190_ethtool_ops = {
+       .get_settings   = sis190_get_settings,
+       .set_settings   = sis190_set_settings,
+       .get_drvinfo    = sis190_get_drvinfo,
+       .get_regs_len   = sis190_get_regs_len,
+       .get_regs       = sis190_get_regs,
+       .get_link       = ethtool_op_get_link,
+       .get_msglevel   = sis190_get_msglevel,
+       .set_msglevel   = sis190_set_msglevel,
+       .nway_reset     = sis190_nway_reset,
+};
+
+static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+       struct sis190_private *tp = netdev_priv(dev);
+
+       return !netif_running(dev) ? -EINVAL :
+               generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
+}
+
+static int __devinit sis190_init_one(struct pci_dev *pdev,
+                                    const struct pci_device_id *ent)
+{
+       static int printed_version = 0;
+       struct sis190_private *tp;
+       struct net_device *dev;
+       void __iomem *ioaddr;
+       int rc;
+
+       if (!printed_version) {
+               net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
+               printed_version = 1;
+       }
+
+       dev = sis190_init_board(pdev);
+       if (IS_ERR(dev)) {
+               rc = PTR_ERR(dev);
+               goto out;
+       }
+
+       tp = netdev_priv(dev);
+       ioaddr = tp->mmio_addr;
+
+       rc = sis190_get_mac_addr(pdev, dev);
+       if (rc < 0)
+               goto err_release_board;
+
+       sis190_init_rxfilter(dev);
+
+       INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+
+       dev->open = sis190_open;
+       dev->stop = sis190_close;
+       dev->do_ioctl = sis190_ioctl;
+       dev->get_stats = sis190_get_stats;
+       dev->tx_timeout = sis190_tx_timeout;
+       dev->watchdog_timeo = SIS190_TX_TIMEOUT;
+       dev->hard_start_xmit = sis190_start_xmit;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       dev->poll_controller = sis190_netpoll;
+#endif
+       dev->set_multicast_list = sis190_set_rx_mode;
+       SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
+       dev->irq = pdev->irq;
+       dev->base_addr = (unsigned long) 0xdead;
+
+       spin_lock_init(&tp->lock);
+
+       rc = sis190_mii_probe(dev);
+       if (rc < 0)
+               goto err_release_board;
+
+       rc = register_netdev(dev);
+       if (rc < 0)
+               goto err_remove_mii;
+
+       pci_set_drvdata(pdev, dev);
+
+       net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
+              "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+              pci_name(pdev), sis_chip_info[ent->driver_data].name,
+              ioaddr, dev->irq,
+              dev->dev_addr[0], dev->dev_addr[1],
+              dev->dev_addr[2], dev->dev_addr[3],
+              dev->dev_addr[4], dev->dev_addr[5]);
+
+       netif_carrier_off(dev);
+
+       sis190_set_speed_auto(dev);
+out:
+       return rc;
+
+err_remove_mii:
+       sis190_mii_remove(dev);
+err_release_board:
+       sis190_release_board(pdev);
+       goto out;
+}
+
+static void __devexit sis190_remove_one(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+
+       sis190_mii_remove(dev);
+       unregister_netdev(dev);
+       sis190_release_board(pdev);
+       pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver sis190_pci_driver = {
+       .name           = DRV_NAME,
+       .id_table       = sis190_pci_tbl,
+       .probe          = sis190_init_one,
+       .remove         = __devexit_p(sis190_remove_one),
+};
+
+static int __init sis190_init_module(void)
+{
+       return pci_module_init(&sis190_pci_driver);
+}
+
+static void __exit sis190_cleanup_module(void)
+{
+       pci_unregister_driver(&sis190_pci_driver);
+}
+
+module_init(sis190_init_module);
+module_exit(sis190_cleanup_module);
index e2cdaf876201d80ead250cc2136c2d6cb6293dca..8c9634a98c111f92aee2cf674e7a856de3dd48a1 100644 (file)
@@ -135,6 +135,18 @@ config DM9102
          <file:Documentation/networking/net-modules.txt>.  The module will
          be called dmfe.
 
+config ULI526X
+       tristate "ULi M526x controller support"
+       depends on NET_TULIP && PCI
+       select CRC32
+       ---help---
+         This driver is for ULi M5261/M5263 10/100M Ethernet Controller
+         (<http://www.uli.com.tw/>).
+
+         To compile this driver as a module, choose M here and read
+         <file:Documentation/networking/net-modules.txt>.  The module will
+         be called uli526x.
+
 config PCMCIA_XIRCOM
        tristate "Xircom CardBus support (new driver)"
        depends on NET_TULIP && CARDBUS
index 8bb9b4683979bb5df1d0900bb777af240f6e9e46..451090d6fcca29e0a773bb125fb6ea18b72ac6ae 100644 (file)
@@ -9,6 +9,7 @@ obj-$(CONFIG_WINBOND_840)       += winbond-840.o
 obj-$(CONFIG_DE2104X)          += de2104x.o
 obj-$(CONFIG_TULIP)            += tulip.o
 obj-$(CONFIG_DE4X5)            += de4x5.o
+obj-$(CONFIG_ULI526X)          += uli526x.o
 
 # Declare multi-part drivers.
 
index e26c31f944bf1622f8eb77933ac6623de6056cfe..f53396fe79c9f95d296dc3995208f0f66c3da8b8 100644 (file)
@@ -81,25 +81,6 @@ int tulip_mdio_read(struct net_device *dev, int phy_id, int location)
                return retval & 0xffff;
        }
 
-       if(tp->chip_id == ULI526X && tp->revision >= 0x40) {
-               int value;
-               int i = 1000;
-               
-               value = ioread32(ioaddr + CSR9);
-               iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
-               
-               value = (phy_id << 21) | (location << 16) | 0x08000000;
-               iowrite32(value, ioaddr + CSR10);
-               
-               while(--i > 0) {
-                       mdio_delay();
-                       if(ioread32(ioaddr + CSR10) & 0x10000000)
-                               break;
-               }
-               retval = ioread32(ioaddr + CSR10);
-               spin_unlock_irqrestore(&tp->mii_lock, flags);
-               return retval & 0xFFFF;
-       }
        /* Establish sync by sending at least 32 logic ones. */
        for (i = 32; i >= 0; i--) {
                iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
@@ -159,23 +140,6 @@ void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
                spin_unlock_irqrestore(&tp->mii_lock, flags);
                return;
        }
-       if (tp->chip_id == ULI526X && tp->revision >= 0x40) {
-               int value;
-               int i = 1000;
-               
-               value = ioread32(ioaddr + CSR9);
-               iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
-               
-               value = (phy_id << 21) | (location << 16) | 0x04000000 | (val & 0xFFFF);
-               iowrite32(value, ioaddr + CSR10);
-               
-               while(--i > 0) {
-                       if (ioread32(ioaddr + CSR10) & 0x10000000)
-                               break;
-               }
-               spin_unlock_irqrestore(&tp->mii_lock, flags);
-               return;
-       }
                
        /* Establish sync by sending 32 logic ones. */
        for (i = 32; i >= 0; i--) {
index 691568283553b48e1c4744a9cd3393a3f2093c7d..e058a9fbfe884414308b005926e9d81a33e94392 100644 (file)
@@ -39,7 +39,6 @@ void tulip_timer(unsigned long data)
        case MX98713:
        case COMPEX9881:
        case DM910X:
-       case ULI526X:
        default: {
                struct medialeaf *mleaf;
                unsigned char *p;
index 20346d847d9e176f888728129d65b6e93ba2aec7..05d2d96f7be26858c3c66a8f9fbacaf170c8e1bc 100644 (file)
@@ -88,7 +88,6 @@ enum chips {
        I21145,
        DM910X,
        CONEXANT,
-       ULI526X
 };
 
 
@@ -482,11 +481,8 @@ static inline void tulip_stop_rxtx(struct tulip_private *tp)
 
 static inline void tulip_restart_rxtx(struct tulip_private *tp)
 {
-       if(!(tp->chip_id == ULI526X && 
-               (tp->revision == 0x40 || tp->revision == 0x50))) {
-               tulip_stop_rxtx(tp);
-               udelay(5);
-       }
+       tulip_stop_rxtx(tp);
+       udelay(5);
        tulip_start_rxtx(tp);
 }
 
index d45d8f56e5b4a3df298aa6ae3b42b602db2f5b55..05da5bea564c9481846b9109aac24712b6f3ed3c 100644 (file)
@@ -199,9 +199,6 @@ struct tulip_chip_table tulip_tbl[] = {
   { "Conexant LANfinity", 256, 0x0001ebef,
        HAS_MII | HAS_ACPI, tulip_timer },
 
-   /* ULi526X */
-   { "ULi M5261/M5263", 128, 0x0001ebef,
-        HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, tulip_timer },
 };
 
 
@@ -239,8 +236,6 @@ static struct pci_device_id tulip_pci_tbl[] = {
        { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
        { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
        { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
-       { 0x10b9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X },      /* ALi 1563 integrated ethernet */
-       { 0x10b9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X },      /* ALi 1563 integrated ethernet */
        { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
        { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
        { } /* terminate list */
@@ -522,7 +517,7 @@ static void tulip_tx_timeout(struct net_device *dev)
                                   dev->name);
        } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142
                           || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881
-                          || tp->chip_id == DM910X || tp->chip_id == ULI526X) {
+                          || tp->chip_id == DM910X) {
                printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
                           "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
                           dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
@@ -1103,18 +1098,16 @@ static void set_rx_mode(struct net_device *dev)
                        entry = tp->cur_tx++ % TX_RING_SIZE;
 
                        if (entry != 0) {
-                               /* Avoid a chip errata by prefixing a dummy entry. Don't do
-                                  this on the ULI526X as it triggers a different problem */
-                               if (!(tp->chip_id == ULI526X && (tp->revision == 0x40 || tp->revision == 0x50))) {
-                                       tp->tx_buffers[entry].skb = NULL;
-                                       tp->tx_buffers[entry].mapping = 0;
-                                       tp->tx_ring[entry].length =
-                                               (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
-                                       tp->tx_ring[entry].buffer1 = 0;
-                                       /* Must set DescOwned later to avoid race with chip */
-                                       dummy = entry;
-                                       entry = tp->cur_tx++ % TX_RING_SIZE;
-                               }
+                               /* Avoid a chip errata by prefixing a dummy entry. */
+                               tp->tx_buffers[entry].skb = NULL;
+                               tp->tx_buffers[entry].mapping = 0;
+                               tp->tx_ring[entry].length =
+                                       (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
+                               tp->tx_ring[entry].buffer1 = 0;
+                               /* Must set DescOwned later to avoid race with chip */
+                               dummy = entry;
+                               entry = tp->cur_tx++ % TX_RING_SIZE;
+
                        }
 
                        tp->tx_buffers[entry].skb = NULL;
@@ -1235,10 +1228,6 @@ static int tulip_uli_dm_quirk(struct pci_dev *pdev)
 {
        if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
                return 1;
-       if (pdev->vendor == 0x10b9 && pdev->device == 0x5261)
-               return 1;
-       if (pdev->vendor == 0x10b9 && pdev->device == 0x5263)
-               return 1;
        return 0;
 }
 
@@ -1680,7 +1669,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
        switch (chip_idx) {
        case DC21140:
        case DM910X:
-       case ULI526X:
        default:
                if (tp->mtable)
                        iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
new file mode 100644 (file)
index 0000000..5ae22b7
--- /dev/null
@@ -0,0 +1,1749 @@
+/*
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License
+    as published by the Free Software Foundation; either version 2
+    of the License, or (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    
+*/
+
+#define DRV_NAME       "uli526x"
+#define DRV_VERSION    "0.9.3"
+#define DRV_RELDATE    "2005-7-29"
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+
+#include <asm/processor.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+
+
+/* Board/System/Debug information/definition ---------------- */
+#define PCI_ULI5261_ID  0x526110B9     /* ULi M5261 ID*/
+#define PCI_ULI5263_ID  0x526310B9     /* ULi M5263 ID*/
+
+#define ULI526X_IO_SIZE 0x100
+#define TX_DESC_CNT     0x20            /* Allocated Tx descriptors */
+#define RX_DESC_CNT     0x30            /* Allocated Rx descriptors */
+#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)     /* Max TX packet count */
+#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)     /* TX wakeup count */
+#define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
+#define TX_BUF_ALLOC    0x600
+#define RX_ALLOC_SIZE   0x620
+#define ULI526X_RESET    1
+#define CR0_DEFAULT     0
+#define CR6_DEFAULT     0x22200000
+#define CR7_DEFAULT     0x180c1
+#define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
+#define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
+#define MAX_PACKET_SIZE 1514
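+       /* Repeat until the interrupt mask reads back as zero, i.e. the
+        * chip has really been quiesced by sis190_asic_down(). */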
+#define ULI5261_MAX_MULTICAST 14
+#define RX_COPY_SIZE   100
+#define MAX_CHECK_PACKET 0x8000
+
+#define ULI526X_10MHF      0
+#define ULI526X_100MHF     1
+#define ULI526X_10MFD      4
+#define ULI526X_100MFD     5
+#define ULI526X_AUTO       8
+
+#define ULI526X_TXTH_72        0x400000        /* TX TH 72 byte */
+#define ULI526X_TXTH_96        0x404000        /* TX TH 96 byte */
+#define ULI526X_TXTH_128       0x0000          /* TX TH 128 byte */
+#define ULI526X_TXTH_256       0x4000          /* TX TH 256 byte */
+#define ULI526X_TXTH_512       0x8000          /* TX TH 512 byte */
+#define ULI526X_TXTH_1K        0xC000          /* TX TH 1K  byte */
+
+#define ULI526X_TIMER_WUT  (jiffies + HZ * 1)  /* timer wakeup time: 1 second */
+#define ULI526X_TX_TIMEOUT ((16*HZ)/2)         /* tx packet time-out: 8 s */
+#define ULI526X_TX_KICK        (4*HZ/2)        /* tx packet kick-out: 2 s */
+
+#define ULI526X_DBUG(dbug_now, msg, value) if (uli526x_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value))
+
+#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half");
+
+
+/* CR9 definition: SROM/MII */
+#define CR9_SROM_READ   0x4800
+#define CR9_SRCS        0x1
+#define CR9_SRCLK       0x2
+#define CR9_CRDOUT      0x8
+#define SROM_DATA_0     0x0
+#define SROM_DATA_1     0x4
+#define PHY_DATA_1      0x20000
+#define PHY_DATA_0      0x00000
+#define MDCLKH          0x10000
+
+#define PHY_POWER_DOWN 0x800
+
+#define SROM_V41_CODE   0x14
+
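+/* Clock one bit into the serial ROM: keep chip select (CR9_SRCS) asserted
+ * and pulse CR9_SRCLK around the data value merged into the write. */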
+#define SROM_CLK_WRITE(data, ioaddr)                                   \
+               outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);               \
+               udelay(5);                                              \
+               outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);     \
+               udelay(5);                                              \
+               outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);               \
+               udelay(5);
+
+/* Structure/enum declaration ------------------------------- */
+struct tx_desc {
+        u32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
+        char *tx_buf_ptr;               /* Data for us */
+        struct tx_desc *next_tx_desc;
+} __attribute__(( aligned(32) ));
+
+struct rx_desc {
+       u32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
+       struct sk_buff *rx_skb_ptr;     /* Data for us */
+       struct rx_desc *next_rx_desc;
+} __attribute__(( aligned(32) ));
+
+struct uli526x_board_info {
+       u32 chip_id;                    /* Chip vendor/Device ID */
+       struct net_device *next_dev;    /* next device */
+       struct pci_dev *pdev;           /* PCI device */
+       spinlock_t lock;
+
+       long ioaddr;                    /* I/O base address */
+       u32 cr0_data;
+       u32 cr5_data;
+       u32 cr6_data;
+       u32 cr7_data;
+       u32 cr15_data;
+
+       /* pointer for memory physical address */
+       dma_addr_t buf_pool_dma_ptr;    /* Tx buffer pool memory */
+       dma_addr_t buf_pool_dma_start;  /* Tx buffer pool align dword */
+       dma_addr_t desc_pool_dma_ptr;   /* descriptor pool memory */
+       dma_addr_t first_tx_desc_dma;
+       dma_addr_t first_rx_desc_dma;
+
+       /* descriptor pointer */
+       unsigned char *buf_pool_ptr;    /* Tx buffer pool memory */
+       unsigned char *buf_pool_start;  /* Tx buffer pool align dword */
+       unsigned char *desc_pool_ptr;   /* descriptor pool memory */
+       struct tx_desc *first_tx_desc;
+       struct tx_desc *tx_insert_ptr;
+       struct tx_desc *tx_remove_ptr;
+       struct rx_desc *first_rx_desc;
+       struct rx_desc *rx_insert_ptr;
+       struct rx_desc *rx_ready_ptr;   /* next rx packet to process */
+       unsigned long tx_packet_cnt;    /* transmitted packet count */
+       unsigned long rx_avail_cnt;     /* available rx descriptor count */
+       unsigned long interval_rx_cnt;  /* rx packets counted per timer callback */
+
+       u16 dbug_cnt;
+       u16 NIC_capability;             /* NIC media capability */
+       u16 PHY_reg4;                   /* Saved Phyxcer register 4 value */
+
+       u8 media_mode;                  /* user-specified media mode */
+       u8 op_mode;                     /* actual working media mode */
+       u8 phy_addr;
+       u8 link_failed;                 /* link has failed */
+       u8 wait_reset;                  /* Hardware failed, need to reset */
+       struct timer_list timer;
+
+       /* System defined statistic counter */
+       struct net_device_stats stats;
+
+       /* Driver defined statistic counter */
+       unsigned long tx_fifo_underrun;
+       unsigned long tx_loss_carrier;
+       unsigned long tx_no_carrier;
+       unsigned long tx_late_collision;
+       unsigned long tx_excessive_collision;
+       unsigned long tx_jabber_timeout;
+       unsigned long reset_count;
+       unsigned long reset_cr8;
+       unsigned long reset_fatal;
+       unsigned long reset_TXtimeout;
+
+       /* NIC SROM data */
+       unsigned char srom[128];
+       u8 init;        
+};
+
+enum uli526x_offsets {
+       DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
+       DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
+       DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
+       DCR15 = 0x78
+};
+
+enum uli526x_CR6_bits {
+       CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
+       CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
+       CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
+};
+
+/* Global variable declaration ----------------------------- */
+static int __devinitdata printed_version;
+static char version[] __devinitdata =
+       KERN_INFO DRV_NAME ": ULi M5261/M5263 net driver, version "
+       DRV_VERSION " (" DRV_RELDATE ")\n";
+
+static int uli526x_debug;
+static unsigned char uli526x_media_mode = ULI526X_AUTO;
+static u32 uli526x_cr6_user_set;
+
+/* For module input parameter */
+static int debug;
+static u32 cr6set;
+static unsigned char mode = 8;
+
+/* function declaration ------------------------------------- */
+static int uli526x_open(struct net_device *);
+static int uli526x_start_xmit(struct sk_buff *, struct net_device *);
+static int uli526x_stop(struct net_device *);
+static struct net_device_stats * uli526x_get_stats(struct net_device *);
+static void uli526x_set_filter_mode(struct net_device *);
+static struct ethtool_ops netdev_ethtool_ops;
+static u16 read_srom_word(long, int);
+static irqreturn_t uli526x_interrupt(int, void *, struct pt_regs *);
+static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long);
+static void allocate_rx_buffer(struct uli526x_board_info *);
+static void update_cr6(u32, unsigned long);
+static void send_filter_frame(struct net_device *, int);
+static u16 phy_read(unsigned long, u8, u8, u32);
+static u16 phy_readby_cr10(unsigned long, u8, u8);
+static void phy_write(unsigned long, u8, u8, u16, u32);
+static void phy_writeby_cr10(unsigned long, u8, u8, u16);
+static void phy_write_1bit(unsigned long, u32, u32);
+static u16 phy_read_1bit(unsigned long, u32);
+static u8 uli526x_sense_speed(struct uli526x_board_info *);
+static void uli526x_process_mode(struct uli526x_board_info *);
+static void uli526x_timer(unsigned long);
+static void uli526x_rx_packet(struct net_device *, struct uli526x_board_info *);
+static void uli526x_free_tx_pkt(struct net_device *, struct uli526x_board_info *);
+static void uli526x_reuse_skb(struct uli526x_board_info *, struct sk_buff *);
+static void uli526x_dynamic_reset(struct net_device *);
+static void uli526x_free_rxbuffer(struct uli526x_board_info *);
+static void uli526x_init(struct net_device *);
+static void uli526x_set_phyxcer(struct uli526x_board_info *);
+
+/* ULI526X network board routine ---------------------------- */
+
+/*
+ *     Probe for a ULI526X board, allocate space for it and register it
+ */
+
+static int __devinit uli526x_init_one (struct pci_dev *pdev,
+                                   const struct pci_device_id *ent)
+{
+       struct uli526x_board_info *db;  /* board information structure */
+       struct net_device *dev;
+       int i, err;
+       
+       ULI526X_DBUG(0, "uli526x_init_one()", 0);
+
+       if (!printed_version++)
+               printk(version);
+
+       /* Init network device */
+       dev = alloc_etherdev(sizeof(*db));
+       if (dev == NULL)
+               return -ENOMEM;
+       SET_MODULE_OWNER(dev);
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
+               printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
+               err = -ENODEV;
+               goto err_out_free;
+       }
+
+       /* Enable Master/IO access, Disable memory access */
+       err = pci_enable_device(pdev);
+       if (err)
+               goto err_out_free;
+
+       if (!pci_resource_start(pdev, 0)) {
+               printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
+               err = -ENODEV;
+               goto err_out_disable;
+       }
+
+       if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) {
+               printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
+               err = -ENODEV;
+               goto err_out_disable;
+       }
+
+       if (pci_request_regions(pdev, DRV_NAME)) {
+               printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
+               err = -ENODEV;
+               goto err_out_disable;
+       }
+
+       /* Init system & device */
+       db = netdev_priv(dev);
+
+       /* Allocate Tx/Rx descriptor memory */
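+       /* One pool holds all Tx and Rx descriptors; the extra 0x20 bytes
+        * leave room to align the first descriptor on a 32-byte boundary. */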
+       db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
+       if(db->desc_pool_ptr == NULL)
+       {
+               err = -ENOMEM;
+               goto err_out_nomem;
+       }
+       db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
+       if(db->buf_pool_ptr == NULL)
+       {
+               err = -ENOMEM;
+               goto err_out_nomem;
+       }
+       
+       db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
+       db->first_tx_desc_dma = db->desc_pool_dma_ptr;
+       db->buf_pool_start = db->buf_pool_ptr;
+       db->buf_pool_dma_start = db->buf_pool_dma_ptr;
+
+       db->chip_id = ent->driver_data;
+       db->ioaddr = pci_resource_start(pdev, 0);
+       
+       db->pdev = pdev;
+       db->init = 1;
+       
+       dev->base_addr = db->ioaddr;
+       dev->irq = pdev->irq;
+       pci_set_drvdata(pdev, dev);
+       
+       /* Register some necessary functions */
+       dev->open = &uli526x_open;
+       dev->hard_start_xmit = &uli526x_start_xmit;
+       dev->stop = &uli526x_stop;
+       dev->get_stats = &uli526x_get_stats;
+       dev->set_multicast_list = &uli526x_set_filter_mode;
+       dev->ethtool_ops = &netdev_ethtool_ops;
+       spin_lock_init(&db->lock);
+
+               
+       /* read 64 word srom data */
+       for (i = 0; i < 64; i++)
+               ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
+
+       /* Set Node address */
+       if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0)               /* SROM absent, so read MAC address from ID Table */
+       {
+               outl(0x10000, db->ioaddr + DCR0);       //Diagnosis mode
+               outl(0x1c0, db->ioaddr + DCR13);        //Reset diagnostic pointer port
+               outl(0, db->ioaddr + DCR14);            //Clear reset port
+               outl(0x10, db->ioaddr + DCR14);         //Reset ID Table pointer
+               outl(0, db->ioaddr + DCR14);            //Clear reset port
+               outl(0, db->ioaddr + DCR13);            //Clear CR13
+               outl(0x1b0, db->ioaddr + DCR13);        //Select ID Table access port
+               //Read MAC address from CR14
+               for (i = 0; i < 6; i++)
+                       dev->dev_addr[i] = inl(db->ioaddr + DCR14);
+               //Read end
+               outl(0, db->ioaddr + DCR13);    //Clear CR13
+               outl(0, db->ioaddr + DCR0);             //Clear CR0
+               udelay(10);
+       }
+       else            /* SROM present */
+       {
+               for (i = 0; i < 6; i++)
+                       dev->dev_addr[i] = db->srom[20 + i];
+       }
+       err = register_netdev (dev);
+       if (err)
+               goto err_out_res;
+
+       printk(KERN_INFO "%s: ULi M%04lx at pci%s,",dev->name,ent->driver_data >> 16,pci_name(pdev));
+       
+       for (i = 0; i < 6; i++)
+               printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
+       printk(", irq %d.\n", dev->irq);
+
+       pci_set_master(pdev);
+
+       return 0;
+
+err_out_res:
+       pci_release_regions(pdev);
+err_out_nomem:
+       if(db->desc_pool_ptr)
+               pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+                       db->desc_pool_ptr, db->desc_pool_dma_ptr);
+                       
+       if(db->buf_pool_ptr != NULL)
+               pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+                       db->buf_pool_ptr, db->buf_pool_dma_ptr);
+err_out_disable:
+       pci_disable_device(pdev);
+err_out_free:
+       pci_set_drvdata(pdev, NULL);
+       free_netdev(dev);
+
+       return err;
+}
+
+
+static void __devexit uli526x_remove_one (struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct uli526x_board_info *db = netdev_priv(dev);
+
+       ULI526X_DBUG(0, "uli526x_remove_one()", 0);
+
+       pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
+                               DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
+                               db->desc_pool_dma_ptr);
+       pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+                               db->buf_pool_ptr, db->buf_pool_dma_ptr);
+       unregister_netdev(dev);
+       pci_release_regions(pdev);
+       free_netdev(dev);       /* free board information */
+       pci_set_drvdata(pdev, NULL);
+       pci_disable_device(pdev);
+       ULI526X_DBUG(0, "uli526x_remove_one() exit", 0);
+}
+
+
+/*
+ *     Open the interface.
+ *     The interface is opened whenever "ifconfig" activates it.
+ */
+
+static int uli526x_open(struct net_device *dev)
+{
+       int ret;
+       struct uli526x_board_info *db = netdev_priv(dev);
+       
+       ULI526X_DBUG(0, "uli526x_open", 0);
+
+       ret = request_irq(dev->irq, &uli526x_interrupt, SA_SHIRQ, dev->name, dev);
+       if (ret)
+               return ret;
+
+       /* system variable init */
+       db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set;
+       db->tx_packet_cnt = 0;
+       db->rx_avail_cnt = 0;
+       db->link_failed = 1;
+       netif_carrier_off(dev);
+       db->wait_reset = 0;
+
+       db->NIC_capability = 0xf;       /* All capability*/
+       db->PHY_reg4 = 0x1e0;
+
+       /* CR6 operation mode decision */
+       db->cr6_data |= ULI526X_TXTH_256;
+       db->cr0_data = CR0_DEFAULT;
+       
+       /* Initialize ULI526X board */
+       uli526x_init(dev);
+
+       /* Activate the network interface */
+       netif_wake_queue(dev);
+
+       /* Set up and start the periodic timer */
+       init_timer(&db->timer);
+       db->timer.expires = ULI526X_TIMER_WUT + HZ * 2;
+       db->timer.data = (unsigned long)dev;
+       db->timer.function = &uli526x_timer;
+       add_timer(&db->timer);
+
+       return 0;
+}
+
+
+/*     Initialize ULI526X board
+ *     Reset ULI526X board
+ *     Initialize TX/Rx descriptor chain structure
+ *     Send the set-up frame
+ *     Enable Tx/Rx machine
+ */
+
+static void uli526x_init(struct net_device *dev)
+{
+       struct uli526x_board_info *db = netdev_priv(dev);
+       unsigned long ioaddr = db->ioaddr;
+       u8      phy_tmp;
+       u16     phy_value;
+       u16 phy_reg_reset;
+
+       ULI526X_DBUG(0, "uli526x_init()", 0);
+
+       /* Reset M526x MAC controller */
+       outl(ULI526X_RESET, ioaddr + DCR0);     /* RESET MAC */
+       udelay(100);
+       outl(db->cr0_data, ioaddr + DCR0);
+       udelay(5);
+
+       /* PHY address: on some boards the M5261/M5263 PHY address is not 1 */
+       db->phy_addr = 1;
+       for (phy_tmp = 0; phy_tmp < 32; phy_tmp++) {
+               /* Probe by reading MII register 3 (PHY ID2) */
+               phy_value = phy_read(db->ioaddr, phy_tmp, 3, db->chip_id);
+               if (phy_value != 0xffff && phy_value != 0) {
+                       db->phy_addr = phy_tmp;
+                       break;
+               }
+       }
+       if (phy_tmp == 32)
+               printk(KERN_WARNING "Can not find the phy address!!!\n");
+       /* Parse SROM and media mode */
+       db->media_mode = uli526x_media_mode;
+
+       /* Phyxcer capability setting */
+       phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id);
+       phy_reg_reset = (phy_reg_reset | 0x8000);
+       phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id);
+       udelay(500);
+
+       /* Process Phyxcer Media Mode */
+       uli526x_set_phyxcer(db);
+
+       /* Media Mode Process */
+       if ( !(db->media_mode & ULI526X_AUTO) )
+               db->op_mode = db->media_mode;   /* Force Mode */
+
+       /* Initialize Transmit/Receive descriptor and CR3/4 */
+       uli526x_descriptor_init(db, ioaddr);
+
+       /* Init CR6 to program M526X operation */
+       update_cr6(db->cr6_data, ioaddr);
+
+       /* Send setup frame */
+       send_filter_frame(dev, dev->mc_count);  /* M5261/M5263 */
+
+       /* Init CR7, interrupt active bit */
+       db->cr7_data = CR7_DEFAULT;
+       outl(db->cr7_data, ioaddr + DCR7);
+
+       /* Init CR15, Tx jabber and Rx watchdog timer */
+       outl(db->cr15_data, ioaddr + DCR15);
+
+       /* Enable ULI526X Tx/Rx function */
+       db->cr6_data |= CR6_RXSC | CR6_TXSC;
+       update_cr6(db->cr6_data, ioaddr);
+}
+
+
+/*
+ *     Hardware start transmission.
+ *     Send a packet to media from the upper layer.
+ */
+
+static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct uli526x_board_info *db = netdev_priv(dev);
+       struct tx_desc *txptr;
+       unsigned long flags;
+
+       ULI526X_DBUG(0, "uli526x_start_xmit", 0);
+
+       /* Resource flag check */
+       netif_stop_queue(dev);
+
+       /* Too large packet check */
+       if (skb->len > MAX_PACKET_SIZE) {
+               printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
+               dev_kfree_skb(skb);
+               return 0;
+       }
+
+       spin_lock_irqsave(&db->lock, flags);
+
+       /* No Tx resources left; this should never happen normally */
+       if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
+               spin_unlock_irqrestore(&db->lock, flags);
+               printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_packet_cnt);
+               return 1;
+       }
+
+       /* Disable NIC interrupt */
+       outl(0, dev->base_addr + DCR7);
+
+       /* transmit this packet */
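+       /* The payload is copied into the descriptor's pre-allocated DMA
+        * buffer, so the skb needs no mapping of its own and is freed
+        * below as soon as the descriptor is queued. */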
+       txptr = db->tx_insert_ptr;
+       memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
+       txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
+
+       /* Point to next transmit free descriptor */
+       db->tx_insert_ptr = txptr->next_tx_desc;
+
+       /* Transmit Packet Process */
+       if ( (db->tx_packet_cnt < TX_DESC_CNT) ) {
+               txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
+               db->tx_packet_cnt++;                    /* Ready to send */
+               outl(0x1, dev->base_addr + DCR1);       /* Issue Tx polling */
+               dev->trans_start = jiffies;             /* saved time stamp */
+       }
+
+       /* Tx resource check */
+       if ( db->tx_packet_cnt < TX_FREE_DESC_CNT )
+               netif_wake_queue(dev);
+
+       /* Restore CR7 to enable interrupt */
+       spin_unlock_irqrestore(&db->lock, flags);
+       outl(db->cr7_data, dev->base_addr + DCR7);
+       
+       /* free this SKB */
+       dev_kfree_skb(skb);
+
+       return 0;
+}
+
+
+/*
+ *     Stop the interface.
+ *     The interface is stopped when it is brought down.
+ */
+
+static int uli526x_stop(struct net_device *dev)
+{
+       struct uli526x_board_info *db = netdev_priv(dev);
+       unsigned long ioaddr = dev->base_addr;
+
+       ULI526X_DBUG(0, "uli526x_stop", 0);
+
+       /* disable system */
+       netif_stop_queue(dev);
+
+       /* delete the timer */
+       del_timer_sync(&db->timer);
+
+       /* Reset & stop ULI526X board */
+       outl(ULI526X_RESET, ioaddr + DCR0);
+       udelay(5);
+       phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
+
+       /* free interrupt */
+       free_irq(dev->irq, dev);
+
+       /* free allocated rx buffer */
+       uli526x_free_rxbuffer(db);
+
+#if 0
+       /* show statistic counter */
+       printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
+               db->tx_fifo_underrun, db->tx_excessive_collision,
+               db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
+               db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
+               db->reset_fatal, db->reset_TXtimeout);
+#endif
+
+       return 0;
+}
+
+
+/*
+ *     M5261/M5263 interrupt handler
+ *     Pass received packets to the upper layer and free transmitted packets
+ */
+
+static irqreturn_t uli526x_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+       struct net_device *dev = dev_id;
+       struct uli526x_board_info *db = netdev_priv(dev);
+       unsigned long ioaddr = dev->base_addr;
+       unsigned long flags;
+
+       if (!dev) {
+               ULI526X_DBUG(1, "uli526x_interrupt() without DEVICE arg", 0);
+               return IRQ_NONE;
+       }
+
+       spin_lock_irqsave(&db->lock, flags);
+       outl(0, ioaddr + DCR7);
+
+       /* Got ULI526X status */
+       db->cr5_data = inl(ioaddr + DCR5);
+       outl(db->cr5_data, ioaddr + DCR5);
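+       /* No event we care about (CR7_DEFAULT bits)? Re-enable interrupts
+        * and bail out. */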
+       if ( !(db->cr5_data & 0x180c1) ) {
+               spin_unlock_irqrestore(&db->lock, flags);
+               outl(db->cr7_data, ioaddr + DCR7);
+               return IRQ_HANDLED;
+       }
+
+       /* Check system status */
+       if (db->cr5_data & 0x2000) {
+               /* a system bus error happened */
+               ULI526X_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
+               db->reset_fatal++;
+               db->wait_reset = 1;     /* Need to RESET */
+               spin_unlock_irqrestore(&db->lock, flags);
+               return IRQ_HANDLED;
+       }
+
+        /* Receive incoming packets */
+       if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
+               uli526x_rx_packet(dev, db);
+
+       /* reallocate rx descriptor buffer */
+       if (db->rx_avail_cnt<RX_DESC_CNT)
+               allocate_rx_buffer(db);
+
+       /* Free the transmitted descriptor */
+       if ( db->cr5_data & 0x01)
+               uli526x_free_tx_pkt(dev, db);
+
+       /* Restore CR7 to enable interrupt mask */
+       outl(db->cr7_data, ioaddr + DCR7);
+
+       spin_unlock_irqrestore(&db->lock, flags);
+       return IRQ_HANDLED;
+}
+
+
+/*
+ *     Free TX resource after TX complete
+ */
+
+static void uli526x_free_tx_pkt(struct net_device *dev, struct uli526x_board_info * db)
+{
+       struct tx_desc *txptr;
+       u32 tdes0;
+
+       txptr = db->tx_remove_ptr;
+       while(db->tx_packet_cnt) {
+               tdes0 = le32_to_cpu(txptr->tdes0);
+               /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
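+               /* Bit 31 set means the chip still owns this descriptor,
+                * so everything from here on has not been sent yet. */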
+               if (tdes0 & 0x80000000)
+                       break;
+
+               /* A transmitted packet has completed */
+               db->tx_packet_cnt--;
+               db->stats.tx_packets++;
+
+               /* Transmit statistic counter */
+               if ( tdes0 != 0x7fffffff ) {
+                       /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
+                       db->stats.collisions += (tdes0 >> 3) & 0xf;
+                       db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
+                       if (tdes0 & TDES0_ERR_MASK) {
+                               db->stats.tx_errors++;
+                               if (tdes0 & 0x0002) {   /* UnderRun */
+                                       db->tx_fifo_underrun++;
+                                       if ( !(db->cr6_data & CR6_SFT) ) {
+                                               db->cr6_data = db->cr6_data | CR6_SFT;
+                                               update_cr6(db->cr6_data, db->ioaddr);
+                                       }
+                               }
+                               if (tdes0 & 0x0100)
+                                       db->tx_excessive_collision++;
+                               if (tdes0 & 0x0200)
+                                       db->tx_late_collision++;
+                               if (tdes0 & 0x0400)
+                                       db->tx_no_carrier++;
+                               if (tdes0 & 0x0800)
+                                       db->tx_loss_carrier++;
+                               if (tdes0 & 0x4000)
+                                       db->tx_jabber_timeout++;
+                       }
+               }
+
+               txptr = txptr->next_tx_desc;
+       }/* End of while */
+
+       /* Update TX remove pointer to next */
+       db->tx_remove_ptr = txptr;
+
+       /* Resource available check */
+       if ( db->tx_packet_cnt < TX_WAKE_DESC_CNT )
+               netif_wake_queue(dev);  /* Active upper layer, send again */
+}
+
+
+/*
+ *     Receive incoming packets and pass them to the upper layer
+ */
+
+static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info * db)
+{
+       struct rx_desc *rxptr;
+       struct sk_buff *skb;
+       int rxlen;
+       u32 rdes0;
+       
+       rxptr = db->rx_ready_ptr;
+
+       while(db->rx_avail_cnt) {
+               rdes0 = le32_to_cpu(rxptr->rdes0);
+               if (rdes0 & 0x80000000) /* packet owner check */
+               {
+                       break;
+               }
+
+               db->rx_avail_cnt--;
+               db->interval_rx_cnt++;
+
+               pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+               if ( (rdes0 & 0x300) != 0x300) {
+                       /* A packet without First/Last flag */
+                       /* reuse this SKB */
+                       ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
+                       uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
+               } else {
+                       /* A packet with First/Last flag */
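+                       /* The frame length field includes the 4-byte CRC, which is stripped here */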
+                       rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
+
+                       /* error summary bit check */
+                       if (rdes0 & 0x8000) {
+                               /* This is an error packet */
+                               //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
+                               db->stats.rx_errors++;
+                               if (rdes0 & 1)
+                                       db->stats.rx_fifo_errors++;
+                               if (rdes0 & 2)
+                                       db->stats.rx_crc_errors++;
+                               if (rdes0 & 0x80)
+                                       db->stats.rx_length_errors++;
+                       }
+
+                       if ( !(rdes0 & 0x8000) ||
+                               ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
+                               skb = rxptr->rx_skb_ptr;
+               
+                               /* Good packet, send to upper layer */
+                               /* Short packets are copied into a new SKB */
+                               if ( (rxlen < RX_COPY_SIZE) &&
+                                       ( (skb = dev_alloc_skb(rxlen + 2) )
+                                       != NULL) ) {
+                                       /* size less than COPY_SIZE, allocate a rxlen SKB */
+                                       skb->dev = dev;
+                                       skb_reserve(skb, 2); /* 16byte align */
+                                       memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen);
+                                       uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
+                               } else {
+                                       skb->dev = dev;
+                                       skb_put(skb, rxlen);
+                               }
+                               skb->protocol = eth_type_trans(skb, dev);
+                               netif_rx(skb);
+                               dev->last_rx = jiffies;
+                               db->stats.rx_packets++;
+                               db->stats.rx_bytes += rxlen;
+                               
+                       } else {
+                               /* Reuse SKB buffer when the packet is error */
+                               ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
+                               uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
+                       }
+               }
+
+               rxptr = rxptr->next_rx_desc;
+       }
+
+       db->rx_ready_ptr = rxptr;
+}
+
+
+/*
+ *     Get statistics from driver.
+ */
+
+static struct net_device_stats * uli526x_get_stats(struct net_device *dev)
+{
+       struct uli526x_board_info *db = netdev_priv(dev);
+
+       ULI526X_DBUG(0, "uli526x_get_stats", 0);
+       return &db->stats;
+}
+
+
+/*
+ * Set ULI526X multicast address
+ */
+
+static void uli526x_set_filter_mode(struct net_device * dev)
+{
+       struct uli526x_board_info *db = netdev_priv(dev);
+       unsigned long flags;
+
+       ULI526X_DBUG(0, "uli526x_set_filter_mode()", 0);
+       spin_lock_irqsave(&db->lock, flags);
+
+       if (dev->flags & IFF_PROMISC) {
+               ULI526X_DBUG(0, "Enable PROM Mode", 0);
+               db->cr6_data |= CR6_PM | CR6_PBF;
+               update_cr6(db->cr6_data, db->ioaddr);
+               spin_unlock_irqrestore(&db->lock, flags);
+               return;
+       }
+
+       if (dev->flags & IFF_ALLMULTI || dev->mc_count > ULI5261_MAX_MULTICAST) {
+               ULI526X_DBUG(0, "Pass all multicast address", dev->mc_count);
+               db->cr6_data &= ~(CR6_PM | CR6_PBF);
+               db->cr6_data |= CR6_PAM;
+               spin_unlock_irqrestore(&db->lock, flags);
+               return;
+       }
+
+       ULI526X_DBUG(0, "Set multicast address", dev->mc_count);
+       send_filter_frame(dev, dev->mc_count);  /* M5261/M5263 */
+       spin_unlock_irqrestore(&db->lock, flags);
+}
+
+static void
+ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
+{
+       ecmd->supported = (SUPPORTED_10baseT_Half |
+                          SUPPORTED_10baseT_Full |
+                          SUPPORTED_100baseT_Half |
+                          SUPPORTED_100baseT_Full |
+                          SUPPORTED_Autoneg |
+                          SUPPORTED_MII);
+               
+       ecmd->advertising = (ADVERTISED_10baseT_Half |
+                          ADVERTISED_10baseT_Full |
+                          ADVERTISED_100baseT_Half |
+                          ADVERTISED_100baseT_Full |
+                          ADVERTISED_Autoneg |
+                          ADVERTISED_MII);
+
+
+       ecmd->port = PORT_MII;
+       ecmd->phy_address = db->phy_addr;
+
+       ecmd->transceiver = XCVR_EXTERNAL;
+               
+       ecmd->speed = 10;
+       ecmd->duplex = DUPLEX_HALF;
+       
+       if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
+       {
+               ecmd->speed = 100;               
+       }
+       if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
+       {
+               ecmd->duplex = DUPLEX_FULL;
+       }
+       if(db->link_failed)
+       {
+               ecmd->speed = -1;
+               ecmd->duplex = -1;      
+       }
+       
+       if (db->media_mode & ULI526X_AUTO)
+       {       
+               ecmd->autoneg = AUTONEG_ENABLE;
+       }
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+                              struct ethtool_drvinfo *info)
+{
+       struct uli526x_board_info *np = netdev_priv(dev);
+
+       strcpy(info->driver, DRV_NAME);
+       strcpy(info->version, DRV_VERSION);
+       if (np->pdev)
+               strcpy(info->bus_info, pci_name(np->pdev));
+       else
+               sprintf(info->bus_info, "EISA 0x%lx %d",
+                       dev->base_addr, dev->irq);
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct uli526x_board_info *np = netdev_priv(dev);
+       
+       ULi_ethtool_gset(np, cmd);
+       
+       return 0;
+}
+
+static u32 netdev_get_link(struct net_device *dev)
+{
+       struct uli526x_board_info *np = netdev_priv(dev);
+               
+       if(np->link_failed)
+               return 0;
+       else
+               return 1;
+}
+
+static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       wol->supported = WAKE_PHY | WAKE_MAGIC;
+       wol->wolopts = 0;
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+       .get_drvinfo            = netdev_get_drvinfo,
+       .get_settings           = netdev_get_settings,
+       .get_link               = netdev_get_link,
+       .get_wol                = uli526x_get_wol,
+};
+
+/*
+ *     A periodic timer routine
+ *     Dynamic media sense, allocate Rx buffer...
+ */
+
+static void uli526x_timer(unsigned long data)
+{
+       u32 tmp_cr8;
+       unsigned char tmp_cr12=0;
+       struct net_device *dev = (struct net_device *) data;
+       struct uli526x_board_info *db = netdev_priv(dev);
+       unsigned long flags;
+       u8 TmpSpeed=10;
+       
+       //ULI526X_DBUG(0, "uli526x_timer()", 0);
+       spin_lock_irqsave(&db->lock, flags);
+
+       
+       /* Dynamic reset ULI526X : system error or transmit time-out */
+       tmp_cr8 = inl(db->ioaddr + DCR8);
+       if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
+               db->reset_cr8++;
+               db->wait_reset = 1;
+       }
+       db->interval_rx_cnt = 0;
+
+       /* TX polling kick monitor */
+       if ( db->tx_packet_cnt &&
+            time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) {
+               outl(0x1, dev->base_addr + DCR1);       /* Tx polling again */
+
+               /* TX Timeout */
+               if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) {
+                       db->reset_TXtimeout++;
+                       db->wait_reset = 1;
+                       printk( "%s: Tx timeout - resetting\n",
+                              dev->name);
+               }
+       }
+
+       if (db->wait_reset) {
+               ULI526X_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
+               db->reset_count++;
+               uli526x_dynamic_reset(dev);
+               db->timer.expires = ULI526X_TIMER_WUT;
+               add_timer(&db->timer);
+               spin_unlock_irqrestore(&db->lock, flags);
+               return;
+       }
+
+       /* Link status check, Dynamic media type change */
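+       /*
+        * PHY register 5 is the auto-negotiation link partner ability
+        * register; a non-zero 10/100 ability field (0x01e0) is used here
+        * as a link-up indication.
+        */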
+       if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0)
+               tmp_cr12 = 3;
+
+       if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
+               /* Link Failed */
+               ULI526X_DBUG(0, "Link Failed", tmp_cr12);
+               netif_carrier_off(dev);
+               printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
+               db->link_failed = 1;
+
+               /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
+               /* Not needed in AUTO mode */
+               if ( !(db->media_mode & 0x8) )
+                       phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
+
+               /* In AUTO mode, fall back to half duplex while the link is down */
+               if (db->media_mode & ULI526X_AUTO) {
+                       db->cr6_data&=~0x00000200;      /* bit9=0, HD mode */
+                       update_cr6(db->cr6_data, db->ioaddr);
+               }
+       } else
+               if ((tmp_cr12 & 0x3) && db->link_failed) {
+                       ULI526X_DBUG(0, "Link OK", tmp_cr12);
+                       db->link_failed = 0;
+
+                       /* Auto Sense Speed */
+                       if ( (db->media_mode & ULI526X_AUTO) &&
+                               uli526x_sense_speed(db) )
+                               db->link_failed = 1;
+                       uli526x_process_mode(db);
+                       
+                       if(db->link_failed==0)
+                       {
+                               if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
+                               {
+                                       TmpSpeed = 100;
+                               }
+                               if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
+                               {
+                                       printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed);
+                               }
+                               else
+                               {
+                                       printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
+                               }
+                               netif_carrier_on(dev);
+                       }
+                       /* SHOW_MEDIA_TYPE(db->op_mode); */
+               }
+               else if(!(tmp_cr12 & 0x3) && db->link_failed)
+               {
+                       if(db->init==1)
+                       {
+                               printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
+                               netif_carrier_off(dev);
+                       }
+               }
+       db->init = 0;
+
+       /* Timer active again */
+       db->timer.expires = ULI526X_TIMER_WUT;
+       add_timer(&db->timer);
+       spin_unlock_irqrestore(&db->lock, flags);
+}
+
+
+/*
+ *     Dynamic reset the ULI526X board
+ *     Stop ULI526X board
+ *     Free Tx/Rx allocated memory
+ *     Reset ULI526X board
+ *     Re-initialize ULI526X board
+ */
+
+static void uli526x_dynamic_reset(struct net_device *dev)
+{
+       struct uli526x_board_info *db = netdev_priv(dev);
+
+       ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0);
+
+       /* Stop MAC controller */
+       db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
+       update_cr6(db->cr6_data, dev->base_addr);
+       outl(0, dev->base_addr + DCR7);         /* Disable Interrupt */
+       outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
+
+       /* Disable upper layer interface */
+       netif_stop_queue(dev);
+
+       /* Free Rx Allocate buffer */
+       uli526x_free_rxbuffer(db);
+
+       /* system variable init */
+       db->tx_packet_cnt = 0;
+       db->rx_avail_cnt = 0;
+       db->link_failed = 1;
+       db->init=1;
+       db->wait_reset = 0;
+
+       /* Re-initialize ULI526X board */
+       uli526x_init(dev);
+
+       /* Restart upper layer interface */
+       netif_wake_queue(dev);
+}
+
+
+/*
+ *     free all allocated rx buffer
+ */
+
+static void uli526x_free_rxbuffer(struct uli526x_board_info * db)
+{
+       ULI526X_DBUG(0, "uli526x_free_rxbuffer()", 0);
+
+       /* free allocated rx buffer */
+       while (db->rx_avail_cnt) {
+               dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
+               db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
+               db->rx_avail_cnt--;
+       }
+}
+
+
+/*
+ *     Reuse the SK buffer
+ */
+
+static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * skb)
+{
+       struct rx_desc *rxptr = db->rx_insert_ptr;
+
+       if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
+               rxptr->rx_skb_ptr = skb;
+               rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+               wmb();
+               rxptr->rdes0 = cpu_to_le32(0x80000000);
+               db->rx_avail_cnt++;
+               db->rx_insert_ptr = rxptr->next_rx_desc;
+       } else
+               ULI526X_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
+}
+
+
+/*
+ *     Initialize the Transmit/Receive descriptor rings
+ *     using a chained structure; assign Tx buffers and pre-allocate Rx buffers
+ */
+
+static void uli526x_descriptor_init(struct uli526x_board_info *db, unsigned long ioaddr)
+{
+       struct tx_desc *tmp_tx;
+       struct rx_desc *tmp_rx;
+       unsigned char *tmp_buf;
+       dma_addr_t tmp_tx_dma, tmp_rx_dma;
+       dma_addr_t tmp_buf_dma;
+       int i;
+
+       ULI526X_DBUG(0, "uli526x_descriptor_init()", 0);
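+       /*
+        * The Tx/Rx descriptor rings (and the Tx data buffers pointed to by
+        * buf_pool_start) appear to be carved out of DMA memory set up
+        * earlier in the probe path (not part of this hunk); this routine
+        * chains the descriptors via their tdes3/rdes3 pointers and tells
+        * the chip where the rings start (DCR3/DCR4).
+        */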
+
+       /* tx descriptor start pointer */
+       db->tx_insert_ptr = db->first_tx_desc;
+       db->tx_remove_ptr = db->first_tx_desc;
+       outl(db->first_tx_desc_dma, ioaddr + DCR4);     /* TX DESC address */
+
+       /* rx descriptor start pointer */
+       db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
+       db->first_rx_desc_dma =  db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
+       db->rx_insert_ptr = db->first_rx_desc;
+       db->rx_ready_ptr = db->first_rx_desc;
+       outl(db->first_rx_desc_dma, ioaddr + DCR3);     /* RX DESC address */
+
+       /* Init Transmit chain */
+       tmp_buf = db->buf_pool_start;
+       tmp_buf_dma = db->buf_pool_dma_start;
+       tmp_tx_dma = db->first_tx_desc_dma;
+       for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
+               tmp_tx->tx_buf_ptr = tmp_buf;
+               tmp_tx->tdes0 = cpu_to_le32(0);
+               tmp_tx->tdes1 = cpu_to_le32(0x81000000);        /* IC, chain */
+               tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
+               tmp_tx_dma += sizeof(struct tx_desc);
+               tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
+               tmp_tx->next_tx_desc = tmp_tx + 1;
+               tmp_buf = tmp_buf + TX_BUF_ALLOC;
+               tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
+       }
+       (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
+       tmp_tx->next_tx_desc = db->first_tx_desc;
+
+        /* Init Receive descriptor chain */
+       tmp_rx_dma=db->first_rx_desc_dma;
+       for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
+               tmp_rx->rdes0 = cpu_to_le32(0);
+               tmp_rx->rdes1 = cpu_to_le32(0x01000600);
+               tmp_rx_dma += sizeof(struct rx_desc);
+               tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
+               tmp_rx->next_rx_desc = tmp_rx + 1;
+       }
+       (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
+       tmp_rx->next_rx_desc = db->first_rx_desc;
+
+       /* pre-allocate Rx buffer */
+       allocate_rx_buffer(db);
+}
+
+
+/*
+ *     Update CR6 value
+ *     Write the new value to CR6 and wait briefly for it to take effect
+ */
+
+static void update_cr6(u32 cr6_data, unsigned long ioaddr)
+{
+
+       outl(cr6_data, ioaddr + DCR6);
+       udelay(5);
+}
+
+
+/*
+ *     Send a setup frame for M5261/M5263
+ *     This setup frame initializes the ULI526X address filter
+ */
+
+static void send_filter_frame(struct net_device *dev, int mc_cnt)
+{
+       struct uli526x_board_info *db = netdev_priv(dev);
+       struct dev_mc_list *mcptr;
+       struct tx_desc *txptr;
+       u16 * addrptr;
+       u32 * suptr;
+       int i;
+
+       ULI526X_DBUG(0, "send_filter_frame()", 0);
+
+       txptr = db->tx_insert_ptr;
+       suptr = (u32 *) txptr->tx_buf_ptr;
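+       /*
+        * Build a tulip-style setup frame: what follows looks like 16 perfect
+        * filter entries of 12 bytes each (192 bytes total), with each 16-bit
+        * address word stored in the low half of a 32-bit slot.
+        */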
+
+       /* Node address */
+       addrptr = (u16 *) dev->dev_addr;
+       *suptr++ = addrptr[0];
+       *suptr++ = addrptr[1];
+       *suptr++ = addrptr[2];
+
+       /* broadcast address */
+       *suptr++ = 0xffff;
+       *suptr++ = 0xffff;
+       *suptr++ = 0xffff;
+
+       /* fit the multicast address */
+       for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
+               addrptr = (u16 *) mcptr->dmi_addr;
+               *suptr++ = addrptr[0];
+               *suptr++ = addrptr[1];
+               *suptr++ = addrptr[2];
+       }
+
+       for (; i<14; i++) {
+               *suptr++ = 0xffff;
+               *suptr++ = 0xffff;
+               *suptr++ = 0xffff;
+       }
+
+       /* prepare the setup frame */
+       db->tx_insert_ptr = txptr->next_tx_desc;
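+       /*
+        * 0x890000c0 presumably encodes the tulip-style TDES1 flags for
+        * interrupt-on-completion, setup packet and chained descriptor,
+        * plus a 192-byte (0xc0) buffer length matching the frame above.
+        */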
+       txptr->tdes1 = cpu_to_le32(0x890000c0);
+
+       /* Resource Check and Send the setup packet */
+       if (db->tx_packet_cnt < TX_DESC_CNT) {
+               /* A free Tx descriptor is available */
+               db->tx_packet_cnt++;
+               txptr->tdes0 = cpu_to_le32(0x80000000);
+               update_cr6(db->cr6_data | 0x2000, dev->base_addr);
+               outl(0x1, dev->base_addr + DCR1);       /* Issue Tx polling */
+               update_cr6(db->cr6_data, dev->base_addr);
+               dev->trans_start = jiffies;
+       } else
+               printk(KERN_ERR DRV_NAME ": No Tx resource - Send_filter_frame!\n");
+}
+
+
+/*
+ *     Allocate rx buffers,
+ *     filling as many Rx descriptors as possible
+ */
+
+static void allocate_rx_buffer(struct uli526x_board_info *db)
+{
+       struct rx_desc *rxptr;
+       struct sk_buff *skb;
+
+       rxptr = db->rx_insert_ptr;
+
+       while(db->rx_avail_cnt < RX_DESC_CNT) {
+               if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
+                       break;
+               rxptr->rx_skb_ptr = skb; /* FIXME (?) */
+               rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+               wmb();
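+               /* hand the descriptor back to the chip by setting the OWN bit */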
+               rxptr->rdes0 = cpu_to_le32(0x80000000);
+               rxptr = rxptr->next_rx_desc;
+               db->rx_avail_cnt++;
+       }
+
+       db->rx_insert_ptr = rxptr;
+}
+
+
+/*
+ *     Read one word data from the serial ROM
+ */
+
+static u16 read_srom_word(long ioaddr, int offset)
+{
+       int i;
+       u16 srom_data = 0;
+       long cr9_ioaddr = ioaddr + DCR9;
+
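+       /*
+        * Bit-banged serial EEPROM read, 93C46-style: assert chip select,
+        * shift out the read opcode (110) and a 6-bit word address, then
+        * clock in 16 data bits MSB first.
+        */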
+       outl(CR9_SROM_READ, cr9_ioaddr);
+       outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+
+       /* Send the Read Command 110b */
+       SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
+       SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
+       SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
+
+       /* Send the offset */
+       for (i = 5; i >= 0; i--) {
+               srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
+               SROM_CLK_WRITE(srom_data, cr9_ioaddr);
+       }
+
+       outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+
+       for (i = 16; i > 0; i--) {
+               outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
+               udelay(5);
+               srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
+               outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+               udelay(5);
+       }
+
+       outl(CR9_SROM_READ, cr9_ioaddr);
+       return srom_data;
+}
+
+
+/*
+ *     Auto sense the media mode
+ */
+
+static u8 uli526x_sense_speed(struct uli526x_board_info * db)
+{
+       u8 ErrFlag = 0;
+       u16 phy_mode;
+
+       phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
+       phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
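+       /*
+        * BMSR (register 1) is read twice so the latched link-status bit
+        * reflects the current state; 0x24 checks link-up (bit 2) and
+        * auto-negotiation complete (bit 5).
+        */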
+
+       if ( (phy_mode & 0x24) == 0x24 ) {
+               
+               phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7);
+               if(phy_mode&0x8000)
+                       phy_mode = 0x8000;
+               else if(phy_mode&0x4000)
+                       phy_mode = 0x4000;
+               else if(phy_mode&0x2000)
+                       phy_mode = 0x2000;
+               else
+                       phy_mode = 0x1000;
+               
+               /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
+               switch (phy_mode) {
+               case 0x1000: db->op_mode = ULI526X_10MHF; break;
+               case 0x2000: db->op_mode = ULI526X_10MFD; break;
+               case 0x4000: db->op_mode = ULI526X_100MHF; break;
+               case 0x8000: db->op_mode = ULI526X_100MFD; break;
+               default: db->op_mode = ULI526X_10MHF; ErrFlag = 1; break;
+               }
+       } else {
+               db->op_mode = ULI526X_10MHF;
+               ULI526X_DBUG(0, "Link Failed :", phy_mode);
+               ErrFlag = 1;
+       }
+
+       return ErrFlag;
+}
+
+
+/*
+ *     Set 10/100 phyxcer capability
+ *     AUTO mode : phyxcer register4 is NIC capability
+ *     Force mode: phyxcer register4 is the force media
+ */
+
+static void uli526x_set_phyxcer(struct uli526x_board_info *db)
+{
+       u16 phy_reg;
+       
+       /* Phyxcer capability setting */
+       phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
+
+       if (db->media_mode & ULI526X_AUTO) {
+               /* AUTO Mode */
+               phy_reg |= db->PHY_reg4;
+       } else {
+               /* Force Mode */
+               switch(db->media_mode) {
+               case ULI526X_10MHF: phy_reg |= 0x20; break;
+               case ULI526X_10MFD: phy_reg |= 0x40; break;
+               case ULI526X_100MHF: phy_reg |= 0x80; break;
+               case ULI526X_100MFD: phy_reg |= 0x100; break;
+               }
+               
+       }
+
+       /* Write new capability to Phyxcer Reg4 */
+       if ( !(phy_reg & 0x01e0)) {
+               phy_reg|=db->PHY_reg4;
+               db->media_mode|=ULI526X_AUTO;
+       }
+       phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
+
+       /* Restart Auto-Negotiation */
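+       /* BMCR 0x1200 = auto-negotiation enable (bit 12) + restart (bit 9) */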
+       phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
+       udelay(50);
+}
+
+
+/*
+ *     Process op-mode
+ *     AUTO mode : PHY controller in Auto-negotiation Mode
+ *     Force mode: PHY controller in force mode with HUB
+ *                     N-way force capability with SWITCH
+ */
+
+static void uli526x_process_mode(struct uli526x_board_info *db)
+{
+       u16 phy_reg;
+
+       /* Full Duplex Mode Check */
+       if (db->op_mode & 0x4)
+               db->cr6_data |= CR6_FDM;        /* Set Full Duplex Bit */
+       else
+               db->cr6_data &= ~CR6_FDM;       /* Clear Full Duplex Bit */
+
+       update_cr6(db->cr6_data, db->ioaddr);
+
+       /* 10/100M phyxcer force mode need */
+       if ( !(db->media_mode & 0x8)) {
+               /* Force Mode */
+               phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
+               if ( !(phy_reg & 0x1) ) {
+                       /* partner without N-Way capability */
+                       phy_reg = 0x0;
+                       switch(db->op_mode) {
+                       case ULI526X_10MHF: phy_reg = 0x0; break;
+                       case ULI526X_10MFD: phy_reg = 0x100; break;
+                       case ULI526X_100MHF: phy_reg = 0x2000; break;
+                       case ULI526X_100MFD: phy_reg = 0x2100; break;
+                       }
+                       phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+                       phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+               }
+       }
+}
+
+
+/*
+ *     Write a word to Phy register
+ */
+
+static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
+{
+       u16 i;
+       unsigned long ioaddr;
+
+       if(chip_id == PCI_ULI5263_ID)
+       {
+               phy_writeby_cr10(iobase, phy_addr, offset, phy_data);
+               return;
+       }
+       /* M5261/M5263 Chip */
+       ioaddr = iobase + DCR9;
+
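+       /*
+        * Bit-bang a standard MII management write frame: preamble,
+        * start (01), write opcode (01), 5-bit PHY address, 5-bit register
+        * address, turnaround, then 16 data bits.
+        */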
+       /* Send 35 synchronization clock pulses (preamble) to the PHY controller */
+       for (i = 0; i < 35; i++)
+               phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+       /* Send start command(01) to Phy */
+       phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+       phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+       /* Send write command(01) to Phy */
+       phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+       phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+       /* Send Phy address */
+       for (i = 0x10; i > 0; i = i >> 1)
+               phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+
+       /* Send register address */
+       for (i = 0x10; i > 0; i = i >> 1)
+               phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+
+       /* write turnaround bits (10) */
+       phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+       phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+
+       /* Write a word data to PHY controller */
+       for ( i = 0x8000; i > 0; i >>= 1)
+               phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+       
+}
+
+
+/*
+ *     Read a word data from phy register
+ */
+
+static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
+{
+       int i;
+       u16 phy_data;
+       unsigned long ioaddr;
+
+       if(chip_id == PCI_ULI5263_ID)
+               return phy_readby_cr10(iobase, phy_addr, offset);
+       /* M5261/M5263 Chip */
+       ioaddr = iobase + DCR9;
+       
+       /* Send 35 synchronization clock pulses (preamble) to the PHY controller */
+       for (i = 0; i < 35; i++)
+               phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+       /* Send start command(01) to Phy */
+       phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+       phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+       /* Send read command(10) to Phy */
+       phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+       phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+
+       /* Send Phy address */
+       for (i = 0x10; i > 0; i = i >> 1)
+               phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+
+       /* Send register address */
+       for (i = 0x10; i > 0; i = i >> 1)
+               phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+
+       /* Skip transition state */
+       phy_read_1bit(ioaddr, chip_id);
+
+       /* read 16bit data */
+       for (phy_data = 0, i = 0; i < 16; i++) {
+               phy_data <<= 1;
+               phy_data |= phy_read_1bit(ioaddr, chip_id);
+       }
+
+       return phy_data;
+}
+
+static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
+{
+       unsigned long ioaddr,cr10_value;
+       
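+       /*
+        * CR10 appears to be an MDIO access register: from the way the value
+        * is assembled, bits 25:21 hold the PHY address, bits 20:16 the
+        * register offset, bit 27 requests a read (bit 26 a write in the
+        * write path), bit 28 signals completion, and the low 16 bits carry
+        * the data.
+        */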
+       ioaddr = iobase + DCR10;
+       cr10_value = phy_addr;
+       cr10_value = (cr10_value<<5) + offset;
+       cr10_value = (cr10_value<<16) + 0x08000000;
+       outl(cr10_value,ioaddr);
+       udelay(1);
+       while(1)
+       {
+               cr10_value = inl(ioaddr);
+               if(cr10_value&0x10000000)
+                       break;
+       }
+       return (cr10_value&0x0ffff);
+}
+
+static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
+{
+       unsigned long ioaddr,cr10_value;
+       
+       ioaddr = iobase + DCR10;
+       cr10_value = phy_addr;
+       cr10_value = (cr10_value<<5) + offset;
+       cr10_value = (cr10_value<<16) + 0x04000000 + phy_data;
+       outl(cr10_value,ioaddr);
+       udelay(1);
+}
+/*
+ *     Write one bit data to Phy Controller
+ */
+
+static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
+{
+       outl(phy_data , ioaddr);                        /* MII Clock Low */
+       udelay(1);
+       outl(phy_data  | MDCLKH, ioaddr);       /* MII Clock High */
+       udelay(1);
+       outl(phy_data , ioaddr);                        /* MII Clock Low */
+       udelay(1);
+}
+
+
+/*
+ *     Read one bit phy data from PHY controller
+ */
+
+static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
+{
+       u16 phy_data;
+       
+       outl(0x50000 , ioaddr);
+       udelay(1);
+       phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
+       outl(0x40000 , ioaddr);
+       udelay(1);
+
+       return phy_data;
+}
+
+
+static struct pci_device_id uli526x_pci_tbl[] = {
+       { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
+       { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
+       { 0, }
+};
+MODULE_DEVICE_TABLE(pci, uli526x_pci_tbl);
+
+
+static struct pci_driver uli526x_driver = {
+       .name           = "uli526x",
+       .id_table       = uli526x_pci_tbl,
+       .probe          = uli526x_init_one,
+       .remove         = __devexit_p(uli526x_remove_one),
+};
+
+MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw");
+MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver");
+MODULE_LICENSE("GPL");
+
+MODULE_PARM(debug, "i");
+MODULE_PARM(mode, "i");
+MODULE_PARM(cr6set, "i");
+MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)");
+MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
+
+/*     Description:
+ *     When the user loads the module with insmod, the system invokes
+ *     init_module() to register the driver's services.
+ */
+
+static int __init uli526x_init_module(void)
+{
+       int rc;
+
+       printk(version);
+       printed_version = 1;
+
+       ULI526X_DBUG(0, "init_module() ", debug);
+
+       if (debug)
+               uli526x_debug = debug;  /* set debug flag */
+       if (cr6set)
+               uli526x_cr6_user_set = cr6set;
+
+       switch(mode) {
+       case ULI526X_10MHF:
+       case ULI526X_100MHF:
+       case ULI526X_10MFD:
+       case ULI526X_100MFD:
+               uli526x_media_mode = mode;
+               break;
+       default:
+               uli526x_media_mode = ULI526X_AUTO;
+               break;
+       }
+
+       rc = pci_module_init(&uli526x_driver);
+       if (rc < 0)
+               return rc;
+
+       return 0;
+}
+
+
+/*
+ *     Description:
+ *     When the user removes the module with rmmod, the system invokes
+ *     cleanup_module() to un-register all registered services.
+ */
+
+static void __exit uli526x_cleanup_module(void)
+{
+       ULI526X_DBUG(0, "uli526x_clean_module() ", debug);
+       pci_unregister_driver(&uli526x_driver);
+}
+
+module_init(uli526x_init_module);
+module_exit(uli526x_cleanup_module);
index 499a5325f67f75aca4ff64b6ae0f480e2e479344..d513c1634006bb1d6eb2c6b30f10a871edff164d 100644 (file)
 #define PCI_DEVICE_ID_ENE_1225         0x1225
 #define PCI_DEVICE_ID_ENE_1410         0x1410
 #define PCI_DEVICE_ID_ENE_1420         0x1420
+#define PCI_VENDOR_ID_CHELSIO          0x1425
 
 #define PCI_VENDOR_ID_SYBA             0x1592
 #define PCI_DEVICE_ID_SYBA_2P_EPP      0x0782