/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
4 * Note: This driver is a cleanroom reimplementation based on reverse
5 * engineered documentation written by Carl-Daniel Hailfinger
6 * and Andrew de Quincey.
8 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
9 * trademarks of NVIDIA Corporation in the United States and other
12 * Copyright (C) 2003,4,5 Manfred Spraul
13 * Copyright (C) 2004 Andrew de Quincey (wol support)
14 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
15 * IRQ rate fixes, bigendian fixes, cleanups, verification)
16 * Copyright (c) 2004,5,6 NVIDIA Corporation
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2 of the License, or
21 * (at your option) any later version.
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
33 * 0.01: 05 Oct 2003: First release that compiles without warnings.
34 * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
35 * Check all PCI BARs for the register window.
36 * udelay added to mii_rw.
37 * 0.03: 06 Oct 2003: Initialize dev->irq.
38 * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
39 * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
40 * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
42 * 0.07: 14 Oct 2003: Further irq mask updates.
43 * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
44 * added into irq handler, NULL check for drain_ring.
45 * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
46 * requested interrupt sources.
47 * 0.10: 20 Oct 2003: First cleanup for release.
48 * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
49 * MAC Address init fix, set_multicast cleanup.
50 * 0.12: 23 Oct 2003: Cleanups for release.
51 * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
52 * Set link speed correctly. start rx before starting
53 * tx (nv_start_rx sets the link speed).
 * 0.14: 25 Oct 2003: Nic dependent irq mask.
55 * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
57 * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
58 * increased to 1628 bytes.
 * 0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
61 * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
62 * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
63 * addresses, really stop rx if already running
64 * in nv_start_rx, clean up a bit.
65 * 0.20: 07 Dec 2003: alloc fixes
66 * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
67 * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
69 * 0.23: 26 Jan 2004: various small cleanups
70 * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces
71 * 0.25: 09 Mar 2004: wol support
72 * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
73 * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
74 * added CK804/MCP04 device IDs, code fixes
75 * for registers, link status and other minor fixes.
76 * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
77 * 0.29: 31 Aug 2004: Add backup timer for link change notification.
78 * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
79 * into nv_close, otherwise reenabling for wol can
80 * cause DMA to kfree'd memory.
81 * 0.31: 14 Nov 2004: ethtool support for getting/setting link
83 * 0.32: 16 Apr 2005: RX_ERROR4 handling added.
84 * 0.33: 16 May 2005: Support for MCP51 added.
85 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
86 * 0.35: 26 Jun 2005: Support for MCP55 added.
87 * 0.36: 28 Jun 2005: Add jumbo frame support.
88 * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
89 * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
91 * 0.39: 18 Jul 2005: Add 64bit descriptor support.
92 * 0.40: 19 Jul 2005: Add support for mac address change.
93 * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
95 * 0.42: 06 Aug 2005: Fix lack of link speed initialization
96 * in the second (and later) nv_open call
97 * 0.43: 10 Aug 2005: Add support for tx checksum.
98 * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
99 * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
100 * 0.46: 20 Oct 2005: Add irq optimization modes.
101 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
102 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
103 * 0.49: 10 Dec 2005: Fix tso for large buffers.
104 * 0.50: 20 Jan 2006: Add 8021pq tagging support.
105 * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
106 * 0.52: 20 Jan 2006: Add MSI/MSIX support.
107 * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
108 * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
109 * 0.55: 22 Mar 2006: Add flow control (pause frame).
110 * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
111 * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
112 * 0.58: 30 Oct 2006: Added support for sideband management unit.
113 * 0.59: 30 Oct 2006: Added support for recoverable error.
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
117 * This means recovery from netif_stop_queue only happens if the hw timer
118 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
119 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
120 * If your hardware reliably generates tx done interrupts, then you can remove
121 * DEV_NEED_TIMERIRQ from the driver_data flags.
122 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
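
/*
 * Illustrative sketch (an assumption, not a quote from the probe code): the
 * DEV_NEED_TIMERIRQ flag described above is consumed by OR'ing the timer
 * bit into the interrupt mask that the open path programs, e.g.:
 *
 *	if (id->driver_data & DEV_NEED_TIMERIRQ)
 *		np->irqmask |= NVREG_IRQ_TIMER;
 */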
#ifdef CONFIG_FORCEDETH_NAPI
#define DRIVERNAPI "-NAPI"
#else
#define DRIVERNAPI ""
#endif
#define FORCEDETH_VERSION		"0.59"
131 #define DRV_NAME "forcedeth"
133 #include <linux/module.h>
134 #include <linux/types.h>
135 #include <linux/pci.h>
136 #include <linux/interrupt.h>
137 #include <linux/netdevice.h>
138 #include <linux/etherdevice.h>
139 #include <linux/delay.h>
140 #include <linux/spinlock.h>
141 #include <linux/ethtool.h>
142 #include <linux/timer.h>
143 #include <linux/skbuff.h>
144 #include <linux/mii.h>
145 #include <linux/random.h>
146 #include <linux/init.h>
147 #include <linux/if_vlan.h>
148 #include <linux/dma-mapping.h>
152 #include <asm/uaccess.h>
153 #include <asm/system.h>
#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif
166 #define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */
167 #define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */
168 #define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */
169 #define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
170 #define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */
171 #define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and striping */
172 #define DEV_HAS_MSI 0x0040 /* device supports MSI */
173 #define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
174 #define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */
175 #define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
176 #define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */
177 #define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */
178 #define DEV_HAS_MGMT_UNIT 0x1000 /* device supports management unit */
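
/*
 * Sketch of how the DEV_* capability bits above are consumed (assumed, based
 * on the flag comments): each PCI id table entry ORs its capabilities into
 * driver_data. A hypothetical jumbo-capable gigabit entry might read:
 *
 *	{
 *		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
 *		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|
 *			       DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
 *	},
 */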
enum {
	NvRegIrqStatus = 0x000,
182 #define NVREG_IRQSTAT_MIIEVENT 0x040
183 #define NVREG_IRQSTAT_MASK 0x81ff
184 NvRegIrqMask = 0x004,
185 #define NVREG_IRQ_RX_ERROR 0x0001
186 #define NVREG_IRQ_RX 0x0002
187 #define NVREG_IRQ_RX_NOBUF 0x0004
188 #define NVREG_IRQ_TX_ERR 0x0008
189 #define NVREG_IRQ_TX_OK 0x0010
190 #define NVREG_IRQ_TIMER 0x0020
191 #define NVREG_IRQ_LINK 0x0040
192 #define NVREG_IRQ_RX_FORCED 0x0080
193 #define NVREG_IRQ_TX_FORCED 0x0100
194 #define NVREG_IRQ_RECOVER_ERROR 0x8000
195 #define NVREG_IRQMASK_THROUGHPUT 0x00df
196 #define NVREG_IRQMASK_CPU 0x0040
197 #define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
198 #define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
199 #define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
201 #define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
202 NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
203 NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))
205 NvRegUnknownSetupReg6 = 0x008,
206 #define NVREG_UNKSETUP6_VAL 3
/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
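/*
 * Worked example for the formula above: an interval value v corresponds to
 * roughly (v * 2^10) / 100 microseconds, so 97 -> (97 * 1024) / 100 = 993 us
 * (about 1 ms), and NVREG_POLL_DEFAULT_THROUGHPUT (970) -> roughly 9.9 ms,
 * which matches the "100 times/second" hw timer note in the header comment.
 */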
212 NvRegPollingInterval = 0x00c,
213 #define NVREG_POLL_DEFAULT_THROUGHPUT 970
214 #define NVREG_POLL_DEFAULT_CPU 13
215 NvRegMSIMap0 = 0x020,
216 NvRegMSIMap1 = 0x024,
217 NvRegMSIIrqMask = 0x030,
218 #define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
221 #define NVREG_MISC1_HD 0x02
222 #define NVREG_MISC1_FORCE 0x3b0f3c
224 NvRegMacReset = 0x3c,
225 #define NVREG_MAC_RESET_ASSERT 0x0F3
226 NvRegTransmitterControl = 0x084,
227 #define NVREG_XMITCTL_START 0x01
228 #define NVREG_XMITCTL_MGMT_ST 0x40000000
229 #define NVREG_XMITCTL_SYNC_MASK 0x000f0000
230 #define NVREG_XMITCTL_SYNC_NOT_READY 0x0
231 #define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
232 #define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
233 #define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
234 #define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
235 #define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
236 #define NVREG_XMITCTL_HOST_LOADED 0x00004000
237 #define NVREG_XMITCTL_TX_PATH_EN 0x01000000
238 NvRegTransmitterStatus = 0x088,
239 #define NVREG_XMITSTAT_BUSY 0x01
241 NvRegPacketFilterFlags = 0x8c,
242 #define NVREG_PFF_PAUSE_RX 0x08
243 #define NVREG_PFF_ALWAYS 0x7F0000
244 #define NVREG_PFF_PROMISC 0x80
245 #define NVREG_PFF_MYADDR 0x20
246 #define NVREG_PFF_LOOPBACK 0x10
248 NvRegOffloadConfig = 0x90,
249 #define NVREG_OFFLOAD_HOMEPHY 0x601
250 #define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
251 NvRegReceiverControl = 0x094,
252 #define NVREG_RCVCTL_START 0x01
253 #define NVREG_RCVCTL_RX_PATH_EN 0x01000000
254 NvRegReceiverStatus = 0x98,
255 #define NVREG_RCVSTAT_BUSY 0x01
257 NvRegRandomSeed = 0x9c,
258 #define NVREG_RNDSEED_MASK 0x00ff
259 #define NVREG_RNDSEED_FORCE 0x7f00
260 #define NVREG_RNDSEED_FORCE2 0x2d00
261 #define NVREG_RNDSEED_FORCE3 0x7400
263 NvRegTxDeferral = 0xA0,
264 #define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
265 #define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
266 #define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
267 NvRegRxDeferral = 0xA4,
268 #define NVREG_RX_DEFERRAL_DEFAULT 0x16
269 NvRegMacAddrA = 0xA8,
270 NvRegMacAddrB = 0xAC,
271 NvRegMulticastAddrA = 0xB0,
272 #define NVREG_MCASTADDRA_FORCE 0x01
273 NvRegMulticastAddrB = 0xB4,
274 NvRegMulticastMaskA = 0xB8,
275 NvRegMulticastMaskB = 0xBC,
277 NvRegPhyInterface = 0xC0,
278 #define PHY_RGMII 0x10000000
280 NvRegTxRingPhysAddr = 0x100,
281 NvRegRxRingPhysAddr = 0x104,
282 NvRegRingSizes = 0x108,
283 #define NVREG_RINGSZ_TXSHIFT 0
284 #define NVREG_RINGSZ_RXSHIFT 16
285 NvRegTransmitPoll = 0x10c,
286 #define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
287 NvRegLinkSpeed = 0x110,
288 #define NVREG_LINKSPEED_FORCE 0x10000
289 #define NVREG_LINKSPEED_10 1000
290 #define NVREG_LINKSPEED_100 100
291 #define NVREG_LINKSPEED_1000 50
292 #define NVREG_LINKSPEED_MASK (0xFFF)
293 NvRegUnknownSetupReg5 = 0x130,
294 #define NVREG_UNKSETUP5_BIT31 (1<<31)
295 NvRegTxWatermark = 0x13c,
296 #define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
297 #define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
298 #define NVREG_TX_WM_DESC2_3_1000 0xfe08000
299 NvRegTxRxControl = 0x144,
300 #define NVREG_TXRXCTL_KICK 0x0001
301 #define NVREG_TXRXCTL_BIT1 0x0002
302 #define NVREG_TXRXCTL_BIT2 0x0004
303 #define NVREG_TXRXCTL_IDLE 0x0008
304 #define NVREG_TXRXCTL_RESET 0x0010
305 #define NVREG_TXRXCTL_RXCHECK 0x0400
306 #define NVREG_TXRXCTL_DESC_1 0
307 #define NVREG_TXRXCTL_DESC_2 0x002100
308 #define NVREG_TXRXCTL_DESC_3 0xc02200
309 #define NVREG_TXRXCTL_VLANSTRIP 0x00040
310 #define NVREG_TXRXCTL_VLANINS 0x00080
311 NvRegTxRingPhysAddrHigh = 0x148,
312 NvRegRxRingPhysAddrHigh = 0x14C,
313 NvRegTxPauseFrame = 0x170,
314 #define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080
315 #define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030
316 NvRegMIIStatus = 0x180,
317 #define NVREG_MIISTAT_ERROR 0x0001
318 #define NVREG_MIISTAT_LINKCHANGE 0x0008
319 #define NVREG_MIISTAT_MASK 0x000f
320 #define NVREG_MIISTAT_MASK2 0x000f
321 NvRegMIIMask = 0x184,
322 #define NVREG_MII_LINKCHANGE 0x0008
324 NvRegAdapterControl = 0x188,
325 #define NVREG_ADAPTCTL_START 0x02
326 #define NVREG_ADAPTCTL_LINKUP 0x04
327 #define NVREG_ADAPTCTL_PHYVALID 0x40000
328 #define NVREG_ADAPTCTL_RUNNING 0x100000
329 #define NVREG_ADAPTCTL_PHYSHIFT 24
330 NvRegMIISpeed = 0x18c,
331 #define NVREG_MIISPEED_BIT8 (1<<8)
332 #define NVREG_MIIDELAY 5
333 NvRegMIIControl = 0x190,
334 #define NVREG_MIICTL_INUSE 0x08000
335 #define NVREG_MIICTL_WRITE 0x00400
336 #define NVREG_MIICTL_ADDRSHIFT 5
337 NvRegMIIData = 0x194,
338 NvRegWakeUpFlags = 0x200,
339 #define NVREG_WAKEUPFLAGS_VAL 0x7770
340 #define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
341 #define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
342 #define NVREG_WAKEUPFLAGS_D3SHIFT 12
343 #define NVREG_WAKEUPFLAGS_D2SHIFT 8
344 #define NVREG_WAKEUPFLAGS_D1SHIFT 4
345 #define NVREG_WAKEUPFLAGS_D0SHIFT 0
346 #define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
347 #define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
348 #define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
349 #define NVREG_WAKEUPFLAGS_ENABLE 0x1111
351 NvRegPatternCRC = 0x204,
352 NvRegPatternMask = 0x208,
353 NvRegPowerCap = 0x268,
354 #define NVREG_POWERCAP_D3SUPP (1<<30)
355 #define NVREG_POWERCAP_D2SUPP (1<<26)
356 #define NVREG_POWERCAP_D1SUPP (1<<25)
357 NvRegPowerState = 0x26c,
358 #define NVREG_POWERSTATE_POWEREDUP 0x8000
359 #define NVREG_POWERSTATE_VALID 0x0100
360 #define NVREG_POWERSTATE_MASK 0x0003
361 #define NVREG_POWERSTATE_D0 0x0000
362 #define NVREG_POWERSTATE_D1 0x0001
363 #define NVREG_POWERSTATE_D2 0x0002
364 #define NVREG_POWERSTATE_D3 0x0003
366 NvRegTxZeroReXmt = 0x284,
367 NvRegTxOneReXmt = 0x288,
368 NvRegTxManyReXmt = 0x28c,
369 NvRegTxLateCol = 0x290,
370 NvRegTxUnderflow = 0x294,
371 NvRegTxLossCarrier = 0x298,
372 NvRegTxExcessDef = 0x29c,
373 NvRegTxRetryErr = 0x2a0,
374 NvRegRxFrameErr = 0x2a4,
375 NvRegRxExtraByte = 0x2a8,
376 NvRegRxLateCol = 0x2ac,
378 NvRegRxFrameTooLong = 0x2b4,
379 NvRegRxOverflow = 0x2b8,
380 NvRegRxFCSErr = 0x2bc,
381 NvRegRxFrameAlignErr = 0x2c0,
382 NvRegRxLenErr = 0x2c4,
383 NvRegRxUnicast = 0x2c8,
384 NvRegRxMulticast = 0x2cc,
385 NvRegRxBroadcast = 0x2d0,
387 NvRegTxFrame = 0x2d8,
389 NvRegTxPause = 0x2e0,
390 NvRegRxPause = 0x2e4,
391 NvRegRxDropFrame = 0x2e8,
392 NvRegVlanControl = 0x300,
393 #define NVREG_VLANCONTROL_ENABLE 0x2000
394 NvRegMSIXMap0 = 0x3e0,
395 NvRegMSIXMap1 = 0x3e4,
396 NvRegMSIXIrqStatus = 0x3f0,
398 NvRegPowerState2 = 0x600,
399 #define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11
400 #define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
};

/* Big endian: should work, but is untested */
struct ring_desc {
	u32 buf;
	u32 flaglen;
};

struct ring_desc_ex {
	u32 bufhigh;
	u32 buflow;
	u32 txvlan;
	u32 flaglen;
};

union ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
};
421 #define FLAG_MASK_V1 0xffff0000
422 #define FLAG_MASK_V2 0xffffc000
423 #define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
424 #define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
426 #define NV_TX_LASTPACKET (1<<16)
427 #define NV_TX_RETRYERROR (1<<19)
428 #define NV_TX_FORCED_INTERRUPT (1<<24)
429 #define NV_TX_DEFERRED (1<<26)
430 #define NV_TX_CARRIERLOST (1<<27)
431 #define NV_TX_LATECOLLISION (1<<28)
432 #define NV_TX_UNDERFLOW (1<<29)
433 #define NV_TX_ERROR (1<<30)
434 #define NV_TX_VALID (1<<31)
436 #define NV_TX2_LASTPACKET (1<<29)
437 #define NV_TX2_RETRYERROR (1<<18)
438 #define NV_TX2_FORCED_INTERRUPT (1<<30)
439 #define NV_TX2_DEFERRED (1<<25)
440 #define NV_TX2_CARRIERLOST (1<<26)
441 #define NV_TX2_LATECOLLISION (1<<27)
442 #define NV_TX2_UNDERFLOW (1<<28)
443 /* error and valid are the same for both */
444 #define NV_TX2_ERROR (1<<30)
445 #define NV_TX2_VALID (1<<31)
446 #define NV_TX2_TSO (1<<28)
447 #define NV_TX2_TSO_SHIFT 14
448 #define NV_TX2_TSO_MAX_SHIFT 14
449 #define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
450 #define NV_TX2_CHECKSUM_L3 (1<<27)
451 #define NV_TX2_CHECKSUM_L4 (1<<26)
453 #define NV_TX3_VLAN_TAG_PRESENT (1<<18)
455 #define NV_RX_DESCRIPTORVALID (1<<16)
456 #define NV_RX_MISSEDFRAME (1<<17)
457 #define NV_RX_SUBSTRACT1 (1<<18)
458 #define NV_RX_ERROR1 (1<<23)
459 #define NV_RX_ERROR2 (1<<24)
460 #define NV_RX_ERROR3 (1<<25)
461 #define NV_RX_ERROR4 (1<<26)
462 #define NV_RX_CRCERR (1<<27)
463 #define NV_RX_OVERFLOW (1<<28)
464 #define NV_RX_FRAMINGERR (1<<29)
465 #define NV_RX_ERROR (1<<30)
466 #define NV_RX_AVAIL (1<<31)
468 #define NV_RX2_CHECKSUMMASK (0x1C000000)
469 #define NV_RX2_CHECKSUMOK1 (0x10000000)
470 #define NV_RX2_CHECKSUMOK2 (0x14000000)
471 #define NV_RX2_CHECKSUMOK3 (0x18000000)
472 #define NV_RX2_DESCRIPTORVALID (1<<29)
473 #define NV_RX2_SUBSTRACT1 (1<<25)
474 #define NV_RX2_ERROR1 (1<<18)
475 #define NV_RX2_ERROR2 (1<<19)
476 #define NV_RX2_ERROR3 (1<<20)
477 #define NV_RX2_ERROR4 (1<<21)
478 #define NV_RX2_CRCERR (1<<22)
479 #define NV_RX2_OVERFLOW (1<<23)
480 #define NV_RX2_FRAMINGERR (1<<24)
481 /* error and avail are the same for both */
482 #define NV_RX2_ERROR (1<<30)
483 #define NV_RX2_AVAIL (1<<31)
485 #define NV_RX3_VLAN_TAG_PRESENT (1<<16)
486 #define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
/* Miscellaneous hardware related defines: */
489 #define NV_PCI_REGSZ_VER1 0x270
490 #define NV_PCI_REGSZ_VER2 0x604
492 /* various timeout delays: all in usec */
493 #define NV_TXRX_RESET_DELAY 4
494 #define NV_TXSTOP_DELAY1 10
495 #define NV_TXSTOP_DELAY1MAX 500000
496 #define NV_TXSTOP_DELAY2 100
497 #define NV_RXSTOP_DELAY1 10
498 #define NV_RXSTOP_DELAY1MAX 500000
499 #define NV_RXSTOP_DELAY2 100
500 #define NV_SETUP5_DELAY 5
501 #define NV_SETUP5_DELAYMAX 50000
502 #define NV_POWERUP_DELAY 5
503 #define NV_POWERUP_DELAYMAX 5000
504 #define NV_MIIBUSY_DELAY 50
505 #define NV_MIIPHY_DELAY 10
506 #define NV_MIIPHY_DELAYMAX 10000
507 #define NV_MAC_RESET_DELAY 64
509 #define NV_WAKEUPPATTERNS 5
510 #define NV_WAKEUPMASKENTRIES 4
512 /* General driver defaults */
513 #define NV_WATCHDOG_TIMEO (5*HZ)
515 #define RX_RING_DEFAULT 128
516 #define TX_RING_DEFAULT 256
517 #define RX_RING_MIN 128
518 #define TX_RING_MIN 64
519 #define RING_MAX_DESC_VER_1 1024
520 #define RING_MAX_DESC_VER_2_3 16384
/*
 * Difference between the get and put pointers for the tx ring.
 * This is used to throttle the amount of data outstanding in the
 * tx queue.
 */
#define TX_LIMIT_DIFFERENCE	1
528 /* rx/tx mac addr + type + vlan + align + slack*/
529 #define NV_RX_HEADERS (64)
530 /* even more slack. */
531 #define NV_RX_ALLOC_PAD (64)
533 /* maximum mtu size */
534 #define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
535 #define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */
537 #define OOM_REFILL (1+HZ/20)
538 #define POLL_WAIT (1+HZ/100)
539 #define LINK_TIMEOUT (3*HZ)
540 #define STATS_INTERVAL (10*HZ)
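
/*
 * Worked examples for the jiffies math above (illustrative, assuming
 * HZ=1000): OOM_REFILL = 1+HZ/20 = 51 jiffies (~50 ms, matching the rx
 * refill comment further down), POLL_WAIT = 1+HZ/100 = 11 jiffies (~10 ms).
 */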
/*
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3
554 #define PHY_OUI_MARVELL 0x5043
555 #define PHY_OUI_CICADA 0x03f1
556 #define PHYID1_OUI_MASK 0x03ff
557 #define PHYID1_OUI_SHFT 6
558 #define PHYID2_OUI_MASK 0xfc00
559 #define PHYID2_OUI_SHFT 10
560 #define PHYID2_MODEL_MASK 0x03f0
561 #define PHY_MODEL_MARVELL_E3016 0x220
562 #define PHY_MARVELL_E3016_INITMASK 0x0300
563 #define PHY_INIT1 0x0f000
564 #define PHY_INIT2 0x0e00
565 #define PHY_INIT3 0x01000
566 #define PHY_INIT4 0x0200
567 #define PHY_INIT5 0x0004
568 #define PHY_INIT6 0x02000
569 #define PHY_GIGABIT 0x0100
571 #define PHY_TIMEOUT 0x1
572 #define PHY_ERROR 0x2
576 #define PHY_HALF 0x100
578 #define NV_PAUSEFRAME_RX_CAPABLE 0x0001
579 #define NV_PAUSEFRAME_TX_CAPABLE 0x0002
580 #define NV_PAUSEFRAME_RX_ENABLE 0x0004
581 #define NV_PAUSEFRAME_TX_ENABLE 0x0008
582 #define NV_PAUSEFRAME_RX_REQ 0x0010
583 #define NV_PAUSEFRAME_TX_REQ 0x0020
584 #define NV_PAUSEFRAME_AUTONEG 0x0040
586 /* MSI/MSI-X defines */
587 #define NV_MSI_X_MAX_VECTORS 8
588 #define NV_MSI_X_VECTORS_MASK 0x000f
589 #define NV_MSI_CAPABLE 0x0010
590 #define NV_MSI_X_CAPABLE 0x0020
591 #define NV_MSI_ENABLED 0x0040
592 #define NV_MSI_X_ENABLED 0x0080
594 #define NV_MSI_X_VECTOR_ALL 0x0
595 #define NV_MSI_X_VECTOR_RX 0x0
596 #define NV_MSI_X_VECTOR_TX 0x1
597 #define NV_MSI_X_VECTOR_OTHER 0x2
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};
604 static const struct nv_ethtool_str nv_estats_str[] = {
609 { "tx_late_collision" },
610 { "tx_fifo_errors" },
611 { "tx_carrier_errors" },
612 { "tx_excess_deferral" },
613 { "tx_retry_error" },
617 { "rx_frame_error" },
619 { "rx_late_collision" },
621 { "rx_frame_too_long" },
622 { "rx_over_errors" },
624 { "rx_frame_align_error" },
625 { "rx_length_error" },
	{ "rx_errors_total" }
};
636 struct nv_ethtool_stats {
641 u64 tx_late_collision;
643 u64 tx_carrier_errors;
644 u64 tx_excess_deferral;
651 u64 rx_late_collision;
653 u64 rx_frame_too_long;
656 u64 rx_frame_align_error;
669 #define NV_TEST_COUNT_BASE 3
670 #define NV_TEST_COUNT_EXTENDED 4
672 static const struct nv_ethtool_str nv_etests_str[] = {
673 { "link (online/offline)" },
674 { "register (offline) " },
675 { "interrupt (offline) " },
	{ "loopback (offline) " }
};
struct register_test {
	u32 reg;
	u32 mask;
};
684 static const struct register_test nv_registers_test[] = {
685 { NvRegUnknownSetupReg6, 0x01 },
686 { NvRegMisc1, 0x03c },
687 { NvRegOffloadConfig, 0x03ff },
688 { NvRegMulticastAddrA, 0xffffffff },
689 { NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0,0 }
};
struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len;
};
/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */
/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
717 struct net_device_stats stats;
718 struct nv_ethtool_stats estats;
726 unsigned int phy_oui;
727 unsigned int phy_model;
732 /* General data: RO fields */
733 dma_addr_t ring_addr;
734 struct pci_dev *pci_dev;
	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
750 union ring_type get_rx, put_rx, first_rx, last_rx;
751 struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
752 struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
753 struct nv_skb_map *rx_skb;
755 union ring_type rx_ring;
756 unsigned int rx_buf_sz;
757 unsigned int pkt_limit;
758 struct timer_list oom_kick;
759 struct timer_list nic_poll;
760 struct timer_list stats_poll;
	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
768 unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
772 union ring_type get_tx, put_tx, first_tx, last_tx;
773 struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
774 struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
775 struct nv_skb_map *tx_skb;
777 union ring_type tx_ring;
784 struct vlan_group *vlangrp;
786 /* msi/msi-x fields */
788 struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
798 static int max_interrupt_work = 5;
/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
810 static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
819 static int poll_interval = -1;
828 static int msi = NV_MSI_INT_ENABLED;
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
837 static int msix = NV_MSIX_INT_ENABLED;
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
846 static int dma_64bit = NV_DMA_64BIT_ENABLED;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}
static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
893 #define NV_SETUP_RX_RING 0x01
894 #define NV_SETUP_TX_RING 0x02
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
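
/*
 * Usage sketch (an assumption mirroring the tx_timeout path below, which
 * restores only the tx ring): a full ring re-init would program both rings:
 *
 *	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 */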
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
983 /* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}
static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
1005 #define MII_READ (-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
		      NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
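
/*
 * Illustrative helper (a sketch, not part of the original driver): reading
 * both PHY id registers through mii_rw and recombining the OUI with the
 * PHYID1/PHYID2 masks and shifts defined above.
 */
static inline u32 nv_example_read_phy_oui(struct net_device *dev, int phyaddr)
{
	u32 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ) & PHYID1_OUI_MASK;
	u32 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ) & PHYID2_OUI_MASK;

	/* recombine the two halves for comparison against PHY_OUI_* values */
	return (id1 << PHYID1_OUI_SHFT) | (id2 >> PHYID2_OUI_SHFT);
}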
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
1082 u8 __iomem *base = get_hwbase(dev);
1083 u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
1085 /* phy errata for E3016 phy */
1086 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
1087 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1088 reg &= ~PHY_MARVELL_E3016_INITMASK;
1089 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
1095 /* set advertise register */
1096 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1097 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
1098 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}
1103 /* get phy interface type */
1104 phyinterface = readl(base + NvRegPhyInterface);
1106 /* see if gigabit phy */
1107 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1108 if (mii_status & PHY_GIGABIT) {
1109 np->gigabit = PHY_GIGABIT;
1110 mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
1111 mii_control_1000 &= ~ADVERTISE_1000HALF;
1112 if (phyinterface & PHY_RGMII)
1113 mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;
1117 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	else
		np->gigabit = 0;
1125 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1126 mii_control |= BMCR_ANENABLE;
	/* reset the phy
	 * (certain phys need bmcr to be setup with reset)
	 */
1131 if (phy_reset(dev, mii_control)) {
1132 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
1136 /* phy vendor specific configuration */
1137 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
1138 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
1139 phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
1140 phy_reserved |= (PHY_INIT3 | PHY_INIT4);
1141 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
1145 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1146 phy_reserved |= PHY_INIT5;
1147 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
1152 if (np->phy_oui == PHY_OUI_CICADA) {
1153 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1154 phy_reserved |= PHY_INIT6;
1155 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	/* some phys clear out pause advertisement on reset, set it back */
1161 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
1163 /* restart auto negotiation */
1164 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1165 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	return 0;
}
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
		dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}
static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		  NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}
static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}
static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		  NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}
static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* It seems that the nic always generates interrupts and doesn't
	 * accumulate errors internally. Thus the current values in np->stats
	 * are already up to date.
	 */
	return &np->stats;
}
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc* less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			skb->dev = dev;
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
							     skb->end-skb->data, PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb->end-skb->data;
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (np->put_rx.orig++ == np->last_rx.orig)
				np->put_rx.orig = np->first_rx.orig;
			if (np->put_rx_ctx++ == np->last_rx_ctx)
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex* less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			skb->dev = dev;
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
							     skb->end-skb->data, PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb->end-skb->data;
			np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
			np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (np->put_rx.ex++ == np->last_rx.ex)
				np->put_rx.ex = np->first_rx.ex;
			if (np->put_rx_ctx++ == np->last_rx_ctx)
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
/* If rx buffers are exhausted, this is called after 50ms to attempt a refill */
1371 #ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;

	/* Just reschedule NAPI rx processing */
	netif_rx_schedule(dev);
}
#else
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	int retcode;

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		retcode = nv_alloc_rx(dev);
	else
		retcode = nv_alloc_rx_optimized(dev);
	if (retcode) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
#endif
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}
static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
	}
}
static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}
static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
{
	struct fe_priv *np = netdev_priv(dev);

	if (tx_skb->dma) {
		pci_unmap_page(np->pci_dev, tx_skb->dma,
			       tx_skb->dma_len,
			       PCI_DMA_TODEVICE);
		tx_skb->dma = 0;
	}
	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	}
	return 0;
}
static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		if (nv_release_txskb(dev, &np->tx_skb[i]))
			np->stats.tx_dropped++;
	}
}
static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		wmb();
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
					 np->rx_skb[i].skb->end-np->rx_skb[i].skb->data,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
		}
	}
}
static void drain_ring(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}
static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{
	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
}
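
/*
 * Worked example for the ring arithmetic above (illustrative): with
 * tx_ring_size = 256, put_tx_ctx 10 slots past the ring start and
 * get_tx_ctx at slot 250, put - get = -240, so
 * (256 + (-240)) % 256 = 16 descriptors are in flight and
 * 256 - 16 = 240 slots are empty.
 */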
/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	u32 tx_flags_vlan = 0;
	struct ring_desc* put_tx;
	struct ring_desc* start_tx;
	struct ring_desc* prev_tx;
	struct nv_skb_map* prev_tx_ctx;
	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	empty_slots = nv_get_empty_tx_slots(np);
	if ((empty_slots - np->tx_limit_stop) <= entries) {
		spin_lock_irq(&np->lock);
		netif_stop_queue(dev);
		spin_unlock_irq(&np->lock);
		return NETDEV_TX_BUSY;
	}
1593 start_tx = put_tx = np->put_tx.orig;
	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
		if (put_tx++ == np->last_tx.orig)
			put_tx = np->first_tx.orig;
		if (np->put_tx_ctx++ == np->last_tx_ctx)
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);
	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
			offset += bcnt;
			size -= bcnt;
			if (put_tx++ == np->last_tx.orig)
				put_tx = np->first_tx.orig;
			if (np->put_tx_ctx++ == np->last_tx_ctx)
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}
1639 /* set last fragment flag */
1640 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
1642 /* save skb in this slot's context area */
1643 prev_tx_ctx->skb = skb;
	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (np->vlangrp && vlan_tx_tag_present(skb)) {
		tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
	}
1656 spin_lock_irq(&np->lock);
1659 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1660 np->put_tx.orig = put_tx;
1662 spin_unlock_irq(&np->lock);
1664 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
1665 dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}
1676 dev->trans_start = jiffies;
1677 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1678 pci_push(get_hwbase(dev));
1679 return NETDEV_TX_OK;
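
/*
 * Worked example for the "entries" computation above (illustrative):
 * NV_TX2_TSO_MAX_SIZE is 1<<14 = 16384 bytes, so a 1500 byte linear area
 * needs (1500 >> 14) + 1 = 1 descriptor, while a 20000 byte TSO buffer
 * needs (20000 >> 14) + 1 = 2.
 */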
static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = NV_TX2_LASTPACKET;
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	u32 tx_flags_vlan = 0;
	struct ring_desc_ex* put_tx;
	struct ring_desc_ex* start_tx;
	struct ring_desc_ex* prev_tx;
	struct nv_skb_map* prev_tx_ctx;
	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	empty_slots = nv_get_empty_tx_slots(np);
	if ((empty_slots - np->tx_limit_stop) <= entries) {
		spin_lock_irq(&np->lock);
		netif_stop_queue(dev);
		spin_unlock_irq(&np->lock);
		return NETDEV_TX_BUSY;
	}
1714 start_tx = put_tx = np->put_tx.ex;
	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
		put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
		if (put_tx++ == np->last_tx.ex)
			put_tx = np->first_tx.ex;
		if (np->put_tx_ctx++ == np->last_tx_ctx)
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);
	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
			put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
			offset += bcnt;
			size -= bcnt;
			if (put_tx++ == np->last_tx.ex)
				put_tx = np->first_tx.ex;
			if (np->put_tx_ctx++ == np->last_tx_ctx)
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}
1762 /* set last fragment flag */
1763 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
1765 /* save skb in this slot's context area */
1766 prev_tx_ctx->skb = skb;
	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (np->vlangrp && vlan_tx_tag_present(skb)) {
		tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
	}
1779 spin_lock_irq(&np->lock);
1782 start_tx->txvlan = cpu_to_le32(tx_flags_vlan);
1783 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1784 np->put_tx.ex = put_tx;
1786 spin_unlock_irq(&np->lock);
1788 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
1789 dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}
1800 dev->trans_start = jiffies;
1801 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1802 pci_push(get_hwbase(dev));
1803 return NETDEV_TX_OK;
/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	struct sk_buff *skb;

	while (np->get_tx.orig != np->put_tx.orig) {
		flags = le32_to_cpu(np->get_tx.orig->flaglen);

		dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
					dev->name, flags);
		if (flags & NV_TX_VALID)
			break;
1824 if (np->desc_ver == DESC_VER_1) {
1825 if (flags & NV_TX_LASTPACKET) {
1826 skb = np->get_tx_ctx->skb;
1827 if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
1828 NV_TX_UNDERFLOW|NV_TX_ERROR)) {
1829 if (flags & NV_TX_UNDERFLOW)
1830 np->stats.tx_fifo_errors++;
1831 if (flags & NV_TX_CARRIERLOST)
1832 np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
			}
		} else {
			if (flags & NV_TX2_LASTPACKET) {
1841 skb = np->get_tx_ctx->skb;
1842 if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
1843 NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
1844 if (flags & NV_TX2_UNDERFLOW)
1845 np->stats.tx_fifo_errors++;
1846 if (flags & NV_TX2_CARRIERLOST)
1847 np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
			}
		}
		nv_release_txskb(dev, np->get_tx_ctx);
		if (np->get_tx.orig++ == np->last_tx.orig)
			np->get_tx.orig = np->first_tx.orig;
		if (np->get_tx_ctx++ == np->last_tx_ctx)
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (nv_get_empty_tx_slots(np) > np->tx_limit_start)
		netif_wake_queue(dev);
}
static void nv_tx_done_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	struct sk_buff *skb;

	while (np->get_tx.ex != np->put_tx.ex) {
		flags = le32_to_cpu(np->get_tx.ex->flaglen);

		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
					dev->name, flags);
		if (flags & NV_TX_VALID)
			break;
1878 if (flags & NV_TX2_LASTPACKET) {
1879 skb = np->get_tx_ctx->skb;
1880 if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
1881 NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
1882 if (flags & NV_TX2_UNDERFLOW)
1883 np->stats.tx_fifo_errors++;
1884 if (flags & NV_TX2_CARRIERLOST)
1885 np->stats.tx_carrier_errors++;
				np->stats.tx_errors++;
			} else {
				np->stats.tx_packets++;
				np->stats.tx_bytes += skb->len;
			}
		}
		nv_release_txskb(dev, np->get_tx_ctx);
		if (np->get_tx.ex++ == np->last_tx.ex)
			np->get_tx.ex = np->first_tx.ex;
		if (np->get_tx_ctx++ == np->last_tx_ctx)
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (nv_get_empty_tx_slots(np) > np->tx_limit_start)
		netif_wake_queue(dev);
}
/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
1917 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx\n",
1923 dev->name, (unsigned long)np->ring_addr);
1924 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
1925 for (i=0;i<=np->register_size;i+= 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
			       i,
			       readl(base + i + 0), readl(base + i + 4),
			       readl(base + i + 8), readl(base + i + 12),
			       readl(base + i + 16), readl(base + i + 20),
			       readl(base + i + 24), readl(base + i + 28));
		}
1933 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
1934 for (i=0;i<np->tx_ring_size;i+= 4) {
1935 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
1938 le32_to_cpu(np->tx_ring.orig[i].buf),
1939 le32_to_cpu(np->tx_ring.orig[i].flaglen),
1940 le32_to_cpu(np->tx_ring.orig[i+1].buf),
1941 le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
1942 le32_to_cpu(np->tx_ring.orig[i+2].buf),
1943 le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
1944 le32_to_cpu(np->tx_ring.orig[i+3].buf),
				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
1949 le32_to_cpu(np->tx_ring.ex[i].bufhigh),
1950 le32_to_cpu(np->tx_ring.ex[i].buflow),
1951 le32_to_cpu(np->tx_ring.ex[i].flaglen),
1952 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
1953 le32_to_cpu(np->tx_ring.ex[i+1].buflow),
1954 le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
1955 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
1956 le32_to_cpu(np->tx_ring.ex[i+2].buflow),
1957 le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
1958 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
1959 le32_to_cpu(np->tx_ring.ex[i+3].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
			}
		}
	}
1965 spin_lock_irq(&np->lock);
1967 /* 1) stop tx engine */
1970 /* 2) check that the packets were not sent already: */
1971 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1972 nv_tx_done(dev);
1973 else
1974 nv_tx_done_optimized(dev);
1976 /* 3) if there are dead entries: clear everything */
1977 if (np->get_tx_ctx != np->put_tx_ctx) {
1978 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
1979 nv_drain_tx(dev);
1980 nv_init_tx(dev);
1981 setup_hw_rings(dev, NV_SETUP_TX_RING);
1982 netif_wake_queue(dev);
1985 /* 4) restart tx engine */
1986 nv_start_tx(dev);
1987 spin_unlock_irq(&np->lock);
1991 * Called when the nic notices a mismatch between the actual data len on the
1992 * wire and the len indicated in the 802 header
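1993 * Returns the corrected length (e.g. datalen 100 with protolen 80 yields 80), or -1 to tell the caller to drop the packet. */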
1994 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
1996 int hdrlen; /* length of the 802 header */
1997 int protolen; /* length as stored in the proto field */
1999 /* 1) calculate len according to header */
2000 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2001 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
2004 protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
2007 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
2008 dev->name, datalen, protolen, hdrlen);
2009 if (protolen > ETH_DATA_LEN)
2010 return datalen; /* Value in proto field not a len, no checks possible */
2013 /* consistency checks: */
2014 if (datalen > ETH_ZLEN) {
2015 if (datalen >= protolen) {
2016 /* more data on wire than in the 802 header; trim off the extra data. */
2019 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2020 dev->name, protolen);
2021 return protolen;
2022 } else {
2023 /* less data on wire than mentioned in header.
2024 * Discard the packet.
2026 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
2027 dev->name);
2028 return -1;
2030 } else {
2031 /* short packet. Accept only if 802 values are also short */
2032 if (protolen > ETH_ZLEN) {
2033 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
2034 dev->name);
2035 return -1;
2037 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2038 dev->name, datalen);
2039 return datalen;
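2041 /* nv_rx_process: pass received packets to the network core, using the
2042  * original descriptor format; handles at most @limit descriptors. */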
2043 static int nv_rx_process(struct net_device *dev, int limit)
2045 struct fe_priv *np = netdev_priv(dev);
2050 for (count = 0; count < limit; ++count) {
2051 struct sk_buff *skb;
2054 if (np->get_rx.orig == np->put_rx.orig)
2055 break; /* we scanned the whole ring - do not continue */
2056 flags = le32_to_cpu(np->get_rx.orig->flaglen);
2057 len = nv_descr_getlength(np->get_rx.orig, np->desc_ver);
2059 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
2062 if (flags & NV_RX_AVAIL)
2063 break; /* still owned by hardware */
2066 * the packet is for us - immediately tear down the pci mapping.
2067 * TODO: check if a prefetch of the first cacheline improves the performance.
2070 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2071 np->get_rx_ctx->dma_len,
2072 PCI_DMA_FROMDEVICE);
2073 skb = np->get_rx_ctx->skb;
2074 np->get_rx_ctx->skb = NULL;
2078 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
2079 for (j=0; j<64; j++) {
2081 dprintk("\n%03x:", j);
2082 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2086 /* look at what we actually got: */
2087 if (np->desc_ver == DESC_VER_1) {
2088 if (!(flags & NV_RX_DESCRIPTORVALID)) {
2093 if (flags & NV_RX_ERROR) {
2094 if (flags & NV_RX_MISSEDFRAME) {
2095 np->stats.rx_missed_errors++;
2096 np->stats.rx_errors++;
2100 if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
2101 np->stats.rx_errors++;
2105 if (flags & NV_RX_CRCERR) {
2106 np->stats.rx_crc_errors++;
2107 np->stats.rx_errors++;
2111 if (flags & NV_RX_OVERFLOW) {
2112 np->stats.rx_over_errors++;
2113 np->stats.rx_errors++;
2117 if (flags & NV_RX_ERROR4) {
2118 len = nv_getlen(dev, skb->data, len);
2119 if (len < 0) {
2120 np->stats.rx_errors++;
2125 /* framing errors are soft errors. */
2126 if (flags & NV_RX_FRAMINGERR) {
2127 if (flags & NV_RX_SUBSTRACT1) {
2132 } else {
2133 if (!(flags & NV_RX2_DESCRIPTORVALID)) {
2138 if (flags & NV_RX2_ERROR) {
2139 if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
2140 np->stats.rx_errors++;
2144 if (flags & NV_RX2_CRCERR) {
2145 np->stats.rx_crc_errors++;
2146 np->stats.rx_errors++;
2150 if (flags & NV_RX2_OVERFLOW) {
2151 np->stats.rx_over_errors++;
2152 np->stats.rx_errors++;
2156 if (flags & NV_RX2_ERROR4) {
2157 len = nv_getlen(dev, skb->data, len);
2158 if (len < 0) {
2159 np->stats.rx_errors++;
2164 /* framing errors are soft errors */
2165 if (flags & NV_RX2_FRAMINGERR) {
2166 if (flags & NV_RX2_SUBSTRACT1) {
2172 flags &= NV_RX2_CHECKSUMMASK;
2173 if (flags == NV_RX2_CHECKSUMOK1 ||
2174 flags == NV_RX2_CHECKSUMOK2 ||
2175 flags == NV_RX2_CHECKSUMOK3) {
2176 dprintk(KERN_DEBUG "%s: hw checksum hit!\n", dev->name);
2177 skb->ip_summed = CHECKSUM_UNNECESSARY;
2178 } else {
2179 dprintk(KERN_DEBUG "%s: hw checksum miss!\n", dev->name);
2183 /* got a valid packet - forward it to the network core */
2185 skb->protocol = eth_type_trans(skb, dev);
2186 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2187 dev->name, len, skb->protocol);
2188 #ifdef CONFIG_FORCEDETH_NAPI
2189 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
2190 vlan_hwaccel_receive_skb(skb, np->vlangrp,
2191 vlanflags & NV_RX3_VLAN_TAG_MASK);
2193 netif_receive_skb(skb);
2195 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
2196 vlan_hwaccel_rx(skb, np->vlangrp,
2197 vlanflags & NV_RX3_VLAN_TAG_MASK);
2201 dev->last_rx = jiffies;
2202 np->stats.rx_packets++;
2203 np->stats.rx_bytes += len;
2205 if (np->get_rx.orig++ == np->last_rx.orig)
2206 np->get_rx.orig = np->first_rx.orig;
2207 if (np->get_rx_ctx++ == np->last_rx_ctx)
2208 np->get_rx_ctx = np->first_rx_ctx;
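2212 /* nv_rx_process_optimized: as nv_rx_process, but for the extended
2213  * descriptor format (64-bit buffers plus vlan tags). */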
2214 static int nv_rx_process_optimized(struct net_device *dev, int limit)
2216 struct fe_priv *np = netdev_priv(dev);
2221 for (count = 0; count < limit; ++count) {
2222 struct sk_buff *skb;
2225 if (np->get_rx.ex == np->put_rx.ex)
2226 break; /* we scanned the whole ring - do not continue */
2227 flags = le32_to_cpu(np->get_rx.ex->flaglen);
2228 len = nv_descr_getlength_ex(np->get_rx.ex, np->desc_ver);
2229 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2231 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
2234 if (flags & NV_RX_AVAIL)
2235 break; /* still owned by hardware */
2238 * the packet is for us - immediately tear down the pci mapping.
2239 * TODO: check if a prefetch of the first cacheline improves the performance.
2242 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2243 np->get_rx_ctx->dma_len,
2244 PCI_DMA_FROMDEVICE);
2245 skb = np->get_rx_ctx->skb;
2246 np->get_rx_ctx->skb = NULL;
2250 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
2251 for (j=0; j<64; j++) {
2253 dprintk("\n%03x:", j);
2254 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2258 /* look at what we actually got: */
2259 if (!(flags & NV_RX2_DESCRIPTORVALID)) {
2264 if (flags & NV_RX2_ERROR) {
2265 if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
2266 np->stats.rx_errors++;
2270 if (flags & NV_RX2_CRCERR) {
2271 np->stats.rx_crc_errors++;
2272 np->stats.rx_errors++;
2276 if (flags & NV_RX2_OVERFLOW) {
2277 np->stats.rx_over_errors++;
2278 np->stats.rx_errors++;
2282 if (flags & NV_RX2_ERROR4) {
2283 len = nv_getlen(dev, skb->data, len);
2284 if (len < 0) {
2285 np->stats.rx_errors++;
2290 /* framing errors are soft errors */
2291 if (flags & NV_RX2_FRAMINGERR) {
2292 if (flags & NV_RX2_SUBSTRACT1) {
2298 flags &= NV_RX2_CHECKSUMMASK;
2299 if (flags == NV_RX2_CHECKSUMOK1 ||
2300 flags == NV_RX2_CHECKSUMOK2 ||
2301 flags == NV_RX2_CHECKSUMOK3) {
2302 dprintk(KERN_DEBUG "%s: hw checksum hit!\n", dev->name);
2303 skb->ip_summed = CHECKSUM_UNNECESSARY;
2304 } else {
2305 dprintk(KERN_DEBUG "%s: hw checksum miss!\n", dev->name);
2308 /* got a valid packet - forward it to the network core */
2310 skb->protocol = eth_type_trans(skb, dev);
2311 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
2312 dev->name, len, skb->protocol);
2313 #ifdef CONFIG_FORCEDETH_NAPI
2314 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
2315 vlan_hwaccel_receive_skb(skb, np->vlangrp,
2316 vlanflags & NV_RX3_VLAN_TAG_MASK);
2318 netif_receive_skb(skb);
2320 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
2321 vlan_hwaccel_rx(skb, np->vlangrp,
2322 vlanflags & NV_RX3_VLAN_TAG_MASK);
2326 dev->last_rx = jiffies;
2327 np->stats.rx_packets++;
2328 np->stats.rx_bytes += len;
2330 if (np->get_rx.ex++ == np->last_rx.ex)
2331 np->get_rx.ex = np->first_rx.ex;
2332 if (np->get_rx_ctx++ == np->last_rx_ctx)
2333 np->get_rx_ctx = np->first_rx_ctx;
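2337 /* set_bufsize: derive np->rx_buf_sz from the current MTU; NV_RX_HEADERS
2338  * leaves slack for the link-layer header. */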
2339 static void set_bufsize(struct net_device *dev)
2341 struct fe_priv *np = netdev_priv(dev);
2343 if (dev->mtu <= ETH_DATA_LEN)
2344 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2345 else
2346 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
2350 * nv_change_mtu: dev->change_mtu function
2351 * Called with dev_base_lock held for read.
2353 static int nv_change_mtu(struct net_device *dev, int new_mtu)
2355 struct fe_priv *np = netdev_priv(dev);
2358 if (new_mtu < 64 || new_mtu > np->pkt_limit)
2359 return -EINVAL;
2361 old_mtu = dev->mtu;
2362 dev->mtu = new_mtu;
2364 /* return early if the buffer sizes will not change */
2365 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
2366 return 0;
2367 if (old_mtu == new_mtu)
2368 return 0;
2370 /* synchronized against open : rtnl_lock() held by caller */
2371 if (netif_running(dev)) {
2372 u8 __iomem *base = get_hwbase(dev);
2374 * It seems that the nic preloads valid ring entries into an
2375 * internal buffer. The procedure for flushing everything is
2376 * guessed; there is probably a simpler approach.
2377 * Changing the MTU is a rare event, so it shouldn't matter.
2379 nv_disable_irq(dev);
2380 netif_tx_lock_bh(dev);
2381 spin_lock(&np->lock);
2386 /* drain rx queue */
2387 nv_drain_rx(dev);
2388 nv_drain_tx(dev);
2389 /* reinit driver view of the rx queue */
2390 set_bufsize(dev);
2391 if (nv_init_ring(dev)) {
2392 if (!np->in_shutdown)
2393 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2395 /* reinit nic view of the rx queue */
2396 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2397 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2398 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2399 base + NvRegRingSizes);
2401 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2404 /* restart rx engine */
2405 nv_start_rx(dev);
2406 nv_start_tx(dev);
2407 spin_unlock(&np->lock);
2408 netif_tx_unlock_bh(dev);
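2411 /* nv_copy_mac_to_hw: write dev->dev_addr into the two little-endian MAC
2412  * address registers; e.g. 00:11:22:33:44:55 -> A=0x33221100, B=0x5544. */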
2414 static void nv_copy_mac_to_hw(struct net_device *dev)
2416 u8 __iomem *base = get_hwbase(dev);
2419 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
2420 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
2421 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
2423 writel(mac[0], base + NvRegMacAddrA);
2424 writel(mac[1], base + NvRegMacAddrB);
2428 * nv_set_mac_address: dev->set_mac_address function
2429 * Called with rtnl_lock() held.
2431 static int nv_set_mac_address(struct net_device *dev, void *addr)
2433 struct fe_priv *np = netdev_priv(dev);
2434 struct sockaddr *macaddr = (struct sockaddr*)addr;
2436 if (!is_valid_ether_addr(macaddr->sa_data))
2437 return -EADDRNOTAVAIL;
2439 /* synchronized against open : rtnl_lock() held by caller */
2440 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
2442 if (netif_running(dev)) {
2443 netif_tx_lock_bh(dev);
2444 spin_lock_irq(&np->lock);
2446 /* stop rx engine */
2447 nv_stop_rx(dev);
2449 /* set mac address */
2450 nv_copy_mac_to_hw(dev);
2452 /* restart rx engine */
2453 nv_start_rx(dev);
2454 spin_unlock_irq(&np->lock);
2455 netif_tx_unlock_bh(dev);
2457 nv_copy_mac_to_hw(dev);
2463 * nv_set_multicast: dev->set_multicast function
2464 * Called with netif_tx_lock held.
2466 static void nv_set_multicast(struct net_device *dev)
2468 struct fe_priv *np = netdev_priv(dev);
2469 u8 __iomem *base = get_hwbase(dev);
2472 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
2474 memset(addr, 0, sizeof(addr));
2475 memset(mask, 0, sizeof(mask));
2477 if (dev->flags & IFF_PROMISC) {
2478 pff |= NVREG_PFF_PROMISC;
2480 pff |= NVREG_PFF_MYADDR;
2482 if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
2486 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
2487 if (dev->flags & IFF_ALLMULTI) {
2488 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
2490 struct dev_mc_list *walk;
2492 walk = dev->mc_list;
2493 while (walk != NULL) {
2495 a = le32_to_cpu(*(u32 *) walk->dmi_addr);
2496 b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
2504 addr[0] = alwaysOn[0];
2505 addr[1] = alwaysOn[1];
2506 mask[0] = alwaysOn[0] | alwaysOff[0];
2507 mask[1] = alwaysOn[1] | alwaysOff[1];
2510 addr[0] |= NVREG_MCASTADDRA_FORCE;
2511 pff |= NVREG_PFF_ALWAYS;
2512 spin_lock_irq(&np->lock);
2514 writel(addr[0], base + NvRegMulticastAddrA);
2515 writel(addr[1], base + NvRegMulticastAddrB);
2516 writel(mask[0], base + NvRegMulticastMaskA);
2517 writel(mask[1], base + NvRegMulticastMaskB);
2518 writel(pff, base + NvRegPacketFilterFlags);
2519 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
2522 spin_unlock_irq(&np->lock);
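2523 /* nv_update_pause: program rx/tx pause frame handling into the MAC
2524  * according to pause_flags and within the nic's capabilities. */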
2525 static void nv_update_pause(struct net_device *dev, u32 pause_flags)
2527 struct fe_priv *np = netdev_priv(dev);
2528 u8 __iomem *base = get_hwbase(dev);
2530 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
2532 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
2533 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
2534 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
2535 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
2536 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2537 } else {
2538 writel(pff, base + NvRegPacketFilterFlags);
2541 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
2542 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
2543 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
2544 writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
2545 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
2546 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2547 } else {
2548 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
2549 writel(regmisc, base + NvRegMisc1);
2555 * nv_update_linkspeed: Setup the MAC according to the link partner
2556 * @dev: Network device to be configured
2558 * The function queries the PHY and checks if there is a link partner.
2559 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
2560 * set to 10 MBit HD.
2562 * The function returns 0 if there is no link partner and 1 if there is
2563 * a good link partner.
2565 static int nv_update_linkspeed(struct net_device *dev)
2567 struct fe_priv *np = netdev_priv(dev);
2568 u8 __iomem *base = get_hwbase(dev);
2571 int adv_lpa, adv_pause, lpa_pause;
2572 int newls = np->linkspeed;
2573 int newdup = np->duplex;
2576 u32 control_1000, status_1000, phyreg, pause_flags, txreg;
2578 /* BMSR_LSTATUS is latched, read it twice:
2579 * we want the current value.
2581 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
2582 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
2584 if (!(mii_status & BMSR_LSTATUS)) {
2585 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
2587 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2593 if (np->autoneg == 0) {
2594 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
2595 dev->name, np->fixed_mode);
2596 if (np->fixed_mode & LPA_100FULL) {
2597 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2599 } else if (np->fixed_mode & LPA_100HALF) {
2600 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2602 } else if (np->fixed_mode & LPA_10FULL) {
2603 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2605 } else {
2606 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2612 /* check that autonegotiation is complete */
2613 if (!(mii_status & BMSR_ANEGCOMPLETE)) {
2614 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
2615 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2618 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
2622 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
2623 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
2624 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
2625 dev->name, adv, lpa);
2628 if (np->gigabit == PHY_GIGABIT) {
2629 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
2630 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
2632 if ((control_1000 & ADVERTISE_1000FULL) &&
2633 (status_1000 & LPA_1000FULL)) {
2634 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
2636 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
2642 /* FIXME: handle parallel detection properly */
2643 adv_lpa = lpa & adv;
2644 if (adv_lpa & LPA_100FULL) {
2645 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2647 } else if (adv_lpa & LPA_100HALF) {
2648 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2650 } else if (adv_lpa & LPA_10FULL) {
2651 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2653 } else if (adv_lpa & LPA_10HALF) {
2654 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2656 } else {
2657 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
2658 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2663 if (np->duplex == newdup && np->linkspeed == newls)
2666 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
2667 dev->name, np->linkspeed, np->duplex, newls, newdup);
2669 np->duplex = newdup;
2670 np->linkspeed = newls;
2672 if (np->gigabit == PHY_GIGABIT) {
2673 phyreg = readl(base + NvRegRandomSeed);
2674 phyreg &= ~(0x3FF00);
2675 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
2676 phyreg |= NVREG_RNDSEED_FORCE3;
2677 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
2678 phyreg |= NVREG_RNDSEED_FORCE2;
2679 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
2680 phyreg |= NVREG_RNDSEED_FORCE;
2681 writel(phyreg, base + NvRegRandomSeed);
2684 phyreg = readl(base + NvRegPhyInterface);
2685 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
2686 if (np->duplex == 0)
2687 phyreg |= PHY_HALF;
2688 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
2689 phyreg |= PHY_100;
2690 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
2691 phyreg |= PHY_1000;
2692 writel(phyreg, base + NvRegPhyInterface);
2694 if (phyreg & PHY_RGMII) {
2695 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
2696 txreg = NVREG_TX_DEFERRAL_RGMII_1000;
2697 else
2698 txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
2699 } else {
2700 txreg = NVREG_TX_DEFERRAL_DEFAULT;
2702 writel(txreg, base + NvRegTxDeferral);
2704 if (np->desc_ver == DESC_VER_1) {
2705 txreg = NVREG_TX_WM_DESC1_DEFAULT;
2706 } else {
2707 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
2708 txreg = NVREG_TX_WM_DESC2_3_1000;
2709 else
2710 txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
2712 writel(txreg, base + NvRegTxWatermark);
2714 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
2717 writel(np->linkspeed, base + NvRegLinkSpeed);
2721 /* setup pause frame */
2722 if (np->duplex != 0) {
2723 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
2724 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
2725 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
2727 switch (adv_pause) {
2728 case ADVERTISE_PAUSE_CAP:
2729 if (lpa_pause & LPA_PAUSE_CAP) {
2730 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2731 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
2732 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2735 case ADVERTISE_PAUSE_ASYM:
2736 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
2738 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2741 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
2742 if (lpa_pause & LPA_PAUSE_CAP)
2744 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2745 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
2746 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2748 if (lpa_pause == LPA_PAUSE_ASYM)
2750 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2755 pause_flags = np->pause_flags;
2758 nv_update_pause(dev, pause_flags);
2763 static void nv_linkchange(struct net_device *dev)
2765 if (nv_update_linkspeed(dev)) {
2766 if (!netif_carrier_ok(dev)) {
2767 netif_carrier_on(dev);
2768 printk(KERN_INFO "%s: link up.\n", dev->name);
2771 } else {
2772 if (netif_carrier_ok(dev)) {
2773 netif_carrier_off(dev);
2774 printk(KERN_INFO "%s: link down.\n", dev->name);
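2776 /* nv_link_irq: acknowledge the MII status irq and, on a link change,
2777  * re-evaluate the link via nv_linkchange(). */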
2780 static void nv_link_irq(struct net_device *dev)
2782 u8 __iomem *base = get_hwbase(dev);
2785 miistat = readl(base + NvRegMIIStatus);
2786 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
2787 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
2789 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
2790 nv_linkchange(dev);
2791 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
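2792 /* nv_nic_irq: interrupt handler for the original descriptor formats;
2793  * handles tx completion, rx, link and error events in a bounded loop. */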
2794 static irqreturn_t nv_nic_irq(int foo, void *data)
2796 struct net_device *dev = (struct net_device *) data;
2797 struct fe_priv *np = netdev_priv(dev);
2798 u8 __iomem *base = get_hwbase(dev);
2802 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
2805 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
2806 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2807 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
2808 } else {
2809 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2810 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
2813 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2814 if (!(events & np->irqmask))
2817 spin_lock(&np->lock);
2818 nv_tx_done(dev);
2819 spin_unlock(&np->lock);
2821 if (events & NVREG_IRQ_LINK) {
2822 spin_lock(&np->lock);
2823 nv_link_irq(dev);
2824 spin_unlock(&np->lock);
2826 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
2827 spin_lock(&np->lock);
2828 nv_linkchange(dev);
2829 spin_unlock(&np->lock);
2830 np->link_timeout = jiffies + LINK_TIMEOUT;
2832 if (events & (NVREG_IRQ_TX_ERR)) {
2833 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
2836 if (events & (NVREG_IRQ_UNKNOWN)) {
2837 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report.\n",
2840 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
2841 spin_lock(&np->lock);
2842 /* disable interrupts on the nic */
2843 if (!(np->msi_flags & NV_MSI_X_ENABLED))
2844 writel(0, base + NvRegIrqMask);
2845 else
2846 writel(np->irqmask, base + NvRegIrqMask);
2849 if (!np->in_shutdown) {
2850 np->nic_poll_irq = np->irqmask;
2851 np->recover_error = 1;
2852 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2854 spin_unlock(&np->lock);
2857 #ifdef CONFIG_FORCEDETH_NAPI
2858 if (events & NVREG_IRQ_RX_ALL) {
2859 netif_rx_schedule(dev);
2861 /* Disable further receive irqs */
2862 spin_lock(&np->lock);
2863 np->irqmask &= ~NVREG_IRQ_RX_ALL;
2865 if (np->msi_flags & NV_MSI_X_ENABLED)
2866 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
2867 else
2868 writel(np->irqmask, base + NvRegIrqMask);
2869 spin_unlock(&np->lock);
2872 nv_rx_process(dev, dev->weight);
2873 if (nv_alloc_rx(dev)) {
2874 spin_lock(&np->lock);
2875 if (!np->in_shutdown)
2876 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2877 spin_unlock(&np->lock);
2880 if (i > max_interrupt_work) {
2881 spin_lock(&np->lock);
2882 /* disable interrupts on the nic */
2883 if (!(np->msi_flags & NV_MSI_X_ENABLED))
2884 writel(0, base + NvRegIrqMask);
2885 else
2886 writel(np->irqmask, base + NvRegIrqMask);
2889 if (!np->in_shutdown) {
2890 np->nic_poll_irq = np->irqmask;
2891 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2893 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
2894 spin_unlock(&np->lock);
2899 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
2901 return IRQ_RETVAL(i);
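2902 /* nv_nic_irq_optimized: as nv_nic_irq, but using the extended descriptor
2903  * helpers (DESC_VER_3 nics). */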
2904 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
2906 struct net_device *dev = (struct net_device *) data;
2907 struct fe_priv *np = netdev_priv(dev);
2908 u8 __iomem *base = get_hwbase(dev);
2912 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
2915 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
2916 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2917 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
2918 } else {
2919 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2920 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
2923 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2924 if (!(events & np->irqmask))
2927 spin_lock(&np->lock);
2928 nv_tx_done_optimized(dev);
2929 spin_unlock(&np->lock);
2931 if (events & NVREG_IRQ_LINK) {
2932 spin_lock(&np->lock);
2933 nv_link_irq(dev);
2934 spin_unlock(&np->lock);
2936 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
2937 spin_lock(&np->lock);
2938 nv_linkchange(dev);
2939 spin_unlock(&np->lock);
2940 np->link_timeout = jiffies + LINK_TIMEOUT;
2942 if (events & (NVREG_IRQ_TX_ERR)) {
2943 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
2946 if (events & (NVREG_IRQ_UNKNOWN)) {
2947 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report.\n",
2950 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
2951 spin_lock(&np->lock);
2952 /* disable interrupts on the nic */
2953 if (!(np->msi_flags & NV_MSI_X_ENABLED))
2954 writel(0, base + NvRegIrqMask);
2955 else
2956 writel(np->irqmask, base + NvRegIrqMask);
2959 if (!np->in_shutdown) {
2960 np->nic_poll_irq = np->irqmask;
2961 np->recover_error = 1;
2962 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2964 spin_unlock(&np->lock);
2968 #ifdef CONFIG_FORCEDETH_NAPI
2969 if (events & NVREG_IRQ_RX_ALL) {
2970 netif_rx_schedule(dev);
2972 /* Disable further receive irqs */
2973 spin_lock(&np->lock);
2974 np->irqmask &= ~NVREG_IRQ_RX_ALL;
2976 if (np->msi_flags & NV_MSI_X_ENABLED)
2977 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
2978 else
2979 writel(np->irqmask, base + NvRegIrqMask);
2980 spin_unlock(&np->lock);
2983 nv_rx_process_optimized(dev, dev->weight);
2984 if (nv_alloc_rx_optimized(dev)) {
2985 spin_lock(&np->lock);
2986 if (!np->in_shutdown)
2987 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2988 spin_unlock(&np->lock);
2991 if (i > max_interrupt_work) {
2992 spin_lock(&np->lock);
2993 /* disable interrupts on the nic */
2994 if (!(np->msi_flags & NV_MSI_X_ENABLED))
2995 writel(0, base + NvRegIrqMask);
2996 else
2997 writel(np->irqmask, base + NvRegIrqMask);
3000 if (!np->in_shutdown) {
3001 np->nic_poll_irq = np->irqmask;
3002 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3004 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_optimized.\n", dev->name, i);
3005 spin_unlock(&np->lock);
3010 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
3012 return IRQ_RETVAL(i);
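3013 /* nv_nic_irq_tx: MSI-X handler dedicated to the tx interrupt vector. */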
3015 static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3017 struct net_device *dev = (struct net_device *) data;
3018 struct fe_priv *np = netdev_priv(dev);
3019 u8 __iomem *base = get_hwbase(dev);
3022 unsigned long flags;
3024 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
3027 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3028 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3030 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
3031 if (!(events & np->irqmask))
3034 spin_lock_irqsave(&np->lock, flags);
3035 nv_tx_done_optimized(dev);
3036 spin_unlock_irqrestore(&np->lock, flags);
3038 if (events & (NVREG_IRQ_TX_ERR)) {
3039 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3042 if (i > max_interrupt_work) {
3043 spin_lock_irqsave(&np->lock, flags);
3044 /* disable interrupts on the nic */
3045 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3048 if (!np->in_shutdown) {
3049 np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3050 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3052 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
3053 spin_unlock_irqrestore(&np->lock, flags);
3058 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
3060 return IRQ_RETVAL(i);
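3061 /* nv_napi_poll: NAPI poll callback; drains up to the allotted quota of rx
3062  * packets and re-enables rx interrupts once the ring is empty. */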
3063 #ifdef CONFIG_FORCEDETH_NAPI
3064 static int nv_napi_poll(struct net_device *dev, int *budget)
3066 int pkts, limit = min(*budget, dev->quota);
3067 struct fe_priv *np = netdev_priv(dev);
3068 u8 __iomem *base = get_hwbase(dev);
3069 unsigned long flags;
3071 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
3072 pkts = nv_rx_process(dev, limit);
3073 else
3074 pkts = nv_rx_process_optimized(dev, limit);
3075 /* refill with the allocator that matches the descriptor format in use */
3076 if ((np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) ? nv_alloc_rx(dev) : nv_alloc_rx_optimized(dev)) {
3077 spin_lock_irqsave(&np->lock, flags);
3078 if (!np->in_shutdown)
3079 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3080 spin_unlock_irqrestore(&np->lock, flags);
3084 /* all done, no more packets present */
3085 netif_rx_complete(dev);
3087 /* re-enable receive interrupts */
3088 spin_lock_irqsave(&np->lock, flags);
3090 np->irqmask |= NVREG_IRQ_RX_ALL;
3091 if (np->msi_flags & NV_MSI_X_ENABLED)
3092 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3093 else
3094 writel(np->irqmask, base + NvRegIrqMask);
3096 spin_unlock_irqrestore(&np->lock, flags);
3099 /* used up our quantum, so reschedule */
3107 #ifdef CONFIG_FORCEDETH_NAPI
3108 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3110 struct net_device *dev = (struct net_device *) data;
3111 u8 __iomem *base = get_hwbase(dev);
3114 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3115 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3117 if (events) {
3118 netif_rx_schedule(dev);
3119 /* disable receive interrupts on the nic */
3120 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3126 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3128 struct net_device *dev = (struct net_device *) data;
3129 struct fe_priv *np = netdev_priv(dev);
3130 u8 __iomem *base = get_hwbase(dev);
3133 unsigned long flags;
3135 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
3138 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3139 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3141 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
3142 if (!(events & np->irqmask))
3145 nv_rx_process_optimized(dev, dev->weight);
3146 if (nv_alloc_rx_optimized(dev)) {
3147 spin_lock_irqsave(&np->lock, flags);
3148 if (!np->in_shutdown)
3149 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3150 spin_unlock_irqrestore(&np->lock, flags);
3153 if (i > max_interrupt_work) {
3154 spin_lock_irqsave(&np->lock, flags);
3155 /* disable interrupts on the nic */
3156 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3159 if (!np->in_shutdown) {
3160 np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3161 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3163 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
3164 spin_unlock_irqrestore(&np->lock, flags);
3168 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
3170 return IRQ_RETVAL(i);
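3171 /* nv_nic_irq_other: MSI-X handler for the link/timer/error ("other")
3172  * interrupt vector. */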
3174 static irqreturn_t nv_nic_irq_other(int foo, void *data)
3176 struct net_device *dev = (struct net_device *) data;
3177 struct fe_priv *np = netdev_priv(dev);
3178 u8 __iomem *base = get_hwbase(dev);
3181 unsigned long flags;
3183 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
3186 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3187 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3189 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3190 if (!(events & np->irqmask))
3193 if (events & NVREG_IRQ_LINK) {
3194 spin_lock_irqsave(&np->lock, flags);
3195 nv_link_irq(dev);
3196 spin_unlock_irqrestore(&np->lock, flags);
3198 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3199 spin_lock_irqsave(&np->lock, flags);
3200 nv_linkchange(dev);
3201 spin_unlock_irqrestore(&np->lock, flags);
3202 np->link_timeout = jiffies + LINK_TIMEOUT;
3204 if (events & NVREG_IRQ_RECOVER_ERROR) {
3205 spin_lock_irqsave(&np->lock, flags); /* irqsave, not irq: we run in hardirq context */
3206 /* disable interrupts on the nic */
3207 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3210 if (!np->in_shutdown) {
3211 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3212 np->recover_error = 1;
3213 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3215 spin_unlock_irqrestore(&np->lock, flags);
3218 if (events & (NVREG_IRQ_UNKNOWN)) {
3219 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report.\n",
3222 if (i > max_interrupt_work) {
3223 spin_lock_irqsave(&np->lock, flags);
3224 /* disable interrupts on the nic */
3225 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3228 if (!np->in_shutdown) {
3229 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3230 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3232 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
3233 spin_unlock_irqrestore(&np->lock, flags);
3238 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
3240 return IRQ_RETVAL(i);
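3241 /* nv_nic_irq_test: minimal handler used by the interrupt self-test; it
3242  * acks the timer irq and records that the interrupt arrived. */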
3243 static irqreturn_t nv_nic_irq_test(int foo, void *data)
3245 struct net_device *dev = (struct net_device *) data;
3246 struct fe_priv *np = netdev_priv(dev);
3247 u8 __iomem *base = get_hwbase(dev);
3250 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
3252 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3253 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3254 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3255 } else {
3256 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3257 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3260 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3261 if (!(events & NVREG_IRQ_TIMER))
3262 return IRQ_RETVAL(0);
3264 spin_lock(&np->lock);
3265 np->intr_test = 1;
3266 spin_unlock(&np->lock);
3268 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
3270 return IRQ_RETVAL(1);
3273 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3275 u8 __iomem *base = get_hwbase(dev);
3279 /* Each interrupt bit can be mapped to a MSIX vector (4 bits).
3280 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3281 * the remaining 8 interrupts.
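3282 * E.g. an irqmask with bits 0-2 set and vector 1 yields msixmap 0x111. */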
3283 for (i = 0; i < 8; i++) {
3284 if ((irqmask >> i) & 0x1) {
3285 msixmap |= vector << (i << 2);
3288 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3290 msixmap = 0;
3291 for (i = 0; i < 8; i++) {
3292 if ((irqmask >> (i + 8)) & 0x1) {
3293 msixmap |= vector << (i << 2);
3296 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
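3297 /* nv_request_irq: set up MSI-X (split or single vector), MSI or a legacy
3298  * irq, in that order of preference, falling back on each failure. */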
3299 static int nv_request_irq(struct net_device *dev, int intr_test)
3301 struct fe_priv *np = get_nvpriv(dev);
3302 u8 __iomem *base = get_hwbase(dev);
3305 irqreturn_t (*handler)(int foo, void *data);
3308 handler = nv_nic_irq_test;
3310 if (np->desc_ver == DESC_VER_3)
3311 handler = nv_nic_irq_optimized;
3312 else
3313 handler = nv_nic_irq;
3316 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3317 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3318 np->msi_x_entry[i].entry = i;
3320 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
3321 np->msi_flags |= NV_MSI_X_ENABLED;
3322 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3323 /* Request irq for rx handling */
3324 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
3325 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
3326 pci_disable_msix(np->pci_dev);
3327 np->msi_flags &= ~NV_MSI_X_ENABLED;
3330 /* Request irq for tx handling */
3331 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
3332 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
3333 pci_disable_msix(np->pci_dev);
3334 np->msi_flags &= ~NV_MSI_X_ENABLED;
3337 /* Request irq for link and timer handling */
3338 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
3339 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
3340 pci_disable_msix(np->pci_dev);
3341 np->msi_flags &= ~NV_MSI_X_ENABLED;
3344 /* map interrupts to their respective vector */
3345 writel(0, base + NvRegMSIXMap0);
3346 writel(0, base + NvRegMSIXMap1);
3347 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3348 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3349 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3351 /* Request irq for all interrupts */
3352 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
3353 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3354 pci_disable_msix(np->pci_dev);
3355 np->msi_flags &= ~NV_MSI_X_ENABLED;
3359 /* map interrupts to vector 0 */
3360 writel(0, base + NvRegMSIXMap0);
3361 writel(0, base + NvRegMSIXMap1);
3365 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3366 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
3367 np->msi_flags |= NV_MSI_ENABLED;
3368 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3369 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3370 pci_disable_msi(np->pci_dev);
3371 np->msi_flags &= ~NV_MSI_ENABLED;
3375 /* map interrupts to vector 0 */
3376 writel(0, base + NvRegMSIMap0);
3377 writel(0, base + NvRegMSIMap1);
3378 /* enable msi vector 0 */
3379 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3383 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
3390 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
3392 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
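3395 /* nv_free_irq: release whatever irq setup nv_request_irq established and
3396  * disable MSI/MSI-X again. */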
3397 static void nv_free_irq(struct net_device *dev)
3399 struct fe_priv *np = get_nvpriv(dev);
3402 if (np->msi_flags & NV_MSI_X_ENABLED) {
3403 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3404 free_irq(np->msi_x_entry[i].vector, dev);
3406 pci_disable_msix(np->pci_dev);
3407 np->msi_flags &= ~NV_MSI_X_ENABLED;
3409 free_irq(np->pci_dev->irq, dev);
3410 if (np->msi_flags & NV_MSI_ENABLED) {
3411 pci_disable_msi(np->pci_dev);
3412 np->msi_flags &= ~NV_MSI_ENABLED;
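3414 /* nv_do_nic_poll: timer callback that services the nic with its irqs
3415  * disabled; also reinits the rings after a recoverable MAC error. */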
3417 static void nv_do_nic_poll(unsigned long data)
3419 struct net_device *dev = (struct net_device *) data;
3420 struct fe_priv *np = netdev_priv(dev);
3421 u8 __iomem *base = get_hwbase(dev);
3425 * First disable the irq(s), then
3426 * re-enable interrupts on the nic; this has to happen before calling
3427 * nv_nic_irq because that handler may decide to do otherwise
3430 if (!using_multi_irqs(dev)) {
3431 if (np->msi_flags & NV_MSI_X_ENABLED)
3432 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3433 else
3434 disable_irq_lockdep(dev->irq);
3437 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3438 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3439 mask |= NVREG_IRQ_RX_ALL;
3441 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3442 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3443 mask |= NVREG_IRQ_TX_ALL;
3445 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3446 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3447 mask |= NVREG_IRQ_OTHER;
3450 np->nic_poll_irq = 0;
3452 if (np->recover_error) {
3453 np->recover_error = 0;
3454 printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
3455 if (netif_running(dev)) {
3456 netif_tx_lock_bh(dev);
3457 spin_lock(&np->lock);
3462 /* drain rx queue */
3463 nv_drain_rx(dev);
3464 nv_drain_tx(dev);
3465 /* reinit driver view of the rx queue */
3466 set_bufsize(dev);
3467 if (nv_init_ring(dev)) {
3468 if (!np->in_shutdown)
3469 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3471 /* reinit nic view of the rx queue */
3472 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3473 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3474 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3475 base + NvRegRingSizes);
3477 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3480 /* restart rx engine */
3481 nv_start_rx(dev);
3482 nv_start_tx(dev);
3483 spin_unlock(&np->lock);
3484 netif_tx_unlock_bh(dev);
3488 /* FIXME: Do we need synchronize_irq(dev->irq) here? */
3490 writel(mask, base + NvRegIrqMask);
3493 if (!using_multi_irqs(dev)) {
3495 if (np->msi_flags & NV_MSI_X_ENABLED)
3496 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3497 else
3498 enable_irq_lockdep(dev->irq);
3500 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3501 nv_nic_irq_rx(0, dev);
3502 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3504 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3505 nv_nic_irq_tx(0, dev);
3506 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3508 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3509 nv_nic_irq_other(0, dev);
3510 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3515 #ifdef CONFIG_NET_POLL_CONTROLLER
3516 static void nv_poll_controller(struct net_device *dev)
3518 nv_do_nic_poll((unsigned long) dev);
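3519 /* nv_do_stats_poll: periodically fold the hardware statistics counters
3520  * into np->estats; the counters appear to clear on read, hence the
3521  * accumulation. */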
3522 static void nv_do_stats_poll(unsigned long data)
3524 struct net_device *dev = (struct net_device *) data;
3525 struct fe_priv *np = netdev_priv(dev);
3526 u8 __iomem *base = get_hwbase(dev);
3528 np->estats.tx_bytes += readl(base + NvRegTxCnt);
3529 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
3530 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
3531 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
3532 np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
3533 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
3534 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
3535 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
3536 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
3537 np->estats.tx_deferral += readl(base + NvRegTxDef);
3538 np->estats.tx_packets += readl(base + NvRegTxFrame);
3539 np->estats.tx_pause += readl(base + NvRegTxPause);
3540 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
3541 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
3542 np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
3543 np->estats.rx_runt += readl(base + NvRegRxRunt);
3544 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
3545 np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
3546 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
3547 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
3548 np->estats.rx_length_error += readl(base + NvRegRxLenErr);
3549 np->estats.rx_unicast += readl(base + NvRegRxUnicast);
3550 np->estats.rx_multicast += readl(base + NvRegRxMulticast);
3551 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
3552 np->estats.rx_bytes += readl(base + NvRegRxCnt);
3553 np->estats.rx_pause += readl(base + NvRegRxPause);
3554 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
3555 np->estats.rx_packets =
3556 np->estats.rx_unicast +
3557 np->estats.rx_multicast +
3558 np->estats.rx_broadcast;
3559 np->estats.rx_errors_total =
3560 np->estats.rx_crc_errors +
3561 np->estats.rx_over_errors +
3562 np->estats.rx_frame_error +
3563 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
3564 np->estats.rx_late_collision +
3565 np->estats.rx_runt +
3566 np->estats.rx_frame_too_long;
3568 if (!np->in_shutdown)
3569 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
3572 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3574 struct fe_priv *np = netdev_priv(dev);
3575 strcpy(info->driver, "forcedeth");
3576 strcpy(info->version, FORCEDETH_VERSION);
3577 strcpy(info->bus_info, pci_name(np->pci_dev));
3580 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3582 struct fe_priv *np = netdev_priv(dev);
3583 wolinfo->supported = WAKE_MAGIC;
3585 spin_lock_irq(&np->lock);
3586 if (np->wolenabled)
3587 wolinfo->wolopts = WAKE_MAGIC;
3588 spin_unlock_irq(&np->lock);
3591 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3593 struct fe_priv *np = netdev_priv(dev);
3594 u8 __iomem *base = get_hwbase(dev);
3597 if (wolinfo->wolopts == 0) {
3598 np->wolenabled = 0;
3599 } else if (wolinfo->wolopts & WAKE_MAGIC) {
3600 np->wolenabled = 1;
3601 flags = NVREG_WAKEUPFLAGS_ENABLE;
3603 if (netif_running(dev)) {
3604 spin_lock_irq(&np->lock);
3605 writel(flags, base + NvRegWakeUpFlags);
3606 spin_unlock_irq(&np->lock);
3611 static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3613 struct fe_priv *np = netdev_priv(dev);
3616 spin_lock_irq(&np->lock);
3617 ecmd->port = PORT_MII;
3618 if (!netif_running(dev)) {
3619 /* We do not track link speed / duplex setting if the
3620 * interface is disabled. Force a link check */
3621 if (nv_update_linkspeed(dev)) {
3622 if (!netif_carrier_ok(dev))
3623 netif_carrier_on(dev);
3624 } else {
3625 if (netif_carrier_ok(dev))
3626 netif_carrier_off(dev);
3630 if (netif_carrier_ok(dev)) {
3631 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
3632 case NVREG_LINKSPEED_10:
3633 ecmd->speed = SPEED_10;
3635 case NVREG_LINKSPEED_100:
3636 ecmd->speed = SPEED_100;
3638 case NVREG_LINKSPEED_1000:
3639 ecmd->speed = SPEED_1000;
3642 ecmd->duplex = DUPLEX_HALF;
3643 if (np->duplex)
3644 ecmd->duplex = DUPLEX_FULL;
3650 ecmd->autoneg = np->autoneg;
3652 ecmd->advertising = ADVERTISED_MII;
3653 if (np->autoneg) {
3654 ecmd->advertising |= ADVERTISED_Autoneg;
3655 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3656 if (adv & ADVERTISE_10HALF)
3657 ecmd->advertising |= ADVERTISED_10baseT_Half;
3658 if (adv & ADVERTISE_10FULL)
3659 ecmd->advertising |= ADVERTISED_10baseT_Full;
3660 if (adv & ADVERTISE_100HALF)
3661 ecmd->advertising |= ADVERTISED_100baseT_Half;
3662 if (adv & ADVERTISE_100FULL)
3663 ecmd->advertising |= ADVERTISED_100baseT_Full;
3664 if (np->gigabit == PHY_GIGABIT) {
3665 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3666 if (adv & ADVERTISE_1000FULL)
3667 ecmd->advertising |= ADVERTISED_1000baseT_Full;
3670 ecmd->supported = (SUPPORTED_Autoneg |
3671 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
3672 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
3674 if (np->gigabit == PHY_GIGABIT)
3675 ecmd->supported |= SUPPORTED_1000baseT_Full;
3677 ecmd->phy_address = np->phyaddr;
3678 ecmd->transceiver = XCVR_EXTERNAL;
3680 /* ignore maxtxpkt, maxrxpkt for now */
3681 spin_unlock_irq(&np->lock);
3685 static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3687 struct fe_priv *np = netdev_priv(dev);
3689 if (ecmd->port != PORT_MII)
3690 return -EINVAL;
3691 if (ecmd->transceiver != XCVR_EXTERNAL)
3692 return -EINVAL;
3693 if (ecmd->phy_address != np->phyaddr) {
3694 /* TODO: support switching between multiple phys. Should be
3695 * trivial, but not enabled due to lack of test hardware. */
3698 if (ecmd->autoneg == AUTONEG_ENABLE) {
3701 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3702 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
3703 if (np->gigabit == PHY_GIGABIT)
3704 mask |= ADVERTISED_1000baseT_Full;
3706 if ((ecmd->advertising & mask) == 0)
3707 return -EINVAL;
3709 } else if (ecmd->autoneg == AUTONEG_DISABLE) {
3710 /* Note: autonegotiation disable, speed 1000 intentionally
3711 * forbidden - no one should need that. */
3713 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
3714 return -EINVAL;
3715 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
3716 return -EINVAL;
3721 netif_carrier_off(dev);
3722 if (netif_running(dev)) {
3723 nv_disable_irq(dev);
3724 netif_tx_lock_bh(dev);
3725 spin_lock(&np->lock);
3729 spin_unlock(&np->lock);
3730 netif_tx_unlock_bh(dev);
3733 if (ecmd->autoneg == AUTONEG_ENABLE) {
3738 /* advertise only what has been requested */
3739 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3740 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3741 if (ecmd->advertising & ADVERTISED_10baseT_Half)
3742 adv |= ADVERTISE_10HALF;
3743 if (ecmd->advertising & ADVERTISED_10baseT_Full)
3744 adv |= ADVERTISE_10FULL;
3745 if (ecmd->advertising & ADVERTISED_100baseT_Half)
3746 adv |= ADVERTISE_100HALF;
3747 if (ecmd->advertising & ADVERTISED_100baseT_Full)
3748 adv |= ADVERTISE_100FULL;
3749 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
3750 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3751 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3752 adv |= ADVERTISE_PAUSE_ASYM;
3753 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
3755 if (np->gigabit == PHY_GIGABIT) {
3756 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3757 adv &= ~ADVERTISE_1000FULL;
3758 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
3759 adv |= ADVERTISE_1000FULL;
3760 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
3763 if (netif_running(dev))
3764 printk(KERN_INFO "%s: link down.\n", dev->name);
3765 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3766 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
3767 bmcr |= BMCR_ANENABLE;
3768 /* reset the phy in order for settings to stick,
3769 * and cause autoneg to start */
3770 if (phy_reset(dev, bmcr)) {
3771 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
3775 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
3776 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3783 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3784 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3785 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
3786 adv |= ADVERTISE_10HALF;
3787 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
3788 adv |= ADVERTISE_10FULL;
3789 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
3790 adv |= ADVERTISE_100HALF;
3791 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
3792 adv |= ADVERTISE_100FULL;
3793 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
3794 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
3795 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3796 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3798 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
3799 adv |= ADVERTISE_PAUSE_ASYM;
3800 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3802 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
3803 np->fixed_mode = adv;
3805 if (np->gigabit == PHY_GIGABIT) {
3806 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3807 adv &= ~ADVERTISE_1000FULL;
3808 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
3811 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3812 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
3813 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
3814 bmcr |= BMCR_FULLDPLX;
3815 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
3816 bmcr |= BMCR_SPEED100;
3817 if (np->phy_oui == PHY_OUI_MARVELL) {
3818 /* reset the phy in order for forced mode settings to stick */
3819 if (phy_reset(dev, bmcr)) {
3820 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
3824 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3825 if (netif_running(dev)) {
3826 /* Wait a bit and then reconfigure the nic. */
3833 if (netif_running(dev)) {
3842 #define FORCEDETH_REGS_VER 1
3844 static int nv_get_regs_len(struct net_device *dev)
3846 struct fe_priv *np = netdev_priv(dev);
3847 return np->register_size;
3850 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
3852 struct fe_priv *np = netdev_priv(dev);
3853 u8 __iomem *base = get_hwbase(dev);
3857 regs->version = FORCEDETH_REGS_VER;
3858 spin_lock_irq(&np->lock);
3859 for (i = 0; i < np->register_size/sizeof(u32); i++)
3860 rbuf[i] = readl(base + i*sizeof(u32));
3861 spin_unlock_irq(&np->lock);
3864 static int nv_nway_reset(struct net_device *dev)
3866 struct fe_priv *np = netdev_priv(dev);
3872 netif_carrier_off(dev);
3873 if (netif_running(dev)) {
3874 nv_disable_irq(dev);
3875 netif_tx_lock_bh(dev);
3876 spin_lock(&np->lock);
3880 spin_unlock(&np->lock);
3881 netif_tx_unlock_bh(dev);
3882 printk(KERN_INFO "%s: link down.\n", dev->name);
3885 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3886 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
3887 bmcr |= BMCR_ANENABLE;
3888 /* reset the phy in order for settings to stick */
3889 if (phy_reset(dev, bmcr)) {
3890 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
3894 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
3895 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3898 if (netif_running(dev)) {
3911 static int nv_set_tso(struct net_device *dev, u32 value)
3913 struct fe_priv *np = netdev_priv(dev);
3915 if ((np->driver_data & DEV_HAS_CHECKSUM))
3916 return ethtool_op_set_tso(dev, value);
3917 else
3918 return value ? -EOPNOTSUPP : 0;
3921 static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
3923 struct fe_priv *np = netdev_priv(dev);
3925 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
3926 ring->rx_mini_max_pending = 0;
3927 ring->rx_jumbo_max_pending = 0;
3928 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
3930 ring->rx_pending = np->rx_ring_size;
3931 ring->rx_mini_pending = 0;
3932 ring->rx_jumbo_pending = 0;
3933 ring->tx_pending = np->tx_ring_size;
3936 static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
3938 struct fe_priv *np = netdev_priv(dev);
3939 u8 __iomem *base = get_hwbase(dev);
3940 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
3941 dma_addr_t ring_addr;
3943 if (ring->rx_pending < RX_RING_MIN ||
3944 ring->tx_pending < TX_RING_MIN ||
3945 ring->rx_mini_pending != 0 ||
3946 ring->rx_jumbo_pending != 0 ||
3947 (np->desc_ver == DESC_VER_1 &&
3948 (ring->rx_pending > RING_MAX_DESC_VER_1 ||
3949 ring->tx_pending > RING_MAX_DESC_VER_1)) ||
3950 (np->desc_ver != DESC_VER_1 &&
3951 (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
3952 ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
3953 return -EINVAL;
3956 /* allocate new rings */
3957 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3958 rxtx_ring = pci_alloc_consistent(np->pci_dev,
3959 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
3962 rxtx_ring = pci_alloc_consistent(np->pci_dev,
3963 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
3966 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
3967 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
3968 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
3969 /* fall back to old rings */
3970 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3972 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
3973 rxtx_ring, ring_addr);
3976 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
3977 rxtx_ring, ring_addr);
3986 if (netif_running(dev)) {
3987 nv_disable_irq(dev);
3988 netif_tx_lock_bh(dev);
3989 spin_lock(&np->lock);
4001 /* set new values */
4002 np->rx_ring_size = ring->rx_pending;
4003 np->tx_ring_size = ring->tx_pending;
4004 np->tx_limit_stop = TX_LIMIT_DIFFERENCE;
4005 np->tx_limit_start = TX_LIMIT_DIFFERENCE;
4006 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4007 np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
4008 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4010 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
4011 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4013 np->rx_skb = (struct nv_skb_map*)rx_skbuff;
4014 np->tx_skb = (struct nv_skb_map*)tx_skbuff;
4015 np->ring_addr = ring_addr;
4017 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4018 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4020 if (netif_running(dev)) {
4021 /* reinit driver view of the queues */
4023 if (nv_init_ring(dev)) {
4024 if (!np->in_shutdown)
4025 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4028 /* reinit nic view of the queues */
4029 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4030 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4031 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4032 base + NvRegRingSizes);
4034 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4037 /* restart engines */
4038 nv_start_rx(dev);
4039 nv_start_tx(dev);
4040 spin_unlock(&np->lock);
4041 netif_tx_unlock_bh(dev);
4049 static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4051 struct fe_priv *np = netdev_priv(dev);
4053 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4054 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4055 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4058 static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4060 struct fe_priv *np = netdev_priv(dev);
4063 if ((!np->autoneg && np->duplex == 0) ||
4064 (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4065 printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n",
4069 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4070 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
4074 netif_carrier_off(dev);
4075 if (netif_running(dev)) {
4076 nv_disable_irq(dev);
4077 netif_tx_lock_bh(dev);
4078 spin_lock(&np->lock);
4082 spin_unlock(&np->lock);
4083 netif_tx_unlock_bh(dev);
4086 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4087 if (pause->rx_pause)
4088 np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4089 if (pause->tx_pause)
4090 np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4092 if (np->autoneg && pause->autoneg) {
4093 np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4095 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4096 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4097 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4098 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4099 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4100 adv |= ADVERTISE_PAUSE_ASYM;
4101 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4103 if (netif_running(dev))
4104 printk(KERN_INFO "%s: link down.\n", dev->name);
4105 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4106 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4107 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (pause->rx_pause)
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		if (pause->tx_pause)
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;

		if (!netif_running(dev))
			nv_update_linkspeed(dev);
		else
			nv_update_pause(dev, np->pause_flags);
	}

	if (netif_running(dev)) {
		nv_start_rx(dev);
		nv_start_tx(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
static u32 nv_get_rx_csum(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return (np->rx_csum) != 0;
}
static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int retcode = 0;

	if (np->driver_data & DEV_HAS_CHECKSUM) {
		if (data) {
			np->rx_csum = 1;
			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		} else {
			np->rx_csum = 0;
			/* vlan is dependent on rx checksum offload */
			if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
				np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
		}

		if (netif_running(dev)) {
			spin_lock_irq(&np->lock);
			writel(np->txrxctl_bits, base + NvRegTxRxControl);
			spin_unlock_irq(&np->lock);
		}
	} else {
		return -EINVAL;
	}

	return retcode;
}
static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_tx_hw_csum(dev, data);
	else
		return -EOPNOTSUPP;
}
static int nv_set_sg(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_sg(dev, data);
	else
		return -EOPNOTSUPP;
}
static int nv_get_stats_count(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_STATISTICS)
		return sizeof(struct nv_ethtool_stats)/sizeof(u64);
	else
		return 0;
}
static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);

	/* refresh the hardware counters before copying them out */
	nv_do_stats_poll((unsigned long)dev);

	memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64));
}
static int nv_self_test_count(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_TEST_EXTENDED)
		return NV_TEST_COUNT_EXTENDED;
	else
		return NV_TEST_COUNT_BASE;
}
static int nv_link_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int mii_status;

	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	/* check phy link status; BMSR latches link-down events, so it is read
	 * twice to obtain the current status rather than a stale failure */
	if (!(mii_status & BMSR_LSTATUS))
		return 0;
	else
		return 1;
}
static int nv_register_test(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i = 0;
	u32 orig_read, new_read;
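	/* Walk the nv_registers_test table: XOR each register with its mask of
	 * writable bits, read the value back to verify the toggled bits
	 * latched, then XOR again to restore the original contents. The table
	 * is terminated by an entry whose reg field is 0.
	 */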
	do {
		orig_read = readl(base + nv_registers_test[i].reg);

		/* xor with mask to toggle bits */
		orig_read ^= nv_registers_test[i].mask;

		writel(orig_read, base + nv_registers_test[i].reg);

		new_read = readl(base + nv_registers_test[i].reg);

		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
			return 0;

		/* restore original value */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);

	} while (nv_registers_test[++i].reg != 0);

	return 1;
}
static int nv_interrupt_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int testcnt;
	u32 save_msi_flags, save_poll_interval = 0;
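	/* Strategy: drop to a single interrupt vector, program the chip's
	 * periodic timer interrupt as a self-generated irq source, and wait
	 * for the handler to observe it; the ISR records the event in
	 * np->intr_test, which is checked below under the lock.
	 */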
	if (netif_running(dev)) {
		/* free current irq */
		nv_free_irq(dev);
		save_poll_interval = readl(base+NvRegPollingInterval);
	}

	/* flag to test interrupt handler */
	np->intr_test = 0;

	/* setup test irq */
	save_msi_flags = np->msi_flags;
	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
	np->msi_flags |= 0x001; /* setup 1 vector */
	if (nv_request_irq(dev, 1))
		return 0;

	/* setup timer interrupt */
	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	/* wait for at least one interrupt */
	msleep(100);

	spin_lock_irq(&np->lock);

	/* flag should be set within ISR */
	testcnt = np->intr_test;
	if (!testcnt)
		ret = 2;

	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	if (!(np->msi_flags & NV_MSI_X_ENABLED))
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	else
		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);
	np->msi_flags = save_msi_flags;

	if (netif_running(dev)) {
		writel(save_poll_interval, base + NvRegPollingInterval);
		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
		/* restore original irq */
		if (nv_request_irq(dev, 0))
			return 0;
	}

	return ret;
}
static int nv_loopback_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	struct sk_buff *tx_skb, *rx_skb;
	dma_addr_t test_dma_addr;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	u32 flags;
	int len, i, pkt_len;
	u8 *pkt_data;
	u32 filter_flags = 0;
	u32 misc1_flags = 0;
	int ret = 1;
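	/* Test flow: force full duplex and internal loopback through
	 * NvRegMisc1/NvRegPacketFilterFlags, transmit a single frame filled
	 * with a known byte pattern, then verify the frame reappears
	 * unmodified in the first rx descriptor.
	 */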
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		filter_flags = readl(base + NvRegPacketFilterFlags);
		misc1_flags = readl(base + NvRegMisc1);
	}
	/* reinit driver view of the rx queue */
	nv_init_ring(dev);

	/* setup hardware for loopback */
	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

	/* reinit nic view of the rx queue */
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
	       base + NvRegRingSizes);
	pci_push(base);
	/* restart rx engine */
	nv_start_rx(dev);
	nv_start_tx(dev);

	/* setup packet for tx */
	pkt_len = ETH_DATA_LEN;
	tx_skb = dev_alloc_skb(pkt_len);
	if (!tx_skb) {
		printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
		       " of %s\n", dev->name);
		ret = 0;
		goto out;
	}
	pkt_data = skb_put(tx_skb, pkt_len);
	for (i = 0; i < pkt_len; i++)
		pkt_data[i] = (u8)(i & 0xff);
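	/* Map the whole skb data area so the MAC can read the frame straight
	 * out of this buffer; a tx-only buffer like this one needs the
	 * to-device direction so the CPU caches are flushed toward the device.
	 */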
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       tx_skb->end-tx_skb->data, PCI_DMA_TODEVICE);
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	} else {
		/* split the dma address into the two le32 halves the extended
		 * descriptor expects; converting each half separately keeps
		 * this correct on big-endian hosts as well */
		np->tx_ring.ex[0].bufhigh = cpu_to_le32((u32)((u64)test_dma_addr >> 32));
		np->tx_ring.ex[0].buflow = cpu_to_le32((u32)(test_dma_addr & 0xffffffff));
		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	}

	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));

	msleep(500);
	/* check for rx of the packet */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
	} else {
		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
	}
	if (flags & NV_RX_AVAIL) {
		/* descriptor still owned by hardware: nothing was received */
		ret = 0;
	} else if (np->desc_ver == DESC_VER_1) {
		if (flags & NV_RX_ERROR)
			ret = 0;
	} else {
		if (flags & NV_RX2_ERROR) {
			ret = 0;
		}
	}
	if (ret) {
		if (len != pkt_len) {
			ret = 0;
			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
				dev->name, len, pkt_len);
		} else {
			rx_skb = np->rx_skb[0].skb;
			for (i = 0; i < pkt_len; i++) {
				if (rx_skb->data[i] != (u8)(i & 0xff)) {
					ret = 0;
					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
						dev->name, i);
					break;
				}
			}
		}
	} else {
		dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
	}
	pci_unmap_single(np->pci_dev, test_dma_addr,
			 tx_skb->end-tx_skb->data,
			 PCI_DMA_TODEVICE);
	dev_kfree_skb_any(tx_skb);
 out:
	/* stop engines */
	nv_stop_rx(dev);
	nv_stop_tx(dev);
	/* drain rx queue */
	nv_drain_rx(dev);
	nv_drain_tx(dev);
	if (netif_running(dev)) {
		writel(misc1_flags, base + NvRegMisc1);
		writel(filter_flags, base + NvRegPacketFilterFlags);
		nv_enable_irq(dev);
	}

	return ret;
}
static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int result;

	memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));

	if (!nv_link_test(dev)) {
		test->flags |= ETH_TEST_FL_FAILED;
		buffer[0] = 1;
	}
	if (test->flags & ETH_TEST_FL_OFFLINE) {
		if (netif_running(dev)) {
			netif_stop_queue(dev);
			netif_poll_disable(dev);
			netif_tx_lock_bh(dev);
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			} else {
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
			}
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			/* drain rx queue */
			nv_drain_rx(dev);
			nv_drain_tx(dev);
			spin_unlock_irq(&np->lock);
			netif_tx_unlock_bh(dev);
		}
		if (!nv_register_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[1] = 1;
		}

		result = nv_interrupt_test(dev);
		if (result != 1) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[2] = 1;
		}
		if (result == 0) {
			/* bail out */
			return;
		}

		if (!nv_loopback_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[3] = 1;
		}
		if (netif_running(dev)) {
			/* reinit driver view of the rx queue */
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			       base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* restart rx engine */
			nv_start_rx(dev);
			nv_start_tx(dev);
			netif_start_queue(dev);
			netif_poll_enable(dev);
			nv_enable_hw_interrupts(dev, np->irqmask);
		}
	}
}
static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
		break;
	case ETH_SS_TEST:
		memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
		break;
	}
}
static const struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.get_perm_addr = ethtool_op_get_perm_addr,
	.get_tso = ethtool_op_get_tso,
	.set_tso = nv_set_tso,
	.get_ringparam = nv_get_ringparam,
	.set_ringparam = nv_set_ringparam,
	.get_pauseparam = nv_get_pauseparam,
	.set_pauseparam = nv_set_pauseparam,
	.get_rx_csum = nv_get_rx_csum,
	.set_rx_csum = nv_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = nv_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = nv_set_sg,
	.get_strings = nv_get_strings,
	.get_stats_count = nv_get_stats_count,
	.get_ethtool_stats = nv_get_ethtool_stats,
	.self_test_count = nv_self_test_count,
	.self_test = nv_self_test,
};
static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);

	/* save vlan group */
	np->vlangrp = grp;

	if (grp) {
		/* enable vlan on MAC */
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
	} else {
		/* disable vlan on MAC */
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
	}

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
}
static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	/* nothing to do */
}
/* The mgmt unit and driver use a semaphore to access the phy during init */
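/* Acquisition protocol as implemented below: poll up to ten times for the
 * mgmt semaphore to become free, then try (twice) to set the host semaphore
 * bit and read back to confirm both that it stuck and that the mgmt unit did
 * not grab the phy in the meantime.
 */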
static int nv_mgmt_acquire_sema(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 tx_ctrl, mgmt_sema;

	for (i = 0; i < 10; i++) {
		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
			break;
		msleep(500);
	}

	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
		return 0;
	for (i = 0; i < 2; i++) {
		tx_ctrl = readl(base + NvRegTransmitterControl);
		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
		writel(tx_ctrl, base + NvRegTransmitterControl);

		/* verify that semaphore was acquired */
		tx_ctrl = readl(base + NvRegTransmitterControl);
		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE))
			return 1;
		else
			udelay(50);
	}

	return 0;
}
static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int oom, i;

	dprintk(KERN_DEBUG "nv_open: begin\n");
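	/* Bring-up order matters here: wipe any stale configuration first,
	 * then set up the descriptor rings, program the MAC and ring
	 * registers, power up the PHY, request the irq, and only then perform
	 * one manual link speed update before link changes are handed over to
	 * the irq handler.
	 */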
	/* erase previous misconfiguration */
	if (np->driver_data & DEV_HAS_POWER_CNTRL)
		nv_mac_reset(dev);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(0, base + NvRegMulticastMaskA);
	writel(0, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
	/* initialize descriptor rings */
	set_bufsize(dev);
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);

	np->in_shutdown = 0;

	/* give hw rings */
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
	       base + NvRegRingSizes);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	if (np->desc_ver == DESC_VER_1)
		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
	else
		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
	writel(np->txrxctl_bits, base + NvRegTxRxControl);
	writel(np->vlanctl_bits, base + NvRegVlanControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
		  NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
		  KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
	writel(0, base + NvRegMIIMask);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);

	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);

	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
	get_random_bytes(&i, sizeof(i));
	writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
	if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	} else
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
	       base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
	i = readl(base + NvRegPowerState);
	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);
	if (nv_request_irq(dev, 0)) {
		goto out_drain;
	}

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);
	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(0, base + NvRegMulticastMaskA);
	writel(0, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rx(dev);
	nv_start_tx(dev);
	netif_start_queue(dev);
	netif_poll_enable(dev);
	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
	/* start statistics timer */
	if (np->driver_data & DEV_HAS_STATISTICS)
		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);

	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	drain_ring(dev);
	return ret;
}
static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
	netif_poll_disable(dev);
	synchronize_irq(dev->irq);
	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_tx(dev);
	nv_stop_rx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	drain_ring(dev);
	if (np->wolenabled)
		nv_start_rx(dev);

	/* FIXME: power down nic */

	return 0;
}
static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate, txreg;
	u32 phystate_orig = 0, phystate;
	int phyinitialized = 0;

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pci_dev->dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;
	np->oom_kick.function = &nv_do_rx_refill;	/* timer handler */
	init_timer(&np->nic_poll);
	np->nic_poll.data = (unsigned long) dev;
	np->nic_poll.function = &nv_do_nic_poll;	/* timer handler */
	init_timer(&np->stats_poll);
	np->stats_poll.data = (unsigned long) dev;
	np->stats_poll.function = &nv_do_stats_poll;	/* timer handler */
	err = pci_enable_device(pci_dev);
	if (err) {
		printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n",
		       err, pci_name(pci_dev));
		goto out_free;
	}

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;
	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS))
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;

	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
			pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
			pci_resource_len(pci_dev, i),
			pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
		    pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
		       pci_name(pci_dev));
		goto out_relreg;
	}
	/* copy of driver data */
	np->driver_data = id->driver_data;
	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (dma_64bit) {
			if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
				printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
				       pci_name(pci_dev));
			} else {
				dev->features |= NETIF_F_HIGHDMA;
				printk(KERN_INFO "forcedeth: using HIGHDMA\n");
			}
			if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
				printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n",
				       pci_name(pci_dev));
			}
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}
	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;
	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->rx_csum = 1;
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		dev->features |= NETIF_F_TSO;
	}
	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
		dev->vlan_rx_register = nv_vlan_rx_register;
		dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
	}
	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi) {
		np->msi_flags |= NV_MSI_CAPABLE;
	}
	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		np->msi_flags |= NV_MSI_X_CAPABLE;
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}
	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;
	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;
	np->tx_limit_stop = TX_LIMIT_DIFFERENCE;
	np->tx_limit_start = TX_LIMIT_DIFFERENCE;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL);
	np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL);
	if (!np->rx_skb || !np->tx_skb)
		goto out_freering;
	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
	dev->open = nv_open;
	dev->stop = nv_close;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		dev->hard_start_xmit = nv_start_xmit;
	else
		dev->hard_start_xmit = nv_start_xmit_optimized;
	dev->get_stats = nv_get_stats;
	dev->change_mtu = nv_change_mtu;
	dev->set_mac_address = nv_set_mac_address;
	dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = nv_poll_controller;
#endif
#ifdef CONFIG_FORCEDETH_NAPI
	dev->poll = nv_napi_poll;
#endif
	SET_ETHTOOL_OPS(dev, &ops);
	dev->tx_timeout = nv_tx_timeout;
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
	pci_set_drvdata(pci_dev, dev);

	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);
	/* check the workaround bit for correct mac address order */
	txreg = readl(base + NvRegTransmitPoll);
	if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
	} else {
		/* need to reverse mac address to correct order */
		dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
		/* set permanent address to be correct as well */
		np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
		np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	}
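	/* Setting NVREG_TRANSMITPOLL_MAC_ADDR_REV above records that the
	 * address has been normalized, so a later read of the register (the
	 * check at the top of this block, and nv_open, which preserves the
	 * bit) takes the already-in-correct-order path.
	 */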
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
		       pci_name(pci_dev),
		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
		printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x6c;
		get_random_bytes(&dev->dev_addr[3], 3);
	}
	dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
		dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

	/* set mac address */
	nv_copy_mac_to_hw(dev);
	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;
	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
		u8 revision_id;
		pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id);

		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
		    revision_id >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}
	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}
	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	} else {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	}
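	/* The low bits of msi_flags encode the requested MSI-X vector count
	 * (masked with NV_MSI_X_VECTORS_MASK by the irq setup code):
	 * presumably three vectors in throughput mode so rx, tx and other
	 * events can be split, and a single shared vector in CPU (timer)
	 * mode.
	 */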
	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}
	/* clear phy state and temporarily halt phy interrupts */
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
		/* management unit running on the mac? */
		if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
			np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
			for (i = 0; i < 5000; i++) {
				msleep(1);
				if (nv_mgmt_acquire_sema(dev)) {
					/* management unit setup the phy already? */
					if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
					    NVREG_XMITCTL_SYNC_PHY_INIT) {
						/* phy is initialized by mgmt unit */
						phyinitialized = 1;
						dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
					} else {
						/* we need to init the phy */
					}
					break;
				}
			}
		}
	}
	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;
		break;
	}
	if (i == 33) {
		printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
		       pci_name(pci_dev));
		goto out_error;
	}
	if (!phyinitialized) {
		/* reset it */
		phy_init(dev);
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
		if (mii_status & PHY_GIGABIT) {
			np->gigabit = PHY_GIGABIT;
		}
	}
	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;
	err = register_netdev(dev);
	if (err) {
		printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
		goto out_error;
	}
	printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
	       dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
	       pci_name(pci_dev));

	return 0;
out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	unregister_netdev(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);
	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}
#ifdef CONFIG_PM
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (!netif_running(dev))
		goto out;

	netif_device_detach(dev);

	nv_close(dev);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
out:
	return 0;
}
static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	int rc = 0;

	if (!netif_running(dev))
		goto out;

	netif_device_attach(dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	rc = nv_open(dev);
out:
	return rc;
}
#else
#define nv_suspend NULL
#define nv_resume NULL
#endif /* CONFIG_PM */
static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{0,},
};
static struct pci_driver driver = {
	.name = "forcedeth",
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = __devexit_p(nv_remove),
	.suspend = nv_suspend,
	.resume = nv_resume,
};
static int __init init_nic(void)
{
	printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval at which the timer interrupt is generated; the register value is (time_in_microseconds * 100) / 2^10. Min is 0 and max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
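/* Worked example for poll_interval (illustrative values): a timer interrupt
 * roughly every 1 ms corresponds to poll_interval = 1000 * 100 / 2^10, i.e.
 * about 98; the in-driver defaults used when poll_interval is left at -1 are
 * chosen with the same formula.
 */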
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);