2 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
4 * Note: This driver is a cleanroom reimplementation based on reverse
5 * engineered documentation written by Carl-Daniel Hailfinger
6 * and Andrew de Quincey. It's neither supported nor endorsed
7 * by NVIDIA Corp. Use at your own risk.
9 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
10 * trademarks of NVIDIA Corporation in the United States and other
13 * Copyright (C) 2003,4,5 Manfred Spraul
14 * Copyright (C) 2004 Andrew de Quincey (wol support)
15 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
16 * IRQ rate fixes, bigendian fixes, cleanups, verification)
17 * Copyright (c) 2004 NVIDIA Corporation
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
34 * 0.01: 05 Oct 2003: First release that compiles without warnings.
35 * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
36 * Check all PCI BARs for the register window.
37 * udelay added to mii_rw.
38 * 0.03: 06 Oct 2003: Initialize dev->irq.
39 * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
40 * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
41 * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
43 * 0.07: 14 Oct 2003: Further irq mask updates.
44 * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
45 * added into irq handler, NULL check for drain_ring.
46 * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
47 * requested interrupt sources.
48 * 0.10: 20 Oct 2003: First cleanup for release.
49 * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
50 * MAC Address init fix, set_multicast cleanup.
51 * 0.12: 23 Oct 2003: Cleanups for release.
52 * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
53 * Set link speed correctly. start rx before starting
54 * tx (nv_start_rx sets the link speed).
55 * 0.14: 25 Oct 2003: Nic dependent irq mask.
56 * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
58 * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
59 * increased to 1628 bytes.
60 * 0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
62 * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
63 * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
64 * addresses, really stop rx if already running
65 * in nv_start_rx, clean up a bit.
66 * 0.20: 07 Dec 2003: alloc fixes
67 * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
68 * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
70 * 0.23: 26 Jan 2004: various small cleanups
71 * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces
72 * 0.25: 09 Mar 2004: wol support
73 * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
74 * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
75 * added CK804/MCP04 device IDs, code fixes
76 * for registers, link status and other minor fixes.
77 * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
78 * 0.29: 31 Aug 2004: Add backup timer for link change notification.
79 * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
80 * into nv_close, otherwise reenabling for wol can
81 * cause DMA to kfree'd memory.
82 * 0.31: 14 Nov 2004: ethtool support for getting/setting link
84 * 0.32: 16 Apr 2005: RX_ERROR4 handling added.
85 * 0.33: 16 May 2005: Support for MCP51 added.
86 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
87 * 0.35: 26 Jun 2005: Support for MCP55 added.
88 * 0.36: 28 Jun 2005: Add jumbo frame support.
89 * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
90 * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
92 * 0.39: 18 Jul 2005: Add 64bit descriptor support.
93 * 0.40: 19 Jul 2005: Add support for mac address change.
94 * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
96 * 0.42: 06 Aug 2005: Fix lack of link speed initialization
97 * in the second (and later) nv_open call
98 * 0.43: 10 Aug 2005: Add support for tx checksum.
99 * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
100 * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
101 * 0.46: 20 Oct 2005: Add irq optimization modes.
102 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
103 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
104 * 0.49: 10 Dec 2005: Fix tso for large buffers.
105 * 0.50: 20 Jan 2006: Add 8021pq tagging support.
106 * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
107 * 0.52: 20 Jan 2006: Add MSI/MSIX support.
108 * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
109 * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
110 * 0.55: 22 Mar 2006: Add flow control (pause frame).
113 * We suspect that on some hardware no TX done interrupts are generated.
114 * This means recovery from netif_stop_queue only happens if the hw timer
115 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
116 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
117 * If your hardware reliably generates tx done interrupts, then you can remove
118 * DEV_NEED_TIMERIRQ from the driver_data flags.
119 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
120 * superfluous timer interrupts from the nic.
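 *
 * Back-of-the-envelope bound: at the default rate of 100 timer interrupts
 * per second, a lost tx done interrupt therefore stalls a stopped queue
 * for at most roughly 10 ms before the timer irq recovers it.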
122 #define FORCEDETH_VERSION "0.55"
123 #define DRV_NAME "forcedeth"
125 #include <linux/module.h>
126 #include <linux/types.h>
127 #include <linux/pci.h>
128 #include <linux/interrupt.h>
129 #include <linux/netdevice.h>
130 #include <linux/etherdevice.h>
131 #include <linux/delay.h>
132 #include <linux/spinlock.h>
133 #include <linux/ethtool.h>
134 #include <linux/timer.h>
135 #include <linux/skbuff.h>
136 #include <linux/mii.h>
137 #include <linux/random.h>
138 #include <linux/init.h>
139 #include <linux/if_vlan.h>
140 #include <linux/dma-mapping.h>
144 #include <asm/uaccess.h>
145 #include <asm/system.h>
148 #define dprintk printk
150 #define dprintk(x...) do { } while (0)
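/* Note: the do { } while (0) form lets a compiled-out dprintk(); behave as
 * a single statement, e.g. in an unbraced if/else branch. */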
158 #define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */
159 #define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */
160 #define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */
161 #define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
162 #define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */
163 #define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and striping */
164 #define DEV_HAS_MSI 0x0040 /* device supports MSI */
165 #define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
166 #define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */
167 #define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
168 #define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */
169 #define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */
172 NvRegIrqStatus = 0x000,
173 #define NVREG_IRQSTAT_MIIEVENT 0x040
174 #define NVREG_IRQSTAT_MASK 0x1ff
175 NvRegIrqMask = 0x004,
176 #define NVREG_IRQ_RX_ERROR 0x0001
177 #define NVREG_IRQ_RX 0x0002
178 #define NVREG_IRQ_RX_NOBUF 0x0004
179 #define NVREG_IRQ_TX_ERR 0x0008
180 #define NVREG_IRQ_TX_OK 0x0010
181 #define NVREG_IRQ_TIMER 0x0020
182 #define NVREG_IRQ_LINK 0x0040
183 #define NVREG_IRQ_RX_FORCED 0x0080
184 #define NVREG_IRQ_TX_FORCED 0x0100
185 #define NVREG_IRQMASK_THROUGHPUT 0x00df
186 #define NVREG_IRQMASK_CPU 0x0040
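/* Bit check: 0x00df is every rx/tx/link source above except
 * NVREG_IRQ_TIMER (0x0020) and NVREG_IRQ_TX_FORCED (0x0100), while 0x0040
 * is NVREG_IRQ_LINK alone - in cpu mode the per-packet work is driven by
 * the timer rather than by rx/tx interrupts (see the optimization modes
 * below). */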
187 #define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
188 #define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
189 #define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK)
191 #define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
192 NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
193 NVREG_IRQ_TX_FORCED))
195 NvRegUnknownSetupReg6 = 0x008,
196 #define NVREG_UNKSETUP6_VAL 3
199 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
200 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
202 NvRegPollingInterval = 0x00c,
203 #define NVREG_POLL_DEFAULT_THROUGHPUT 970
204 #define NVREG_POLL_DEFAULT_CPU 13
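/* Scaling the 97 == 1 ms example above: 970 is an interval of roughly
 * 10 ms (the ~100 Hz throughput-mode timer), 13 roughly 133 us. Both agree
 * with the [(time_in_micro_secs * 100) / (2^10)] formula given at the
 * poll_interval module parameter further down. */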
205 NvRegMSIMap0 = 0x020,
206 NvRegMSIMap1 = 0x024,
207 NvRegMSIIrqMask = 0x030,
208 #define NVREG_MSI_VECTOR_0_ENABLED 0x01
210 #define NVREG_MISC1_PAUSE_TX 0x01
211 #define NVREG_MISC1_HD 0x02
212 #define NVREG_MISC1_FORCE 0x3b0f3c
214 NvRegMacReset = 0x3c,
215 #define NVREG_MAC_RESET_ASSERT 0x0F3
216 NvRegTransmitterControl = 0x084,
217 #define NVREG_XMITCTL_START 0x01
218 NvRegTransmitterStatus = 0x088,
219 #define NVREG_XMITSTAT_BUSY 0x01
221 NvRegPacketFilterFlags = 0x8c,
222 #define NVREG_PFF_PAUSE_RX 0x08
223 #define NVREG_PFF_ALWAYS 0x7F0000
224 #define NVREG_PFF_PROMISC 0x80
225 #define NVREG_PFF_MYADDR 0x20
226 #define NVREG_PFF_LOOPBACK 0x10
228 NvRegOffloadConfig = 0x90,
229 #define NVREG_OFFLOAD_HOMEPHY 0x601
230 #define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
231 NvRegReceiverControl = 0x094,
232 #define NVREG_RCVCTL_START 0x01
233 NvRegReceiverStatus = 0x98,
234 #define NVREG_RCVSTAT_BUSY 0x01
236 NvRegRandomSeed = 0x9c,
237 #define NVREG_RNDSEED_MASK 0x00ff
238 #define NVREG_RNDSEED_FORCE 0x7f00
239 #define NVREG_RNDSEED_FORCE2 0x2d00
240 #define NVREG_RNDSEED_FORCE3 0x7400
242 NvRegUnknownSetupReg1 = 0xA0,
243 #define NVREG_UNKSETUP1_VAL 0x16070f
244 NvRegUnknownSetupReg2 = 0xA4,
245 #define NVREG_UNKSETUP2_VAL 0x16
246 NvRegMacAddrA = 0xA8,
247 NvRegMacAddrB = 0xAC,
248 NvRegMulticastAddrA = 0xB0,
249 #define NVREG_MCASTADDRA_FORCE 0x01
250 NvRegMulticastAddrB = 0xB4,
251 NvRegMulticastMaskA = 0xB8,
252 NvRegMulticastMaskB = 0xBC,
254 NvRegPhyInterface = 0xC0,
255 #define PHY_RGMII 0x10000000
257 NvRegTxRingPhysAddr = 0x100,
258 NvRegRxRingPhysAddr = 0x104,
259 NvRegRingSizes = 0x108,
260 #define NVREG_RINGSZ_TXSHIFT 0
261 #define NVREG_RINGSZ_RXSHIFT 16
262 NvRegUnknownTransmitterReg = 0x10c,
263 NvRegLinkSpeed = 0x110,
264 #define NVREG_LINKSPEED_FORCE 0x10000
265 #define NVREG_LINKSPEED_10 1000
266 #define NVREG_LINKSPEED_100 100
267 #define NVREG_LINKSPEED_1000 50
268 #define NVREG_LINKSPEED_MASK (0xFFF)
269 NvRegUnknownSetupReg5 = 0x130,
270 #define NVREG_UNKSETUP5_BIT31 (1<<31)
271 NvRegUnknownSetupReg3 = 0x13c,
272 #define NVREG_UNKSETUP3_VAL1 0x200010
273 NvRegTxRxControl = 0x144,
274 #define NVREG_TXRXCTL_KICK 0x0001
275 #define NVREG_TXRXCTL_BIT1 0x0002
276 #define NVREG_TXRXCTL_BIT2 0x0004
277 #define NVREG_TXRXCTL_IDLE 0x0008
278 #define NVREG_TXRXCTL_RESET 0x0010
279 #define NVREG_TXRXCTL_RXCHECK 0x0400
280 #define NVREG_TXRXCTL_DESC_1 0
281 #define NVREG_TXRXCTL_DESC_2 0x02100
282 #define NVREG_TXRXCTL_DESC_3 0x02200
283 #define NVREG_TXRXCTL_VLANSTRIP 0x00040
284 #define NVREG_TXRXCTL_VLANINS 0x00080
285 NvRegTxRingPhysAddrHigh = 0x148,
286 NvRegRxRingPhysAddrHigh = 0x14C,
287 NvRegTxPauseFrame = 0x170,
288 #define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080
289 #define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030
290 NvRegMIIStatus = 0x180,
291 #define NVREG_MIISTAT_ERROR 0x0001
292 #define NVREG_MIISTAT_LINKCHANGE 0x0008
293 #define NVREG_MIISTAT_MASK 0x000f
294 #define NVREG_MIISTAT_MASK2 0x000f
295 NvRegUnknownSetupReg4 = 0x184,
296 #define NVREG_UNKSETUP4_VAL 8
298 NvRegAdapterControl = 0x188,
299 #define NVREG_ADAPTCTL_START 0x02
300 #define NVREG_ADAPTCTL_LINKUP 0x04
301 #define NVREG_ADAPTCTL_PHYVALID 0x40000
302 #define NVREG_ADAPTCTL_RUNNING 0x100000
303 #define NVREG_ADAPTCTL_PHYSHIFT 24
304 NvRegMIISpeed = 0x18c,
305 #define NVREG_MIISPEED_BIT8 (1<<8)
306 #define NVREG_MIIDELAY 5
307 NvRegMIIControl = 0x190,
308 #define NVREG_MIICTL_INUSE 0x08000
309 #define NVREG_MIICTL_WRITE 0x00400
310 #define NVREG_MIICTL_ADDRSHIFT 5
311 NvRegMIIData = 0x194,
312 NvRegWakeUpFlags = 0x200,
313 #define NVREG_WAKEUPFLAGS_VAL 0x7770
314 #define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
315 #define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
316 #define NVREG_WAKEUPFLAGS_D3SHIFT 12
317 #define NVREG_WAKEUPFLAGS_D2SHIFT 8
318 #define NVREG_WAKEUPFLAGS_D1SHIFT 4
319 #define NVREG_WAKEUPFLAGS_D0SHIFT 0
320 #define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
321 #define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
322 #define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
323 #define NVREG_WAKEUPFLAGS_ENABLE 0x1111
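/* Worked example: 0x1111 is NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT (0x01)
 * replicated into each of the D0..D3 nibbles selected by the *_SHIFT
 * values above, i.e. magic packet wakeup enabled in every power state. */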
325 NvRegPatternCRC = 0x204,
326 NvRegPatternMask = 0x208,
327 NvRegPowerCap = 0x268,
328 #define NVREG_POWERCAP_D3SUPP (1<<30)
329 #define NVREG_POWERCAP_D2SUPP (1<<26)
330 #define NVREG_POWERCAP_D1SUPP (1<<25)
331 NvRegPowerState = 0x26c,
332 #define NVREG_POWERSTATE_POWEREDUP 0x8000
333 #define NVREG_POWERSTATE_VALID 0x0100
334 #define NVREG_POWERSTATE_MASK 0x0003
335 #define NVREG_POWERSTATE_D0 0x0000
336 #define NVREG_POWERSTATE_D1 0x0001
337 #define NVREG_POWERSTATE_D2 0x0002
338 #define NVREG_POWERSTATE_D3 0x0003
340 NvRegTxZeroReXmt = 0x284,
341 NvRegTxOneReXmt = 0x288,
342 NvRegTxManyReXmt = 0x28c,
343 NvRegTxLateCol = 0x290,
344 NvRegTxUnderflow = 0x294,
345 NvRegTxLossCarrier = 0x298,
346 NvRegTxExcessDef = 0x29c,
347 NvRegTxRetryErr = 0x2a0,
348 NvRegRxFrameErr = 0x2a4,
349 NvRegRxExtraByte = 0x2a8,
350 NvRegRxLateCol = 0x2ac,
352 NvRegRxFrameTooLong = 0x2b4,
353 NvRegRxOverflow = 0x2b8,
354 NvRegRxFCSErr = 0x2bc,
355 NvRegRxFrameAlignErr = 0x2c0,
356 NvRegRxLenErr = 0x2c4,
357 NvRegRxUnicast = 0x2c8,
358 NvRegRxMulticast = 0x2cc,
359 NvRegRxBroadcast = 0x2d0,
361 NvRegTxFrame = 0x2d8,
363 NvRegTxPause = 0x2e0,
364 NvRegRxPause = 0x2e4,
365 NvRegRxDropFrame = 0x2e8,
366 NvRegVlanControl = 0x300,
367 #define NVREG_VLANCONTROL_ENABLE 0x2000
368 NvRegMSIXMap0 = 0x3e0,
369 NvRegMSIXMap1 = 0x3e4,
370 NvRegMSIXIrqStatus = 0x3f0,
372 NvRegPowerState2 = 0x600,
373 #define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11
374 #define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
377 /* Big endian: should work, but is untested */
383 struct ring_desc_ex {
384 u32 PacketBufferHigh;
390 typedef union _ring_type {
391 struct ring_desc* orig;
392 struct ring_desc_ex* ex;
395 #define FLAG_MASK_V1 0xffff0000
396 #define FLAG_MASK_V2 0xffffc000
397 #define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
398 #define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
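/* Hence LEN_MASK_V1 is 0x0000ffff and LEN_MASK_V2 is 0x00003fff: v1
 * descriptors carry the buffer length in the low 16 bits of FlagLen,
 * v2/v3 descriptors in the low 14 bits, freeing two extra flag bits. */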
400 #define NV_TX_LASTPACKET (1<<16)
401 #define NV_TX_RETRYERROR (1<<19)
402 #define NV_TX_FORCED_INTERRUPT (1<<24)
403 #define NV_TX_DEFERRED (1<<26)
404 #define NV_TX_CARRIERLOST (1<<27)
405 #define NV_TX_LATECOLLISION (1<<28)
406 #define NV_TX_UNDERFLOW (1<<29)
407 #define NV_TX_ERROR (1<<30)
408 #define NV_TX_VALID (1<<31)
410 #define NV_TX2_LASTPACKET (1<<29)
411 #define NV_TX2_RETRYERROR (1<<18)
412 #define NV_TX2_FORCED_INTERRUPT (1<<30)
413 #define NV_TX2_DEFERRED (1<<25)
414 #define NV_TX2_CARRIERLOST (1<<26)
415 #define NV_TX2_LATECOLLISION (1<<27)
416 #define NV_TX2_UNDERFLOW (1<<28)
417 /* error and valid are the same for both */
418 #define NV_TX2_ERROR (1<<30)
419 #define NV_TX2_VALID (1<<31)
420 #define NV_TX2_TSO (1<<28)
421 #define NV_TX2_TSO_SHIFT 14
422 #define NV_TX2_TSO_MAX_SHIFT 14
423 #define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
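/* NV_TX2_TSO_MAX_SIZE is 16384 bytes: nv_start_xmit below splits every
 * linear area and fragment into chunks of at most this size, so e.g. a
 * 20000 byte buffer occupies two descriptors (16384 + 3616). */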
424 #define NV_TX2_CHECKSUM_L3 (1<<27)
425 #define NV_TX2_CHECKSUM_L4 (1<<26)
427 #define NV_TX3_VLAN_TAG_PRESENT (1<<18)
429 #define NV_RX_DESCRIPTORVALID (1<<16)
430 #define NV_RX_MISSEDFRAME (1<<17)
431 #define NV_RX_SUBSTRACT1 (1<<18)
432 #define NV_RX_ERROR1 (1<<23)
433 #define NV_RX_ERROR2 (1<<24)
434 #define NV_RX_ERROR3 (1<<25)
435 #define NV_RX_ERROR4 (1<<26)
436 #define NV_RX_CRCERR (1<<27)
437 #define NV_RX_OVERFLOW (1<<28)
438 #define NV_RX_FRAMINGERR (1<<29)
439 #define NV_RX_ERROR (1<<30)
440 #define NV_RX_AVAIL (1<<31)
442 #define NV_RX2_CHECKSUMMASK (0x1C000000)
443 #define NV_RX2_CHECKSUMOK1 (0x10000000)
444 #define NV_RX2_CHECKSUMOK2 (0x14000000)
445 #define NV_RX2_CHECKSUMOK3 (0x18000000)
446 #define NV_RX2_DESCRIPTORVALID (1<<29)
447 #define NV_RX2_SUBSTRACT1 (1<<25)
448 #define NV_RX2_ERROR1 (1<<18)
449 #define NV_RX2_ERROR2 (1<<19)
450 #define NV_RX2_ERROR3 (1<<20)
451 #define NV_RX2_ERROR4 (1<<21)
452 #define NV_RX2_CRCERR (1<<22)
453 #define NV_RX2_OVERFLOW (1<<23)
454 #define NV_RX2_FRAMINGERR (1<<24)
455 /* error and avail are the same for both */
456 #define NV_RX2_ERROR (1<<30)
457 #define NV_RX2_AVAIL (1<<31)
459 #define NV_RX3_VLAN_TAG_PRESENT (1<<16)
460 #define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
462 /* Miscellaneous hardware related defines: */
463 #define NV_PCI_REGSZ_VER1 0x270
464 #define NV_PCI_REGSZ_VER2 0x604
466 /* various timeout delays: all in usec */
467 #define NV_TXRX_RESET_DELAY 4
468 #define NV_TXSTOP_DELAY1 10
469 #define NV_TXSTOP_DELAY1MAX 500000
470 #define NV_TXSTOP_DELAY2 100
471 #define NV_RXSTOP_DELAY1 10
472 #define NV_RXSTOP_DELAY1MAX 500000
473 #define NV_RXSTOP_DELAY2 100
474 #define NV_SETUP5_DELAY 5
475 #define NV_SETUP5_DELAYMAX 50000
476 #define NV_POWERUP_DELAY 5
477 #define NV_POWERUP_DELAYMAX 5000
478 #define NV_MIIBUSY_DELAY 50
479 #define NV_MIIPHY_DELAY 10
480 #define NV_MIIPHY_DELAYMAX 10000
481 #define NV_MAC_RESET_DELAY 64
483 #define NV_WAKEUPPATTERNS 5
484 #define NV_WAKEUPMASKENTRIES 4
486 /* General driver defaults */
487 #define NV_WATCHDOG_TIMEO (5*HZ)
489 #define RX_RING_DEFAULT 128
490 #define TX_RING_DEFAULT 256
491 #define RX_RING_MIN 128
492 #define TX_RING_MIN 64
493 #define RING_MAX_DESC_VER_1 1024
494 #define RING_MAX_DESC_VER_2_3 16384
496 * Difference between the get and put pointers for the tx ring.
497 * This is used to throttle the amount of data outstanding in the tx ring.
500 #define TX_LIMIT_DIFFERENCE 1
502 /* rx/tx mac addr + type + vlan + align + slack */
503 #define NV_RX_HEADERS (64)
504 /* even more slack. */
505 #define NV_RX_ALLOC_PAD (64)
507 /* maximum mtu size */
508 #define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
509 #define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */
511 #define OOM_REFILL (1+HZ/20)
512 #define POLL_WAIT (1+HZ/100)
513 #define LINK_TIMEOUT (3*HZ)
514 #define STATS_INTERVAL (10*HZ)
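/* In jiffies: OOM_REFILL is about 50 ms, POLL_WAIT about 10 ms,
 * LINK_TIMEOUT 3 s and STATS_INTERVAL 10 s, whatever HZ is configured. */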
518 * The nic supports three different descriptor types:
519 * - DESC_VER_1: Original format.
520 * - DESC_VER_2: Support for jumbo frames.
521 * - DESC_VER_3: 64-bit format.
528 #define PHY_OUI_MARVELL 0x5043
529 #define PHY_OUI_CICADA 0x03f1
530 #define PHYID1_OUI_MASK 0x03ff
531 #define PHYID1_OUI_SHFT 6
532 #define PHYID2_OUI_MASK 0xfc00
533 #define PHYID2_OUI_SHFT 10
534 #define PHY_INIT1 0x0f000
535 #define PHY_INIT2 0x0e00
536 #define PHY_INIT3 0x01000
537 #define PHY_INIT4 0x0200
538 #define PHY_INIT5 0x0004
539 #define PHY_INIT6 0x02000
540 #define PHY_GIGABIT 0x0100
542 #define PHY_TIMEOUT 0x1
543 #define PHY_ERROR 0x2
547 #define PHY_HALF 0x100
549 #define NV_PAUSEFRAME_RX_CAPABLE 0x0001
550 #define NV_PAUSEFRAME_TX_CAPABLE 0x0002
551 #define NV_PAUSEFRAME_RX_ENABLE 0x0004
552 #define NV_PAUSEFRAME_TX_ENABLE 0x0008
553 #define NV_PAUSEFRAME_RX_REQ 0x0010
554 #define NV_PAUSEFRAME_TX_REQ 0x0020
555 #define NV_PAUSEFRAME_AUTONEG 0x0040
557 /* MSI/MSI-X defines */
558 #define NV_MSI_X_MAX_VECTORS 8
559 #define NV_MSI_X_VECTORS_MASK 0x000f
560 #define NV_MSI_CAPABLE 0x0010
561 #define NV_MSI_X_CAPABLE 0x0020
562 #define NV_MSI_ENABLED 0x0040
563 #define NV_MSI_X_ENABLED 0x0080
565 #define NV_MSI_X_VECTOR_ALL 0x0
566 #define NV_MSI_X_VECTOR_RX 0x0
567 #define NV_MSI_X_VECTOR_TX 0x1
568 #define NV_MSI_X_VECTOR_OTHER 0x2
571 struct nv_ethtool_str {
572 char name[ETH_GSTRING_LEN];
575 static const struct nv_ethtool_str nv_estats_str[] = {
580 { "tx_late_collision" },
581 { "tx_fifo_errors" },
582 { "tx_carrier_errors" },
583 { "tx_excess_deferral" },
584 { "tx_retry_error" },
588 { "rx_frame_error" },
590 { "rx_late_collision" },
592 { "rx_frame_too_long" },
593 { "rx_over_errors" },
595 { "rx_frame_align_error" },
596 { "rx_length_error" },
604 { "rx_errors_total" }
607 struct nv_ethtool_stats {
612 u64 tx_late_collision;
614 u64 tx_carrier_errors;
615 u64 tx_excess_deferral;
622 u64 rx_late_collision;
624 u64 rx_frame_too_long;
627 u64 rx_frame_align_error;
640 #define NV_TEST_COUNT_BASE 3
641 #define NV_TEST_COUNT_EXTENDED 4
643 static const struct nv_ethtool_str nv_etests_str[] = {
644 { "link (online/offline)" },
645 { "register (offline) " },
646 { "interrupt (offline) " },
647 { "loopback (offline) " }
650 struct register_test {
655 static const struct register_test nv_registers_test[] = {
656 { NvRegUnknownSetupReg6, 0x01 },
657 { NvRegMisc1, 0x03c },
658 { NvRegOffloadConfig, 0x03ff },
659 { NvRegMulticastAddrA, 0xffffffff },
660 { NvRegUnknownSetupReg3, 0x0ff },
661 { NvRegWakeUpFlags, 0x07777 },
667 * All hardware access under dev->priv->lock, except the performance critical parts:
669 * - rx is (pseudo-) lockless: it relies on the single-threading provided
670 * by the arch code for interrupts.
671 * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
672 * needs dev->priv->lock :-(
673 * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
676 /* in dev: base, irq */
681 * Locking: spin_lock(&np->lock); */
682 struct net_device_stats stats;
683 struct nv_ethtool_stats estats;
691 unsigned int phy_oui;
695 /* General data: RO fields */
696 dma_addr_t ring_addr;
697 struct pci_dev *pci_dev;
708 /* rx specific fields.
709 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
712 unsigned int cur_rx, refill_rx;
713 struct sk_buff **rx_skbuff;
715 unsigned int rx_buf_sz;
716 unsigned int pkt_limit;
717 struct timer_list oom_kick;
718 struct timer_list nic_poll;
719 struct timer_list stats_poll;
723 /* media detection workaround.
724 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
727 unsigned long link_timeout;
729 * tx specific fields.
732 unsigned int next_tx, nic_tx;
733 struct sk_buff **tx_skbuff;
735 unsigned int *tx_dma_len;
742 struct vlan_group *vlangrp;
744 /* msi/msi-x fields */
746 struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
753 * Maximum number of loops until we assume that a bit in the irq mask
754 * is stuck. Overridable with module param.
756 static int max_interrupt_work = 5;
759 * Optimization can be either throughput mode or cpu mode
761 * Throughput Mode: Every tx and rx packet will generate an interrupt.
762 * CPU Mode: Interrupts are controlled by a timer.
764 #define NV_OPTIMIZATION_MODE_THROUGHPUT 0
765 #define NV_OPTIMIZATION_MODE_CPU 1
766 static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
769 * Poll interval for timer irq
771 * This interval determines how frequently an interrupt is generated.
772 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
773 * Min = 0, and Max = 65535
775 static int poll_interval = -1;
778 * Disable MSI interrupts
780 static int disable_msi = 0;
783 * Disable MSIX interrupts
785 static int disable_msix = 0;
787 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
789 return netdev_priv(dev);
792 static inline u8 __iomem *get_hwbase(struct net_device *dev)
794 return ((struct fe_priv *)netdev_priv(dev))->base;
797 static inline void pci_push(u8 __iomem *base)
799 /* force out pending posted writes */
803 static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
805 return le32_to_cpu(prd->FlagLen)
806 & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
809 static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
811 return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
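/* reg_delay: poll until (readl(base + offset) & mask) == target, waiting
 * delay usec per iteration and at most delaymax usec overall; on timeout
 * it prints msg (if non-NULL) and returns nonzero. nv_stop_rx/nv_stop_tx
 * below use it to wait for the receiver/transmitter busy bits to clear. */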
814 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
815 int delay, int delaymax, const char *msg)
817 u8 __iomem *base = get_hwbase(dev);
828 } while ((readl(base + offset) & mask) != target);
832 #define NV_SETUP_RX_RING 0x01
833 #define NV_SETUP_TX_RING 0x02
835 static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
837 struct fe_priv *np = get_nvpriv(dev);
838 u8 __iomem *base = get_hwbase(dev);
840 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
841 if (rxtx_flags & NV_SETUP_RX_RING) {
842 writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
844 if (rxtx_flags & NV_SETUP_TX_RING) {
845 writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
848 if (rxtx_flags & NV_SETUP_RX_RING) {
849 writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
850 writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
852 if (rxtx_flags & NV_SETUP_TX_RING) {
853 writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
854 writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
859 static void free_rings(struct net_device *dev)
861 struct fe_priv *np = get_nvpriv(dev);
863 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
865 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
866 np->rx_ring.orig, np->ring_addr);
869 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
870 np->rx_ring.ex, np->ring_addr);
873 kfree(np->rx_skbuff);
877 kfree(np->tx_skbuff);
881 kfree(np->tx_dma_len);
884 static int using_multi_irqs(struct net_device *dev)
886 struct fe_priv *np = get_nvpriv(dev);
888 if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
889 ((np->msi_flags & NV_MSI_X_ENABLED) &&
890 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
896 static void nv_enable_irq(struct net_device *dev)
898 struct fe_priv *np = get_nvpriv(dev);
900 if (!using_multi_irqs(dev)) {
901 if (np->msi_flags & NV_MSI_X_ENABLED)
902 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
904 enable_irq(dev->irq);
906 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
907 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
908 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
912 static void nv_disable_irq(struct net_device *dev)
914 struct fe_priv *np = get_nvpriv(dev);
916 if (!using_multi_irqs(dev)) {
917 if (np->msi_flags & NV_MSI_X_ENABLED)
918 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
920 disable_irq(dev->irq);
922 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
923 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
924 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
928 /* In MSIX mode, a write to irqmask behaves as XOR */
929 static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
931 u8 __iomem *base = get_hwbase(dev);
933 writel(mask, base + NvRegIrqMask);
936 static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
938 struct fe_priv *np = get_nvpriv(dev);
939 u8 __iomem *base = get_hwbase(dev);
941 if (np->msi_flags & NV_MSI_X_ENABLED) {
942 writel(mask, base + NvRegIrqMask);
944 if (np->msi_flags & NV_MSI_ENABLED)
945 writel(0, base + NvRegMSIIrqMask);
946 writel(0, base + NvRegIrqMask);
950 #define MII_READ (-1)
951 /* mii_rw: read/write a register on the PHY.
953 * Caller must guarantee serialization
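 *
 * Typical usage, as in phy_reset()/phy_init() below:
 *	read:  val = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 *	write: err = mii_rw(dev, np->phyaddr, MII_BMCR, newval);
 * MII_READ (-1) as the value selects a read; for a write, a nonzero
 * return indicates a failure or timeout.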
955 static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
957 u8 __iomem *base = get_hwbase(dev);
961 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
963 reg = readl(base + NvRegMIIControl);
964 if (reg & NVREG_MIICTL_INUSE) {
965 writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
966 udelay(NV_MIIBUSY_DELAY);
969 reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
970 if (value != MII_READ) {
971 writel(value, base + NvRegMIIData);
972 reg |= NVREG_MIICTL_WRITE;
974 writel(reg, base + NvRegMIIControl);
976 if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
977 NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
978 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
979 dev->name, miireg, addr);
981 } else if (value != MII_READ) {
982 /* it was a write operation - fewer failures are detectable */
983 dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
984 dev->name, value, miireg, addr);
986 } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
987 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
988 dev->name, miireg, addr);
991 retval = readl(base + NvRegMIIData);
992 dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
993 dev->name, miireg, addr, retval);
999 static int phy_reset(struct net_device *dev)
1001 struct fe_priv *np = netdev_priv(dev);
1003 unsigned int tries = 0;
1005 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1006 miicontrol |= BMCR_RESET;
1007 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
1011 /* wait for 500ms */
1014 /* must wait till reset is deasserted */
1015 while (miicontrol & BMCR_RESET) {
1017 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1018 /* FIXME: 100 tries seem excessive */
1025 static int phy_init(struct net_device *dev)
1027 struct fe_priv *np = get_nvpriv(dev);
1028 u8 __iomem *base = get_hwbase(dev);
1029 u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;
1031 /* set advertise register */
1032 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1033 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
1034 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
1035 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
1039 /* get phy interface type */
1040 phyinterface = readl(base + NvRegPhyInterface);
1042 /* see if gigabit phy */
1043 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1044 if (mii_status & PHY_GIGABIT) {
1045 np->gigabit = PHY_GIGABIT;
1046 mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
1047 mii_control_1000 &= ~ADVERTISE_1000HALF;
1048 if (phyinterface & PHY_RGMII)
1049 mii_control_1000 |= ADVERTISE_1000FULL;
1051 mii_control_1000 &= ~ADVERTISE_1000FULL;
1053 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
1054 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1062 if (phy_reset(dev)) {
1063 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
1067 /* phy vendor specific configuration */
1068 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
1069 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
1070 phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
1071 phy_reserved |= (PHY_INIT3 | PHY_INIT4);
1072 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
1073 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1076 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1077 phy_reserved |= PHY_INIT5;
1078 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
1079 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1083 if (np->phy_oui == PHY_OUI_CICADA) {
1084 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1085 phy_reserved |= PHY_INIT6;
1086 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
1087 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1091 /* some phys clear out pause advertisement on reset, set it back */
1092 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
1094 /* restart auto negotiation */
1095 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1096 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1097 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1104 static void nv_start_rx(struct net_device *dev)
1106 struct fe_priv *np = netdev_priv(dev);
1107 u8 __iomem *base = get_hwbase(dev);
1109 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
1110 /* Already running? Stop it. */
1111 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
1112 writel(0, base + NvRegReceiverControl);
1115 writel(np->linkspeed, base + NvRegLinkSpeed);
1117 writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
1118 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
1119 dev->name, np->duplex, np->linkspeed);
1123 static void nv_stop_rx(struct net_device *dev)
1125 u8 __iomem *base = get_hwbase(dev);
1127 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
1128 writel(0, base + NvRegReceiverControl);
1129 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
1130 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
1131 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
1133 udelay(NV_RXSTOP_DELAY2);
1134 writel(0, base + NvRegLinkSpeed);
1137 static void nv_start_tx(struct net_device *dev)
1139 u8 __iomem *base = get_hwbase(dev);
1141 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
1142 writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
1146 static void nv_stop_tx(struct net_device *dev)
1148 u8 __iomem *base = get_hwbase(dev);
1150 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
1151 writel(0, base + NvRegTransmitterControl);
1152 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
1153 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
1154 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
1156 udelay(NV_TXSTOP_DELAY2);
1157 writel(0, base + NvRegUnknownTransmitterReg);
1160 static void nv_txrx_reset(struct net_device *dev)
1162 struct fe_priv *np = netdev_priv(dev);
1163 u8 __iomem *base = get_hwbase(dev);
1165 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
1166 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1168 udelay(NV_TXRX_RESET_DELAY);
1169 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1173 static void nv_mac_reset(struct net_device *dev)
1175 struct fe_priv *np = netdev_priv(dev);
1176 u8 __iomem *base = get_hwbase(dev);
1178 dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
1179 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1181 writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
1183 udelay(NV_MAC_RESET_DELAY);
1184 writel(0, base + NvRegMacReset);
1186 udelay(NV_MAC_RESET_DELAY);
1187 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1192 * nv_get_stats: dev->get_stats function
1193 * Get latest stats value from the nic.
1194 * Called with read_lock(&dev_base_lock) held for read -
1195 * only synchronized against unregister_netdevice.
1197 static struct net_device_stats *nv_get_stats(struct net_device *dev)
1199 struct fe_priv *np = netdev_priv(dev);
1201 /* It seems that the nic always generates interrupts and doesn't
1202 * accumulate errors internally. Thus the current values in np->stats
1203 * are already up to date.
1209 * nv_alloc_rx: fill rx ring entries.
1210 * Return 1 if the skb allocations failed and the rx engine is left
1211 * without available descriptors.
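 *
 * cur_rx and refill_rx are free-running counters; cur_rx - refill_rx is
 * the number of descriptors still to refill, and a slot is addressed as
 * counter % rx_ring_size. E.g. with rx_ring_size 128, cur_rx 260 and
 * refill_rx 132 the ring holds no buffers at all, and the
 * cur_rx - refill_rx == rx_ring_size test below reports out of memory.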
1213 static int nv_alloc_rx(struct net_device *dev)
1215 struct fe_priv *np = netdev_priv(dev);
1216 unsigned int refill_rx = np->refill_rx;
1219 while (np->cur_rx != refill_rx) {
1220 struct sk_buff *skb;
1222 nr = refill_rx % np->rx_ring_size;
1223 if (np->rx_skbuff[nr] == NULL) {
1225 skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1230 np->rx_skbuff[nr] = skb;
1232 skb = np->rx_skbuff[nr];
1234 np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
1235 skb->end-skb->data, PCI_DMA_FROMDEVICE);
1236 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1237 np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
1239 np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1241 np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
1242 np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
1244 np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1246 dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
1247 dev->name, refill_rx);
1250 np->refill_rx = refill_rx;
1251 if (np->cur_rx - refill_rx == np->rx_ring_size)
1256 static void nv_do_rx_refill(unsigned long data)
1258 struct net_device *dev = (struct net_device *) data;
1259 struct fe_priv *np = netdev_priv(dev);
1261 if (!using_multi_irqs(dev)) {
1262 if (np->msi_flags & NV_MSI_X_ENABLED)
1263 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1265 disable_irq(dev->irq);
1267 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1269 if (nv_alloc_rx(dev)) {
1270 spin_lock_irq(&np->lock);
1271 if (!np->in_shutdown)
1272 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1273 spin_unlock_irq(&np->lock);
1275 if (!using_multi_irqs(dev)) {
1276 if (np->msi_flags & NV_MSI_X_ENABLED)
1277 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1279 enable_irq(dev->irq);
1281 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1285 static void nv_init_rx(struct net_device *dev)
1287 struct fe_priv *np = netdev_priv(dev);
1290 np->cur_rx = np->rx_ring_size;
1292 for (i = 0; i < np->rx_ring_size; i++)
1293 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1294 np->rx_ring.orig[i].FlagLen = 0;
1296 np->rx_ring.ex[i].FlagLen = 0;
1299 static void nv_init_tx(struct net_device *dev)
1301 struct fe_priv *np = netdev_priv(dev);
1304 np->next_tx = np->nic_tx = 0;
1305 for (i = 0; i < np->tx_ring_size; i++) {
1306 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1307 np->tx_ring.orig[i].FlagLen = 0;
1309 np->tx_ring.ex[i].FlagLen = 0;
1310 np->tx_skbuff[i] = NULL;
1315 static int nv_init_ring(struct net_device *dev)
1319 return nv_alloc_rx(dev);
1322 static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
1324 struct fe_priv *np = netdev_priv(dev);
1326 dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
1329 if (np->tx_dma[skbnr]) {
1330 pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
1331 np->tx_dma_len[skbnr],
1333 np->tx_dma[skbnr] = 0;
1336 if (np->tx_skbuff[skbnr]) {
1337 dev_kfree_skb_any(np->tx_skbuff[skbnr]);
1338 np->tx_skbuff[skbnr] = NULL;
1345 static void nv_drain_tx(struct net_device *dev)
1347 struct fe_priv *np = netdev_priv(dev);
1350 for (i = 0; i < np->tx_ring_size; i++) {
1351 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1352 np->tx_ring.orig[i].FlagLen = 0;
1354 np->tx_ring.ex[i].FlagLen = 0;
1355 if (nv_release_txskb(dev, i))
1356 np->stats.tx_dropped++;
1360 static void nv_drain_rx(struct net_device *dev)
1362 struct fe_priv *np = netdev_priv(dev);
1364 for (i = 0; i < np->rx_ring_size; i++) {
1365 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1366 np->rx_ring.orig[i].FlagLen = 0;
1368 np->rx_ring.ex[i].FlagLen = 0;
1370 if (np->rx_skbuff[i]) {
1371 pci_unmap_single(np->pci_dev, np->rx_dma[i],
1372 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
1373 PCI_DMA_FROMDEVICE);
1374 dev_kfree_skb(np->rx_skbuff[i]);
1375 np->rx_skbuff[i] = NULL;
1380 static void drain_ring(struct net_device *dev)
1387 * nv_start_xmit: dev->hard_start_xmit function
1388 * Called with dev->xmit_lock held.
1390 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1392 struct fe_priv *np = netdev_priv(dev);
1394 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
1395 unsigned int fragments = skb_shinfo(skb)->nr_frags;
1396 unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
1397 unsigned int start_nr = np->next_tx % np->tx_ring_size;
1401 u32 size = skb->len-skb->data_len;
1402 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1403 u32 tx_flags_vlan = 0;
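/* entries counts descriptors at NV_TX2_TSO_MAX_SIZE (16384) bytes each:
 * e.g. a 40000 byte linear area needs 40000 >> 14 == 2 full descriptors
 * plus one more for the 7232 byte remainder, so entries == 3. */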
1405 /* add fragments to entries count */
1406 for (i = 0; i < fragments; i++) {
1407 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
1408 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1411 spin_lock_irq(&np->lock);
1413 if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) {
1414 spin_unlock_irq(&np->lock);
1415 netif_stop_queue(dev);
1416 return NETDEV_TX_BUSY;
1419 /* setup the header buffer */
1421 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1422 nr = (nr + 1) % np->tx_ring_size;
1424 np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1426 np->tx_dma_len[nr] = bcnt;
1428 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1429 np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
1430 np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
1432 np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
1433 np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1434 np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
1436 tx_flags = np->tx_flags;
1441 /* setup the fragments */
1442 for (i = 0; i < fragments; i++) {
1443 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1444 u32 size = frag->size;
1448 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1449 nr = (nr + 1) % np->tx_ring_size;
1451 np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
1453 np->tx_dma_len[nr] = bcnt;
1455 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1456 np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
1457 np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
1459 np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
1460 np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1461 np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
1468 /* set last fragment flag */
1469 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1470 np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
1472 np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
1475 np->tx_skbuff[nr] = skb;
1478 if (skb_shinfo(skb)->tso_size)
1479 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
1482 tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
1485 if (np->vlangrp && vlan_tx_tag_present(skb)) {
1486 tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
1490 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1491 np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
1493 np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
1494 np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
1497 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
1498 dev->name, np->next_tx, entries, tx_flags_extra);
1501 for (j=0; j<64; j++) {
1503 dprintk("\n%03x:", j);
1504 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
1509 np->next_tx += entries;
1511 dev->trans_start = jiffies;
1512 spin_unlock_irq(&np->lock);
1513 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1514 pci_push(get_hwbase(dev));
1515 return NETDEV_TX_OK;
1519 * nv_tx_done: check for completed packets, release the skbs.
1521 * Caller must own np->lock.
1523 static void nv_tx_done(struct net_device *dev)
1525 struct fe_priv *np = netdev_priv(dev);
1528 struct sk_buff *skb;
1530 while (np->nic_tx != np->next_tx) {
1531 i = np->nic_tx % np->tx_ring_size;
1533 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1534 Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
1536 Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
1538 dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
1539 dev->name, np->nic_tx, Flags);
1540 if (Flags & NV_TX_VALID)
1542 if (np->desc_ver == DESC_VER_1) {
1543 if (Flags & NV_TX_LASTPACKET) {
1544 skb = np->tx_skbuff[i];
1545 if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
1546 NV_TX_UNDERFLOW|NV_TX_ERROR)) {
1547 if (Flags & NV_TX_UNDERFLOW)
1548 np->stats.tx_fifo_errors++;
1549 if (Flags & NV_TX_CARRIERLOST)
1550 np->stats.tx_carrier_errors++;
1551 np->stats.tx_errors++;
1553 np->stats.tx_packets++;
1554 np->stats.tx_bytes += skb->len;
1558 if (Flags & NV_TX2_LASTPACKET) {
1559 skb = np->tx_skbuff[i];
1560 if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
1561 NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
1562 if (Flags & NV_TX2_UNDERFLOW)
1563 np->stats.tx_fifo_errors++;
1564 if (Flags & NV_TX2_CARRIERLOST)
1565 np->stats.tx_carrier_errors++;
1566 np->stats.tx_errors++;
1568 np->stats.tx_packets++;
1569 np->stats.tx_bytes += skb->len;
1573 nv_release_txskb(dev, i);
1576 if (np->next_tx - np->nic_tx < np->tx_limit_start)
1577 netif_wake_queue(dev);
1581 * nv_tx_timeout: dev->tx_timeout function
1582 * Called with dev->xmit_lock held.
1584 static void nv_tx_timeout(struct net_device *dev)
1586 struct fe_priv *np = netdev_priv(dev);
1587 u8 __iomem *base = get_hwbase(dev);
1590 if (np->msi_flags & NV_MSI_X_ENABLED)
1591 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
1593 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
1595 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
1600 printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
1601 dev->name, (unsigned long)np->ring_addr,
1602 np->next_tx, np->nic_tx);
1603 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
1604 for (i = 0; i <= np->register_size; i += 32) {
1605 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
1607 readl(base + i + 0), readl(base + i + 4),
1608 readl(base + i + 8), readl(base + i + 12),
1609 readl(base + i + 16), readl(base + i + 20),
1610 readl(base + i + 24), readl(base + i + 28));
1612 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
1613 for (i = 0; i < np->tx_ring_size; i += 4) {
1614 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1615 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
1617 le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
1618 le32_to_cpu(np->tx_ring.orig[i].FlagLen),
1619 le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
1620 le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
1621 le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
1622 le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
1623 le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
1624 le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
1626 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
1628 le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
1629 le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
1630 le32_to_cpu(np->tx_ring.ex[i].FlagLen),
1631 le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
1632 le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
1633 le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
1634 le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
1635 le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
1636 le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
1637 le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
1638 le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
1639 le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
1644 spin_lock_irq(&np->lock);
1646 /* 1) stop tx engine */
1649 /* 2) check that the packets were not sent already: */
1652 /* 3) if there are dead entries: clear everything */
1653 if (np->next_tx != np->nic_tx) {
1654 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
1656 np->next_tx = np->nic_tx = 0;
1657 setup_hw_rings(dev, NV_SETUP_TX_RING);
1658 netif_wake_queue(dev);
1661 /* 4) restart tx engine */
1663 spin_unlock_irq(&np->lock);
1667 * Called when the nic notices a mismatch between the actual data len on the
1668 * wire and the len indicated in the 802 header
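 *
 * Example: a frame that claims a total (header + proto) length of
 * 100 bytes but arrives with only 90 bytes on the wire is discarded,
 * while one arriving with 110 bytes is accepted and trimmed back to 100.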
1670 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
1672 int hdrlen; /* length of the 802 header */
1673 int protolen; /* length as stored in the proto field */
1675 /* 1) calculate len according to header */
1676 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
1677 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
1680 protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
1683 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
1684 dev->name, datalen, protolen, hdrlen);
1685 if (protolen > ETH_DATA_LEN)
1686 return datalen; /* Value in proto field not a len, no checks possible */
1689 /* consistency checks: */
1690 if (datalen > ETH_ZLEN) {
1691 if (datalen >= protolen) {
1692 /* more data on wire than in 802 header, trim off
1695 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
1696 dev->name, protolen);
1699 /* less data on wire than mentioned in header.
1700 * Discard the packet.
1702 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
1707 /* short packet. Accept only if 802 values are also short */
1708 if (protolen > ETH_ZLEN) {
1709 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
1713 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
1714 dev->name, datalen);
1719 static void nv_rx_process(struct net_device *dev)
1721 struct fe_priv *np = netdev_priv(dev);
1726 struct sk_buff *skb;
1729 if (np->cur_rx - np->refill_rx >= np->rx_ring_size)
1730 break; /* we scanned the whole ring - do not continue */
1732 i = np->cur_rx % np->rx_ring_size;
1733 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1734 Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
1735 len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
1737 Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
1738 len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
1739 vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
1742 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
1743 dev->name, np->cur_rx, Flags);
1745 if (Flags & NV_RX_AVAIL)
1746 break; /* still owned by hardware, */
1749 * the packet is for us - immediately tear down the pci mapping.
1750 * TODO: check if a prefetch of the first cacheline improves performance.
1753 pci_unmap_single(np->pci_dev, np->rx_dma[i],
1754 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
1755 PCI_DMA_FROMDEVICE);
1759 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
1760 for (j=0; j<64; j++) {
1762 dprintk("\n%03x:", j);
1763 dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]);
1767 /* look at what we actually got: */
1768 if (np->desc_ver == DESC_VER_1) {
1769 if (!(Flags & NV_RX_DESCRIPTORVALID))
1772 if (Flags & NV_RX_ERROR) {
1773 if (Flags & NV_RX_MISSEDFRAME) {
1774 np->stats.rx_missed_errors++;
1775 np->stats.rx_errors++;
1778 if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
1779 np->stats.rx_errors++;
1782 if (Flags & NV_RX_CRCERR) {
1783 np->stats.rx_crc_errors++;
1784 np->stats.rx_errors++;
1787 if (Flags & NV_RX_OVERFLOW) {
1788 np->stats.rx_over_errors++;
1789 np->stats.rx_errors++;
1792 if (Flags & NV_RX_ERROR4) {
1793 len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
1795 np->stats.rx_errors++;
1799 /* framing errors are soft errors. */
1800 if (Flags & NV_RX_FRAMINGERR) {
1801 if (Flags & NV_RX_SUBSTRACT1) {
1807 if (!(Flags & NV_RX2_DESCRIPTORVALID))
1810 if (Flags & NV_RX2_ERROR) {
1811 if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
1812 np->stats.rx_errors++;
1815 if (Flags & NV_RX2_CRCERR) {
1816 np->stats.rx_crc_errors++;
1817 np->stats.rx_errors++;
1820 if (Flags & NV_RX2_OVERFLOW) {
1821 np->stats.rx_over_errors++;
1822 np->stats.rx_errors++;
1825 if (Flags & NV_RX2_ERROR4) {
1826 len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
1828 np->stats.rx_errors++;
1832 /* framing errors are soft errors */
1833 if (Flags & NV_RX2_FRAMINGERR) {
1834 if (Flags & NV_RX2_SUBSTRACT1) {
1839 if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
1840 Flags &= NV_RX2_CHECKSUMMASK;
1841 if (Flags == NV_RX2_CHECKSUMOK1 ||
1842 Flags == NV_RX2_CHECKSUMOK2 ||
1843 Flags == NV_RX2_CHECKSUMOK3) {
1844 dprintk(KERN_DEBUG "%s: hw checksum hit!\n", dev->name);
1845 np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
1847 dprintk(KERN_DEBUG "%s: hw checksum miss!\n", dev->name);
1851 /* got a valid packet - forward it to the network core */
1852 skb = np->rx_skbuff[i];
1853 np->rx_skbuff[i] = NULL;
1856 skb->protocol = eth_type_trans(skb, dev);
1857 dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
1858 dev->name, np->cur_rx, len, skb->protocol);
1859 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
1860 vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
1864 dev->last_rx = jiffies;
1865 np->stats.rx_packets++;
1866 np->stats.rx_bytes += len;
1872 static void set_bufsize(struct net_device *dev)
1874 struct fe_priv *np = netdev_priv(dev);
1876 if (dev->mtu <= ETH_DATA_LEN)
1877 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
1879 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
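/* e.g. mtu 1500 gives rx_buf_sz 1564 and mtu 9000 gives 9064, since
 * NV_RX_HEADERS is 64; nv_alloc_rx adds another NV_RX_ALLOC_PAD (64)
 * bytes of slack when the skb itself is allocated. */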
1883 * nv_change_mtu: dev->change_mtu function
1884 * Called with dev_base_lock held for read.
1886 static int nv_change_mtu(struct net_device *dev, int new_mtu)
1888 struct fe_priv *np = netdev_priv(dev);
1891 if (new_mtu < 64 || new_mtu > np->pkt_limit)
1897 /* return early if the buffer sizes will not change */
1898 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
1900 if (old_mtu == new_mtu)
1903 /* synchronized against open : rtnl_lock() held by caller */
1904 if (netif_running(dev)) {
1905 u8 __iomem *base = get_hwbase(dev);
1907 * It seems that the nic preloads valid ring entries into an
1908 * internal buffer. The procedure for flushing everything is
1909 * guessed; there is probably a simpler approach.
1910 * Changing the MTU is a rare event, so it shouldn't matter.
1912 nv_disable_irq(dev);
1913 spin_lock_bh(&dev->xmit_lock);
1914 spin_lock(&np->lock);
1919 /* drain rx queue */
1922 /* reinit driver view of the rx queue */
1924 if (nv_init_ring(dev)) {
1925 if (!np->in_shutdown)
1926 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1928 /* reinit nic view of the rx queue */
1929 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
1930 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
1931 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
1932 base + NvRegRingSizes);
1934 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1937 /* restart rx engine */
1940 spin_unlock(&np->lock);
1941 spin_unlock_bh(&dev->xmit_lock);
1947 static void nv_copy_mac_to_hw(struct net_device *dev)
1949 u8 __iomem *base = get_hwbase(dev);
1952 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
1953 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
1954 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
1956 writel(mac[0], base + NvRegMacAddrA);
1957 writel(mac[1], base + NvRegMacAddrB);
1961 * nv_set_mac_address: dev->set_mac_address function
1962 * Called with rtnl_lock() held.
1964 static int nv_set_mac_address(struct net_device *dev, void *addr)
1966 struct fe_priv *np = netdev_priv(dev);
1967 struct sockaddr *macaddr = (struct sockaddr*)addr;
1969 if (!is_valid_ether_addr(macaddr->sa_data))
1970 return -EADDRNOTAVAIL;
1972 /* synchronized against open : rtnl_lock() held by caller */
1973 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
1975 if (netif_running(dev)) {
1976 spin_lock_bh(&dev->xmit_lock);
1977 spin_lock_irq(&np->lock);
1979 /* stop rx engine */
1982 /* set mac address */
1983 nv_copy_mac_to_hw(dev);
1985 /* restart rx engine */
1987 spin_unlock_irq(&np->lock);
1988 spin_unlock_bh(&dev->xmit_lock);
1990 nv_copy_mac_to_hw(dev);
1995 /*
1996 * nv_set_multicast: dev->set_multicast function
1997 * Called with dev->xmit_lock held.
1998 */
1999 static void nv_set_multicast(struct net_device *dev)
2000 {
2001 struct fe_priv *np = netdev_priv(dev);
2002 u8 __iomem *base = get_hwbase(dev);
2003 u32 addr[2];
2004 u32 mask[2];
2005 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
2007 memset(addr, 0, sizeof(addr));
2008 memset(mask, 0, sizeof(mask));
2010 if (dev->flags & IFF_PROMISC) {
2011 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
2012 pff |= NVREG_PFF_PROMISC;
2013 } else {
2014 pff |= NVREG_PFF_MYADDR;
2016 if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
2017 u32 alwaysOff[2];
2018 u32 alwaysOn[2];
2020 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
2021 if (dev->flags & IFF_ALLMULTI) {
2022 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
2023 } else {
2024 struct dev_mc_list *walk;
2026 walk = dev->mc_list;
2027 while (walk != NULL) {
2028 u32 a, b;
2029 a = le32_to_cpu(*(u32 *) walk->dmi_addr);
2030 b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
2031 alwaysOn[0] &= a;
2032 alwaysOff[0] &= ~a;
2033 alwaysOn[1] &= b;
2034 alwaysOff[1] &= ~b;
2035 walk = walk->next;
2036 }
2037 }
2038 addr[0] = alwaysOn[0];
2039 addr[1] = alwaysOn[1];
2040 mask[0] = alwaysOn[0] | alwaysOff[0];
2041 mask[1] = alwaysOn[1] | alwaysOff[1];
2042 }
2043 }
2044 addr[0] |= NVREG_MCASTADDRA_FORCE;
2045 pff |= NVREG_PFF_ALWAYS;
2046 spin_lock_irq(&np->lock);
2047 nv_stop_rx(dev);
2048 writel(addr[0], base + NvRegMulticastAddrA);
2049 writel(addr[1], base + NvRegMulticastAddrB);
2050 writel(mask[0], base + NvRegMulticastMaskA);
2051 writel(mask[1], base + NvRegMulticastMaskB);
2052 writel(pff, base + NvRegPacketFilterFlags);
2053 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
2054 dev->name);
2055 nv_start_rx(dev);
2056 spin_unlock_irq(&np->lock);
2057 }
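/*
 * The alwaysOn/alwaysOff trick above builds an approximate filter:
 * alwaysOn accumulates the bits set in every list entry, alwaysOff the
 * bits clear in every entry, so addr/mask describe the largest bit
 * pattern common to all requested multicast addresses. E.g. for
 * 01:00:5e:00:00:01 and 01:00:5e:00:00:03 all bits agree except bit 1
 * of the last byte, so that bit drops out of the mask and both
 * addresses (plus a few unrequested ones) pass the hardware filter.
 */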
2059 void nv_update_pause(struct net_device *dev, u32 pause_flags)
2060 {
2061 struct fe_priv *np = netdev_priv(dev);
2062 u8 __iomem *base = get_hwbase(dev);
2064 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
2066 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
2067 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
2068 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
2069 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
2070 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2071 } else {
2072 writel(pff, base + NvRegPacketFilterFlags);
2073 }
2074 }
2075 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
2076 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
2077 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
2078 writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
2079 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
2080 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2081 } else {
2082 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
2083 writel(regmisc, base + NvRegMisc1);
2084 }
2085 }
2086 }
2088 /*
2089 * nv_update_linkspeed: Setup the MAC according to the link partner
2090 * @dev: Network device to be configured
2091 *
2092 * The function queries the PHY and checks if there is a link partner.
2093 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
2094 * set to 10 MBit HD.
2095 *
2096 * The function returns 0 if there is no link partner and 1 if there is
2097 * a good link partner.
2098 */
2099 static int nv_update_linkspeed(struct net_device *dev)
2100 {
2101 struct fe_priv *np = netdev_priv(dev);
2102 u8 __iomem *base = get_hwbase(dev);
2103 int adv = 0;
2104 int lpa = 0;
2105 int adv_lpa, adv_pause, lpa_pause;
2106 int newls = np->linkspeed;
2107 int newdup = np->duplex;
2108 int mii_status;
2109 int retval = 0;
2110 u32 control_1000, status_1000, phyreg, pause_flags;
2112 /* BMSR_LSTATUS is latched, read it twice:
2113 * we want the current value.
2114 */
2115 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
2116 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
2118 if (!(mii_status & BMSR_LSTATUS)) {
2119 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
2120 dev->name);
2121 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2122 newdup = 0;
2123 retval = 0;
2124 goto set_speed;
2125 }
2127 if (np->autoneg == 0) {
2128 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
2129 dev->name, np->fixed_mode);
2130 if (np->fixed_mode & LPA_100FULL) {
2131 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2132 newdup = 1;
2133 } else if (np->fixed_mode & LPA_100HALF) {
2134 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2135 newdup = 0;
2136 } else if (np->fixed_mode & LPA_10FULL) {
2137 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2138 newdup = 1;
2139 } else {
2140 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2141 newdup = 0;
2142 }
2143 retval = 1;
2144 goto set_speed;
2145 }
2146 /* check auto negotiation is complete */
2147 if (!(mii_status & BMSR_ANEGCOMPLETE)) {
2148 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
2149 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2150 newdup = 0;
2151 retval = 0;
2152 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
2153 goto set_speed;
2154 }
2156 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
2157 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
2158 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
2159 dev->name, adv, lpa);
2161 retval = 1;
2162 if (np->gigabit == PHY_GIGABIT) {
2163 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
2164 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
2166 if ((control_1000 & ADVERTISE_1000FULL) &&
2167 (status_1000 & LPA_1000FULL)) {
2168 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
2169 dev->name);
2170 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
2171 newdup = 1;
2172 goto set_speed;
2173 }
2174 }
2176 /* FIXME: handle parallel detection properly */
2177 adv_lpa = lpa & adv;
2178 if (adv_lpa & LPA_100FULL) {
2179 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2180 newdup = 1;
2181 } else if (adv_lpa & LPA_100HALF) {
2182 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2183 newdup = 0;
2184 } else if (adv_lpa & LPA_10FULL) {
2185 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2186 newdup = 1;
2187 } else if (adv_lpa & LPA_10HALF) {
2188 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2189 newdup = 0;
2190 } else {
2191 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
2192 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2193 newdup = 0;
2194 }
2196 set_speed:
2197 if (np->duplex == newdup && np->linkspeed == newls)
2198 return retval;
2200 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
2201 dev->name, np->linkspeed, np->duplex, newls, newdup);
2203 np->duplex = newdup;
2204 np->linkspeed = newls;
2206 if (np->gigabit == PHY_GIGABIT) {
2207 phyreg = readl(base + NvRegRandomSeed);
2208 phyreg &= ~(0x3FF00);
2209 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
2210 phyreg |= NVREG_RNDSEED_FORCE3;
2211 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
2212 phyreg |= NVREG_RNDSEED_FORCE2;
2213 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
2214 phyreg |= NVREG_RNDSEED_FORCE;
2215 writel(phyreg, base + NvRegRandomSeed);
2218 phyreg = readl(base + NvRegPhyInterface);
2219 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
2220 if (np->duplex == 0)
2221 phyreg |= PHY_HALF;
2222 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
2223 phyreg |= PHY_100;
2224 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
2225 phyreg |= PHY_1000;
2226 writel(phyreg, base + NvRegPhyInterface);
2228 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
2229 base + NvRegMisc1);
2230 pci_push(base);
2231 writel(np->linkspeed, base + NvRegLinkSpeed);
2232 pci_push(base);
2234 pause_flags = 0;
2235 /* setup pause frame */
2236 if (np->duplex != 0) {
2237 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
2238 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
2239 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
2241 switch (adv_pause) {
2242 case (ADVERTISE_PAUSE_CAP):
2243 if (lpa_pause & LPA_PAUSE_CAP) {
2244 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2245 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
2246 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2247 }
2248 break;
2249 case (ADVERTISE_PAUSE_ASYM):
2250 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
2251 {
2252 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2253 }
2254 break;
2255 case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
2256 if (lpa_pause & LPA_PAUSE_CAP)
2257 {
2258 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2259 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
2260 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2261 }
2262 if (lpa_pause == LPA_PAUSE_ASYM)
2263 {
2264 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2265 }
2266 break;
2267 }
2268 } else {
2269 pause_flags = np->pause_flags;
2270 }
2271 }
2272 nv_update_pause(dev, pause_flags);
2274 return retval;
2275 }
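/*
 * The switch above is the usual 802.3 pause resolution: e.g. when we
 * advertise symmetric pause only (ADVERTISE_PAUSE_CAP), rx pause is
 * enabled iff the partner also advertises LPA_PAUSE_CAP, while tx
 * pause towards an asymmetric-only advertisement requires the partner
 * to advertise both CAP and ASYM.
 */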
2277 static void nv_linkchange(struct net_device *dev)
2278 {
2279 if (nv_update_linkspeed(dev)) {
2280 if (!netif_carrier_ok(dev)) {
2281 netif_carrier_on(dev);
2282 printk(KERN_INFO "%s: link up.\n", dev->name);
2283 nv_start_rx(dev);
2284 }
2285 } else {
2286 if (netif_carrier_ok(dev)) {
2287 netif_carrier_off(dev);
2288 printk(KERN_INFO "%s: link down.\n", dev->name);
2289 nv_stop_rx(dev);
2290 }
2291 }
2292 }
2294 static void nv_link_irq(struct net_device *dev)
2295 {
2296 u8 __iomem *base = get_hwbase(dev);
2297 u32 miistat;
2299 miistat = readl(base + NvRegMIIStatus);
2300 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
2301 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
2303 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
2304 nv_linkchange(dev);
2305 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
2306 }
2308 static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
2309 {
2310 struct net_device *dev = (struct net_device *) data;
2311 struct fe_priv *np = netdev_priv(dev);
2312 u8 __iomem *base = get_hwbase(dev);
2313 u32 events;
2314 int i;
2316 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
2318 for (i=0; ; i++) {
2319 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
2320 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2321 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
2322 } else {
2323 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2324 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
2325 }
2326 pci_push(base);
2327 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2328 if (!(events & np->irqmask))
2329 break;
2331 spin_lock(&np->lock);
2332 nv_tx_done(dev);
2333 spin_unlock(&np->lock);
2335 nv_rx_process(dev);
2336 if (nv_alloc_rx(dev)) {
2337 spin_lock(&np->lock);
2338 if (!np->in_shutdown)
2339 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2340 spin_unlock(&np->lock);
2341 }
2343 if (events & NVREG_IRQ_LINK) {
2344 spin_lock(&np->lock);
2345 nv_link_irq(dev);
2346 spin_unlock(&np->lock);
2347 }
2348 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
2349 spin_lock(&np->lock);
2350 nv_linkchange(dev);
2351 spin_unlock(&np->lock);
2352 np->link_timeout = jiffies + LINK_TIMEOUT;
2353 }
2354 if (events & (NVREG_IRQ_TX_ERR)) {
2355 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
2356 dev->name, events);
2357 }
2358 if (events & (NVREG_IRQ_UNKNOWN)) {
2359 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
2360 dev->name, events);
2361 }
2362 if (i > max_interrupt_work) {
2363 spin_lock(&np->lock);
2364 /* disable interrupts on the nic */
2365 if (!(np->msi_flags & NV_MSI_X_ENABLED))
2366 writel(0, base + NvRegIrqMask);
2367 else
2368 writel(np->irqmask, base + NvRegIrqMask);
2369 pci_push(base);
2371 if (!np->in_shutdown) {
2372 np->nic_poll_irq = np->irqmask;
2373 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2374 }
2375 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
2376 spin_unlock(&np->lock);
2377 break;
2378 }
2379 }
2381 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
2383 return IRQ_RETVAL(i);
2384 }
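/*
 * Note on the return value: IRQ_RETVAL(i) reports IRQ_HANDLED whenever
 * at least one loop iteration consumed events, and IRQ_NONE otherwise,
 * which lets the kernel's spurious-interrupt detection work on shared
 * lines.
 */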
2386 static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
2387 {
2388 struct net_device *dev = (struct net_device *) data;
2389 struct fe_priv *np = netdev_priv(dev);
2390 u8 __iomem *base = get_hwbase(dev);
2391 u32 events;
2392 int i;
2394 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
2396 for (i=0; ; i++) {
2397 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
2398 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
2399 pci_push(base);
2400 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
2401 if (!(events & np->irqmask))
2402 break;
2404 spin_lock_irq(&np->lock);
2405 nv_tx_done(dev);
2406 spin_unlock_irq(&np->lock);
2408 if (events & (NVREG_IRQ_TX_ERR)) {
2409 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
2410 dev->name, events);
2411 }
2412 if (i > max_interrupt_work) {
2413 spin_lock_irq(&np->lock);
2414 /* disable interrupts on the nic */
2415 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
2416 pci_push(base);
2418 if (!np->in_shutdown) {
2419 np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
2420 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2421 }
2422 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
2423 spin_unlock_irq(&np->lock);
2424 break;
2425 }
2426 }
2428 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
2430 return IRQ_RETVAL(i);
2431 }
2433 static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
2434 {
2435 struct net_device *dev = (struct net_device *) data;
2436 struct fe_priv *np = netdev_priv(dev);
2437 u8 __iomem *base = get_hwbase(dev);
2438 u32 events;
2439 int i;
2441 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
2443 for (i=0; ; i++) {
2444 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
2445 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
2446 pci_push(base);
2447 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
2448 if (!(events & np->irqmask))
2449 break;
2451 nv_rx_process(dev);
2452 if (nv_alloc_rx(dev)) {
2453 spin_lock_irq(&np->lock);
2454 if (!np->in_shutdown)
2455 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2456 spin_unlock_irq(&np->lock);
2457 }
2459 if (i > max_interrupt_work) {
2460 spin_lock_irq(&np->lock);
2461 /* disable interrupts on the nic */
2462 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
2463 pci_push(base);
2465 if (!np->in_shutdown) {
2466 np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
2467 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2468 }
2469 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
2470 spin_unlock_irq(&np->lock);
2471 break;
2472 }
2473 }
2475 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
2477 return IRQ_RETVAL(i);
2478 }
2480 static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
2481 {
2482 struct net_device *dev = (struct net_device *) data;
2483 struct fe_priv *np = netdev_priv(dev);
2484 u8 __iomem *base = get_hwbase(dev);
2485 u32 events;
2486 int i;
2488 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
2490 for (i=0; ; i++) {
2491 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
2492 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
2493 pci_push(base);
2494 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2495 if (!(events & np->irqmask))
2496 break;
2498 if (events & NVREG_IRQ_LINK) {
2499 spin_lock_irq(&np->lock);
2500 nv_link_irq(dev);
2501 spin_unlock_irq(&np->lock);
2502 }
2503 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
2504 spin_lock_irq(&np->lock);
2505 nv_linkchange(dev);
2506 spin_unlock_irq(&np->lock);
2507 np->link_timeout = jiffies + LINK_TIMEOUT;
2508 }
2509 if (events & (NVREG_IRQ_UNKNOWN)) {
2510 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
2511 dev->name, events);
2512 }
2513 if (i > max_interrupt_work) {
2514 spin_lock_irq(&np->lock);
2515 /* disable interrupts on the nic */
2516 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
2517 pci_push(base);
2519 if (!np->in_shutdown) {
2520 np->nic_poll_irq |= NVREG_IRQ_OTHER;
2521 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2522 }
2523 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
2524 spin_unlock_irq(&np->lock);
2525 break;
2526 }
2527 }
2529 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
2531 return IRQ_RETVAL(i);
2532 }
2534 static irqreturn_t nv_nic_irq_test(int foo, void *data, struct pt_regs *regs)
2535 {
2536 struct net_device *dev = (struct net_device *) data;
2537 struct fe_priv *np = netdev_priv(dev);
2538 u8 __iomem *base = get_hwbase(dev);
2539 u32 events;
2541 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
2543 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
2544 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2545 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
2546 } else {
2547 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2548 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
2549 }
2550 pci_push(base);
2551 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2552 if (!(events & NVREG_IRQ_TIMER))
2553 return IRQ_RETVAL(0);
2555 spin_lock(&np->lock);
2556 np->intr_test = 1;
2557 spin_unlock(&np->lock);
2559 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
2561 return IRQ_RETVAL(1);
2562 }
2564 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
2565 {
2566 u8 __iomem *base = get_hwbase(dev);
2567 int i;
2568 u32 msixmap = 0;
2570 /* Each interrupt bit can be mapped to a MSIX vector (4 bits).
2571 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
2572 * the remaining 8 interrupts.
2573 */
2574 for (i = 0; i < 8; i++) {
2575 if ((irqmask >> i) & 0x1) {
2576 msixmap |= vector << (i << 2);
2577 }
2578 }
2579 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
2581 msixmap = 0;
2582 for (i = 0; i < 8; i++) {
2583 if ((irqmask >> (i + 8)) & 0x1) {
2584 msixmap |= vector << (i << 2);
2585 }
2586 }
2587 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
2588 }
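/*
 * Worked example of the 4-bit packing above: mapping vector 1 onto
 * irqmask 0x000f sets the four low nibbles of NvRegMSIXMap0
 * (msixmap = 0x00001111), since interrupt bit i selects the nibble at
 * shift (i << 2).
 */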
2590 static int nv_request_irq(struct net_device *dev, int intr_test)
2591 {
2592 struct fe_priv *np = get_nvpriv(dev);
2593 u8 __iomem *base = get_hwbase(dev);
2594 int ret = 1;
2595 int i;
2597 if (np->msi_flags & NV_MSI_X_CAPABLE) {
2598 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
2599 np->msi_x_entry[i].entry = i;
2600 }
2601 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
2602 np->msi_flags |= NV_MSI_X_ENABLED;
2603 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
2604 /* Request irq for rx handling */
2605 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
2606 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
2607 pci_disable_msix(np->pci_dev);
2608 np->msi_flags &= ~NV_MSI_X_ENABLED;
2609 goto out_err;
2610 }
2611 /* Request irq for tx handling */
2612 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
2613 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
2614 pci_disable_msix(np->pci_dev);
2615 np->msi_flags &= ~NV_MSI_X_ENABLED;
2616 goto out_free_rx;
2617 }
2618 /* Request irq for link and timer handling */
2619 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
2620 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
2621 pci_disable_msix(np->pci_dev);
2622 np->msi_flags &= ~NV_MSI_X_ENABLED;
2623 goto out_free_tx;
2624 }
2625 /* map interrupts to their respective vector */
2626 writel(0, base + NvRegMSIXMap0);
2627 writel(0, base + NvRegMSIXMap1);
2628 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
2629 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
2630 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
2631 } else {
2632 /* Request irq for all interrupts */
2633 if ((!intr_test &&
2634 request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
2635 (intr_test &&
2636 request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
2637 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
2638 pci_disable_msix(np->pci_dev);
2639 np->msi_flags &= ~NV_MSI_X_ENABLED;
2640 goto out_err;
2641 }
2643 /* map interrupts to vector 0 */
2644 writel(0, base + NvRegMSIXMap0);
2645 writel(0, base + NvRegMSIXMap1);
2646 }
2647 }
2648 }
2649 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
2650 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
2651 np->msi_flags |= NV_MSI_ENABLED;
2652 if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
2653 (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
2654 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
2655 pci_disable_msi(np->pci_dev);
2656 np->msi_flags &= ~NV_MSI_ENABLED;
2657 goto out_err;
2658 }
2660 /* map interrupts to vector 0 */
2661 writel(0, base + NvRegMSIMap0);
2662 writel(0, base + NvRegMSIMap1);
2663 /* enable msi vector 0 */
2664 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
2665 }
2666 }
2667 if (ret != 0) {
2668 if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
2669 (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0))
2670 goto out_err;
2671 }
2673 return 0;
2675 out_free_tx:
2676 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
2677 out_free_rx:
2678 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
2679 out_err:
2680 return 1;
2681 }
2683 static void nv_free_irq(struct net_device *dev)
2684 {
2685 struct fe_priv *np = get_nvpriv(dev);
2686 int i;
2688 if (np->msi_flags & NV_MSI_X_ENABLED) {
2689 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
2690 free_irq(np->msi_x_entry[i].vector, dev);
2691 }
2692 pci_disable_msix(np->pci_dev);
2693 np->msi_flags &= ~NV_MSI_X_ENABLED;
2694 } else {
2695 free_irq(np->pci_dev->irq, dev);
2696 if (np->msi_flags & NV_MSI_ENABLED) {
2697 pci_disable_msi(np->pci_dev);
2698 np->msi_flags &= ~NV_MSI_ENABLED;
2699 }
2700 }
2701 }
2703 static void nv_do_nic_poll(unsigned long data)
2704 {
2705 struct net_device *dev = (struct net_device *) data;
2706 struct fe_priv *np = netdev_priv(dev);
2707 u8 __iomem *base = get_hwbase(dev);
2708 u32 mask = 0;
2710 /*
2711 * First disable irq(s) and then
2712 * reenable interrupts on the nic, we have to do this before calling
2713 * nv_nic_irq because that may decide to do otherwise
2714 */
2716 if (!using_multi_irqs(dev)) {
2717 if (np->msi_flags & NV_MSI_X_ENABLED)
2718 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
2719 else
2720 disable_irq(dev->irq);
2721 mask = np->irqmask;
2722 } else {
2723 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
2724 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
2725 mask |= NVREG_IRQ_RX_ALL;
2726 }
2727 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
2728 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
2729 mask |= NVREG_IRQ_TX_ALL;
2730 }
2731 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
2732 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
2733 mask |= NVREG_IRQ_OTHER;
2734 }
2735 }
2736 np->nic_poll_irq = 0;
2738 /* FIXME: Do we need synchronize_irq(dev->irq) here? */
2740 writel(mask, base + NvRegIrqMask);
2741 pci_push(base);
2743 if (!using_multi_irqs(dev)) {
2744 nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
2745 if (np->msi_flags & NV_MSI_X_ENABLED)
2746 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
2747 else
2748 enable_irq(dev->irq);
2749 } else {
2750 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
2751 nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
2752 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
2753 }
2754 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
2755 nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL);
2756 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
2757 }
2758 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
2759 nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL);
2760 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
2761 }
2762 }
2763 }
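/*
 * This timer handler is the recovery path armed by the irq handlers
 * above: when a handler hits max_interrupt_work it masks its sources
 * and schedules np->nic_poll; this function then disables the irq
 * line, rewrites the saved mask to NvRegIrqMask and invokes the
 * handler by hand so no events are lost.
 */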
2765 #ifdef CONFIG_NET_POLL_CONTROLLER
2766 static void nv_poll_controller(struct net_device *dev)
2767 {
2768 nv_do_nic_poll((unsigned long) dev);
2769 }
2770 #endif
2772 static void nv_do_stats_poll(unsigned long data)
2773 {
2774 struct net_device *dev = (struct net_device *) data;
2775 struct fe_priv *np = netdev_priv(dev);
2776 u8 __iomem *base = get_hwbase(dev);
2778 np->estats.tx_bytes += readl(base + NvRegTxCnt);
2779 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
2780 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
2781 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
2782 np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
2783 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
2784 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
2785 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
2786 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
2787 np->estats.tx_deferral += readl(base + NvRegTxDef);
2788 np->estats.tx_packets += readl(base + NvRegTxFrame);
2789 np->estats.tx_pause += readl(base + NvRegTxPause);
2790 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
2791 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
2792 np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
2793 np->estats.rx_runt += readl(base + NvRegRxRunt);
2794 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
2795 np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
2796 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
2797 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
2798 np->estats.rx_length_error += readl(base + NvRegRxLenErr);
2799 np->estats.rx_unicast += readl(base + NvRegRxUnicast);
2800 np->estats.rx_multicast += readl(base + NvRegRxMulticast);
2801 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
2802 np->estats.rx_bytes += readl(base + NvRegRxCnt);
2803 np->estats.rx_pause += readl(base + NvRegRxPause);
2804 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
2805 np->estats.rx_packets =
2806 np->estats.rx_unicast +
2807 np->estats.rx_multicast +
2808 np->estats.rx_broadcast;
2809 np->estats.rx_errors_total =
2810 np->estats.rx_crc_errors +
2811 np->estats.rx_over_errors +
2812 np->estats.rx_frame_error +
2813 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
2814 np->estats.rx_late_collision +
2815 np->estats.rx_runt +
2816 np->estats.rx_frame_too_long;
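/*
 * rx_packets and rx_errors_total are derived in software because the
 * nic only exposes per-class counters; rx_extra_byte is subtracted
 * from the alignment errors, presumably so a frame that hit both
 * conditions is not counted twice.
 */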
2818 if (!np->in_shutdown)
2819 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
2820 }
2822 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2823 {
2824 struct fe_priv *np = netdev_priv(dev);
2825 strcpy(info->driver, "forcedeth");
2826 strcpy(info->version, FORCEDETH_VERSION);
2827 strcpy(info->bus_info, pci_name(np->pci_dev));
2828 }
2830 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
2831 {
2832 struct fe_priv *np = netdev_priv(dev);
2833 wolinfo->supported = WAKE_MAGIC;
2835 spin_lock_irq(&np->lock);
2836 if (np->wolenabled)
2837 wolinfo->wolopts = WAKE_MAGIC;
2838 spin_unlock_irq(&np->lock);
2839 }
2841 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
2842 {
2843 struct fe_priv *np = netdev_priv(dev);
2844 u8 __iomem *base = get_hwbase(dev);
2845 u32 flags = 0;
2847 if (wolinfo->wolopts == 0) {
2848 np->wolenabled = 0;
2849 } else if (wolinfo->wolopts & WAKE_MAGIC) {
2850 np->wolenabled = 1;
2851 flags = NVREG_WAKEUPFLAGS_ENABLE;
2852 }
2853 if (netif_running(dev)) {
2854 spin_lock_irq(&np->lock);
2855 writel(flags, base + NvRegWakeUpFlags);
2856 spin_unlock_irq(&np->lock);
2857 }
2858 return 0;
2859 }
2861 static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2862 {
2863 struct fe_priv *np = netdev_priv(dev);
2864 int adv;
2866 spin_lock_irq(&np->lock);
2867 ecmd->port = PORT_MII;
2868 if (!netif_running(dev)) {
2869 /* We do not track link speed / duplex setting if the
2870 * interface is disabled. Force a link check */
2871 if (nv_update_linkspeed(dev)) {
2872 if (!netif_carrier_ok(dev))
2873 netif_carrier_on(dev);
2874 } else {
2875 if (netif_carrier_ok(dev))
2876 netif_carrier_off(dev);
2877 }
2878 }
2880 if (netif_carrier_ok(dev)) {
2881 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
2882 case NVREG_LINKSPEED_10:
2883 ecmd->speed = SPEED_10;
2884 break;
2885 case NVREG_LINKSPEED_100:
2886 ecmd->speed = SPEED_100;
2887 break;
2888 case NVREG_LINKSPEED_1000:
2889 ecmd->speed = SPEED_1000;
2890 break;
2891 }
2892 ecmd->duplex = DUPLEX_HALF;
2893 if (np->duplex)
2894 ecmd->duplex = DUPLEX_FULL;
2895 } else {
2896 ecmd->speed = -1;
2897 ecmd->duplex = -1;
2898 }
2900 ecmd->autoneg = np->autoneg;
2902 ecmd->advertising = ADVERTISED_MII;
2903 if (np->autoneg) {
2904 ecmd->advertising |= ADVERTISED_Autoneg;
2905 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
2906 if (adv & ADVERTISE_10HALF)
2907 ecmd->advertising |= ADVERTISED_10baseT_Half;
2908 if (adv & ADVERTISE_10FULL)
2909 ecmd->advertising |= ADVERTISED_10baseT_Full;
2910 if (adv & ADVERTISE_100HALF)
2911 ecmd->advertising |= ADVERTISED_100baseT_Half;
2912 if (adv & ADVERTISE_100FULL)
2913 ecmd->advertising |= ADVERTISED_100baseT_Full;
2914 if (np->gigabit == PHY_GIGABIT) {
2915 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
2916 if (adv & ADVERTISE_1000FULL)
2917 ecmd->advertising |= ADVERTISED_1000baseT_Full;
2918 }
2919 }
2920 ecmd->supported = (SUPPORTED_Autoneg |
2921 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2922 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2923 SUPPORTED_MII);
2924 if (np->gigabit == PHY_GIGABIT)
2925 ecmd->supported |= SUPPORTED_1000baseT_Full;
2927 ecmd->phy_address = np->phyaddr;
2928 ecmd->transceiver = XCVR_EXTERNAL;
2930 /* ignore maxtxpkt, maxrxpkt for now */
2931 spin_unlock_irq(&np->lock);
2933 return 0;
2934 }
2935 static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2936 {
2937 struct fe_priv *np = netdev_priv(dev);
2939 if (ecmd->port != PORT_MII)
2940 return -EINVAL;
2941 if (ecmd->transceiver != XCVR_EXTERNAL)
2942 return -EINVAL;
2943 if (ecmd->phy_address != np->phyaddr) {
2944 /* TODO: support switching between multiple phys. Should be
2945 * trivial, but not enabled due to lack of test hardware. */
2946 return -EINVAL;
2947 }
2948 if (ecmd->autoneg == AUTONEG_ENABLE) {
2949 u32 mask;
2951 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
2952 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
2953 if (np->gigabit == PHY_GIGABIT)
2954 mask |= ADVERTISED_1000baseT_Full;
2956 if ((ecmd->advertising & mask) == 0)
2957 return -EINVAL;
2959 } else if (ecmd->autoneg == AUTONEG_DISABLE) {
2960 /* Note: autonegotiation disable, speed 1000 intentionally
2961 * forbidden - noone should need that. */
2963 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
2964 return -EINVAL;
2965 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
2966 return -EINVAL;
2967 } else {
2968 return -EINVAL;
2969 }
2971 netif_carrier_off(dev);
2972 if (netif_running(dev)) {
2973 nv_disable_irq(dev);
2974 spin_lock_bh(&dev->xmit_lock);
2975 spin_lock(&np->lock);
2976 /* stop engines */
2977 nv_stop_rx(dev);
2978 nv_stop_tx(dev);
2979 spin_unlock(&np->lock);
2980 spin_unlock_bh(&dev->xmit_lock);
2981 }
2983 if (ecmd->autoneg == AUTONEG_ENABLE) {
2984 int adv, bmcr;
2986 np->autoneg = 1;
2988 /* advertise only what has been requested */
2989 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
2990 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2991 if (ecmd->advertising & ADVERTISED_10baseT_Half)
2992 adv |= ADVERTISE_10HALF;
2993 if (ecmd->advertising & ADVERTISED_10baseT_Full)
2994 adv |= ADVERTISE_10FULL;
2995 if (ecmd->advertising & ADVERTISED_100baseT_Half)
2996 adv |= ADVERTISE_100HALF;
2997 if (ecmd->advertising & ADVERTISED_100baseT_Full)
2998 adv |= ADVERTISE_100FULL;
2999 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
3000 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3001 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3002 adv |= ADVERTISE_PAUSE_ASYM;
3003 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
3005 if (np->gigabit == PHY_GIGABIT) {
3006 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3007 adv &= ~ADVERTISE_1000FULL;
3008 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
3009 adv |= ADVERTISE_1000FULL;
3010 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
3011 }
3013 if (netif_running(dev))
3014 printk(KERN_INFO "%s: link down.\n", dev->name);
3014 printk(KERN_INFO "%s: link down.\n", dev->name);
3015 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3016 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
3017 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3019 } else {
3020 int adv, bmcr;
3022 np->autoneg = 0;
3024 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3025 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3026 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
3027 adv |= ADVERTISE_10HALF;
3028 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
3029 adv |= ADVERTISE_10FULL;
3030 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
3031 adv |= ADVERTISE_100HALF;
3032 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
3033 adv |= ADVERTISE_100FULL;
3034 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
3035 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisments but disable tx pause */
3036 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3037 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3039 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
3040 adv |= ADVERTISE_PAUSE_ASYM;
3041 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3042 }
3043 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
3044 np->fixed_mode = adv;
3046 if (np->gigabit == PHY_GIGABIT) {
3047 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3048 adv &= ~ADVERTISE_1000FULL;
3049 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
3050 }
3052 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3053 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
3054 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
3055 bmcr |= BMCR_FULLDPLX;
3056 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
3057 bmcr |= BMCR_SPEED100;
3058 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3059 if (np->phy_oui == PHY_OUI_MARVELL) {
3060 /* reset the phy */
3061 if (phy_reset(dev)) {
3062 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
3063 return -EINVAL;
3064 }
3065 } else if (netif_running(dev)) {
3066 /* Wait a bit and then reconfigure the nic. */
3067 udelay(10);
3068 nv_linkchange(dev);
3069 }
3070 }
3072 if (netif_running(dev)) {
3073 nv_start_rx(dev);
3074 nv_start_tx(dev);
3075 nv_enable_irq(dev);
3076 }
3078 return 0;
3079 }
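/*
 * For reference, a forced-mode request such as
 *   ethtool -s eth0 autoneg off speed 100 duplex full
 * takes the AUTONEG_DISABLE branch above: BMCR_ANENABLE is cleared and
 * BMCR_SPEED100/BMCR_FULLDPLX are set from the validated ecmd values
 * before being written back through mii_rw().
 */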
3081 #define FORCEDETH_REGS_VER 1
3083 static int nv_get_regs_len(struct net_device *dev)
3084 {
3085 struct fe_priv *np = netdev_priv(dev);
3086 return np->register_size;
3087 }
3089 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
3090 {
3091 struct fe_priv *np = netdev_priv(dev);
3092 u8 __iomem *base = get_hwbase(dev);
3093 u32 *rbuf = buf;
3094 int i;
3096 regs->version = FORCEDETH_REGS_VER;
3097 spin_lock_irq(&np->lock);
3098 for (i = 0; i < np->register_size/sizeof(u32); i++)
3099 rbuf[i] = readl(base + i*sizeof(u32));
3100 spin_unlock_irq(&np->lock);
3101 }
3103 static int nv_nway_reset(struct net_device *dev)
3104 {
3105 struct fe_priv *np = netdev_priv(dev);
3106 int ret;
3108 if (np->autoneg) {
3109 int bmcr;
3111 netif_carrier_off(dev);
3112 if (netif_running(dev)) {
3113 nv_disable_irq(dev);
3114 spin_lock_bh(&dev->xmit_lock);
3115 spin_lock(&np->lock);
3116 /* stop engines */
3117 nv_stop_rx(dev);
3118 nv_stop_tx(dev);
3119 spin_unlock(&np->lock);
3120 spin_unlock_bh(&dev->xmit_lock);
3121 printk(KERN_INFO "%s: link down.\n", dev->name);
3122 }
3124 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3125 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
3126 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3128 if (netif_running(dev)) {
3129 nv_start_rx(dev);
3130 nv_start_tx(dev);
3131 nv_enable_irq(dev);
3132 }
3133 ret = 0;
3134 } else {
3135 ret = -EINVAL;
3136 }
3138 return ret;
3139 }
3141 static int nv_set_tso(struct net_device *dev, u32 value)
3142 {
3143 struct fe_priv *np = netdev_priv(dev);
3145 if ((np->driver_data & DEV_HAS_CHECKSUM))
3146 return ethtool_op_set_tso(dev, value);
3147 else
3148 return -EOPNOTSUPP;
3149 }
3151 static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
3152 {
3153 struct fe_priv *np = netdev_priv(dev);
3155 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
3156 ring->rx_mini_max_pending = 0;
3157 ring->rx_jumbo_max_pending = 0;
3158 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
3160 ring->rx_pending = np->rx_ring_size;
3161 ring->rx_mini_pending = 0;
3162 ring->rx_jumbo_pending = 0;
3163 ring->tx_pending = np->tx_ring_size;
3164 }
3166 static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
3167 {
3168 struct fe_priv *np = netdev_priv(dev);
3169 u8 __iomem *base = get_hwbase(dev);
3170 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len;
3171 dma_addr_t ring_addr;
3173 if (ring->rx_pending < RX_RING_MIN ||
3174 ring->tx_pending < TX_RING_MIN ||
3175 ring->rx_mini_pending != 0 ||
3176 ring->rx_jumbo_pending != 0 ||
3177 (np->desc_ver == DESC_VER_1 &&
3178 (ring->rx_pending > RING_MAX_DESC_VER_1 ||
3179 ring->tx_pending > RING_MAX_DESC_VER_1)) ||
3180 (np->desc_ver != DESC_VER_1 &&
3181 (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
3182 ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
3183 return -EINVAL;
3184 }
3186 /* allocate new rings */
3187 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3188 rxtx_ring = pci_alloc_consistent(np->pci_dev,
3189 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
3190 &ring_addr);
3191 } else {
3192 rxtx_ring = pci_alloc_consistent(np->pci_dev,
3193 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
3194 &ring_addr);
3195 }
3196 rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL);
3197 rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL);
3198 tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL);
3199 tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL);
3200 tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL);
3201 if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
3202 /* fall back to old rings */
3203 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3204 if (rxtx_ring)
3205 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
3206 rxtx_ring, ring_addr);
3207 } else {
3208 if (rxtx_ring)
3209 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
3210 rxtx_ring, ring_addr);
3211 }
3212 if (rx_skbuff)
3213 kfree(rx_skbuff);
3214 if (rx_dma)
3215 kfree(rx_dma);
3216 if (tx_skbuff)
3217 kfree(tx_skbuff);
3218 if (tx_dma)
3219 kfree(tx_dma);
3220 if (tx_dma_len)
3221 kfree(tx_dma_len);
3222 goto exit;
3223 }
3225 if (netif_running(dev)) {
3226 nv_disable_irq(dev);
3227 spin_lock_bh(&dev->xmit_lock);
3228 spin_lock(&np->lock);
3229 /* stop engines */
3230 nv_stop_rx(dev);
3231 nv_stop_tx(dev);
3232 nv_txrx_reset(dev);
3233 /* drain queues */
3234 nv_drain_rx(dev);
3235 nv_drain_tx(dev);
3236 /* delete queues */
3237 free_rings(dev);
3238 }
3240 /* set new values */
3241 np->rx_ring_size = ring->rx_pending;
3242 np->tx_ring_size = ring->tx_pending;
3243 np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE;
3244 np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1;
3245 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3246 np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
3247 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
3249 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
3250 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
3252 np->rx_skbuff = (struct sk_buff**)rx_skbuff;
3253 np->rx_dma = (dma_addr_t*)rx_dma;
3254 np->tx_skbuff = (struct sk_buff**)tx_skbuff;
3255 np->tx_dma = (dma_addr_t*)tx_dma;
3256 np->tx_dma_len = (unsigned int*)tx_dma_len;
3257 np->ring_addr = ring_addr;
3259 memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
3260 memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
3261 memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
3262 memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
3263 memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
3265 if (netif_running(dev)) {
3266 /* reinit driver view of the queues */
3267 set_bufsize(dev);
3268 if (nv_init_ring(dev)) {
3269 if (!np->in_shutdown)
3270 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3271 }
3273 /* reinit nic view of the queues */
3274 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3275 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3276 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3277 base + NvRegRingSizes);
3278 pci_push(base);
3279 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3280 pci_push(base);
3282 /* restart engines */
3283 nv_start_rx(dev);
3284 nv_start_tx(dev);
3285 spin_unlock(&np->lock);
3286 spin_unlock_bh(&dev->xmit_lock);
3287 nv_enable_irq(dev);
3288 }
3289 return 0;
3290 exit:
3291 return -ENOMEM;
3292 }
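/*
 * Note the ordering in nv_set_ringparam: the replacement rings are
 * allocated (and, on any allocation failure, freed again via the exit
 * path) before the device is stopped, so a failed resize leaves the
 * running rings fully intact.
 */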
3294 static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
3295 {
3296 struct fe_priv *np = netdev_priv(dev);
3298 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
3299 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
3300 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
3301 }
3303 static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
3304 {
3305 struct fe_priv *np = netdev_priv(dev);
3306 int adv, bmcr;
3308 if ((!np->autoneg && np->duplex == 0) ||
3309 (np->autoneg && !pause->autoneg && np->duplex == 0)) {
3310 printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
3311 dev->name);
3312 return -EINVAL;
3313 }
3314 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
3315 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
3316 return -EINVAL;
3317 }
3319 netif_carrier_off(dev);
3320 if (netif_running(dev)) {
3321 nv_disable_irq(dev);
3322 spin_lock_bh(&dev->xmit_lock);
3323 spin_lock(&np->lock);
3324 /* stop engines */
3325 nv_stop_rx(dev);
3326 nv_stop_tx(dev);
3327 spin_unlock(&np->lock);
3328 spin_unlock_bh(&dev->xmit_lock);
3329 }
3331 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
3332 if (pause->rx_pause)
3333 np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
3334 if (pause->tx_pause)
3335 np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
3337 if (np->autoneg && pause->autoneg) {
3338 np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
3340 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3341 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3342 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
3343 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3344 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3345 adv |= ADVERTISE_PAUSE_ASYM;
3346 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
3348 if (netif_running(dev))
3349 printk(KERN_INFO "%s: link down.\n", dev->name);
3350 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3351 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
3352 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3353 } else {
3354 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
3355 if (pause->rx_pause)
3356 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3357 if (pause->tx_pause)
3358 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3360 if (!netif_running(dev))
3361 nv_update_linkspeed(dev);
3362 else
3363 nv_update_pause(dev, np->pause_flags);
3364 }
3366 if (netif_running(dev)) {
3367 nv_start_rx(dev);
3368 nv_start_tx(dev);
3369 nv_enable_irq(dev);
3370 }
3371 return 0;
3372 }
3374 static u32 nv_get_rx_csum(struct net_device *dev)
3375 {
3376 struct fe_priv *np = netdev_priv(dev);
3377 return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0;
3378 }
3380 static int nv_set_rx_csum(struct net_device *dev, u32 data)
3381 {
3382 struct fe_priv *np = netdev_priv(dev);
3383 u8 __iomem *base = get_hwbase(dev);
3384 int retcode = 0;
3386 if (np->driver_data & DEV_HAS_CHECKSUM) {
3388 if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) ||
3389 (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) {
3390 /* already set or unset */
3391 return 0;
3392 }
3394 if (data) {
3395 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
3396 } else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) {
3397 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
3399 printk(KERN_INFO "Can not disable rx checksum if vlan is enabled\n");
3403 if (netif_running(dev)) {
3404 spin_lock_irq(&np->lock);
3405 writel(np->txrxctl_bits, base + NvRegTxRxControl);
3406 spin_unlock_irq(&np->lock);
3407 }
3408 } else {
3409 return -EINVAL;
3410 }
3412 return retcode;
3413 }
3415 static int nv_set_tx_csum(struct net_device *dev, u32 data)
3416 {
3417 struct fe_priv *np = netdev_priv(dev);
3419 if (np->driver_data & DEV_HAS_CHECKSUM)
3420 return ethtool_op_set_tx_hw_csum(dev, data);
3421 else
3422 return -EOPNOTSUPP;
3423 }
3425 static int nv_set_sg(struct net_device *dev, u32 data)
3426 {
3427 struct fe_priv *np = netdev_priv(dev);
3429 if (np->driver_data & DEV_HAS_CHECKSUM)
3430 return ethtool_op_set_sg(dev, data);
3431 else
3432 return -EOPNOTSUPP;
3433 }
3435 static int nv_get_stats_count(struct net_device *dev)
3436 {
3437 struct fe_priv *np = netdev_priv(dev);
3439 if (np->driver_data & DEV_HAS_STATISTICS)
3440 return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
3441 else
3442 return 0;
3443 }
3445 static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
3446 {
3447 struct fe_priv *np = netdev_priv(dev);
3449 /* update stats */
3450 nv_do_stats_poll((unsigned long)dev);
3452 memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64));
3453 }
3455 static int nv_self_test_count(struct net_device *dev)
3456 {
3457 struct fe_priv *np = netdev_priv(dev);
3459 if (np->driver_data & DEV_HAS_TEST_EXTENDED)
3460 return NV_TEST_COUNT_EXTENDED;
3461 else
3462 return NV_TEST_COUNT_BASE;
3463 }
3465 static int nv_link_test(struct net_device *dev)
3466 {
3467 struct fe_priv *np = netdev_priv(dev);
3468 int mii_status;
3470 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3471 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3473 /* check phy link status */
3474 if (!(mii_status & BMSR_LSTATUS))
3475 return 0;
3477 return 1;
3478 }
3480 static int nv_register_test(struct net_device *dev)
3481 {
3482 u8 __iomem *base = get_hwbase(dev);
3483 int i = 0;
3484 u32 orig_read, new_read;
3486 do {
3487 orig_read = readl(base + nv_registers_test[i].reg);
3489 /* xor with mask to toggle bits */
3490 orig_read ^= nv_registers_test[i].mask;
3492 writel(orig_read, base + nv_registers_test[i].reg);
3494 new_read = readl(base + nv_registers_test[i].reg);
3496 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
3497 return 0;
3499 /* restore original value */
3500 orig_read ^= nv_registers_test[i].mask;
3501 writel(orig_read, base + nv_registers_test[i].reg);
3503 } while (nv_registers_test[++i].reg != 0);
3505 return 1;
3506 }
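/*
 * The loop above is a read/xor/write/readback probe: toggling the
 * maskable bits of each entry in nv_registers_test[] and checking that
 * the toggled value sticks verifies the register window is mapped and
 * writable, after which the original value is xored back in.
 */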
3508 static int nv_interrupt_test(struct net_device *dev)
3509 {
3510 struct fe_priv *np = netdev_priv(dev);
3511 u8 __iomem *base = get_hwbase(dev);
3512 int ret = 1;
3513 int testcnt;
3514 u32 save_msi_flags, save_poll_interval = 0;
3516 if (netif_running(dev)) {
3517 /* free current irq */
3518 nv_free_irq(dev);
3519 save_poll_interval = readl(base+NvRegPollingInterval);
3520 }
3522 /* flag to test interrupt handler */
3523 np->intr_test = 0;
3525 /* setup test irq */
3526 save_msi_flags = np->msi_flags;
3527 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
3528 np->msi_flags |= 0x001; /* setup 1 vector */
3529 if (nv_request_irq(dev, 1))
3530 return 0;
3532 /* setup timer interrupt */
3533 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
3534 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
3536 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
3538 /* wait for at least one interrupt */
3539 msleep(100);
3541 spin_lock_irq(&np->lock);
3543 /* flag should be set within ISR */
3544 testcnt = np->intr_test;
3545 if (!testcnt)
3546 ret = 2;
3548 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
3549 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3550 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3551 else
3552 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3554 spin_unlock_irq(&np->lock);
3556 nv_free_irq(dev);
3558 np->msi_flags = save_msi_flags;
3560 if (netif_running(dev)) {
3561 writel(save_poll_interval, base + NvRegPollingInterval);
3562 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
3563 /* restore original irq */
3564 if (nv_request_irq(dev, 0))
3565 return 0;
3566 }
3568 return ret;
3569 }
3571 static int nv_loopback_test(struct net_device *dev)
3572 {
3573 struct fe_priv *np = netdev_priv(dev);
3574 u8 __iomem *base = get_hwbase(dev);
3575 struct sk_buff *tx_skb, *rx_skb;
3576 dma_addr_t test_dma_addr;
3577 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
3578 u32 Flags;
3579 int len, i, pkt_len;
3580 u8 *pkt_data;
3581 u32 filter_flags = 0;
3582 u32 misc1_flags = 0;
3583 int ret = 1;
3585 if (netif_running(dev)) {
3586 nv_disable_irq(dev);
3587 filter_flags = readl(base + NvRegPacketFilterFlags);
3588 misc1_flags = readl(base + NvRegMisc1);
3589 } else {
3590 nv_txrx_reset(dev);
3591 }
3593 /* reinit driver view of the rx queue */
3594 set_bufsize(dev);
3595 nv_init_ring(dev);
3597 /* setup hardware for loopback */
3598 writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
3599 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
3601 /* reinit nic view of the rx queue */
3602 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3603 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3604 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3605 base + NvRegRingSizes);
3606 pci_push(base);
3608 /* restart rx engine */
3609 nv_start_rx(dev);
3610 nv_start_tx(dev);
3612 /* setup packet for tx */
3613 pkt_len = ETH_DATA_LEN;
3614 tx_skb = dev_alloc_skb(pkt_len);
3615 pkt_data = skb_put(tx_skb, pkt_len);
3616 for (i = 0; i < pkt_len; i++)
3617 pkt_data[i] = (u8)(i & 0xff);
3618 /* the buffer is read by the nic during tx, so map it to the device */
3618 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
3619 tx_skb->end-tx_skb->data, PCI_DMA_TODEVICE);
3621 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3622 np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
3623 np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
3625 np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32;
3626 np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
3627 np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
3628 }
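/*
 * With the extended (DESC_VER_3) descriptors the dma address is split
 * across two 32-bit words: PacketBufferHigh carries the upper address
 * bits (the hardware supports 40-bit addressing, per nv_probe below),
 * PacketBufferLow the lower 32 bits.
 */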
3629 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3630 pci_push(get_hwbase(dev));
3632 msleep(500);
3634 /* check for rx of the packet */
3635 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3636 Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen);
3637 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
3639 } else {
3640 Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen);
3641 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
3642 }
3644 if (Flags & NV_RX_AVAIL) {
3645 ret = 0;
3646 } else if (np->desc_ver == DESC_VER_1) {
3647 if (Flags & NV_RX_ERROR)
3648 ret = 0;
3649 } else {
3650 if (Flags & NV_RX2_ERROR) {
3651 ret = 0;
3652 }
3653 }
3655 if (ret) {
3656 if (len != pkt_len) {
3657 ret = 0;
3658 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
3659 dev->name, len, pkt_len);
3660 } else {
3661 rx_skb = np->rx_skbuff[0];
3662 for (i = 0; i < pkt_len; i++) {
3663 if (rx_skb->data[i] != (u8)(i & 0xff)) {
3664 ret = 0;
3665 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
3666 dev->name, i);
3667 break;
3668 }
3669 }
3670 }
3671 } else {
3672 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
3673 ret = 0;
3674 }
3675 pci_unmap_single(np->pci_dev, test_dma_addr,
3676 tx_skb->end-tx_skb->data,
3677 PCI_DMA_TODEVICE);
3678 dev_kfree_skb_any(tx_skb);
3680 /* stop engines */
3681 nv_stop_rx(dev);
3682 nv_stop_tx(dev);
3683 nv_txrx_reset(dev);
3684 /* drain rx queue */
3685 nv_drain_rx(dev);
3686 nv_drain_tx(dev);
3688 if (netif_running(dev)) {
3689 writel(misc1_flags, base + NvRegMisc1);
3690 writel(filter_flags, base + NvRegPacketFilterFlags);
3691 nv_enable_irq(dev);
3692 }
3694 return ret;
3695 }
3697 static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
3698 {
3699 struct fe_priv *np = netdev_priv(dev);
3700 u8 __iomem *base = get_hwbase(dev);
3701 int result;
3702 memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));
3704 if (!nv_link_test(dev)) {
3705 test->flags |= ETH_TEST_FL_FAILED;
3706 buffer[0] = 1;
3707 }
3709 if (test->flags & ETH_TEST_FL_OFFLINE) {
3710 if (netif_running(dev)) {
3711 netif_stop_queue(dev);
3712 spin_lock_bh(&dev->xmit_lock);
3713 spin_lock_irq(&np->lock);
3714 nv_disable_hw_interrupts(dev, np->irqmask);
3715 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3716 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3717 } else {
3718 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3719 }
3720 /* stop engines */
3721 nv_stop_rx(dev);
3722 nv_stop_tx(dev);
3723 nv_txrx_reset(dev);
3724 /* drain rx queue */
3725 nv_drain_rx(dev);
3726 nv_drain_tx(dev);
3727 spin_unlock_irq(&np->lock);
3728 spin_unlock_bh(&dev->xmit_lock);
3729 }
3731 if (!nv_register_test(dev)) {
3732 test->flags |= ETH_TEST_FL_FAILED;
3733 buffer[1] = 1;
3734 }
3736 result = nv_interrupt_test(dev);
3737 if (result != 1) {
3738 test->flags |= ETH_TEST_FL_FAILED;
3739 buffer[2] = 1;
3740 }
3741 if (result == 0) {
3742 /* bail out */
3743 return;
3744 }
3746 if (!nv_loopback_test(dev)) {
3747 test->flags |= ETH_TEST_FL_FAILED;
3748 buffer[3] = 1;
3749 }
3751 if (netif_running(dev)) {
3752 /* reinit driver view of the rx queue */
3753 set_bufsize(dev);
3754 if (nv_init_ring(dev)) {
3755 if (!np->in_shutdown)
3756 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3757 }
3758 /* reinit nic view of the rx queue */
3759 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3760 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3761 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3762 base + NvRegRingSizes);
3763 pci_push(base);
3764 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3765 pci_push(base);
3766 /* restart rx engine */
3767 nv_start_rx(dev);
3768 nv_start_tx(dev);
3769 netif_start_queue(dev);
3770 nv_enable_hw_interrupts(dev, np->irqmask);
3771 }
3772 }
3775 static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
3776 {
3777 switch (stringset) {
3778 case ETH_SS_STATS:
3779 memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
3780 break;
3781 case ETH_SS_TEST:
3782 memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
3783 break;
3784 }
3785 }
3787 static struct ethtool_ops ops = {
3788 .get_drvinfo = nv_get_drvinfo,
3789 .get_link = ethtool_op_get_link,
3790 .get_wol = nv_get_wol,
3791 .set_wol = nv_set_wol,
3792 .get_settings = nv_get_settings,
3793 .set_settings = nv_set_settings,
3794 .get_regs_len = nv_get_regs_len,
3795 .get_regs = nv_get_regs,
3796 .nway_reset = nv_nway_reset,
3797 .get_perm_addr = ethtool_op_get_perm_addr,
3798 .get_tso = ethtool_op_get_tso,
3799 .set_tso = nv_set_tso,
3800 .get_ringparam = nv_get_ringparam,
3801 .set_ringparam = nv_set_ringparam,
3802 .get_pauseparam = nv_get_pauseparam,
3803 .set_pauseparam = nv_set_pauseparam,
3804 .get_rx_csum = nv_get_rx_csum,
3805 .set_rx_csum = nv_set_rx_csum,
3806 .get_tx_csum = ethtool_op_get_tx_csum,
3807 .set_tx_csum = nv_set_tx_csum,
3808 .get_sg = ethtool_op_get_sg,
3809 .set_sg = nv_set_sg,
3810 .get_strings = nv_get_strings,
3811 .get_stats_count = nv_get_stats_count,
3812 .get_ethtool_stats = nv_get_ethtool_stats,
3813 .self_test_count = nv_self_test_count,
3814 .self_test = nv_self_test,
3815 };
3817 static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
3818 {
3819 struct fe_priv *np = get_nvpriv(dev);
3821 spin_lock_irq(&np->lock);
3823 /* save vlan group */
3824 np->vlangrp = grp;
3826 if (grp) {
3827 /* enable vlan on MAC */
3828 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
3829 } else {
3830 /* disable vlan on MAC */
3831 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
3832 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
3833 }
3835 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3837 spin_unlock_irq(&np->lock);
3838 }
3840 static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3841 {
3842 /* nothing to do */
3843 };
3845 static int nv_open(struct net_device *dev)
3846 {
3847 struct fe_priv *np = netdev_priv(dev);
3848 u8 __iomem *base = get_hwbase(dev);
3849 int ret = 1;
3850 int oom, i;
3852 dprintk(KERN_DEBUG "nv_open: begin\n");
3854 /* 1) erase previous misconfiguration */
3855 if (np->driver_data & DEV_HAS_POWER_CNTRL)
3856 nv_mac_reset(dev);
3857 /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
3858 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
3859 writel(0, base + NvRegMulticastAddrB);
3860 writel(0, base + NvRegMulticastMaskA);
3861 writel(0, base + NvRegMulticastMaskB);
3862 writel(0, base + NvRegPacketFilterFlags);
3864 writel(0, base + NvRegTransmitterControl);
3865 writel(0, base + NvRegReceiverControl);
3867 writel(0, base + NvRegAdapterControl);
3869 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
3870 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
3872 /* 2) initialize descriptor rings */
3873 set_bufsize(dev);
3874 oom = nv_init_ring(dev);
3876 writel(0, base + NvRegLinkSpeed);
3877 writel(0, base + NvRegUnknownTransmitterReg);
3879 writel(0, base + NvRegUnknownSetupReg6);
3881 np->in_shutdown = 0;
3883 /* 3) set mac address */
3884 nv_copy_mac_to_hw(dev);
3886 /* 4) give hw rings */
3887 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3888 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3889 base + NvRegRingSizes);
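/*
 * NvRegRingSizes packs both ring lengths into one register, stored as
 * size-1 (presumably the highest valid descriptor index); e.g. 128 rx
 * and 256 tx descriptors are written as (127 << NVREG_RINGSZ_RXSHIFT)
 * + (255 << NVREG_RINGSZ_TXSHIFT).
 */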
3891 /* 5) continue setup */
3892 writel(np->linkspeed, base + NvRegLinkSpeed);
3893 writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
3894 writel(np->txrxctl_bits, base + NvRegTxRxControl);
3895 writel(np->vlanctl_bits, base + NvRegVlanControl);
3897 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
3898 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
3899 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
3900 KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
3902 writel(0, base + NvRegUnknownSetupReg4);
3903 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3904 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
3906 /* 6) continue setup */
3907 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
3908 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
3909 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
3910 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3912 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
3913 get_random_bytes(&i, sizeof(i));
3914 writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
3915 writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
3916 writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
3917 if (poll_interval == -1) {
3918 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
3919 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
3921 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
3924 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
3925 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
3926 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
3927 base + NvRegAdapterControl);
3928 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
3929 writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
3931 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
3933 i = readl(base + NvRegPowerState);
3934 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
3935 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
3937 pci_push(base);
3938 udelay(10);
3939 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
3941 nv_disable_hw_interrupts(dev, np->irqmask);
3943 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
3944 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3945 pci_push(base);
3947 if (nv_request_irq(dev, 0)) {
3948 goto out_drain;
3949 }
3951 /* ask for interrupts */
3952 nv_enable_hw_interrupts(dev, np->irqmask);
3954 spin_lock_irq(&np->lock);
3955 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
3956 writel(0, base + NvRegMulticastAddrB);
3957 writel(0, base + NvRegMulticastMaskA);
3958 writel(0, base + NvRegMulticastMaskB);
3959 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
3960 /* One manual link speed update: Interrupts are enabled, future link
3961 * speed changes cause interrupts and are handled by nv_link_irq().
3962 */
3963 {
3964 u32 miistat;
3965 miistat = readl(base + NvRegMIIStatus);
3966 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
3967 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
3969 /* set linkspeed to invalid value, thus force nv_update_linkspeed
3972 ret = nv_update_linkspeed(dev);
3975 netif_start_queue(dev);
3977 netif_carrier_on(dev);
3979 printk("%s: no link during initialization.\n", dev->name);
3980 netif_carrier_off(dev);
3983 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3985 /* start statistics timer */
3986 if (np->driver_data & DEV_HAS_STATISTICS)
3987 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
3989 spin_unlock_irq(&np->lock);
3991 return 0;
3992 out_drain:
3993 drain_ring(dev);
3994 return ret;
3995 }
3997 static int nv_close(struct net_device *dev)
3998 {
3999 struct fe_priv *np = netdev_priv(dev);
4000 u8 __iomem *base;
4002 spin_lock_irq(&np->lock);
4003 np->in_shutdown = 1;
4004 spin_unlock_irq(&np->lock);
4005 synchronize_irq(dev->irq);
4007 del_timer_sync(&np->oom_kick);
4008 del_timer_sync(&np->nic_poll);
4009 del_timer_sync(&np->stats_poll);
4011 netif_stop_queue(dev);
4012 spin_lock_irq(&np->lock);
4013 nv_stop_tx(dev);
4014 nv_stop_rx(dev);
4015 nv_txrx_reset(dev);
4017 /* disable interrupts on the nic or we will lock up */
4018 base = get_hwbase(dev);
4019 nv_disable_hw_interrupts(dev, np->irqmask);
4021 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
4023 spin_unlock_irq(&np->lock);
4032 /* special op: write back the misordered MAC address - otherwise
4033 * the next nv_probe would see a wrong address.
4034 */
4035 writel(np->orig_mac[0], base + NvRegMacAddrA);
4036 writel(np->orig_mac[1], base + NvRegMacAddrB);
4038 /* FIXME: power down nic */
4040 return 0;
4041 }
4043 static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
4044 {
4045 struct net_device *dev;
4046 struct fe_priv *np;
4047 unsigned long addr;
4048 u8 __iomem *base;
4049 int err, i;
4052 dev = alloc_etherdev(sizeof(struct fe_priv));
4053 err = -ENOMEM;
4054 if (!dev)
4055 goto out;
4057 np = netdev_priv(dev);
4058 np->pci_dev = pci_dev;
4059 spin_lock_init(&np->lock);
4060 SET_MODULE_OWNER(dev);
4061 SET_NETDEV_DEV(dev, &pci_dev->dev);
4063 init_timer(&np->oom_kick);
4064 np->oom_kick.data = (unsigned long) dev;
4065 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */
4066 init_timer(&np->nic_poll);
4067 np->nic_poll.data = (unsigned long) dev;
4068 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
4069 init_timer(&np->stats_poll);
4070 np->stats_poll.data = (unsigned long) dev;
4071 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */
4073 err = pci_enable_device(pci_dev);
4075 printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n",
4076 err, pci_name(pci_dev));
4077 goto out_free;
4078 }
4080 pci_set_master(pci_dev);
4082 err = pci_request_regions(pci_dev, DRV_NAME);
4083 if (err < 0)
4084 goto out_disable;
4086 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS))
4087 np->register_size = NV_PCI_REGSZ_VER2;
4089 np->register_size = NV_PCI_REGSZ_VER1;
4091 err = -EINVAL;
4092 addr = 0;
4093 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
4094 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
4095 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
4096 pci_resource_len(pci_dev, i),
4097 pci_resource_flags(pci_dev, i));
4098 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
4099 pci_resource_len(pci_dev, i) >= np->register_size) {
4100 addr = pci_resource_start(pci_dev, i);
4104 if (i == DEVICE_COUNT_RESOURCE) {
4105 printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
	/* copy of driver data */
	np->driver_data = id->driver_data;

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
			printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
					pci_name(pci_dev));
		} else {
			dev->features |= NETIF_F_HIGHDMA;
			printk(KERN_INFO "forcedeth: using HIGHDMA\n");
		}
		if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
			printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
					pci_name(pci_dev));
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}
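	/*
	 * Recap of the three descriptor generations chosen above:
	 * DESC_VER_1 is the original layout, DESC_VER_2 keeps the layout
	 * but allows jumbo-sized packets (DEV_HAS_LARGEDESC), and
	 * DESC_VER_3 uses the extended ring_desc_ex format required for
	 * 40-bit DMA addressing (DEV_HAS_HIGH_DMA).
	 */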
	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;

	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
#ifdef NETIF_F_TSO
		dev->features |= NETIF_F_TSO;
#endif
	}
	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
		dev->vlan_rx_register = nv_vlan_rx_register;
		dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
	}
	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) {
		np->msi_flags |= NV_MSI_CAPABLE;
	}
	if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) {
		np->msi_flags |= NV_MSI_X_CAPABLE;
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}
	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;

	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;
	np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
	np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
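	/*
	 * tx_limit_stop/tx_limit_start derive queue stop/restart thresholds
	 * from the ring size; the one-slot gap between them provides a
	 * little hysteresis so the queue is not toggled on every completed
	 * packet. The checks themselves live in the transmit and completion
	 * paths earlier in this file.
	 */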
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
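	/*
	 * Layout of the single coherent allocation made above: the rx
	 * descriptors occupy the first rx_ring_size slots and the tx ring
	 * is simply the tail of the same block:
	 *
	 *	ring_addr -> [ rx 0 .. rx rx_ring_size-1 | tx 0 .. tx tx_ring_size-1 ]
	 */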
	np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL);
	np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL);
	np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL);
	np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
	np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
	if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
		goto out_freering;
	memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
	memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
	memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
	memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
	memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
	dev->open = nv_open;
	dev->stop = nv_close;
	dev->hard_start_xmit = nv_start_xmit;
	dev->get_stats = nv_get_stats;
	dev->change_mtu = nv_change_mtu;
	dev->set_mac_address = nv_set_mac_address;
	dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = nv_poll_controller;
#endif
	SET_ETHTOOL_OPS(dev, &ops);
	dev->tx_timeout = nv_tx_timeout;
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);
	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
	dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
	dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
	dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
	dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
	dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
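	/*
	 * Byte-order sketch for the extraction above: for the address
	 * 00:01:02:03:04:05 the registers hold NvRegMacAddrB = 0x00000001
	 * (bytes 0-1 in its low 16 bits) and NvRegMacAddrA = 0x02030405.
	 * nv_close() writes np->orig_mac[] back so the next probe reads
	 * the same values.
	 */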
	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
			pci_name(pci_dev),
			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
		printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x6c;
		get_random_bytes(&dev->dev_addr[3], 3);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;
	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
		u8 revision_id;
		pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id);

		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
		    revision_id >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}
	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}
	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	} else {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	}
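	/*
	 * The low bits of np->msi_flags encode how many MSI-X vectors to
	 * request: three in throughput mode (one each for rx, tx and
	 * link/other events), a single shared vector in CPU mode. The
	 * capability bits set earlier live in the same word.
	 */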
	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}
	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;
		break;
	}
	if (i == 33) {
		printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
		       pci_name(pci_dev));
		goto out_error;
	}

	/* reset it */
	phy_init(dev);
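	/*
	 * The OUI composition above stitches the vendor bits of the two
	 * PHY ID registers into a single np->phy_oui word: PHYSID1 carries
	 * the upper OUI bits (shifted up), PHYSID2 the lower ones (shifted
	 * down), while PHYSID2's model/revision bits are masked away. An
	 * address reading back as all ones (0xffff) or failing the MII
	 * read means no PHY responded there.
	 */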
	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
		goto out_error;
	}
	printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
			dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
			pci_name(pci_dev));

	return 0;

out_error:
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);

	unregister_netdev(dev);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}
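/*
 * unregister_netdev() runs first so that nv_close() (invoked for a
 * still-running interface during unregistration) executes while the
 * register mapping and rings are still valid; only afterwards are the
 * resources torn down.
 */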
static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{0,},
};
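/*
 * The .driver_data feature bits in this table are copied into
 * np->driver_data by nv_probe() and gate everything that varies between
 * chip generations: descriptor format, checksum offload, MSI/MSI-X,
 * pause frames, statistics and power control.
 */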
static struct pci_driver driver = {
	.name = "forcedeth",
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = __devexit_p(nv_remove),
};
static int __init init_nic(void)
{
	printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
	return pci_module_init(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Timer interrupt interval, in units of [(time_in_micro_secs * 100) / 2^10]. Min is 0, max is 65535.");
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1.");
module_param(disable_msix, int, 0);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X interrupts by setting to 1.");
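/*
 * Example (hypothetical values): load the driver in CPU/timer mode with
 * MSI disabled:
 *
 *	modprobe forcedeth optimization_mode=1 disable_msi=1
 *
 * poll_interval then paces the timer interrupt as described above.
 */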
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);