1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Copyright (C) 2000-2003 Broadcom Corporation.
11  */
12
13 #include <linux/config.h>
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/compiler.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/ioport.h>
24 #include <linux/pci.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/ethtool.h>
29 #include <linux/mii.h>
30 #include <linux/if_vlan.h>
31 #include <linux/ip.h>
32 #include <linux/tcp.h>
33 #include <linux/workqueue.h>
34
35 #include <net/checksum.h>
36
37 #include <asm/system.h>
38 #include <asm/io.h>
39 #include <asm/byteorder.h>
40 #include <asm/uaccess.h>
41
42 #ifdef CONFIG_SPARC64
43 #include <asm/idprom.h>
44 #include <asm/oplib.h>
45 #include <asm/pbm.h>
46 #endif
47
48 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
49 #define TG3_VLAN_TAG_USED 1
50 #else
51 #define TG3_VLAN_TAG_USED 0
52 #endif
53
54 #ifdef NETIF_F_TSO
55 #define TG3_TSO_SUPPORT 1
56 #else
57 #define TG3_TSO_SUPPORT 0
58 #endif
59
60 #include "tg3.h"
61
62 #define DRV_MODULE_NAME         "tg3"
63 #define PFX DRV_MODULE_NAME     ": "
64 #define DRV_MODULE_VERSION      "3.25"
65 #define DRV_MODULE_RELDATE      "March 24, 2005"
66
67 #define TG3_DEF_MAC_MODE        0
68 #define TG3_DEF_RX_MODE         0
69 #define TG3_DEF_TX_MODE         0
70 #define TG3_DEF_MSG_ENABLE        \
71         (NETIF_MSG_DRV          | \
72          NETIF_MSG_PROBE        | \
73          NETIF_MSG_LINK         | \
74          NETIF_MSG_TIMER        | \
75          NETIF_MSG_IFDOWN       | \
76          NETIF_MSG_IFUP         | \
77          NETIF_MSG_RX_ERR       | \
78          NETIF_MSG_TX_ERR)
79
80 /* length of time before we decide the hardware is borked,
81  * and dev->tx_timeout() should be called to fix the problem
82  */
83 #define TG3_TX_TIMEOUT                  (5 * HZ)
84
85 /* hardware minimum and maximum for a single frame's data payload */
86 #define TG3_MIN_MTU                     60
87 #define TG3_MAX_MTU(tp) \
88         (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 9000 : 1500)
89
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91  * You can't change the ring sizes, but you can change where you place
92  * them in the NIC onboard memory.
93  */
94 #define TG3_RX_RING_SIZE                512
95 #define TG3_DEF_RX_RING_PENDING         200
96 #define TG3_RX_JUMBO_RING_SIZE          256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
98
99 /* Do not place this n-ring entries value into the tp struct itself;
100  * we really want to expose these constants to GCC so that modulo et
101  * al.  operations are done with shifts and masks instead of with
102  * hw multiply/modulo instructions.  Another solution would be to
103  * replace things like '% foo' with '& (foo - 1)'.
104  */
105 #define TG3_RX_RCB_RING_SIZE(tp)        \
106         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
107
108 #define TG3_TX_RING_SIZE                512
109 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
110
111 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
112                                  TG3_RX_RING_SIZE)
113 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
114                                  TG3_RX_JUMBO_RING_SIZE)
115 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
116                                    TG3_RX_RCB_RING_SIZE(tp))
117 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
118                                  TG3_TX_RING_SIZE)
119 #define TX_RING_GAP(TP) \
120         (TG3_TX_RING_SIZE - (TP)->tx_pending)
121 #define TX_BUFFS_AVAIL(TP)                                              \
122         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
123           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
124           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
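/* TX_BUFFS_AVAIL() is the number of descriptors that may still be posted:
 * tx_pending minus the descriptors currently in flight ((tx_prod - tx_cons)
 * modulo the ring size).  The two branches only exist to keep the
 * arithmetic from wrapping when the producer index has gone back around.
 */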
125 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
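/* Because TG3_TX_RING_SIZE is a power of two, the '& (size - 1)' above is
 * exactly the '% size' wrap referred to in the comment before
 * TG3_RX_RCB_RING_SIZE, e.g. (511 + 1) & 511 == 0.
 */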
126
127 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
128 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
129
130 /* minimum number of free TX descriptors required to wake up TX process */
131 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
132
133 /* number of ETHTOOL_GSTATS u64's */
134 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
135
136 static char version[] __devinitdata =
137         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138
139 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
140 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
141 MODULE_LICENSE("GPL");
142 MODULE_VERSION(DRV_MODULE_VERSION);
143
144 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
145 module_param(tg3_debug, int, 0);
146 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
147
148 static struct pci_device_id tg3_pci_tbl[] = {
149         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
150           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
152           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
154           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { 0, }
232 };
233
234 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
235
236 static struct {
237         const char string[ETH_GSTRING_LEN];
238 } ethtool_stats_keys[TG3_NUM_STATS] = {
239         { "rx_octets" },
240         { "rx_fragments" },
241         { "rx_ucast_packets" },
242         { "rx_mcast_packets" },
243         { "rx_bcast_packets" },
244         { "rx_fcs_errors" },
245         { "rx_align_errors" },
246         { "rx_xon_pause_rcvd" },
247         { "rx_xoff_pause_rcvd" },
248         { "rx_mac_ctrl_rcvd" },
249         { "rx_xoff_entered" },
250         { "rx_frame_too_long_errors" },
251         { "rx_jabbers" },
252         { "rx_undersize_packets" },
253         { "rx_in_length_errors" },
254         { "rx_out_length_errors" },
255         { "rx_64_or_less_octet_packets" },
256         { "rx_65_to_127_octet_packets" },
257         { "rx_128_to_255_octet_packets" },
258         { "rx_256_to_511_octet_packets" },
259         { "rx_512_to_1023_octet_packets" },
260         { "rx_1024_to_1522_octet_packets" },
261         { "rx_1523_to_2047_octet_packets" },
262         { "rx_2048_to_4095_octet_packets" },
263         { "rx_4096_to_8191_octet_packets" },
264         { "rx_8192_to_9022_octet_packets" },
265
266         { "tx_octets" },
267         { "tx_collisions" },
268
269         { "tx_xon_sent" },
270         { "tx_xoff_sent" },
271         { "tx_flow_control" },
272         { "tx_mac_errors" },
273         { "tx_single_collisions" },
274         { "tx_mult_collisions" },
275         { "tx_deferred" },
276         { "tx_excessive_collisions" },
277         { "tx_late_collisions" },
278         { "tx_collide_2times" },
279         { "tx_collide_3times" },
280         { "tx_collide_4times" },
281         { "tx_collide_5times" },
282         { "tx_collide_6times" },
283         { "tx_collide_7times" },
284         { "tx_collide_8times" },
285         { "tx_collide_9times" },
286         { "tx_collide_10times" },
287         { "tx_collide_11times" },
288         { "tx_collide_12times" },
289         { "tx_collide_13times" },
290         { "tx_collide_14times" },
291         { "tx_collide_15times" },
292         { "tx_ucast_packets" },
293         { "tx_mcast_packets" },
294         { "tx_bcast_packets" },
295         { "tx_carrier_sense_errors" },
296         { "tx_discards" },
297         { "tx_errors" },
298
299         { "dma_writeq_full" },
300         { "dma_write_prioq_full" },
301         { "rxbds_empty" },
302         { "rx_discards" },
303         { "rx_errors" },
304         { "rx_threshold_hit" },
305
306         { "dma_readq_full" },
307         { "dma_read_prioq_full" },
308         { "tx_comp_queue_full" },
309
310         { "ring_set_send_prod_index" },
311         { "ring_status_update" },
312         { "nic_irqs" },
313         { "nic_avoided_irqs" },
314         { "nic_tx_threshold_hit" }
315 };
316
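/* Register accessors.  On chips with the PCIX target hardware bug the
 * register cannot be written through the normal MMIO BAR; instead the
 * offset is loaded into TG3PCI_REG_BASE_ADDR and the value into
 * TG3PCI_REG_DATA via PCI config space, serialized by indirect_lock.
 * Chips with the 5701 register write bug get a read-back after the MMIO
 * write so the write is not lost.
 */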
317 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
318 {
319         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
320                 unsigned long flags;
321
322                 spin_lock_irqsave(&tp->indirect_lock, flags);
323                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
324                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
325                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
326         } else {
327                 writel(val, tp->regs + off);
328                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
329                         readl(tp->regs + off);
330         }
331 }
332
333 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
334 {
335         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
336                 unsigned long flags;
337
338                 spin_lock_irqsave(&tp->indirect_lock, flags);
339                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
340                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
341                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
342         } else {
343                 void __iomem *dest = tp->regs + off;
344                 writel(val, dest);
345                 readl(dest);    /* always flush PCI write */
346         }
347 }
348
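/* Mailbox writes.  On hosts that can reorder posted MMIO writes
 * (TG3_FLAG_MBOX_WRITE_REORDER) the write is flushed with a read-back so
 * the chip sees it immediately.  Chips with the TXD mailbox hardware bug
 * additionally need the TX mailbox value written twice.
 */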
349 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
350 {
351         void __iomem *mbox = tp->regs + off;
352         writel(val, mbox);
353         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
354                 readl(mbox);
355 }
356
357 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
358 {
359         void __iomem *mbox = tp->regs + off;
360         writel(val, mbox);
361         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
362                 writel(val, mbox);
363         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
364                 readl(mbox);
365 }
366
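/* Shorthand accessors: tw32()/tr32() are the normal 32-bit register
 * write/read, tw32_f() additionally flushes the posted write, and
 * tw32_rx_mbox()/tw32_tx_mbox() go through the mailbox helpers above
 * (tw32_mailbox() is a plain mailbox write with no workarounds).
 */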
367 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
368 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
369 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
370
371 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
372 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
373 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
374 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
375 #define tr32(reg)               readl(tp->regs + (reg))
376 #define tr16(reg)               readw(tp->regs + (reg))
377 #define tr8(reg)                readb(tp->regs + (reg))
378
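/* NIC on-board SRAM is reached through a second config-space window:
 * the SRAM offset goes into TG3PCI_MEM_WIN_BASE_ADDR, the data is
 * transferred through TG3PCI_MEM_WIN_DATA, and the window base is then
 * put back to zero.
 */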
379 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
380 {
381         unsigned long flags;
382
383         spin_lock_irqsave(&tp->indirect_lock, flags);
384         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
385         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
386
387         /* Always leave this as zero. */
388         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
389         spin_unlock_irqrestore(&tp->indirect_lock, flags);
390 }
391
392 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
393 {
394         unsigned long flags;
395
396         spin_lock_irqsave(&tp->indirect_lock, flags);
397         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
398         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
399
400         /* Always leave this as zero. */
401         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
402         spin_unlock_irqrestore(&tp->indirect_lock, flags);
403 }
404
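/* Interrupts are masked in two places: the MASK_PCI_INT bit in
 * MISC_HOST_CTRL and interrupt mailbox 0, where writing 1 blocks further
 * interrupts and writing 0 unblocks them.  The mailbox read-back flushes
 * the write before the caller proceeds.
 */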
405 static void tg3_disable_ints(struct tg3 *tp)
406 {
407         tw32(TG3PCI_MISC_HOST_CTRL,
408              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
409         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
410         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
411 }
412
413 static inline void tg3_cond_int(struct tg3 *tp)
414 {
415         if (tp->hw_status->status & SD_STATUS_UPDATED)
416                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
417 }
418
419 static void tg3_enable_ints(struct tg3 *tp)
420 {
421         tw32(TG3PCI_MISC_HOST_CTRL,
422              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
423         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
424         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
425
426         tg3_cond_int(tp);
427 }
428
429 /* tg3_restart_ints
430  *  similar to tg3_enable_ints, but it can return without flushing the
431  *  PIO write which reenables interrupts
432  */
433 static void tg3_restart_ints(struct tg3 *tp)
434 {
435         tw32(TG3PCI_MISC_HOST_CTRL,
436                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
437         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
438         mmiowb();
439
440         tg3_cond_int(tp);
441 }
442
443 static inline void tg3_netif_stop(struct tg3 *tp)
444 {
445         netif_poll_disable(tp->dev);
446         netif_tx_disable(tp->dev);
447 }
448
449 static inline void tg3_netif_start(struct tg3 *tp)
450 {
451         netif_wake_queue(tp->dev);
452         /* NOTE: unconditional netif_wake_queue is only appropriate
453          * so long as all callers are assured to have free tx slots
454          * (such as after tg3_init_hw)
455          */
456         netif_poll_enable(tp->dev);
457         tg3_cond_int(tp);
458 }
459
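/* Step the chip back to its default core clock.  Only a few CLOCK_CTRL
 * bits are preserved; if the core was running from the 44MHz (or, on
 * 5705+ parts, the 62.5MHz) clock, the transition is made one bit at a
 * time with a 40us settle after each write, which the hardware seems to
 * require.
 */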
460 static void tg3_switch_clocks(struct tg3 *tp)
461 {
462         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
463         u32 orig_clock_ctrl;
464
465         orig_clock_ctrl = clock_ctrl;
466         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
467                        CLOCK_CTRL_CLKRUN_OENABLE |
468                        0x1f);
469         tp->pci_clock_ctrl = clock_ctrl;
470
471         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
472                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
473                         tw32_f(TG3PCI_CLOCK_CTRL,
474                                clock_ctrl | CLOCK_CTRL_625_CORE);
475                         udelay(40);
476                 }
477         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
478                 tw32_f(TG3PCI_CLOCK_CTRL,
479                      clock_ctrl |
480                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
481                 udelay(40);
482                 tw32_f(TG3PCI_CLOCK_CTRL,
483                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
484                 udelay(40);
485         }
486         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
487         udelay(40);
488 }
489
490 #define PHY_BUSY_LOOPS  5000
491
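/* MII (MDIO) access goes through the MAC_MI_COM register: the PHY and
 * register addresses plus a read or write command are written as one
 * frame, then MI_COM_BUSY is polled until the transaction completes.
 * Hardware auto-polling is paused around the transaction and restored
 * afterwards.
 */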
492 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
493 {
494         u32 frame_val;
495         unsigned int loops;
496         int ret;
497
498         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
499                 tw32_f(MAC_MI_MODE,
500                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
501                 udelay(80);
502         }
503
504         *val = 0x0;
505
506         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
507                       MI_COM_PHY_ADDR_MASK);
508         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
509                       MI_COM_REG_ADDR_MASK);
510         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
511         
512         tw32_f(MAC_MI_COM, frame_val);
513
514         loops = PHY_BUSY_LOOPS;
515         while (loops != 0) {
516                 udelay(10);
517                 frame_val = tr32(MAC_MI_COM);
518
519                 if ((frame_val & MI_COM_BUSY) == 0) {
520                         udelay(5);
521                         frame_val = tr32(MAC_MI_COM);
522                         break;
523                 }
524                 loops -= 1;
525         }
526
527         ret = -EBUSY;
528         if (loops != 0) {
529                 *val = frame_val & MI_COM_DATA_MASK;
530                 ret = 0;
531         }
532
533         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
534                 tw32_f(MAC_MI_MODE, tp->mi_mode);
535                 udelay(80);
536         }
537
538         return ret;
539 }
540
541 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
542 {
543         u32 frame_val;
544         unsigned int loops;
545         int ret;
546
547         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
548                 tw32_f(MAC_MI_MODE,
549                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
550                 udelay(80);
551         }
552
553         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
554                       MI_COM_PHY_ADDR_MASK);
555         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
556                       MI_COM_REG_ADDR_MASK);
557         frame_val |= (val & MI_COM_DATA_MASK);
558         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
559         
560         tw32_f(MAC_MI_COM, frame_val);
561
562         loops = PHY_BUSY_LOOPS;
563         while (loops != 0) {
564                 udelay(10);
565                 frame_val = tr32(MAC_MI_COM);
566                 if ((frame_val & MI_COM_BUSY) == 0) {
567                         udelay(5);
568                         frame_val = tr32(MAC_MI_COM);
569                         break;
570                 }
571                 loops -= 1;
572         }
573
574         ret = -EBUSY;
575         if (loops != 0)
576                 ret = 0;
577
578         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
579                 tw32_f(MAC_MI_MODE, tp->mi_mode);
580                 udelay(80);
581         }
582
583         return ret;
584 }
585
586 static void tg3_phy_set_wirespeed(struct tg3 *tp)
587 {
588         u32 val;
589
590         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
591                 return;
592
593         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
594             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
595                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
596                              (val | (1 << 15) | (1 << 4)));
597 }
598
599 static int tg3_bmcr_reset(struct tg3 *tp)
600 {
601         u32 phy_control;
602         int limit, err;
603
604         /* OK, reset it, and poll the BMCR_RESET bit until it
605          * clears or we time out.
606          */
607         phy_control = BMCR_RESET;
608         err = tg3_writephy(tp, MII_BMCR, phy_control);
609         if (err != 0)
610                 return -EBUSY;
611
612         limit = 5000;
613         while (limit--) {
614                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
615                 if (err != 0)
616                         return -EBUSY;
617
618                 if ((phy_control & BMCR_RESET) == 0) {
619                         udelay(40);
620                         break;
621                 }
622                 udelay(10);
623         }
624         if (limit <= 0)
625                 return -EBUSY;
626
627         return 0;
628 }
629
630 static int tg3_wait_macro_done(struct tg3 *tp)
631 {
632         int limit = 100;
633
634         while (limit--) {
635                 u32 tmp32;
636
637                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
638                         if ((tmp32 & 0x1000) == 0)
639                                 break;
640                 }
641         }
642         if (limit <= 0)
643                 return -EBUSY;
644
645         return 0;
646 }
647
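/* Part of the 5703/5704/5705 PHY reset workaround: write a known test
 * pattern into each of the four DSP channels, read it back, and report
 * -EBUSY (requesting another PHY reset) if any channel does not return
 * what was written.
 */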
648 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
649 {
650         static const u32 test_pat[4][6] = {
651         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
652         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
653         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
654         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
655         };
656         int chan;
657
658         for (chan = 0; chan < 4; chan++) {
659                 int i;
660
661                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
662                              (chan * 0x2000) | 0x0200);
663                 tg3_writephy(tp, 0x16, 0x0002);
664
665                 for (i = 0; i < 6; i++)
666                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
667                                      test_pat[chan][i]);
668
669                 tg3_writephy(tp, 0x16, 0x0202);
670                 if (tg3_wait_macro_done(tp)) {
671                         *resetp = 1;
672                         return -EBUSY;
673                 }
674
675                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
676                              (chan * 0x2000) | 0x0200);
677                 tg3_writephy(tp, 0x16, 0x0082);
678                 if (tg3_wait_macro_done(tp)) {
679                         *resetp = 1;
680                         return -EBUSY;
681                 }
682
683                 tg3_writephy(tp, 0x16, 0x0802);
684                 if (tg3_wait_macro_done(tp)) {
685                         *resetp = 1;
686                         return -EBUSY;
687                 }
688
689                 for (i = 0; i < 6; i += 2) {
690                         u32 low, high;
691
692                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
693                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
694                             tg3_wait_macro_done(tp)) {
695                                 *resetp = 1;
696                                 return -EBUSY;
697                         }
698                         low &= 0x7fff;
699                         high &= 0x000f;
700                         if (low != test_pat[chan][i] ||
701                             high != test_pat[chan][i+1]) {
702                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
703                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
704                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
705
706                                 return -EBUSY;
707                         }
708                 }
709         }
710
711         return 0;
712 }
713
714 static int tg3_phy_reset_chanpat(struct tg3 *tp)
715 {
716         int chan;
717
718         for (chan = 0; chan < 4; chan++) {
719                 int i;
720
721                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
722                              (chan * 0x2000) | 0x0200);
723                 tg3_writephy(tp, 0x16, 0x0002);
724                 for (i = 0; i < 6; i++)
725                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
726                 tg3_writephy(tp, 0x16, 0x0202);
727                 if (tg3_wait_macro_done(tp))
728                         return -EBUSY;
729         }
730
731         return 0;
732 }
733
734 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
735 {
736         u32 reg32, phy9_orig;
737         int retries, do_phy_reset, err;
738
739         retries = 10;
740         do_phy_reset = 1;
741         do {
742                 if (do_phy_reset) {
743                         err = tg3_bmcr_reset(tp);
744                         if (err)
745                                 return err;
746                         do_phy_reset = 0;
747                 }
748
749                 /* Disable transmitter and interrupt.  */
750                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
751                         continue;
752
753                 reg32 |= 0x3000;
754                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
755
756                 /* Set full-duplex, 1000 mbps.  */
757                 tg3_writephy(tp, MII_BMCR,
758                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
759
760                 /* Set to master mode.  */
761                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
762                         continue;
763
764                 tg3_writephy(tp, MII_TG3_CTRL,
765                              (MII_TG3_CTRL_AS_MASTER |
766                               MII_TG3_CTRL_ENABLE_AS_MASTER));
767
768                 /* Enable SM_DSP_CLOCK and 6dB.  */
769                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
770
771                 /* Block the PHY control access.  */
772                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
773                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
774
775                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
776                 if (!err)
777                         break;
778         } while (--retries);
779
780         err = tg3_phy_reset_chanpat(tp);
781         if (err)
782                 return err;
783
784         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
785         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
786
787         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
788         tg3_writephy(tp, 0x16, 0x0000);
789
790         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
791             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
792                 /* Set Extended packet length bit for jumbo frames */
793                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
794         }
795         else {
796                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
797         }
798
799         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
800
801         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
802                 reg32 &= ~0x3000;
803                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
804         } else if (!err)
805                 err = -EBUSY;
806
807         return err;
808 }
809
810 /* Reset the tigon3 PHY and apply any chip-specific workarounds
811  * that are needed after a PHY reset.
812  */
813 static int tg3_phy_reset(struct tg3 *tp)
814 {
815         u32 phy_status;
816         int err;
817
818         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
819         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
820         if (err != 0)
821                 return -EBUSY;
822
823         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
824             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
825             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
826                 err = tg3_phy_reset_5703_4_5(tp);
827                 if (err)
828                         return err;
829                 goto out;
830         }
831
832         err = tg3_bmcr_reset(tp);
833         if (err)
834                 return err;
835
836 out:
837         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
838                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
839                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
840                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
841                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
842                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
843                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
844         }
845         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
846                 tg3_writephy(tp, 0x1c, 0x8d68);
847                 tg3_writephy(tp, 0x1c, 0x8d68);
848         }
849         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
850                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
851                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
852                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
853                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
854                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
855                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
856                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
857                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
858         }
859         /* Set Extended packet length bit (bit 14) on all chips that
860          * support jumbo frames.  */
861         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
862                 /* Cannot do read-modify-write on 5401 */
863                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
864         } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
865                 u32 phy_reg;
866
867                 /* Set bit 14 with read-modify-write to preserve other bits */
868                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
869                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
870                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
871         }
872
873         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
874          * jumbo frames transmission.
875          */
876         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
877                 u32 phy_reg;
878
879                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
880                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
881                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
882         }
883
884         tg3_phy_set_wirespeed(tp);
885         return 0;
886 }
887
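/* Twiddle the GRC GPIO pins that control auxiliary power.  Wake-on-LAN
 * needs aux power after the chip sleeps, so the GPIOs are driven one way
 * when WOL is enabled on this port (or on the other port of a 5704) and
 * another way when it is not, and a port backs off entirely if its peer
 * has already completed initialization.
 */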
888 static void tg3_frob_aux_power(struct tg3 *tp)
889 {
890         struct tg3 *tp_peer = tp;
891
892         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
893                 return;
894
895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
896                 tp_peer = pci_get_drvdata(tp->pdev_peer);
897                 if (!tp_peer)
898                         BUG();
899         }
900
901
902         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
903             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
904                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
905                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
906                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
907                              (GRC_LCLCTRL_GPIO_OE0 |
908                               GRC_LCLCTRL_GPIO_OE1 |
909                               GRC_LCLCTRL_GPIO_OE2 |
910                               GRC_LCLCTRL_GPIO_OUTPUT0 |
911                               GRC_LCLCTRL_GPIO_OUTPUT1));
912                         udelay(100);
913                 } else {
914                         u32 no_gpio2;
915                         u32 grc_local_ctrl;
916
917                         if (tp_peer != tp &&
918                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
919                                 return;
920
921                         /* On 5753 and variants, GPIO2 cannot be used. */
922                         no_gpio2 = tp->nic_sram_data_cfg &
923                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
924
925                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
926                                          GRC_LCLCTRL_GPIO_OE1 |
927                                          GRC_LCLCTRL_GPIO_OE2 |
928                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
929                                          GRC_LCLCTRL_GPIO_OUTPUT2;
930                         if (no_gpio2) {
931                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
932                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
933                         }
934                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
935                                                 grc_local_ctrl);
936                         udelay(100);
937
938                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
939
940                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
941                                                 grc_local_ctrl);
942                         udelay(100);
943
944                         if (!no_gpio2) {
945                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
946                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
947                                        grc_local_ctrl);
948                                 udelay(100);
949                         }
950                 }
951         } else {
952                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
953                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
954                         if (tp_peer != tp &&
955                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
956                                 return;
957
958                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
959                              (GRC_LCLCTRL_GPIO_OE1 |
960                               GRC_LCLCTRL_GPIO_OUTPUT1));
961                         udelay(100);
962
963                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
964                              (GRC_LCLCTRL_GPIO_OE1));
965                         udelay(100);
966
967                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
968                              (GRC_LCLCTRL_GPIO_OE1 |
969                               GRC_LCLCTRL_GPIO_OUTPUT1));
970                         udelay(100);
971                 }
972         }
973 }
974
975 static int tg3_setup_phy(struct tg3 *, int);
976
977 #define RESET_KIND_SHUTDOWN     0
978 #define RESET_KIND_INIT         1
979 #define RESET_KIND_SUSPEND      2
980
981 static void tg3_write_sig_post_reset(struct tg3 *, int);
982 static int tg3_halt_cpu(struct tg3 *, u32);
983
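/* Move the chip between PCI power states D0-D3hot (state 0-3).  D0 is
 * written straight away; for the other states the PHY is dropped to
 * 10Mb where possible, Wake-on-LAN is armed if enabled, the core clocks
 * are slowed, and only then is the new state written to PCI_PM_CTRL.
 */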
984 static int tg3_set_power_state(struct tg3 *tp, int state)
985 {
986         u32 misc_host_ctrl;
987         u16 power_control, power_caps;
988         int pm = tp->pm_cap;
989
990         /* Make sure register accesses (indirect or otherwise)
991          * will function correctly.
992          */
993         pci_write_config_dword(tp->pdev,
994                                TG3PCI_MISC_HOST_CTRL,
995                                tp->misc_host_ctrl);
996
997         pci_read_config_word(tp->pdev,
998                              pm + PCI_PM_CTRL,
999                              &power_control);
1000         power_control |= PCI_PM_CTRL_PME_STATUS;
1001         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1002         switch (state) {
1003         case 0:
1004                 power_control |= 0;
1005                 pci_write_config_word(tp->pdev,
1006                                       pm + PCI_PM_CTRL,
1007                                       power_control);
1008                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1009                 udelay(100);
1010
1011                 return 0;
1012
1013         case 1:
1014                 power_control |= 1;
1015                 break;
1016
1017         case 2:
1018                 power_control |= 2;
1019                 break;
1020
1021         case 3:
1022                 power_control |= 3;
1023                 break;
1024
1025         default:
1026                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1027                        "requested.\n",
1028                        tp->dev->name, state);
1029                 return -EINVAL;
1030         };
1031
1032         power_control |= PCI_PM_CTRL_PME_ENABLE;
1033
1034         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1035         tw32(TG3PCI_MISC_HOST_CTRL,
1036              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1037
1038         if (tp->link_config.phy_is_low_power == 0) {
1039                 tp->link_config.phy_is_low_power = 1;
1040                 tp->link_config.orig_speed = tp->link_config.speed;
1041                 tp->link_config.orig_duplex = tp->link_config.duplex;
1042                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1043         }
1044
1045         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1046                 tp->link_config.speed = SPEED_10;
1047                 tp->link_config.duplex = DUPLEX_HALF;
1048                 tp->link_config.autoneg = AUTONEG_ENABLE;
1049                 tg3_setup_phy(tp, 0);
1050         }
1051
1052         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1053
1054         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1055                 u32 mac_mode;
1056
1057                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1058                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1059                         udelay(40);
1060
1061                         mac_mode = MAC_MODE_PORT_MODE_MII;
1062
1063                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1064                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1065                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1066                 } else {
1067                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1068                 }
1069
1070                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
1071                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
1072                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1073
1074                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1075                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1076                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1077
1078                 tw32_f(MAC_MODE, mac_mode);
1079                 udelay(100);
1080
1081                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1082                 udelay(10);
1083         }
1084
1085         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1086             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1087              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1088                 u32 base_val;
1089
1090                 base_val = tp->pci_clock_ctrl;
1091                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1092                              CLOCK_CTRL_TXCLK_DISABLE);
1093
1094                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1095                      CLOCK_CTRL_ALTCLK |
1096                      CLOCK_CTRL_PWRDOWN_PLL133);
1097                 udelay(40);
1098         } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) &&
1099                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1100                 u32 newbits1, newbits2;
1101
1102                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1103                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1104                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1105                                     CLOCK_CTRL_TXCLK_DISABLE |
1106                                     CLOCK_CTRL_ALTCLK);
1107                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1108                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1109                         newbits1 = CLOCK_CTRL_625_CORE;
1110                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1111                 } else {
1112                         newbits1 = CLOCK_CTRL_ALTCLK;
1113                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1114                 }
1115
1116                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1117                 udelay(40);
1118
1119                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1120                 udelay(40);
1121
1122                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1123                         u32 newbits3;
1124
1125                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1126                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1127                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1128                                             CLOCK_CTRL_TXCLK_DISABLE |
1129                                             CLOCK_CTRL_44MHZ_CORE);
1130                         } else {
1131                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1132                         }
1133
1134                         tw32_f(TG3PCI_CLOCK_CTRL,
1135                                          tp->pci_clock_ctrl | newbits3);
1136                         udelay(40);
1137                 }
1138         }
1139
1140         tg3_frob_aux_power(tp);
1141
1142         /* Workaround for unstable PLL clock */
1143         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1144             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1145                 u32 val = tr32(0x7d00);
1146
1147                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1148                 tw32(0x7d00, val);
1149                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1150                         tg3_halt_cpu(tp, RX_CPU_BASE);
1151         }
1152
1153         /* Finally, set the new power state. */
1154         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1155
1156         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1157
1158         return 0;
1159 }
1160
1161 static void tg3_link_report(struct tg3 *tp)
1162 {
1163         if (!netif_carrier_ok(tp->dev)) {
1164                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1165         } else {
1166                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1167                        tp->dev->name,
1168                        (tp->link_config.active_speed == SPEED_1000 ?
1169                         1000 :
1170                         (tp->link_config.active_speed == SPEED_100 ?
1171                          100 : 10)),
1172                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1173                         "full" : "half"));
1174
1175                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1176                        "%s for RX.\n",
1177                        tp->dev->name,
1178                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1179                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1180         }
1181 }
1182
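/* Resolve TX/RX flow control from the local and remote autoneg
 * advertisements (symmetric vs. asymmetric pause bits) when pause
 * autonegotiation is enabled, then apply the result to MAC_RX_MODE and
 * MAC_TX_MODE.
 */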
1183 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1184 {
1185         u32 new_tg3_flags = 0;
1186         u32 old_rx_mode = tp->rx_mode;
1187         u32 old_tx_mode = tp->tx_mode;
1188
1189         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1190                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1191                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1192                                 if (remote_adv & LPA_PAUSE_CAP)
1193                                         new_tg3_flags |=
1194                                                 (TG3_FLAG_RX_PAUSE |
1195                                                 TG3_FLAG_TX_PAUSE);
1196                                 else if (remote_adv & LPA_PAUSE_ASYM)
1197                                         new_tg3_flags |=
1198                                                 (TG3_FLAG_RX_PAUSE);
1199                         } else {
1200                                 if (remote_adv & LPA_PAUSE_CAP)
1201                                         new_tg3_flags |=
1202                                                 (TG3_FLAG_RX_PAUSE |
1203                                                 TG3_FLAG_TX_PAUSE);
1204                         }
1205                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1206                         if ((remote_adv & LPA_PAUSE_CAP) &&
1207                         (remote_adv & LPA_PAUSE_ASYM))
1208                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1209                 }
1210
1211                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1212                 tp->tg3_flags |= new_tg3_flags;
1213         } else {
1214                 new_tg3_flags = tp->tg3_flags;
1215         }
1216
1217         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1218                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1219         else
1220                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1221
1222         if (old_rx_mode != tp->rx_mode) {
1223                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1224         }
1225         
1226         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1227                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1228         else
1229                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1230
1231         if (old_tx_mode != tp->tx_mode) {
1232                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1233         }
1234 }
1235
1236 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1237 {
1238         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1239         case MII_TG3_AUX_STAT_10HALF:
1240                 *speed = SPEED_10;
1241                 *duplex = DUPLEX_HALF;
1242                 break;
1243
1244         case MII_TG3_AUX_STAT_10FULL:
1245                 *speed = SPEED_10;
1246                 *duplex = DUPLEX_FULL;
1247                 break;
1248
1249         case MII_TG3_AUX_STAT_100HALF:
1250                 *speed = SPEED_100;
1251                 *duplex = DUPLEX_HALF;
1252                 break;
1253
1254         case MII_TG3_AUX_STAT_100FULL:
1255                 *speed = SPEED_100;
1256                 *duplex = DUPLEX_FULL;
1257                 break;
1258
1259         case MII_TG3_AUX_STAT_1000HALF:
1260                 *speed = SPEED_1000;
1261                 *duplex = DUPLEX_HALF;
1262                 break;
1263
1264         case MII_TG3_AUX_STAT_1000FULL:
1265                 *speed = SPEED_1000;
1266                 *duplex = DUPLEX_FULL;
1267                 break;
1268
1269         default:
1270                 *speed = SPEED_INVALID;
1271                 *duplex = DUPLEX_INVALID;
1272                 break;
1273         };
1274 }
1275
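/* Program the copper PHY advertisement registers from link_config and
 * start the link: a reduced 10/100 advertisement in low-power mode, the
 * full advertisement when autonegotiating, or a forced speed/duplex
 * written straight to BMCR when autoneg is disabled.
 */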
1276 static void tg3_phy_copper_begin(struct tg3 *tp)
1277 {
1278         u32 new_adv;
1279         int i;
1280
1281         if (tp->link_config.phy_is_low_power) {
1282                 /* Entering low power mode.  Disable gigabit and
1283                  * 100baseT advertisements.
1284                  */
1285                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1286
1287                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1288                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1289                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1290                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1291
1292                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1293         } else if (tp->link_config.speed == SPEED_INVALID) {
1294                 tp->link_config.advertising =
1295                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1296                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1297                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1298                          ADVERTISED_Autoneg | ADVERTISED_MII);
1299
1300                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1301                         tp->link_config.advertising &=
1302                                 ~(ADVERTISED_1000baseT_Half |
1303                                   ADVERTISED_1000baseT_Full);
1304
1305                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1306                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1307                         new_adv |= ADVERTISE_10HALF;
1308                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1309                         new_adv |= ADVERTISE_10FULL;
1310                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1311                         new_adv |= ADVERTISE_100HALF;
1312                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1313                         new_adv |= ADVERTISE_100FULL;
1314                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1315
1316                 if (tp->link_config.advertising &
1317                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1318                         new_adv = 0;
1319                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1320                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1321                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1322                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1323                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1324                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1325                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1326                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1327                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1328                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1329                 } else {
1330                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1331                 }
1332         } else {
1333                 /* Asking for a specific link mode. */
1334                 if (tp->link_config.speed == SPEED_1000) {
1335                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1336                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1337
1338                         if (tp->link_config.duplex == DUPLEX_FULL)
1339                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1340                         else
1341                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1342                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1343                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1344                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1345                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1346                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1347                 } else {
1348                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1349
1350                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1351                         if (tp->link_config.speed == SPEED_100) {
1352                                 if (tp->link_config.duplex == DUPLEX_FULL)
1353                                         new_adv |= ADVERTISE_100FULL;
1354                                 else
1355                                         new_adv |= ADVERTISE_100HALF;
1356                         } else {
1357                                 if (tp->link_config.duplex == DUPLEX_FULL)
1358                                         new_adv |= ADVERTISE_10FULL;
1359                                 else
1360                                         new_adv |= ADVERTISE_10HALF;
1361                         }
1362                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1363                 }
1364         }
1365
1366         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1367             tp->link_config.speed != SPEED_INVALID) {
1368                 u32 bmcr, orig_bmcr;
1369
1370                 tp->link_config.active_speed = tp->link_config.speed;
1371                 tp->link_config.active_duplex = tp->link_config.duplex;
1372
1373                 bmcr = 0;
1374                 switch (tp->link_config.speed) {
1375                 default:
1376                 case SPEED_10:
1377                         break;
1378
1379                 case SPEED_100:
1380                         bmcr |= BMCR_SPEED100;
1381                         break;
1382
1383                 case SPEED_1000:
1384                         bmcr |= TG3_BMCR_SPEED1000;
1385                         break;
1386                 }
1387
1388                 if (tp->link_config.duplex == DUPLEX_FULL)
1389                         bmcr |= BMCR_FULLDPLX;
1390
1391                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1392                     (bmcr != orig_bmcr)) {
1393                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1394                         for (i = 0; i < 1500; i++) {
1395                                 u32 tmp;
1396
1397                                 udelay(10);
1398                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1399                                     tg3_readphy(tp, MII_BMSR, &tmp))
1400                                         continue;
1401                                 if (!(tmp & BMSR_LSTATUS)) {
1402                                         udelay(40);
1403                                         break;
1404                                 }
1405                         }
1406                         tg3_writephy(tp, MII_BMCR, bmcr);
1407                         udelay(40);
1408                 }
1409         } else {
1410                 tg3_writephy(tp, MII_BMCR,
1411                              BMCR_ANENABLE | BMCR_ANRESTART);
1412         }
1413 }
1414
1415 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1416 {
1417         int err;
1418
1419         /* Turn off tap power management. */
1420         /* Set Extended packet length bit */
1421         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1422
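             /* The pairs below use the PHY's indirect DSP access: each
              * MII_TG3_DSP_ADDRESS write selects an internal register and
              * the MII_TG3_DSP_RW_PORT write that follows supplies its
              * value.
              */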
1423         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1424         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1425
1426         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1427         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1428
1429         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1430         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1431
1432         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1433         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1434
1435         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1436         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1437
1438         udelay(40);
1439
1440         return err;
1441 }
1442
1443 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1444 {
1445         u32 adv_reg, all_mask;
1446
1447         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1448                 return 0;
1449
1450         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1451                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1452         if ((adv_reg & all_mask) != all_mask)
1453                 return 0;
1454         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1455                 u32 tg3_ctrl;
1456
1457                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1458                         return 0;
1459
1460                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1461                             MII_TG3_CTRL_ADV_1000_FULL);
1462                 if ((tg3_ctrl & all_mask) != all_mask)
1463                         return 0;
1464         }
1465         return 1;
1466 }
1467
1468 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1469 {
1470         int current_link_up;
1471         u32 bmsr, dummy;
1472         u16 current_speed;
1473         u8 current_duplex;
1474         int i, err;
1475
1476         tw32(MAC_EVENT, 0);
1477
1478         tw32_f(MAC_STATUS,
1479              (MAC_STATUS_SYNC_CHANGED |
1480               MAC_STATUS_CFG_CHANGED |
1481               MAC_STATUS_MI_COMPLETION |
1482               MAC_STATUS_LNKSTATE_CHANGED));
1483         udelay(40);
1484
1485         tp->mi_mode = MAC_MI_MODE_BASE;
1486         tw32_f(MAC_MI_MODE, tp->mi_mode);
1487         udelay(80);
1488
1489         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1490
1491         /* Some third-party PHYs need to be reset on link going
1492          * down.
1493          */
1494         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1495              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1496              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1497             netif_carrier_ok(tp->dev)) {
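                     /* BMSR latches link-down events, so it is read twice
                      * here (and elsewhere below): the first read returns
                      * the latched value, the second the current state.
                      */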
1498                 tg3_readphy(tp, MII_BMSR, &bmsr);
1499                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1500                     !(bmsr & BMSR_LSTATUS))
1501                         force_reset = 1;
1502         }
1503         if (force_reset)
1504                 tg3_phy_reset(tp);
1505
1506         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1507                 tg3_readphy(tp, MII_BMSR, &bmsr);
1508                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1509                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1510                         bmsr = 0;
1511
1512                 if (!(bmsr & BMSR_LSTATUS)) {
1513                         err = tg3_init_5401phy_dsp(tp);
1514                         if (err)
1515                                 return err;
1516
1517                         tg3_readphy(tp, MII_BMSR, &bmsr);
1518                         for (i = 0; i < 1000; i++) {
1519                                 udelay(10);
1520                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1521                                     (bmsr & BMSR_LSTATUS)) {
1522                                         udelay(40);
1523                                         break;
1524                                 }
1525                         }
1526
1527                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1528                             !(bmsr & BMSR_LSTATUS) &&
1529                             tp->link_config.active_speed == SPEED_1000) {
1530                                 err = tg3_phy_reset(tp);
1531                                 if (!err)
1532                                         err = tg3_init_5401phy_dsp(tp);
1533                                 if (err)
1534                                         return err;
1535                         }
1536                 }
1537         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1538                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1539                 /* 5701 {A0,B0} CRC bug workaround */
1540                 tg3_writephy(tp, 0x15, 0x0a75);
1541                 tg3_writephy(tp, 0x1c, 0x8c68);
1542                 tg3_writephy(tp, 0x1c, 0x8d68);
1543                 tg3_writephy(tp, 0x1c, 0x8c68);
1544         }
1545
1546         /* Clear pending interrupts... */
1547         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1548         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1549
1550         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1551                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1552         else
1553                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1554
1555         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1556             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1557                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1558                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1559                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1560                 else
1561                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1562         }
1563
1564         current_link_up = 0;
1565         current_speed = SPEED_INVALID;
1566         current_duplex = DUPLEX_INVALID;
1567
1568         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1569                 u32 val;
1570
1571                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1572                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1573                 if (!(val & (1 << 10))) {
1574                         val |= (1 << 10);
1575                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1576                         goto relink;
1577                 }
1578         }
1579
1580         bmsr = 0;
1581         for (i = 0; i < 100; i++) {
1582                 tg3_readphy(tp, MII_BMSR, &bmsr);
1583                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1584                     (bmsr & BMSR_LSTATUS))
1585                         break;
1586                 udelay(40);
1587         }
1588
1589         if (bmsr & BMSR_LSTATUS) {
1590                 u32 aux_stat, bmcr;
1591
1592                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1593                 for (i = 0; i < 2000; i++) {
1594                         udelay(10);
1595                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1596                             aux_stat)
1597                                 break;
1598                 }
1599
1600                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1601                                              &current_speed,
1602                                              &current_duplex);
1603
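                     /* Retry until BMCR returns something other than 0 or
                      * the implausible value 0x7fff, both of which are
                      * treated as not-yet-valid readouts.
                      */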
1604                 bmcr = 0;
1605                 for (i = 0; i < 200; i++) {
1606                         tg3_readphy(tp, MII_BMCR, &bmcr);
1607                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1608                                 continue;
1609                         if (bmcr && bmcr != 0x7fff)
1610                                 break;
1611                         udelay(10);
1612                 }
1613
1614                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1615                         if (bmcr & BMCR_ANENABLE) {
1616                                 current_link_up = 1;
1617
1618                                 /* Force autoneg restart if we are exiting
1619                                  * low power mode.
1620                                  */
1621                                 if (!tg3_copper_is_advertising_all(tp))
1622                                         current_link_up = 0;
1623                         } else {
1624                                 current_link_up = 0;
1625                         }
1626                 } else {
1627                         if (!(bmcr & BMCR_ANENABLE) &&
1628                             tp->link_config.speed == current_speed &&
1629                             tp->link_config.duplex == current_duplex) {
1630                                 current_link_up = 1;
1631                         } else {
1632                                 current_link_up = 0;
1633                         }
1634                 }
1635
1636                 tp->link_config.active_speed = current_speed;
1637                 tp->link_config.active_duplex = current_duplex;
1638         }
1639
1640         if (current_link_up == 1 &&
1641             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1642             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1643                 u32 local_adv, remote_adv;
1644
1645                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1646                         local_adv = 0;
1647                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1648
1649                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1650                         remote_adv = 0;
1651
1652                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1653
1654                 /* If we are not advertising full pause capability,
1655                  * something is wrong.  Bring the link down and reconfigure.
1656                  */
1657                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1658                         current_link_up = 0;
1659                 } else {
1660                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1661                 }
1662         }
1663 relink:
1664         if (current_link_up == 0) {
1665                 u32 tmp;
1666
1667                 tg3_phy_copper_begin(tp);
1668
1669                 tg3_readphy(tp, MII_BMSR, &tmp);
1670                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1671                     (tmp & BMSR_LSTATUS))
1672                         current_link_up = 1;
1673         }
1674
1675         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1676         if (current_link_up == 1) {
1677                 if (tp->link_config.active_speed == SPEED_100 ||
1678                     tp->link_config.active_speed == SPEED_10)
1679                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1680                 else
1681                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1682         } else
1683                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1684
1685         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1686         if (tp->link_config.active_duplex == DUPLEX_HALF)
1687                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1688
1689         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1690         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1691                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1692                     (current_link_up == 1 &&
1693                      tp->link_config.active_speed == SPEED_10))
1694                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1695         } else {
1696                 if (current_link_up == 1)
1697                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1698         }
1699
1700         /* ??? Without this setting Netgear GA302T PHY does not
1701          * ??? send/receive packets...
1702          */
1703         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1704             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1705                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1706                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1707                 udelay(80);
1708         }
1709
1710         tw32_f(MAC_MODE, tp->mac_mode);
1711         udelay(40);
1712
1713         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1714                 /* Polled via timer. */
1715                 tw32_f(MAC_EVENT, 0);
1716         } else {
1717                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1718         }
1719         udelay(40);
1720
1721         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1722             current_link_up == 1 &&
1723             tp->link_config.active_speed == SPEED_1000 &&
1724             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1725              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1726                 udelay(120);
1727                 tw32_f(MAC_STATUS,
1728                      (MAC_STATUS_SYNC_CHANGED |
1729                       MAC_STATUS_CFG_CHANGED));
1730                 udelay(40);
1731                 tg3_write_mem(tp,
1732                               NIC_SRAM_FIRMWARE_MBOX,
1733                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1734         }
1735
1736         if (current_link_up != netif_carrier_ok(tp->dev)) {
1737                 if (current_link_up)
1738                         netif_carrier_on(tp->dev);
1739                 else
1740                         netif_carrier_off(tp->dev);
1741                 tg3_link_report(tp);
1742         }
1743
1744         return 0;
1745 }
1746
1747 struct tg3_fiber_aneginfo {
1748         int state;
1749 #define ANEG_STATE_UNKNOWN              0
1750 #define ANEG_STATE_AN_ENABLE            1
1751 #define ANEG_STATE_RESTART_INIT         2
1752 #define ANEG_STATE_RESTART              3
1753 #define ANEG_STATE_DISABLE_LINK_OK      4
1754 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1755 #define ANEG_STATE_ABILITY_DETECT       6
1756 #define ANEG_STATE_ACK_DETECT_INIT      7
1757 #define ANEG_STATE_ACK_DETECT           8
1758 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1759 #define ANEG_STATE_COMPLETE_ACK         10
1760 #define ANEG_STATE_IDLE_DETECT_INIT     11
1761 #define ANEG_STATE_IDLE_DETECT          12
1762 #define ANEG_STATE_LINK_OK              13
1763 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1764 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1765
1766         u32 flags;
1767 #define MR_AN_ENABLE            0x00000001
1768 #define MR_RESTART_AN           0x00000002
1769 #define MR_AN_COMPLETE          0x00000004
1770 #define MR_PAGE_RX              0x00000008
1771 #define MR_NP_LOADED            0x00000010
1772 #define MR_TOGGLE_TX            0x00000020
1773 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1774 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1775 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1776 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1777 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1778 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1779 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1780 #define MR_TOGGLE_RX            0x00002000
1781 #define MR_NP_RX                0x00004000
1782
1783 #define MR_LINK_OK              0x80000000
1784
1785         unsigned long link_time, cur_time;
1786
1787         u32 ability_match_cfg;
1788         int ability_match_count;
1789
1790         char ability_match, idle_match, ack_match;
1791
1792         u32 txconfig, rxconfig;
1793 #define ANEG_CFG_NP             0x00000080
1794 #define ANEG_CFG_ACK            0x00000040
1795 #define ANEG_CFG_RF2            0x00000020
1796 #define ANEG_CFG_RF1            0x00000010
1797 #define ANEG_CFG_PS2            0x00000001
1798 #define ANEG_CFG_PS1            0x00008000
1799 #define ANEG_CFG_HD             0x00004000
1800 #define ANEG_CFG_FD             0x00002000
1801 #define ANEG_CFG_INVAL          0x00001f06
1802
1803 };
1804 #define ANEG_OK         0
1805 #define ANEG_DONE       1
1806 #define ANEG_TIMER_ENAB 2
1807 #define ANEG_FAILED     -1
1808
1809 #define ANEG_STATE_SETTLE_TIME  10000
1810
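     /* Software arbitration state machine for the serdes autoneg exchange
      * (roughly the 802.3 clause-37 style process), stepped once per tick
      * by fiber_autoneg() below.  txconfig/rxconfig hold the raw config
      * words sent and received via MAC_TX_AUTO_NEG / MAC_RX_AUTO_NEG.
      */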
1811 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1812                                    struct tg3_fiber_aneginfo *ap)
1813 {
1814         unsigned long delta;
1815         u32 rx_cfg_reg;
1816         int ret;
1817
1818         if (ap->state == ANEG_STATE_UNKNOWN) {
1819                 ap->rxconfig = 0;
1820                 ap->link_time = 0;
1821                 ap->cur_time = 0;
1822                 ap->ability_match_cfg = 0;
1823                 ap->ability_match_count = 0;
1824                 ap->ability_match = 0;
1825                 ap->idle_match = 0;
1826                 ap->ack_match = 0;
1827         }
1828         ap->cur_time++;
1829
1830         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1831                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1832
1833                 if (rx_cfg_reg != ap->ability_match_cfg) {
1834                         ap->ability_match_cfg = rx_cfg_reg;
1835                         ap->ability_match = 0;
1836                         ap->ability_match_count = 0;
1837                 } else {
1838                         if (++ap->ability_match_count > 1) {
1839                                 ap->ability_match = 1;
1840                                 ap->ability_match_cfg = rx_cfg_reg;
1841                         }
1842                 }
1843                 if (rx_cfg_reg & ANEG_CFG_ACK)
1844                         ap->ack_match = 1;
1845                 else
1846                         ap->ack_match = 0;
1847
1848                 ap->idle_match = 0;
1849         } else {
1850                 ap->idle_match = 1;
1851                 ap->ability_match_cfg = 0;
1852                 ap->ability_match_count = 0;
1853                 ap->ability_match = 0;
1854                 ap->ack_match = 0;
1855
1856                 rx_cfg_reg = 0;
1857         }
1858
1859         ap->rxconfig = rx_cfg_reg;
1860         ret = ANEG_OK;
1861
1862         switch(ap->state) {
1863         case ANEG_STATE_UNKNOWN:
1864                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1865                         ap->state = ANEG_STATE_AN_ENABLE;
1866
1867                 /* fallthru */
1868         case ANEG_STATE_AN_ENABLE:
1869                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1870                 if (ap->flags & MR_AN_ENABLE) {
1871                         ap->link_time = 0;
1872                         ap->cur_time = 0;
1873                         ap->ability_match_cfg = 0;
1874                         ap->ability_match_count = 0;
1875                         ap->ability_match = 0;
1876                         ap->idle_match = 0;
1877                         ap->ack_match = 0;
1878
1879                         ap->state = ANEG_STATE_RESTART_INIT;
1880                 } else {
1881                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1882                 }
1883                 break;
1884
1885         case ANEG_STATE_RESTART_INIT:
1886                 ap->link_time = ap->cur_time;
1887                 ap->flags &= ~(MR_NP_LOADED);
1888                 ap->txconfig = 0;
1889                 tw32(MAC_TX_AUTO_NEG, 0);
1890                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1891                 tw32_f(MAC_MODE, tp->mac_mode);
1892                 udelay(40);
1893
1894                 ret = ANEG_TIMER_ENAB;
1895                 ap->state = ANEG_STATE_RESTART;
1896
1897                 /* fallthru */
1898         case ANEG_STATE_RESTART:
1899                 delta = ap->cur_time - ap->link_time;
1900                 if (delta > ANEG_STATE_SETTLE_TIME) {
1901                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1902                 } else {
1903                         ret = ANEG_TIMER_ENAB;
1904                 }
1905                 break;
1906
1907         case ANEG_STATE_DISABLE_LINK_OK:
1908                 ret = ANEG_DONE;
1909                 break;
1910
1911         case ANEG_STATE_ABILITY_DETECT_INIT:
1912                 ap->flags &= ~(MR_TOGGLE_TX);
1913                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1914                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1915                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1916                 tw32_f(MAC_MODE, tp->mac_mode);
1917                 udelay(40);
1918
1919                 ap->state = ANEG_STATE_ABILITY_DETECT;
1920                 break;
1921
1922         case ANEG_STATE_ABILITY_DETECT:
1923                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1924                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1925                 }
1926                 break;
1927
1928         case ANEG_STATE_ACK_DETECT_INIT:
1929                 ap->txconfig |= ANEG_CFG_ACK;
1930                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1931                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1932                 tw32_f(MAC_MODE, tp->mac_mode);
1933                 udelay(40);
1934
1935                 ap->state = ANEG_STATE_ACK_DETECT;
1936
1937                 /* fallthru */
1938         case ANEG_STATE_ACK_DETECT:
1939                 if (ap->ack_match != 0) {
1940                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1941                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1942                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1943                         } else {
1944                                 ap->state = ANEG_STATE_AN_ENABLE;
1945                         }
1946                 } else if (ap->ability_match != 0 &&
1947                            ap->rxconfig == 0) {
1948                         ap->state = ANEG_STATE_AN_ENABLE;
1949                 }
1950                 break;
1951
1952         case ANEG_STATE_COMPLETE_ACK_INIT:
1953                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1954                         ret = ANEG_FAILED;
1955                         break;
1956                 }
1957                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1958                                MR_LP_ADV_HALF_DUPLEX |
1959                                MR_LP_ADV_SYM_PAUSE |
1960                                MR_LP_ADV_ASYM_PAUSE |
1961                                MR_LP_ADV_REMOTE_FAULT1 |
1962                                MR_LP_ADV_REMOTE_FAULT2 |
1963                                MR_LP_ADV_NEXT_PAGE |
1964                                MR_TOGGLE_RX |
1965                                MR_NP_RX);
1966                 if (ap->rxconfig & ANEG_CFG_FD)
1967                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1968                 if (ap->rxconfig & ANEG_CFG_HD)
1969                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1970                 if (ap->rxconfig & ANEG_CFG_PS1)
1971                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1972                 if (ap->rxconfig & ANEG_CFG_PS2)
1973                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1974                 if (ap->rxconfig & ANEG_CFG_RF1)
1975                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1976                 if (ap->rxconfig & ANEG_CFG_RF2)
1977                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1978                 if (ap->rxconfig & ANEG_CFG_NP)
1979                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1980
1981                 ap->link_time = ap->cur_time;
1982
1983                 ap->flags ^= (MR_TOGGLE_TX);
1984                 if (ap->rxconfig & 0x0008)
1985                         ap->flags |= MR_TOGGLE_RX;
1986                 if (ap->rxconfig & ANEG_CFG_NP)
1987                         ap->flags |= MR_NP_RX;
1988                 ap->flags |= MR_PAGE_RX;
1989
1990                 ap->state = ANEG_STATE_COMPLETE_ACK;
1991                 ret = ANEG_TIMER_ENAB;
1992                 break;
1993
1994         case ANEG_STATE_COMPLETE_ACK:
1995                 if (ap->ability_match != 0 &&
1996                     ap->rxconfig == 0) {
1997                         ap->state = ANEG_STATE_AN_ENABLE;
1998                         break;
1999                 }
2000                 delta = ap->cur_time - ap->link_time;
2001                 if (delta > ANEG_STATE_SETTLE_TIME) {
2002                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2003                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2004                         } else {
2005                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2006                                     !(ap->flags & MR_NP_RX)) {
2007                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2008                                 } else {
2009                                         ret = ANEG_FAILED;
2010                                 }
2011                         }
2012                 }
2013                 break;
2014
2015         case ANEG_STATE_IDLE_DETECT_INIT:
2016                 ap->link_time = ap->cur_time;
2017                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2018                 tw32_f(MAC_MODE, tp->mac_mode);
2019                 udelay(40);
2020
2021                 ap->state = ANEG_STATE_IDLE_DETECT;
2022                 ret = ANEG_TIMER_ENAB;
2023                 break;
2024
2025         case ANEG_STATE_IDLE_DETECT:
2026                 if (ap->ability_match != 0 &&
2027                     ap->rxconfig == 0) {
2028                         ap->state = ANEG_STATE_AN_ENABLE;
2029                         break;
2030                 }
2031                 delta = ap->cur_time - ap->link_time;
2032                 if (delta > ANEG_STATE_SETTLE_TIME) {
2033                         /* XXX another gem from the Broadcom driver :( */
2034                         ap->state = ANEG_STATE_LINK_OK;
2035                 }
2036                 break;
2037
2038         case ANEG_STATE_LINK_OK:
2039                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2040                 ret = ANEG_DONE;
2041                 break;
2042
2043         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2044                 /* ??? unimplemented */
2045                 break;
2046
2047         case ANEG_STATE_NEXT_PAGE_WAIT:
2048                 /* ??? unimplemented */
2049                 break;
2050
2051         default:
2052                 ret = ANEG_FAILED;
2053                 break;
2054         }
2055
2056         return ret;
2057 }
2058
2059 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2060 {
2061         int res = 0;
2062         struct tg3_fiber_aneginfo aninfo;
2063         int status = ANEG_FAILED;
2064         unsigned int tick;
2065         u32 tmp;
2066
2067         tw32_f(MAC_TX_AUTO_NEG, 0);
2068
2069         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2070         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2071         udelay(40);
2072
2073         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2074         udelay(40);
2075
2076         memset(&aninfo, 0, sizeof(aninfo));
2077         aninfo.flags |= MR_AN_ENABLE;
2078         aninfo.state = ANEG_STATE_UNKNOWN;
2079         aninfo.cur_time = 0;
2080         tick = 0;
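             /* Step the state machine for at most ~195000 ticks of roughly
              * 1 usec each (on the order of 200 ms), or until it reports
              * done or failed.
              */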
2081         while (++tick < 195000) {
2082                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2083                 if (status == ANEG_DONE || status == ANEG_FAILED)
2084                         break;
2085
2086                 udelay(1);
2087         }
2088
2089         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2090         tw32_f(MAC_MODE, tp->mac_mode);
2091         udelay(40);
2092
2093         *flags = aninfo.flags;
2094
2095         if (status == ANEG_DONE &&
2096             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2097                              MR_LP_ADV_FULL_DUPLEX)))
2098                 res = 1;
2099
2100         return res;
2101 }
2102
2103 static void tg3_init_bcm8002(struct tg3 *tp)
2104 {
2105         u32 mac_status = tr32(MAC_STATUS);
2106         int i;
2107
2108         /* Reset when initializing for the first time or when we have a link. */
2109         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2110             !(mac_status & MAC_STATUS_PCS_SYNCED))
2111                 return;
2112
2113         /* Set PLL lock range. */
2114         tg3_writephy(tp, 0x16, 0x8007);
2115
2116         /* SW reset */
2117         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2118
2119         /* Wait for reset to complete. */
2120         /* XXX schedule_timeout() ... */
2121         for (i = 0; i < 500; i++)
2122                 udelay(10);
2123
2124         /* Config mode; select PMA/Ch 1 regs. */
2125         tg3_writephy(tp, 0x10, 0x8411);
2126
2127         /* Enable auto-lock and comdet, select txclk for tx. */
2128         tg3_writephy(tp, 0x11, 0x0a10);
2129
2130         tg3_writephy(tp, 0x18, 0x00a0);
2131         tg3_writephy(tp, 0x16, 0x41ff);
2132
2133         /* Assert and deassert POR. */
2134         tg3_writephy(tp, 0x13, 0x0400);
2135         udelay(40);
2136         tg3_writephy(tp, 0x13, 0x0000);
2137
2138         tg3_writephy(tp, 0x11, 0x0a50);
2139         udelay(40);
2140         tg3_writephy(tp, 0x11, 0x0a10);
2141
2142         /* Wait for signal to stabilize */
2143         /* XXX schedule_timeout() ... */
2144         for (i = 0; i < 15000; i++)
2145                 udelay(10);
2146
2147         /* Deselect the channel register so we can read the PHYID
2148          * later.
2149          */
2150         tg3_writephy(tp, 0x10, 0x8011);
2151 }
2152
2153 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2154 {
2155         u32 sg_dig_ctrl, sg_dig_status;
2156         u32 serdes_cfg, expected_sg_dig_ctrl;
2157         int workaround, port_a;
2158         int current_link_up;
2159
2160         serdes_cfg = 0;
2161         expected_sg_dig_ctrl = 0;
2162         workaround = 0;
2163         port_a = 1;
2164         current_link_up = 0;
2165
2166         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2167             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2168                 workaround = 1;
2169                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2170                         port_a = 0;
2171
2172                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2173                 /* preserve bits 20-23 for voltage regulator */
2174                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2175         }
2176
2177         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2178
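             /* Bit 31 of SG_DIG_CTRL appears to gate the hardware autoneg
              * block: it is set in expected_sg_dig_ctrl below and cleared
              * (0x01388400) when the link is being forced.
              */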
2179         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2180                 if (sg_dig_ctrl & (1 << 31)) {
2181                         if (workaround) {
2182                                 u32 val = serdes_cfg;
2183
2184                                 if (port_a)
2185                                         val |= 0xc010000;
2186                                 else
2187                                         val |= 0x4010000;
2188                                 tw32_f(MAC_SERDES_CFG, val);
2189                         }
2190                         tw32_f(SG_DIG_CTRL, 0x01388400);
2191                 }
2192                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2193                         tg3_setup_flow_control(tp, 0, 0);
2194                         current_link_up = 1;
2195                 }
2196                 goto out;
2197         }
2198
2199         /* Want auto-negotiation.  */
2200         expected_sg_dig_ctrl = 0x81388400;
2201
2202         /* Pause capability */
2203         expected_sg_dig_ctrl |= (1 << 11);
2204
2205         /* Asymmetric pause */
2206         expected_sg_dig_ctrl |= (1 << 12);
2207
2208         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2209                 if (workaround)
2210                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2211                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2212                 udelay(5);
2213                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2214
2215                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2216         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2217                                  MAC_STATUS_SIGNAL_DET)) {
2218                 int i;
2219
2220                 /* Give time to negotiate (~200ms) */
2221                 for (i = 0; i < 40000; i++) {
2222                         sg_dig_status = tr32(SG_DIG_STATUS);
2223                         if (sg_dig_status & (0x3))
2224                                 break;
2225                         udelay(5);
2226                 }
2227                 mac_status = tr32(MAC_STATUS);
2228
2229                 if ((sg_dig_status & (1 << 1)) &&
2230                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2231                         u32 local_adv, remote_adv;
2232
2233                         local_adv = ADVERTISE_PAUSE_CAP;
2234                         remote_adv = 0;
2235                         if (sg_dig_status & (1 << 19))
2236                                 remote_adv |= LPA_PAUSE_CAP;
2237                         if (sg_dig_status & (1 << 20))
2238                                 remote_adv |= LPA_PAUSE_ASYM;
2239
2240                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2241                         current_link_up = 1;
2242                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2243                 } else if (!(sg_dig_status & (1 << 1))) {
2244                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2245                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2246                         else {
2247                                 if (workaround) {
2248                                         u32 val = serdes_cfg;
2249
2250                                         if (port_a)
2251                                                 val |= 0xc010000;
2252                                         else
2253                                                 val |= 0x4010000;
2254
2255                                         tw32_f(MAC_SERDES_CFG, val);
2256                                 }
2257
2258                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2259                                 udelay(40);
2260
2261                                 /* Link parallel detection - link is up
2262                                  * only if we have PCS_SYNC and not
2263                                  * receiving config code words. */
2264                                 mac_status = tr32(MAC_STATUS);
2265                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2266                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2267                                         tg3_setup_flow_control(tp, 0, 0);
2268                                         current_link_up = 1;
2269                                 }
2270                         }
2271                 }
2272         }
2273
2274 out:
2275         return current_link_up;
2276 }
2277
2278 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2279 {
2280         int current_link_up = 0;
2281
2282         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2283                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2284                 goto out;
2285         }
2286
2287         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2288                 u32 flags;
2289                 int i;
2290   
2291                 if (fiber_autoneg(tp, &flags)) {
2292                         u32 local_adv, remote_adv;
2293
2294                         local_adv = ADVERTISE_PAUSE_CAP;
2295                         remote_adv = 0;
2296                         if (flags & MR_LP_ADV_SYM_PAUSE)
2297                                 remote_adv |= LPA_PAUSE_CAP;
2298                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2299                                 remote_adv |= LPA_PAUSE_ASYM;
2300
2301                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2302
2303                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2304                         current_link_up = 1;
2305                 }
2306                 for (i = 0; i < 30; i++) {
2307                         udelay(20);
2308                         tw32_f(MAC_STATUS,
2309                                (MAC_STATUS_SYNC_CHANGED |
2310                                 MAC_STATUS_CFG_CHANGED));
2311                         udelay(40);
2312                         if ((tr32(MAC_STATUS) &
2313                              (MAC_STATUS_SYNC_CHANGED |
2314                               MAC_STATUS_CFG_CHANGED)) == 0)
2315                                 break;
2316                 }
2317
2318                 mac_status = tr32(MAC_STATUS);
2319                 if (current_link_up == 0 &&
2320                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2321                     !(mac_status & MAC_STATUS_RCVD_CFG))
2322                         current_link_up = 1;
2323         } else {
2324                 /* Forcing 1000FD link up. */
2325                 current_link_up = 1;
2326                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2327
2328                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2329                 udelay(40);
2330         }
2331
2332 out:
2333         return current_link_up;
2334 }
2335
2336 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2337 {
2338         u32 orig_pause_cfg;
2339         u16 orig_active_speed;
2340         u8 orig_active_duplex;
2341         u32 mac_status;
2342         int current_link_up;
2343         int i;
2344
2345         orig_pause_cfg =
2346                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2347                                   TG3_FLAG_TX_PAUSE));
2348         orig_active_speed = tp->link_config.active_speed;
2349         orig_active_duplex = tp->link_config.active_duplex;
2350
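             /* If hardware autoneg is not in use and the link is already up
              * with nothing but PCS sync and signal detect reported, there
              * is nothing to renegotiate; just ack the change bits and
              * return.
              */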
2351         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2352             netif_carrier_ok(tp->dev) &&
2353             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2354                 mac_status = tr32(MAC_STATUS);
2355                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2356                                MAC_STATUS_SIGNAL_DET |
2357                                MAC_STATUS_CFG_CHANGED |
2358                                MAC_STATUS_RCVD_CFG);
2359                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2360                                    MAC_STATUS_SIGNAL_DET)) {
2361                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2362                                             MAC_STATUS_CFG_CHANGED));
2363                         return 0;
2364                 }
2365         }
2366
2367         tw32_f(MAC_TX_AUTO_NEG, 0);
2368
2369         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2370         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2371         tw32_f(MAC_MODE, tp->mac_mode);
2372         udelay(40);
2373
2374         if (tp->phy_id == PHY_ID_BCM8002)
2375                 tg3_init_bcm8002(tp);
2376
2377         /* Enable link change event even when serdes polling.  */
2378         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2379         udelay(40);
2380
2381         current_link_up = 0;
2382         mac_status = tr32(MAC_STATUS);
2383
2384         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2385                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2386         else
2387                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2388
2389         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2390         tw32_f(MAC_MODE, tp->mac_mode);
2391         udelay(40);
2392
2393         tp->hw_status->status =
2394                 (SD_STATUS_UPDATED |
2395                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2396
2397         for (i = 0; i < 100; i++) {
2398                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2399                                     MAC_STATUS_CFG_CHANGED));
2400                 udelay(5);
2401                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2402                                          MAC_STATUS_CFG_CHANGED)) == 0)
2403                         break;
2404         }
2405
2406         mac_status = tr32(MAC_STATUS);
2407         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2408                 current_link_up = 0;
2409                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2410                         tw32_f(MAC_MODE, (tp->mac_mode |
2411                                           MAC_MODE_SEND_CONFIGS));
2412                         udelay(1);
2413                         tw32_f(MAC_MODE, tp->mac_mode);
2414                 }
2415         }
2416
2417         if (current_link_up == 1) {
2418                 tp->link_config.active_speed = SPEED_1000;
2419                 tp->link_config.active_duplex = DUPLEX_FULL;
2420                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2421                                     LED_CTRL_LNKLED_OVERRIDE |
2422                                     LED_CTRL_1000MBPS_ON));
2423         } else {
2424                 tp->link_config.active_speed = SPEED_INVALID;
2425                 tp->link_config.active_duplex = DUPLEX_INVALID;
2426                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2427                                     LED_CTRL_LNKLED_OVERRIDE |
2428                                     LED_CTRL_TRAFFIC_OVERRIDE));
2429         }
2430
2431         if (current_link_up != netif_carrier_ok(tp->dev)) {
2432                 if (current_link_up)
2433                         netif_carrier_on(tp->dev);
2434                 else
2435                         netif_carrier_off(tp->dev);
2436                 tg3_link_report(tp);
2437         } else {
2438                 u32 now_pause_cfg =
2439                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2440                                          TG3_FLAG_TX_PAUSE);
2441                 if (orig_pause_cfg != now_pause_cfg ||
2442                     orig_active_speed != tp->link_config.active_speed ||
2443                     orig_active_duplex != tp->link_config.active_duplex)
2444                         tg3_link_report(tp);
2445         }
2446
2447         return 0;
2448 }
2449
2450 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2451 {
2452         int err;
2453
2454         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2455                 err = tg3_setup_fiber_phy(tp, force_reset);
2456         } else {
2457                 err = tg3_setup_copper_phy(tp, force_reset);
2458         }
2459
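             /* Only 1000 Mb/s half duplex gets the larger SLOT_TIME value in
              * MAC_TX_LENGTHS, presumably reflecting the longer slot time
              * that half-duplex gigabit operation requires.
              */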
2460         if (tp->link_config.active_speed == SPEED_1000 &&
2461             tp->link_config.active_duplex == DUPLEX_HALF)
2462                 tw32(MAC_TX_LENGTHS,
2463                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2464                       (6 << TX_LENGTHS_IPG_SHIFT) |
2465                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2466         else
2467                 tw32(MAC_TX_LENGTHS,
2468                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2469                       (6 << TX_LENGTHS_IPG_SHIFT) |
2470                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2471
2472         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2473                 if (netif_carrier_ok(tp->dev)) {
2474                         tw32(HOSTCC_STAT_COAL_TICKS,
2475                              DEFAULT_STAT_COAL_TICKS);
2476                 } else {
2477                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2478                 }
2479         }
2480
2481         return err;
2482 }
2483
2484 /* Tigon3 never reports partial packet sends.  So we do not
2485  * need special logic to handle SKBs that have not had all
2486  * of their frags sent yet, like SunGEM does.
2487  */
2488 static void tg3_tx(struct tg3 *tp)
2489 {
2490         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2491         u32 sw_idx = tp->tx_cons;
2492
2493         while (sw_idx != hw_idx) {
2494                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2495                 struct sk_buff *skb = ri->skb;
2496                 int i;
2497
2498                 if (unlikely(skb == NULL))
2499                         BUG();
2500
2501                 pci_unmap_single(tp->pdev,
2502                                  pci_unmap_addr(ri, mapping),
2503                                  skb_headlen(skb),
2504                                  PCI_DMA_TODEVICE);
2505
2506                 ri->skb = NULL;
2507
2508                 sw_idx = NEXT_TX(sw_idx);
2509
2510                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2511                         if (unlikely(sw_idx == hw_idx))
2512                                 BUG();
2513
2514                         ri = &tp->tx_buffers[sw_idx];
2515                         if (unlikely(ri->skb != NULL))
2516                                 BUG();
2517
2518                         pci_unmap_page(tp->pdev,
2519                                        pci_unmap_addr(ri, mapping),
2520                                        skb_shinfo(skb)->frags[i].size,
2521                                        PCI_DMA_TODEVICE);
2522
2523                         sw_idx = NEXT_TX(sw_idx);
2524                 }
2525
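                     /* This completion path runs from the interrupt/poll
                      * context, hence the _irq variant of kfree_skb.
                      */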
2526                 dev_kfree_skb_irq(skb);
2527         }
2528
2529         tp->tx_cons = sw_idx;
2530
2531         if (netif_queue_stopped(tp->dev) &&
2532             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2533                 netif_wake_queue(tp->dev);
2534 }
2535
2536 /* Returns size of skb allocated or < 0 on error.
2537  *
2538  * We only need to fill in the address because the other members
2539  * of the RX descriptor are invariant, see tg3_init_rings.
2540  *
2541  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2542  * posting buffers we only dirty the first cache line of the RX
2543  * descriptor (containing the address).  Whereas for the RX status
2544  * buffers the cpu only reads the last cacheline of the RX descriptor
2545  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2546  */
2547 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2548                             int src_idx, u32 dest_idx_unmasked)
2549 {
2550         struct tg3_rx_buffer_desc *desc;
2551         struct ring_info *map, *src_map;
2552         struct sk_buff *skb;
2553         dma_addr_t mapping;
2554         int skb_size, dest_idx;
2555
2556         src_map = NULL;
2557         switch (opaque_key) {
2558         case RXD_OPAQUE_RING_STD:
2559                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2560                 desc = &tp->rx_std[dest_idx];
2561                 map = &tp->rx_std_buffers[dest_idx];
2562                 if (src_idx >= 0)
2563                         src_map = &tp->rx_std_buffers[src_idx];
2564                 skb_size = RX_PKT_BUF_SZ;
2565                 break;
2566
2567         case RXD_OPAQUE_RING_JUMBO:
2568                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2569                 desc = &tp->rx_jumbo[dest_idx];
2570                 map = &tp->rx_jumbo_buffers[dest_idx];
2571                 if (src_idx >= 0)
2572                         src_map = &tp->rx_jumbo_buffers[src_idx];
2573                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2574                 break;
2575
2576         default:
2577                 return -EINVAL;
2578         }
2579
2580         /* Do not overwrite any of the map or rp information
2581          * until we are sure we can commit to a new buffer.
2582          *
2583          * Callers depend upon this behavior and assume that
2584          * we leave everything unchanged if we fail.
2585          */
2586         skb = dev_alloc_skb(skb_size);
2587         if (skb == NULL)
2588                 return -ENOMEM;
2589
2590         skb->dev = tp->dev;
2591         skb_reserve(skb, tp->rx_offset);
2592
2593         mapping = pci_map_single(tp->pdev, skb->data,
2594                                  skb_size - tp->rx_offset,
2595                                  PCI_DMA_FROMDEVICE);
2596
2597         map->skb = skb;
2598         pci_unmap_addr_set(map, mapping, mapping);
2599
2600         if (src_map != NULL)
2601                 src_map->skb = NULL;
2602
2603         desc->addr_hi = ((u64)mapping >> 32);
2604         desc->addr_lo = ((u64)mapping & 0xffffffff);
2605
2606         return skb_size;
2607 }
2608
2609 /* We only need to move over in the address because the other
2610  * members of the RX descriptor are invariant.  See notes above
2611  * tg3_alloc_rx_skb for full details.
2612  */
2613 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2614                            int src_idx, u32 dest_idx_unmasked)
2615 {
2616         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2617         struct ring_info *src_map, *dest_map;
2618         int dest_idx;
2619
2620         switch (opaque_key) {
2621         case RXD_OPAQUE_RING_STD:
2622                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2623                 dest_desc = &tp->rx_std[dest_idx];
2624                 dest_map = &tp->rx_std_buffers[dest_idx];
2625                 src_desc = &tp->rx_std[src_idx];
2626                 src_map = &tp->rx_std_buffers[src_idx];
2627                 break;
2628
2629         case RXD_OPAQUE_RING_JUMBO:
2630                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2631                 dest_desc = &tp->rx_jumbo[dest_idx];
2632                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2633                 src_desc = &tp->rx_jumbo[src_idx];
2634                 src_map = &tp->rx_jumbo_buffers[src_idx];
2635                 break;
2636
2637         default:
2638                 return;
2639         }
2640
2641         dest_map->skb = src_map->skb;
2642         pci_unmap_addr_set(dest_map, mapping,
2643                            pci_unmap_addr(src_map, mapping));
2644         dest_desc->addr_hi = src_desc->addr_hi;
2645         dest_desc->addr_lo = src_desc->addr_lo;
2646
2647         src_map->skb = NULL;
2648 }
2649
2650 #if TG3_VLAN_TAG_USED
2651 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2652 {
2653         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2654 }
2655 #endif
2656
2657 /* The RX ring scheme is composed of multiple rings which post fresh
2658  * buffers to the chip, and one special ring the chip uses to report
2659  * status back to the host.
2660  *
2661  * The special ring reports the status of received packets to the
2662  * host.  The chip does not write into the original descriptor the
2663  * RX buffer was obtained from.  The chip simply takes the original
2664  * descriptor as provided by the host, updates the status and length
2665  * field, then writes this into the next status ring entry.
2666  *
2667  * Each ring the host uses to post buffers to the chip is described
2668  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2669  * it is first placed into the on-chip ram.  When the packet's length
2670  * is known, it walks down the TG3_BDINFO entries to select the ring.
2671  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
2672  * whose MAXLEN covers the new packet's length is chosen.
2673  *
2674  * The "separate ring for rx status" scheme may sound queer, but it makes
2675  * sense from a cache coherency perspective.  If only the host writes
2676  * to the buffer post rings, and only the chip writes to the rx status
2677  * rings, then cache lines never move beyond shared-modified state.
2678  * If both the host and chip were to write into the same ring, cache line
2679  * eviction could occur since both entities want it in an exclusive state.
2680  */
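     /* In outline (a reading of the code below, not a hardware spec): the
      * host keeps producer indices for the std/jumbo buffer rings and a
      * consumer index for the return ring.  tg3_rx() walks the return ring,
      * uses the opaque cookie to locate the original buffer, replaces or
      * recycles it, and finally writes the updated indices back to the
      * chip's mailbox registers.
      */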
2681 static int tg3_rx(struct tg3 *tp, int budget)
2682 {
2683         u32 work_mask;
2684         u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2685         u16 hw_idx, sw_idx;
2686         int received;
2687
2688         hw_idx = tp->hw_status->idx[0].rx_producer;
2689         /* We need to order the read of hw_idx and the read of
2690          * the opaque cookie: the descriptor contents must not be
2691          * read before the producer index that announces them.
2692          */
2693         rmb();
2694         sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2695         work_mask = 0;
2696         received = 0;
2697         while (sw_idx != hw_idx && budget > 0) {
2698                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2699                 unsigned int len;
2700                 struct sk_buff *skb;
2701                 dma_addr_t dma_addr;
2702                 u32 opaque_key, desc_idx, *post_ptr;
2703
2704                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2705                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2706                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2707                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2708                                                   mapping);
2709                         skb = tp->rx_std_buffers[desc_idx].skb;
2710                         post_ptr = &tp->rx_std_ptr;
2711                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2712                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2713                                                   mapping);
2714                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2715                         post_ptr = &tp->rx_jumbo_ptr;
2716                 }
2717                 else {
2718                         goto next_pkt_nopost;
2719                 }
2720
2721                 work_mask |= opaque_key;
2722
2723                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2724                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2725                 drop_it:
2726                         tg3_recycle_rx(tp, opaque_key,
2727                                        desc_idx, *post_ptr);
2728                 drop_it_no_recycle:
2729                         /* Other statistics kept track of by card. */
2730                         tp->net_stats.rx_dropped++;
2731                         goto next_pkt;
2732                 }
2733
2734                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2735
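                     /* Copy-break: large frames are handed up in the original
                      * DMA buffer once a replacement has been posted; small
                      * frames are copied into a fresh skb so the original
                      * buffer can simply be recycled in place.
                      */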
2736                 if (len > RX_COPY_THRESHOLD &&
2737                     tp->rx_offset == 2) {
2738                         /* rx_offset != 2 iff this is a 5701 card running
2739                          * in PCI-X mode [see tg3_get_invariants()].
2740                          */
2741                         int skb_size;
2742
2743                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2744                                                     desc_idx, *post_ptr);
2745                         if (skb_size < 0)
2746                                 goto drop_it;
2747
2748                         pci_unmap_single(tp->pdev, dma_addr,
2749                                          skb_size - tp->rx_offset,
2750                                          PCI_DMA_FROMDEVICE);
2751
2752                         skb_put(skb, len);
2753                 } else {
2754                         struct sk_buff *copy_skb;
2755
2756                         tg3_recycle_rx(tp, opaque_key,
2757                                        desc_idx, *post_ptr);
2758
2759                         copy_skb = dev_alloc_skb(len + 2);
2760                         if (copy_skb == NULL)
2761                                 goto drop_it_no_recycle;
2762
2763                         copy_skb->dev = tp->dev;
2764                         skb_reserve(copy_skb, 2);
2765                         skb_put(copy_skb, len);
2766                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2767                         memcpy(copy_skb->data, skb->data, len);
2768                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2769
2770                         /* We'll reuse the original ring buffer. */
2771                         skb = copy_skb;
2772                 }
2773
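                     /* RX checksum offload: the chip deposits the TCP/UDP checksum
                      * it computed in the descriptor; the driver treats 0xffff as
                      * "checksum verified", letting the stack skip its own check.
                      */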
2774                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2775                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2776                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2777                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2778                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2779                 else
2780                         skb->ip_summed = CHECKSUM_NONE;
2781
2782                 skb->protocol = eth_type_trans(skb, tp->dev);
2783 #if TG3_VLAN_TAG_USED
2784                 if (tp->vlgrp != NULL &&
2785                     desc->type_flags & RXD_FLAG_VLAN) {
2786                         tg3_vlan_rx(tp, skb,
2787                                     desc->err_vlan & RXD_VLAN_MASK);
2788                 } else
2789 #endif
2790                         netif_receive_skb(skb);
2791
2792                 tp->dev->last_rx = jiffies;
2793                 received++;
2794                 budget--;
2795
2796 next_pkt:
2797                 (*post_ptr)++;
2798 next_pkt_nopost:
2799                 rx_rcb_ptr++;
2800                 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2801         }
2802
2803         /* ACK the status ring. */
2804         tp->rx_rcb_ptr = rx_rcb_ptr;
2805         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2806                      (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2807
2808         /* Refill RX ring(s). */
2809         if (work_mask & RXD_OPAQUE_RING_STD) {
2810                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2811                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2812                              sw_idx);
2813         }
2814         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2815                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2816                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2817                              sw_idx);
2818         }
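             /* mmiowb() orders the posted mailbox writes above with respect to
              * MMIO issued later from another CPU (a no-op on most arches).
              */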
2819         mmiowb();
2820
2821         return received;
2822 }
2823
2824 static int tg3_poll(struct net_device *netdev, int *budget)
2825 {
2826         struct tg3 *tp = netdev_priv(netdev);
2827         struct tg3_hw_status *sblk = tp->hw_status;
2828         unsigned long flags;
2829         int done;
2830
2831         spin_lock_irqsave(&tp->lock, flags);
2832
2833         /* handle link change and other phy events */
2834         if (!(tp->tg3_flags &
2835               (TG3_FLAG_USE_LINKCHG_REG |
2836                TG3_FLAG_POLL_SERDES))) {
2837                 if (sblk->status & SD_STATUS_LINK_CHG) {
2838                         sblk->status = SD_STATUS_UPDATED |
2839                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2840                         tg3_setup_phy(tp, 0);
2841                 }
2842         }
2843
2844         /* run TX completion thread */
2845         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2846                 spin_lock(&tp->tx_lock);
2847                 tg3_tx(tp);
2848                 spin_unlock(&tp->tx_lock);
2849         }
2850
2851         spin_unlock_irqrestore(&tp->lock, flags);
2852
2853         /* run RX thread, within the bounds set by NAPI.
2854          * All RX "locking" is done by ensuring outside
2855          * code synchronizes with dev->poll()
2856          */
2857         done = 1;
2858         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2859                 int orig_budget = *budget;
2860                 int work_done;
2861
2862                 if (orig_budget > netdev->quota)
2863                         orig_budget = netdev->quota;
2864
2865                 work_done = tg3_rx(tp, orig_budget);
2866
2867                 *budget -= work_done;
2868                 netdev->quota -= work_done;
2869
2870                 if (work_done >= orig_budget)
2871                         done = 0;
2872         }
2873
2874         /* if no more work, tell net stack and NIC we're done */
2875         if (done) {
2876                 spin_lock_irqsave(&tp->lock, flags);
2877                 __netif_rx_complete(netdev);
2878                 tg3_restart_ints(tp);
2879                 spin_unlock_irqrestore(&tp->lock, flags);
2880         }
2881
2882         return (done ? 0 : 1);
2883 }
2884
2885 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2886 {
2887         struct tg3_hw_status *sblk = tp->hw_status;
2888         unsigned int work_exists = 0;
2889
2890         /* check for phy events */
2891         if (!(tp->tg3_flags &
2892               (TG3_FLAG_USE_LINKCHG_REG |
2893                TG3_FLAG_POLL_SERDES))) {
2894                 if (sblk->status & SD_STATUS_LINK_CHG)
2895                         work_exists = 1;
2896         }
2897         /* check for RX/TX work to do */
2898         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2899             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2900                 work_exists = 1;
2901
2902         return work_exists;
2903 }
2904
2905 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2906 {
2907         struct net_device *dev = dev_id;
2908         struct tg3 *tp = netdev_priv(dev);
2909         struct tg3_hw_status *sblk = tp->hw_status;
2910         unsigned long flags;
2911         unsigned int handled = 1;
2912
2913         spin_lock_irqsave(&tp->lock, flags);
2914
2915         /* In INTx mode, it is possible for the interrupt to arrive at the
2916          * CPU before the status block that was posted just prior to it.
2917          * Reading the PCI State register will confirm whether the
2918          * interrupt is ours and will flush the status block.
2919          */
2920         if ((sblk->status & SD_STATUS_UPDATED) ||
2921             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
2922                 /*
2923                  * writing any value to intr-mbox-0 clears PCI INTA# and
2924                  * chip-internal interrupt pending events.
2925                  * writing non-zero to intr-mbox-0 additionally tells the
2926                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2927                  * event coalescing.
2928                  */
2929                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2930                              0x00000001);
2931                 /*
2932                  * Flush PCI write.  This also guarantees that our
2933                  * status block has been flushed to host memory.
2934                  */
2935                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2936                 sblk->status &= ~SD_STATUS_UPDATED;
2937
2938                 if (likely(tg3_has_work(dev, tp)))
2939                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2940                 else {
2941                         /* no work, shared interrupt perhaps?  re-enable
2942                          * interrupts, and flush that PCI write
2943                          */
2944                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2945                                 0x00000000);
2946                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2947                 }
2948         } else {        /* shared interrupt */
2949                 handled = 0;
2950         }
2951
2952         spin_unlock_irqrestore(&tp->lock, flags);
2953
2954         return IRQ_RETVAL(handled);
2955 }
2956
2957 static int tg3_init_hw(struct tg3 *);
2958 static int tg3_halt(struct tg3 *);
2959
2960 #ifdef CONFIG_NET_POLL_CONTROLLER
2961 static void tg3_poll_controller(struct net_device *dev)
2962 {
2963         tg3_interrupt(dev->irq, dev, NULL);
2964 }
2965 #endif
2966
2967 static void tg3_reset_task(void *_data)
2968 {
2969         struct tg3 *tp = _data;
2970         unsigned int restart_timer;
2971
2972         tg3_netif_stop(tp);
2973
2974         spin_lock_irq(&tp->lock);
2975         spin_lock(&tp->tx_lock);
2976
2977         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2978         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2979
2980         tg3_halt(tp);
2981         tg3_init_hw(tp);
2982
2983         tg3_netif_start(tp);
2984
2985         spin_unlock(&tp->tx_lock);
2986         spin_unlock_irq(&tp->lock);
2987
2988         if (restart_timer)
2989                 mod_timer(&tp->timer, jiffies + 1);
2990 }
2991
2992 static void tg3_tx_timeout(struct net_device *dev)
2993 {
2994         struct tg3 *tp = netdev_priv(dev);
2995
2996         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2997                dev->name);
2998
2999         schedule_work(&tp->reset_task);
3000 }
3001
3002 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3003
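     /* Work around a hardware limitation where a TX DMA buffer may not cross
      * a 4GB boundary (see tg3_4g_overflow_test() below): copy the whole frame
      * into a fresh, linear SKB, queue that with a single mapping, and unmap
      * the ring entries that were already filled in for the original skb.
      */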
3004 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3005                                        u32 guilty_entry, int guilty_len,
3006                                        u32 last_plus_one, u32 *start, u32 mss)
3007 {
3008         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3009         dma_addr_t new_addr;
3010         u32 entry = *start;
3011         int i;
3012
3013         if (!new_skb) {
3014                 dev_kfree_skb(skb);
3015                 return -1;
3016         }
3017
3018         /* New SKB is guaranteed to be linear. */
3019         entry = *start;
3020         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3021                                   PCI_DMA_TODEVICE);
3022         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3023                     (skb->ip_summed == CHECKSUM_HW) ?
3024                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3025         *start = NEXT_TX(entry);
3026
3027         /* Now clean up the sw ring entries. */
3028         i = 0;
3029         while (entry != last_plus_one) {
3030                 int len;
3031
3032                 if (i == 0)
3033                         len = skb_headlen(skb);
3034                 else
3035                         len = skb_shinfo(skb)->frags[i-1].size;
3036                 pci_unmap_single(tp->pdev,
3037                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3038                                  len, PCI_DMA_TODEVICE);
3039                 if (i == 0) {
3040                         tp->tx_buffers[entry].skb = new_skb;
3041                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3042                 } else {
3043                         tp->tx_buffers[entry].skb = NULL;
3044                 }
3045                 entry = NEXT_TX(entry);
3046                 i++;
3047         }
3048
3049         dev_kfree_skb(skb);
3050
3051         return 0;
3052 }
3053
3054 static void tg3_set_txd(struct tg3 *tp, int entry,
3055                         dma_addr_t mapping, int len, u32 flags,
3056                         u32 mss_and_is_end)
3057 {
3058         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3059         int is_end = (mss_and_is_end & 0x1);
3060         u32 mss = (mss_and_is_end >> 1);
3061         u32 vlan_tag = 0;
3062
3063         if (is_end)
3064                 flags |= TXD_FLAG_END;
3065         if (flags & TXD_FLAG_VLAN) {
3066                 vlan_tag = flags >> 16;
3067                 flags &= 0xffff;
3068         }
3069         vlan_tag |= (mss << TXD_MSS_SHIFT);
3070
3071         txd->addr_hi = ((u64) mapping >> 32);
3072         txd->addr_lo = ((u64) mapping & 0xffffffff);
3073         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3074         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3075 }
3076
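     /* True when a mapping of 'len' bytes at 'mapping' would wrap a 4GB
      * boundary.  Example: base = 0xffffff00, len = 0x200 gives
      * base + len + 8 = 0x108 after 32-bit wraparound, which is < base,
      * so the buffer cannot be handed to the chip as-is.
      */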
3077 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3078 {
3079         u32 base = (u32) mapping & 0xffffffff;
3080
3081         return ((base > 0xffffdcc0) &&
3082                 (base + len + 8 < base));
3083 }
3084
3085 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3086 {
3087         struct tg3 *tp = netdev_priv(dev);
3088         dma_addr_t mapping;
3089         unsigned int i;
3090         u32 len, entry, base_flags, mss;
3091         int would_hit_hwbug;
3092         unsigned long flags;
3093
3094         len = skb_headlen(skb);
3095
3096         /* No BH disabling for tx_lock here.  We are running in BH disabled
3097          * context and TX reclaim runs via tp->poll inside of a software
3098          * interrupt.  Rejoice!
3099          *
3100          * Actually, things are not so simple.  If we are to take a hw
3101          * IRQ here, we can deadlock, consider:
3102          *
3103          *       CPU1           CPU2
3104          *   tg3_start_xmit
3105          *   take tp->tx_lock
3106          *                      tg3_timer
3107          *                      take tp->lock
3108          *   tg3_interrupt
3109          *   spin on tp->lock
3110          *                      spin on tp->tx_lock
3111          *
3112          * So we really do need to disable interrupts when taking
3113          * tx_lock here.
3114          */
3115         local_irq_save(flags);
3116         if (!spin_trylock(&tp->tx_lock)) { 
3117                 local_irq_restore(flags);
3118                 return NETDEV_TX_LOCKED; 
3119         } 
3120
3121         /* This is a hard error, log it. */
3122         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3123                 netif_stop_queue(dev);
3124                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3125                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3126                        dev->name);
3127                 return NETDEV_TX_BUSY;
3128         }
3129
3130         entry = tp->tx_prod;
3131         base_flags = 0;
3132         if (skb->ip_summed == CHECKSUM_HW)
3133                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3134 #if TG3_TSO_SUPPORT != 0
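             /* TSO: when the stack hands us an oversized segment, pass the MSS
              * to the chip in the descriptor and pre-cook the headers: the IP
              * total length is set to one segment's worth and the TCP checksum
              * is seeded with the pseudo-header (or zeroed on HW_TSO chips,
              * which compute it themselves).
              */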
3135         mss = 0;
3136         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3137             (mss = skb_shinfo(skb)->tso_size) != 0) {
3138                 int tcp_opt_len, ip_tcp_len;
3139
3140                 if (skb_header_cloned(skb) &&
3141                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3142                         dev_kfree_skb(skb);
3143                         goto out_unlock;
3144                 }
3145
3146                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3147                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3148
3149                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3150                                TXD_FLAG_CPU_POST_DMA);
3151
3152                 skb->nh.iph->check = 0;
3153                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3154                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3155                         skb->h.th->check = 0;
3156                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3157                 }
3158                 else {
3159                         skb->h.th->check =
3160                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3161                                                    skb->nh.iph->daddr,
3162                                                    0, IPPROTO_TCP, 0);
3163                 }
3164
3165                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3166                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3167                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3168                                 int tsflags;
3169
3170                                 tsflags = ((skb->nh.iph->ihl - 5) +
3171                                            (tcp_opt_len >> 2));
3172                                 mss |= (tsflags << 11);
3173                         }
3174                 } else {
3175                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3176                                 int tsflags;
3177
3178                                 tsflags = ((skb->nh.iph->ihl - 5) +
3179                                            (tcp_opt_len >> 2));
3180                                 base_flags |= tsflags << 12;
3181                         }
3182                 }
3183         }
3184 #else
3185         mss = 0;
3186 #endif
3187 #if TG3_VLAN_TAG_USED
3188         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3189                 base_flags |= (TXD_FLAG_VLAN |
3190                                (vlan_tx_tag_get(skb) << 16));
3191 #endif
3192
3193         /* Queue skb data, a.k.a. the main skb fragment. */
3194         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3195
3196         tp->tx_buffers[entry].skb = skb;
3197         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3198
3199         would_hit_hwbug = 0;
3200
3201         if (tg3_4g_overflow_test(mapping, len))
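             /* Record the slot after the first mapping that trips the 4GB test;
              * the '+ 1' bias lets slot 0 be distinguished from "no hit".
              */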
3202                 would_hit_hwbug = entry + 1;
3203
3204         tg3_set_txd(tp, entry, mapping, len, base_flags,
3205                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3206
3207         entry = NEXT_TX(entry);
3208
3209         /* Now loop through additional data fragments, and queue them. */
3210         if (skb_shinfo(skb)->nr_frags > 0) {
3211                 unsigned int i, last;
3212
3213                 last = skb_shinfo(skb)->nr_frags - 1;
3214                 for (i = 0; i <= last; i++) {
3215                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3216
3217                         len = frag->size;
3218                         mapping = pci_map_page(tp->pdev,
3219                                                frag->page,
3220                                                frag->page_offset,
3221                                                len, PCI_DMA_TODEVICE);
3222
3223                         tp->tx_buffers[entry].skb = NULL;
3224                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3225
3226                         if (tg3_4g_overflow_test(mapping, len)) {
3227                                 /* Only one should match. */
3228                                 if (would_hit_hwbug)
3229                                         BUG();
3230                                 would_hit_hwbug = entry + 1;
3231                         }
3232
3233                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3234                                 tg3_set_txd(tp, entry, mapping, len,
3235                                             base_flags, (i == last)|(mss << 1));
3236                         else
3237                                 tg3_set_txd(tp, entry, mapping, len,
3238                                             base_flags, (i == last));
3239
3240                         entry = NEXT_TX(entry);
3241                 }
3242         }
3243
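             /* A mapping above crossed a 4GB boundary.  Rewind to this packet's
              * first descriptor, walk forward to find the guilty entry and its
              * length, then let the workaround re-queue the packet as a single
              * linear copy starting at 'start'.
              */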
3244         if (would_hit_hwbug) {
3245                 u32 last_plus_one = entry;
3246                 u32 start;
3247                 unsigned int len = 0;
3248
3249                 would_hit_hwbug -= 1;
3250                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3251                 entry &= (TG3_TX_RING_SIZE - 1);
3252                 start = entry;
3253                 i = 0;
3254                 while (entry != last_plus_one) {
3255                         if (i == 0)
3256                                 len = skb_headlen(skb);
3257                         else
3258                                 len = skb_shinfo(skb)->frags[i-1].size;
3259
3260                         if (entry == would_hit_hwbug)
3261                                 break;
3262
3263                         i++;
3264                         entry = NEXT_TX(entry);
3265
3266                 }
3267
3268                 /* If the workaround fails due to memory/mapping
3269                  * failure, silently drop this packet.
3270                  */
3271                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3272                                                 entry, len,
3273                                                 last_plus_one,
3274                                                 &start, mss))
3275                         goto out_unlock;
3276
3277                 entry = start;
3278         }
3279
3280         /* Packets are ready, update Tx producer idx locally and on card. */
3281         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3282
3283         tp->tx_prod = entry;
3284         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3285                 netif_stop_queue(dev);
3286
3287 out_unlock:
3288         mmiowb();
3289         spin_unlock_irqrestore(&tp->tx_lock, flags);
3290
3291         dev->trans_start = jiffies;
3292
3293         return NETDEV_TX_OK;
3294 }
3295
3296 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3297                                int new_mtu)
3298 {
3299         dev->mtu = new_mtu;
3300
3301         if (new_mtu > ETH_DATA_LEN)
3302                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3303         else
3304                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3305 }
3306
3307 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3308 {
3309         struct tg3 *tp = netdev_priv(dev);
3310
3311         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3312                 return -EINVAL;
3313
3314         if (!netif_running(dev)) {
3315                 /* We'll just catch it later when the
3316                  * device is brought up.
3317                  */
3318                 tg3_set_mtu(dev, tp, new_mtu);
3319                 return 0;
3320         }
3321
3322         tg3_netif_stop(tp);
3323         spin_lock_irq(&tp->lock);
3324         spin_lock(&tp->tx_lock);
3325
3326         tg3_halt(tp);
3327
3328         tg3_set_mtu(dev, tp, new_mtu);
3329
3330         tg3_init_hw(tp);
3331
3332         tg3_netif_start(tp);
3333
3334         spin_unlock(&tp->tx_lock);
3335         spin_unlock_irq(&tp->lock);
3336
3337         return 0;
3338 }
3339
3340 /* Free up pending packets in all rx/tx rings.
3341  *
3342  * The chip has been shut down and the driver detached from
3343  * the networking stack, so no interrupts or new tx packets will
3344  * end up in the driver.  tp->{tx,}lock is not held and we are not
3345  * in an interrupt context and thus may sleep.
3346  */
3347 static void tg3_free_rings(struct tg3 *tp)
3348 {
3349         struct ring_info *rxp;
3350         int i;
3351
3352         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3353                 rxp = &tp->rx_std_buffers[i];
3354
3355                 if (rxp->skb == NULL)
3356                         continue;
3357                 pci_unmap_single(tp->pdev,
3358                                  pci_unmap_addr(rxp, mapping),
3359                                  RX_PKT_BUF_SZ - tp->rx_offset,
3360                                  PCI_DMA_FROMDEVICE);
3361                 dev_kfree_skb_any(rxp->skb);
3362                 rxp->skb = NULL;
3363         }
3364
3365         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3366                 rxp = &tp->rx_jumbo_buffers[i];
3367
3368                 if (rxp->skb == NULL)
3369                         continue;
3370                 pci_unmap_single(tp->pdev,
3371                                  pci_unmap_addr(rxp, mapping),
3372                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3373                                  PCI_DMA_FROMDEVICE);
3374                 dev_kfree_skb_any(rxp->skb);
3375                 rxp->skb = NULL;
3376         }
3377
3378         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3379                 struct tx_ring_info *txp;
3380                 struct sk_buff *skb;
3381                 int j;
3382
3383                 txp = &tp->tx_buffers[i];
3384                 skb = txp->skb;
3385
3386                 if (skb == NULL) {
3387                         i++;
3388                         continue;
3389                 }
3390
3391                 pci_unmap_single(tp->pdev,
3392                                  pci_unmap_addr(txp, mapping),
3393                                  skb_headlen(skb),
3394                                  PCI_DMA_TODEVICE);
3395                 txp->skb = NULL;
3396
3397                 i++;
3398
3399                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3400                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3401                         pci_unmap_page(tp->pdev,
3402                                        pci_unmap_addr(txp, mapping),
3403                                        skb_shinfo(skb)->frags[j].size,
3404                                        PCI_DMA_TODEVICE);
3405                         i++;
3406                 }
3407
3408                 dev_kfree_skb_any(skb);
3409         }
3410 }
3411
3412 /* Initialize tx/rx rings for packet processing.
3413  *
3414  * The chip has been shut down and the driver detached from
3415  * the networking stack, so no interrupts or new tx packets will
3416  * end up in the driver.  tp->{tx,}lock are held and thus
3417  * we may not sleep.
3418  */
3419 static void tg3_init_rings(struct tg3 *tp)
3420 {
3421         u32 i;
3422
3423         /* Free up all the SKBs. */
3424         tg3_free_rings(tp);
3425
3426         /* Zero out all descriptors. */
3427         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3428         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3429         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3430         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3431
3432         /* Initialize invariants of the rings; we only set this
3433          * stuff once.  This works because the card does not
3434          * write into the rx buffer posting rings.
3435          */
3436         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3437                 struct tg3_rx_buffer_desc *rxd;
3438
3439                 rxd = &tp->rx_std[i];
3440                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3441                         << RXD_LEN_SHIFT;
3442                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3443                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3444                                (i << RXD_OPAQUE_INDEX_SHIFT));
3445         }
3446
3447         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3448                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3449                         struct tg3_rx_buffer_desc *rxd;
3450
3451                         rxd = &tp->rx_jumbo[i];
3452                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3453                                 << RXD_LEN_SHIFT;
3454                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3455                                 RXD_FLAG_JUMBO;
3456                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3457                                (i << RXD_OPAQUE_INDEX_SHIFT));
3458                 }
3459         }
3460
3461         /* Now allocate fresh SKBs for each rx ring. */
3462         for (i = 0; i < tp->rx_pending; i++) {
3463                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3464                                      -1, i) < 0)
3465                         break;
3466         }
3467
3468         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3469                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3470                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3471                                              -1, i) < 0)
3472                                 break;
3473                 }
3474         }
3475 }
3476
3477 /*
3478  * Must not be invoked with interrupt sources disabled and
3479  * the hardware shut down.
3480  */
3481 static void tg3_free_consistent(struct tg3 *tp)
3482 {
3483         if (tp->rx_std_buffers) {
3484                 kfree(tp->rx_std_buffers);
3485                 tp->rx_std_buffers = NULL;
3486         }
3487         if (tp->rx_std) {
3488                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3489                                     tp->rx_std, tp->rx_std_mapping);
3490                 tp->rx_std = NULL;
3491         }
3492         if (tp->rx_jumbo) {
3493                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3494                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3495                 tp->rx_jumbo = NULL;
3496         }
3497         if (tp->rx_rcb) {
3498                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3499                                     tp->rx_rcb, tp->rx_rcb_mapping);
3500                 tp->rx_rcb = NULL;
3501         }
3502         if (tp->tx_ring) {
3503                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3504                         tp->tx_ring, tp->tx_desc_mapping);
3505                 tp->tx_ring = NULL;
3506         }
3507         if (tp->hw_status) {
3508                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3509                                     tp->hw_status, tp->status_mapping);
3510                 tp->hw_status = NULL;
3511         }
3512         if (tp->hw_stats) {
3513                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3514                                     tp->hw_stats, tp->stats_mapping);
3515                 tp->hw_stats = NULL;
3516         }
3517 }
3518
3519 /*
3520  * Must not be invoked with interrupt sources disabled and
3521  * the hardware shut down.  Can sleep.
3522  */
3523 static int tg3_alloc_consistent(struct tg3 *tp)
3524 {
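             /* One allocation backs all three bookkeeping arrays (std RX,
              * jumbo RX and TX ring info); the pointers below carve it up.
              */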
3525         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3526                                       (TG3_RX_RING_SIZE +
3527                                        TG3_RX_JUMBO_RING_SIZE)) +
3528                                      (sizeof(struct tx_ring_info) *
3529                                       TG3_TX_RING_SIZE),
3530                                      GFP_KERNEL);
3531         if (!tp->rx_std_buffers)
3532                 return -ENOMEM;
3533
3534         memset(tp->rx_std_buffers, 0,
3535                (sizeof(struct ring_info) *
3536                 (TG3_RX_RING_SIZE +
3537                  TG3_RX_JUMBO_RING_SIZE)) +
3538                (sizeof(struct tx_ring_info) *
3539                 TG3_TX_RING_SIZE));
3540
3541         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3542         tp->tx_buffers = (struct tx_ring_info *)
3543                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3544
3545         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3546                                           &tp->rx_std_mapping);
3547         if (!tp->rx_std)
3548                 goto err_out;
3549
3550         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3551                                             &tp->rx_jumbo_mapping);
3552
3553         if (!tp->rx_jumbo)
3554                 goto err_out;
3555
3556         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3557                                           &tp->rx_rcb_mapping);
3558         if (!tp->rx_rcb)
3559                 goto err_out;
3560
3561         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3562                                            &tp->tx_desc_mapping);
3563         if (!tp->tx_ring)
3564                 goto err_out;
3565
3566         tp->hw_status = pci_alloc_consistent(tp->pdev,
3567                                              TG3_HW_STATUS_SIZE,
3568                                              &tp->status_mapping);
3569         if (!tp->hw_status)
3570                 goto err_out;
3571
3572         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3573                                             sizeof(struct tg3_hw_stats),
3574                                             &tp->stats_mapping);
3575         if (!tp->hw_stats)
3576                 goto err_out;
3577
3578         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3579         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3580
3581         return 0;
3582
3583 err_out:
3584         tg3_free_consistent(tp);
3585         return -ENOMEM;
3586 }
3587
3588 #define MAX_WAIT_CNT 1000
3589
3590 /* To stop a block, clear the enable bit and poll till it
3591  * clears.  tp->lock is held.
3592  */
3593 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3594 {
3595         unsigned int i;
3596         u32 val;
3597
3598         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3599                 switch (ofs) {
3600                 case RCVLSC_MODE:
3601                 case DMAC_MODE:
3602                 case MBFREE_MODE:
3603                 case BUFMGR_MODE:
3604                 case MEMARB_MODE:
3605                         /* We can't enable/disable these bits of the
3606                          * 5705/5750, just say success.
3607                          */
3608                         return 0;
3609
3610                 default:
3611                         break;
3612                 };
3613         }
3614
3615         val = tr32(ofs);
3616         val &= ~enable_bit;
3617         tw32_f(ofs, val);
3618
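             /* Poll for the enable bit to clear, up to MAX_WAIT_CNT * 100us = 100ms. */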
3619         for (i = 0; i < MAX_WAIT_CNT; i++) {
3620                 udelay(100);
3621                 val = tr32(ofs);
3622                 if ((val & enable_bit) == 0)
3623                         break;
3624         }
3625
3626         if (i == MAX_WAIT_CNT) {
3627                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3628                        "ofs=%lx enable_bit=%x\n",
3629                        ofs, enable_bit);
3630                 return -ENODEV;
3631         }
3632
3633         return 0;
3634 }
3635
3636 /* tp->lock is held. */
3637 static int tg3_abort_hw(struct tg3 *tp)
3638 {
3639         int i, err;
3640
3641         tg3_disable_ints(tp);
3642
3643         tp->rx_mode &= ~RX_MODE_ENABLE;
3644         tw32_f(MAC_RX_MODE, tp->rx_mode);
3645         udelay(10);
3646
3647         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3648         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3649         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3650         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3651         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3652         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3653
3654         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3655         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3656         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3657         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3658         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3659         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3660         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3661         if (err)
3662                 goto out;
3663
3664         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3665         tw32_f(MAC_MODE, tp->mac_mode);
3666         udelay(40);
3667
3668         tp->tx_mode &= ~TX_MODE_ENABLE;
3669         tw32_f(MAC_TX_MODE, tp->tx_mode);
3670
3671         for (i = 0; i < MAX_WAIT_CNT; i++) {
3672                 udelay(100);
3673                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3674                         break;
3675         }
3676         if (i >= MAX_WAIT_CNT) {
3677                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3678                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3679                        tp->dev->name, tr32(MAC_TX_MODE));
3680                 return -ENODEV;
3681         }
3682
3683         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3684         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3685         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3686
3687         tw32(FTQ_RESET, 0xffffffff);
3688         tw32(FTQ_RESET, 0x00000000);
3689
3690         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3691         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3692         if (err)
3693                 goto out;
3694
3695         if (tp->hw_status)
3696                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3697         if (tp->hw_stats)
3698                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3699
3700 out:
3701         return err;
3702 }
3703
3704 /* tp->lock is held. */
3705 static int tg3_nvram_lock(struct tg3 *tp)
3706 {
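             /* Request NVRAM software-arbitration semaphore 1 and poll the
              * grant bit for up to 8000 * 20us = 160ms before giving up.
              */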
3707         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3708                 int i;
3709
3710                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3711                 for (i = 0; i < 8000; i++) {
3712                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3713                                 break;
3714                         udelay(20);
3715                 }
3716                 if (i == 8000)
3717                         return -ENODEV;
3718         }
3719         return 0;
3720 }
3721
3722 /* tp->lock is held. */
3723 static void tg3_nvram_unlock(struct tg3 *tp)
3724 {
3725         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3726                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3727 }
3728
3729 /* tp->lock is held. */
3730 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3731 {
3732         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3733                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3734                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3735
3736         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3737                 switch (kind) {
3738                 case RESET_KIND_INIT:
3739                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3740                                       DRV_STATE_START);
3741                         break;
3742
3743                 case RESET_KIND_SHUTDOWN:
3744                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3745                                       DRV_STATE_UNLOAD);
3746                         break;
3747
3748                 case RESET_KIND_SUSPEND:
3749                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3750                                       DRV_STATE_SUSPEND);
3751                         break;
3752
3753                 default:
3754                         break;
3755                 };
3756         }
3757 }
3758
3759 /* tp->lock is held. */
3760 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3761 {
3762         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3763                 switch (kind) {
3764                 case RESET_KIND_INIT:
3765                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3766                                       DRV_STATE_START_DONE);
3767                         break;
3768
3769                 case RESET_KIND_SHUTDOWN:
3770                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3771                                       DRV_STATE_UNLOAD_DONE);
3772                         break;
3773
3774                 default:
3775                         break;
3776                 };
3777         }
3778 }
3779
3780 /* tp->lock is held. */
3781 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3782 {
3783         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3784                 switch (kind) {
3785                 case RESET_KIND_INIT:
3786                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3787                                       DRV_STATE_START);
3788                         break;
3789
3790                 case RESET_KIND_SHUTDOWN:
3791                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3792                                       DRV_STATE_UNLOAD);
3793                         break;
3794
3795                 case RESET_KIND_SUSPEND:
3796                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3797                                       DRV_STATE_SUSPEND);
3798                         break;
3799
3800                 default:
3801                         break;
3802                 };
3803         }
3804 }
3805
3806 static void tg3_stop_fw(struct tg3 *);
3807
3808 /* tp->lock is held. */
3809 static int tg3_chip_reset(struct tg3 *tp)
3810 {
3811         u32 val;
3812         u32 flags_save;
3813         int i;
3814
3815         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3816                 tg3_nvram_lock(tp);
3817
3818         /*
3819          * We must avoid the readl() that normally takes place.
3820          * It locks machines, causes machine checks, and other
3821          * fun things.  So, temporarily disable the 5701
3822          * hardware workaround, while we do the reset.
3823          */
3824         flags_save = tp->tg3_flags;
3825         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3826
3827         /* do the reset */
3828         val = GRC_MISC_CFG_CORECLK_RESET;
3829
3830         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3831                 if (tr32(0x7e2c) == 0x60) {
3832                         tw32(0x7e2c, 0x20);
3833                 }
3834                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3835                         tw32(GRC_MISC_CFG, (1 << 29));
3836                         val |= (1 << 29);
3837                 }
3838         }
3839
3840         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
3841                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3842         tw32(GRC_MISC_CFG, val);
3843
3844         /* restore 5701 hardware bug workaround flag */
3845         tp->tg3_flags = flags_save;
3846
3847         /* Unfortunately, we have to delay before the PCI read back.
3848          * Some 575X chips will not even respond to a PCI cfg access
3849          * when the reset command is given to the chip.
3850          *
3851          * How do these hardware designers expect things to work
3852          * properly if the PCI write is posted for a long period
3853          * of time?  It is always necessary to have some method by
3854          * which a register read back can occur to push out the
3855          * write that does the reset.
3856          *
3857          * For most tg3 variants the trick below was working.
3858          * Ho hum...
3859          */
3860         udelay(120);
3861
3862         /* Flush PCI posted writes.  The normal MMIO registers
3863          * are inaccessible at this time so this is the only
3864          * way to do this reliably (actually, this is no longer
3865          * the case, see above).  I tried to use indirect
3866          * register read/write but this upset some 5701 variants.
3867          */
3868         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3869
3870         udelay(120);
3871
3872         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3873                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3874                         int i;
3875                         u32 cfg_val;
3876
3877                         /* Wait for link training to complete.  */
3878                         for (i = 0; i < 5000; i++)
3879                                 udelay(100);
3880
3881                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3882                         pci_write_config_dword(tp->pdev, 0xc4,
3883                                                cfg_val | (1 << 15));
3884                 }
3885                 /* Set PCIE max payload size and clear error status.  */
3886                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3887         }
3888
3889         /* Re-enable indirect register accesses. */
3890         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3891                                tp->misc_host_ctrl);
3892
3893         /* Set MAX PCI retry to zero. */
3894         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3895         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3896             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3897                 val |= PCISTATE_RETRY_SAME_DMA;
3898         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3899
3900         pci_restore_state(tp->pdev);
3901
3902         /* Make sure PCI-X relaxed ordering bit is clear. */
3903         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3904         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3905         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3906
3907         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3908
3909         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
3910                 tg3_stop_fw(tp);
3911                 tw32(0x5000, 0x400);
3912         }
3913
3914         tw32(GRC_MODE, tp->grc_mode);
3915
3916         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3917                 u32 val = tr32(0xc4);
3918
3919                 tw32(0xc4, val | (1 << 15));
3920         }
3921
3922         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3923             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3924                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3925                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3926                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3927                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3928         }
3929
3930         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3931                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3932                 tw32_f(MAC_MODE, tp->mac_mode);
3933         } else
3934                 tw32_f(MAC_MODE, 0);
3935         udelay(40);
3936
3937         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
3938                 /* Wait for firmware initialization to complete. */
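                     /* (Polls up to 100000 * 10us = 1s for the boot code's magic value.) */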
3939                 for (i = 0; i < 100000; i++) {
3940                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3941                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3942                                 break;
3943                         udelay(10);
3944                 }
3945                 if (i >= 100000) {
3946                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3947                                "firmware will not restart magic=%08x\n",
3948                                tp->dev->name, val);
3949                         return -ENODEV;
3950                 }
3951         }
3952
3953         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
3954             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3955                 u32 val = tr32(0x7c00);
3956
3957                 tw32(0x7c00, val | (1 << 25));
3958         }
3959
3960         /* Reprobe ASF enable state.  */
3961         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3962         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
3963         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3964         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3965                 u32 nic_cfg;
3966
3967                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3968                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
3969                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3970                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
3971                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
3972                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
3973                 }
3974         }
3975
3976         return 0;
3977 }
3978
3979 /* tp->lock is held. */
3980 static void tg3_stop_fw(struct tg3 *tp)
3981 {
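             /* If ASF firmware is running, ask it to pause: post the command in
              * the firmware mailbox, raise the RX CPU event bit, then give the
              * firmware up to 100us to acknowledge by clearing that bit.
              */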
3982         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3983                 u32 val;
3984                 int i;
3985
3986                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3987                 val = tr32(GRC_RX_CPU_EVENT);
3988                 val |= (1 << 14);
3989                 tw32(GRC_RX_CPU_EVENT, val);
3990
3991                 /* Wait for RX cpu to ACK the event.  */
3992                 for (i = 0; i < 100; i++) {
3993                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3994                                 break;
3995                         udelay(1);
3996                 }
3997         }
3998 }
3999
4000 /* tp->lock is held. */
4001 static int tg3_halt(struct tg3 *tp)
4002 {
4003         int err;
4004
4005         tg3_stop_fw(tp);
4006
4007         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
4008
4009         tg3_abort_hw(tp);
4010         err = tg3_chip_reset(tp);
4011
4012         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
4013         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4014
4015         if (err)
4016                 return err;
4017
4018         return 0;
4019 }
4020
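     /* Link map of the firmware image below: section load addresses and
      * lengths (text/rodata/data/sbss/bss) inside the NIC CPU's address space.
      */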
4021 #define TG3_FW_RELEASE_MAJOR    0x0
4022 #define TG3_FW_RELASE_MINOR     0x0
4023 #define TG3_FW_RELEASE_FIX      0x0
4024 #define TG3_FW_START_ADDR       0x08000000
4025 #define TG3_FW_TEXT_ADDR        0x08000000
4026 #define TG3_FW_TEXT_LEN         0x9c0
4027 #define TG3_FW_RODATA_ADDR      0x080009c0
4028 #define TG3_FW_RODATA_LEN       0x60
4029 #define TG3_FW_DATA_ADDR        0x08000a40
4030 #define TG3_FW_DATA_LEN         0x20
4031 #define TG3_FW_SBSS_ADDR        0x08000a60
4032 #define TG3_FW_SBSS_LEN         0xc
4033 #define TG3_FW_BSS_ADDR         0x08000a70
4034 #define TG3_FW_BSS_LEN          0x10
4035
4036 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4037         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4038         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4039         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4040         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4041         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4042         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4043         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4044         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4045         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4046         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4047         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4048         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4049         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4050         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4051         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4052         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4053         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4054         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4055         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4056         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4057         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4058         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4059         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4060         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4061         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4062         0, 0, 0, 0, 0, 0,
4063         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4064         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4065         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4066         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4067         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4068         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4069         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4070         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4071         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4072         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4073         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4074         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4075         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4076         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4077         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4078         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4079         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4080         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4081         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4082         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4083         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4084         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4085         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4086         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4087         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4088         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4089         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4090         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4091         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4092         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4093         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4094         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4095         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4096         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4097         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4098         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4099         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4100         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4101         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4102         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4103         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4104         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4105         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4106         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4107         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4108         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4109         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4110         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4111         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4112         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4113         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4114         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4115         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4116         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4117         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4118         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4119         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4120         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4121         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4122         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4123         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4124         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4125         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4126         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4127         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4128 };
4129
4130 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4131         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4132         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4133         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4134         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4135         0x00000000
4136 };
4137
4138 #if 0 /* All zeros, don't eat up space with it. */
4139 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4140         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4141         0x00000000, 0x00000000, 0x00000000, 0x00000000
4142 };
4143 #endif
4144
4145 #define RX_CPU_SCRATCH_BASE     0x30000
4146 #define RX_CPU_SCRATCH_SIZE     0x04000
4147 #define TX_CPU_SCRATCH_BASE     0x34000
4148 #define TX_CPU_SCRATCH_SIZE     0x04000
4149
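/* Halt the embedded RX or TX CPU selected by "offset" by repeatedly
 * asserting CPU_MODE_HALT and polling until the halt bit sticks.  The
 * TX CPU must never be halted on 5705-class chips (the flag check
 * below treats that as a driver bug); the RX CPU gets one extra,
 * flushed halt write after the polling loop.
 */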
4150 /* tp->lock is held. */
4151 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4152 {
4153         int i;
4154
4155         if (offset == TX_CPU_BASE &&
4156             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4157                 BUG();
4158
4159         if (offset == RX_CPU_BASE) {
4160                 for (i = 0; i < 10000; i++) {
4161                         tw32(offset + CPU_STATE, 0xffffffff);
4162                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4163                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4164                                 break;
4165                 }
4166
4167                 tw32(offset + CPU_STATE, 0xffffffff);
4168                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4169                 udelay(10);
4170         } else {
4171                 for (i = 0; i < 10000; i++) {
4172                         tw32(offset + CPU_STATE, 0xffffffff);
4173                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4174                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4175                                 break;
4176                 }
4177         }
4178
4179         if (i >= 10000) {
4180                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
4181                        "%s CPU\n",
4182                        tp->dev->name,
4183                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4184                 return -ENODEV;
4185         }
4186         return 0;
4187 }
4188
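/* Describes one firmware image as three sections: text, rodata and
 * data.  Each section carries its link-time load address, its length
 * in bytes and a pointer to the image words; a NULL pointer means the
 * section is all zeros and is written out as such.
 */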
4189 struct fw_info {
4190         unsigned int text_base;
4191         unsigned int text_len;
4192         u32 *text_data;
4193         unsigned int rodata_base;
4194         unsigned int rodata_len;
4195         u32 *rodata_data;
4196         unsigned int data_base;
4197         unsigned int data_len;
4198         u32 *data_data;
4199 };
4200
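/* Load the firmware image described by "info" into the scratch RAM of
 * the CPU at "cpu_base": halt the CPU, clear its scratch area, then
 * copy in each section.  Returns 0 on success or a negative errno.
 */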
4201 /* tp->lock is held. */
4202 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4203                                  int cpu_scratch_size, struct fw_info *info)
4204 {
4205         int err, i;
4206         u32 orig_tg3_flags = tp->tg3_flags;
4207         void (*write_op)(struct tg3 *, u32, u32);
4208
4209         if (cpu_base == TX_CPU_BASE &&
4210             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4211                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4212                        "TX CPU firmware on %s, which is a 5705-class chip.\n",
4213                        tp->dev->name);
4214                 return -EINVAL;
4215         }
4216
4217         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4218                 write_op = tg3_write_mem;
4219         else
4220                 write_op = tg3_write_indirect_reg32;
4221
4222         /* Force use of PCI config space for indirect register
4223          * write calls.
4224          */
4225         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4226
4227         err = tg3_halt_cpu(tp, cpu_base);
4228         if (err)
4229                 goto out;
4230
4231         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4232                 write_op(tp, cpu_scratch_base + i, 0);
4233         tw32(cpu_base + CPU_STATE, 0xffffffff);
4234         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
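        /* Copy the text, rodata and data sections into scratch RAM.  Each
         * section lands at the offset given by the low 16 bits of its load
         * address; a section with a NULL image pointer is written as zeros.
         */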
4235         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4236                 write_op(tp, (cpu_scratch_base +
4237                               (info->text_base & 0xffff) +
4238                               (i * sizeof(u32))),
4239                          (info->text_data ?
4240                           info->text_data[i] : 0));
4241         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4242                 write_op(tp, (cpu_scratch_base +
4243                               (info->rodata_base & 0xffff) +
4244                               (i * sizeof(u32))),
4245                          (info->rodata_data ?
4246                           info->rodata_data[i] : 0));
4247         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4248                 write_op(tp, (cpu_scratch_base +
4249                               (info->data_base & 0xffff) +
4250                               (i * sizeof(u32))),
4251                          (info->data_data ?
4252                           info->data_data[i] : 0));
4253
4254         err = 0;
4255
4256 out:
4257         tp->tg3_flags = orig_tg3_flags;
4258         return err;
4259 }
4260
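/* Load the 5701 A0 fixup firmware (tg3FwText/tg3FwRodata above) into
 * both the RX and TX CPU scratch areas, then start only the RX CPU at
 * TG3_FW_TEXT_ADDR.  The data section is all zeros, so no image is
 * supplied for it.
 */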
4261 /* tp->lock is held. */
4262 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4263 {
4264         struct fw_info info;
4265         int err, i;
4266
4267         info.text_base = TG3_FW_TEXT_ADDR;
4268         info.text_len = TG3_FW_TEXT_LEN;
4269         info.text_data = &tg3FwText[0];
4270         info.rodata_base = TG3_FW_RODATA_ADDR;
4271         info.rodata_len = TG3_FW_RODATA_LEN;
4272         info.rodata_data = &tg3FwRodata[0];
4273         info.data_base = TG3_FW_DATA_ADDR;
4274         info.data_len = TG3_FW_DATA_LEN;
4275         info.data_data = NULL;
4276
4277         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4278                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4279                                     &info);
4280         if (err)
4281                 return err;
4282
4283         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4284                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4285                                     &info);
4286         if (err)
4287                 return err;
4288
4289         /* Now start up only the RX CPU. */
4290         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4291         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4292
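        /* Poll for the new program counter to take effect, re-halting the
         * CPU and rewriting the PC up to five times before giving up.
         */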
4293         for (i = 0; i < 5; i++) {
4294                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4295                         break;
4296                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4297                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4298                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4299                 udelay(1000);
4300         }
4301         if (i >= 5) {
4302                 printk(KERN_ERR PFX "tg3_load_firmware failed to set "
4303                        "RX CPU PC for %s: is %08x, should be %08x\n",
4304                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4305                        TG3_FW_TEXT_ADDR);
4306                 return -ENODEV;
4307         }
4308         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4309         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4310
4311         return 0;
4312 }
4313
4314 #if TG3_TSO_SUPPORT != 0
4315
4316 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4317 #define TG3_TSO_FW_RELASE_MINOR         0x6
4318 #define TG3_TSO_FW_RELEASE_FIX          0x0
4319 #define TG3_TSO_FW_START_ADDR           0x08000000
4320 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4321 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4322 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4323 #define TG3_TSO_FW_RODATA_LEN           0x60
4324 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4325 #define TG3_TSO_FW_DATA_LEN             0x30
4326 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4327 #define TG3_TSO_FW_SBSS_LEN             0x2c
4328 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4329 #define TG3_TSO_FW_BSS_LEN              0x894
4330
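/* Text section of the full TSO firmware image.  The TG3_TSO_FW_*
 * constants above give each section's link address and size;
 * tg3_load_tso_firmware() uses them to build its fw_info.
 */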
4331 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4332         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4333         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4334         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4335         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4336         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4337         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4338         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4339         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4340         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4341         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4342         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4343         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4344         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4345         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4346         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4347         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4348         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4349         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4350         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4351         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4352         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4353         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4354         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4355         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4356         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4357         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4358         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4359         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4360         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4361         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4362         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4363         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4364         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4365         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4366         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4367         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4368         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4369         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4370         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4371         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4372         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4373         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4374         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4375         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4376         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4377         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4378         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4379         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4380         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4381         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4382         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4383         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4384         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4385         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4386         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4387         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4388         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4389         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4390         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4391         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4392         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4393         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4394         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4395         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4396         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4397         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4398         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4399         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4400         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4401         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4402         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4403         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4404         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4405         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4406         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4407         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4408         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4409         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4410         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4411         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4412         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4413         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4414         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4415         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4416         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4417         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4418         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4419         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4420         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4421         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4422         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4423         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4424         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4425         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4426         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4427         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4428         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4429         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4430         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4431         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4432         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4433         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4434         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4435         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4436         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4437         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4438         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4439         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4440         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4441         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4442         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4443         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4444         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4445         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4446         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4447         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4448         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4449         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4450         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4451         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4452         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4453         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4454         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4455         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4456         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4457         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4458         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4459         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4460         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4461         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4462         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4463         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4464         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4465         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4466         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4467         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4468         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4469         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4470         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4471         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4472         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4473         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4474         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4475         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4476         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4477         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4478         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4479         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4480         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4481         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4482         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4483         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4484         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4485         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4486         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4487         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4488         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4489         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4490         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4491         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4492         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4493         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4494         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4495         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4496         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4497         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4498         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4499         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4500         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4501         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4502         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4503         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4504         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4505         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4506         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4507         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4508         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4509         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4510         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4511         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4512         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4513         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4514         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4515         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4516         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4517         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4518         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4519         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4520         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4521         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4522         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4523         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4524         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4525         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4526         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4527         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4528         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4529         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4530         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4531         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4532         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4533         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4534         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4535         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4536         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4537         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4538         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4539         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4540         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4541         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4542         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4543         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4544         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4545         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4546         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4547         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4548         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4549         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4550         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4551         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4552         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4553         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4554         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4555         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4556         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4557         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4558         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4559         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4560         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4561         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4562         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4563         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4564         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4565         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4566         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4567         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4568         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4569         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4570         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4571         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4572         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4573         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4574         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4575         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4576         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4577         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4578         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4579         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4580         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4581         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4582         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4583         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4584         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4585         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4586         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4587         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4588         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4589         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4590         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4591         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4592         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4593         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4594         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4595         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4596         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4597         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4598         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4599         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4600         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4601         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4602         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4603         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4604         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4605         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4606         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4607         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4608         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4609         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4610         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4611         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4612         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4613         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4614         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4615         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4616 };
4617
4618 static u32 tg3TsoFwRodata[] = {
4619         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4620         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4621         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4622         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4623         0x00000000,
4624 };
4625
4626 static u32 tg3TsoFwData[] = {
4627         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4628         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4629         0x00000000,
4630 };
4631
4632 /* 5705 needs a special version of the TSO firmware.  */
4633 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4634 #define TG3_TSO5_FW_RELASE_MINOR        0x2
4635 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4636 #define TG3_TSO5_FW_START_ADDR          0x00010000
4637 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4638 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4639 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4640 #define TG3_TSO5_FW_RODATA_LEN          0x50
4641 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4642 #define TG3_TSO5_FW_DATA_LEN            0x20
4643 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4644 #define TG3_TSO5_FW_SBSS_LEN            0x28
4645 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4646 #define TG3_TSO5_FW_BSS_LEN             0x88
4647
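/* Text section of the cut-down 5705 TSO firmware image, laid out per
 * the TG3_TSO5_FW_* constants above.
 */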
4648 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4649         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4650         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4651         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4652         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4653         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4654         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4655         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4656         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4657         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4658         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4659         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4660         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4661         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4662         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4663         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4664         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4665         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4666         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4667         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4668         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4669         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4670         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4671         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4672         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4673         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4674         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4675         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4676         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4677         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4678         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4679         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4680         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4681         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4682         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4683         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4684         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4685         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4686         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4687         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4688         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4689         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4690         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4691         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4692         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4693         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4694         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4695         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4696         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4697         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4698         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4699         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4700         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4701         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4702         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4703         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4704         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4705         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4706         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4707         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4708         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4709         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4710         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4711         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4712         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4713         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4714         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4715         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4716         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4717         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4718         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4719         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4720         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4721         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4722         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4723         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4724         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4725         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4726         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4727         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4728         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4729         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4730         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4731         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4732         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4733         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4734         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4735         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4736         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4737         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4738         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4739         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4740         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4741         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4742         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4743         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4744         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4745         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4746         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4747         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4748         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4749         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4750         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4751         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4752         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4753         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4754         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4755         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4756         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4757         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4758         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4759         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4760         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4761         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4762         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4763         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4764         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4765         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4766         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4767         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4768         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4769         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4770         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4771         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4772         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4773         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4774         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4775         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4776         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4777         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4778         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4779         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4780         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4781         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4782         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4783         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4784         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4785         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4786         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4787         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4788         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4789         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4790         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4791         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4792         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4793         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4794         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4795         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4796         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4797         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4798         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4799         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4800         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4801         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4802         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4803         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4804         0x00000000, 0x00000000, 0x00000000,
4805 };
4806
4807 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4808         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4809         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4810         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4811         0x00000000, 0x00000000, 0x00000000,
4812 };
4813
4814 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4815         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4816         0x00000000, 0x00000000, 0x00000000,
4817 };
4818
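/* Load the TSO firmware.  Chips with TG3_FLG2_HW_TSO segment in
 * hardware and need no image at all.  The 5705 gets the smaller TSO5
 * image loaded into the RX CPU, with scratch space carved out of the
 * start of the 5705 MBUF pool; all other chips get the full image
 * loaded into the TX CPU scratch area.
 */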
4819 /* tp->lock is held. */
4820 static int tg3_load_tso_firmware(struct tg3 *tp)
4821 {
4822         struct fw_info info;
4823         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4824         int err, i;
4825
4826         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4827                 return 0;
4828
4829         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4830                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4831                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4832                 info.text_data = &tg3Tso5FwText[0];
4833                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4834                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4835                 info.rodata_data = &tg3Tso5FwRodata[0];
4836                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4837                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4838                 info.data_data = &tg3Tso5FwData[0];
4839                 cpu_base = RX_CPU_BASE;
4840                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4841                 cpu_scratch_size = (info.text_len +
4842                                     info.rodata_len +
4843                                     info.data_len +
4844                                     TG3_TSO5_FW_SBSS_LEN +
4845                                     TG3_TSO5_FW_BSS_LEN);
4846         } else {
4847                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4848                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4849                 info.text_data = &tg3TsoFwText[0];
4850                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4851                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4852                 info.rodata_data = &tg3TsoFwRodata[0];
4853                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4854                 info.data_len = TG3_TSO_FW_DATA_LEN;
4855                 info.data_data = &tg3TsoFwData[0];
4856                 cpu_base = TX_CPU_BASE;
4857                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4858                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4859         }
4860
4861         err = tg3_load_firmware_cpu(tp, cpu_base,
4862                                     cpu_scratch_base, cpu_scratch_size,
4863                                     &info);
4864         if (err)
4865                 return err;
4866
4867         /* Now start up the CPU. */
4868         tw32(cpu_base + CPU_STATE, 0xffffffff);
4869         tw32_f(cpu_base + CPU_PC,    info.text_base);
4870
4871         for (i = 0; i < 5; i++) {
4872                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4873                         break;
4874                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4875                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4876                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4877                 udelay(1000);
4878         }
4879         if (i >= 5) {
4880                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
4881                        "CPU PC for %s: is %08x, should be %08x\n",
4882                        tp->dev->name, tr32(cpu_base + CPU_PC),
4883                        info.text_base);
4884                 return -ENODEV;
4885         }
4886         tw32(cpu_base + CPU_STATE, 0xffffffff);
4887         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4888         return 0;
4889 }
4890
4891 #endif /* TG3_TSO_SUPPORT != 0 */
4892
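/* Program the station address into all four MAC address slots (and
 * into the twelve extended slots on 5703/5704), then derive the
 * transmit backoff seed from the byte sum of the address.
 */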
4893 /* tp->lock is held. */
4894 static void __tg3_set_mac_addr(struct tg3 *tp)
4895 {
4896         u32 addr_high, addr_low;
4897         int i;
4898
4899         addr_high = ((tp->dev->dev_addr[0] << 8) |
4900                      tp->dev->dev_addr[1]);
4901         addr_low = ((tp->dev->dev_addr[2] << 24) |
4902                     (tp->dev->dev_addr[3] << 16) |
4903                     (tp->dev->dev_addr[4] <<  8) |
4904                     (tp->dev->dev_addr[5] <<  0));
4905         for (i = 0; i < 4; i++) {
4906                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4907                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4908         }
4909
4910         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4911             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
4912                 for (i = 0; i < 12; i++) {
4913                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4914                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4915                 }
4916         }
4917
4918         addr_high = (tp->dev->dev_addr[0] +
4919                      tp->dev->dev_addr[1] +
4920                      tp->dev->dev_addr[2] +
4921                      tp->dev->dev_addr[3] +
4922                      tp->dev->dev_addr[4] +
4923                      tp->dev->dev_addr[5]) &
4924                 TX_BACKOFF_SEED_MASK;
4925         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4926 }
4927
4928 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4929 {
4930         struct tg3 *tp = netdev_priv(dev);
4931         struct sockaddr *addr = p;
4932
4933         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4934
4935         spin_lock_irq(&tp->lock);
4936         __tg3_set_mac_addr(tp);
4937         spin_unlock_irq(&tp->lock);
4938
4939         return 0;
4940 }
4941
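/* Write a ring's buffer descriptor control block into NIC SRAM at
 * "bdinfo_addr": the 64-bit host DMA address, the maxlen/flags word
 * and, on pre-5705 chips, the ring's address in NIC memory.
 */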
4942 /* tp->lock is held. */
4943 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4944                            dma_addr_t mapping, u32 maxlen_flags,
4945                            u32 nic_addr)
4946 {
4947         tg3_write_mem(tp,
4948                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4949                       ((u64) mapping >> 32));
4950         tg3_write_mem(tp,
4951                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4952                       ((u64) mapping & 0xffffffff));
4953         tg3_write_mem(tp,
4954                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4955                        maxlen_flags);
4956
4957         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4958                 tg3_write_mem(tp,
4959                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4960                               nic_addr);
4961 }
4962
4963 static void __tg3_set_rx_mode(struct net_device *);
4964
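/* Bring the chip through a full reset and reprogram it: stop the
 * firmware, abort any running hardware, reset the core, then rebuild
 * the rings, buffer manager and DMA configuration from the state
 * saved in "tp".
 */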
4965 /* tp->lock is held. */
4966 static int tg3_reset_hw(struct tg3 *tp)
4967 {
4968         u32 val, rdmac_mode;
4969         int i, err, limit;
4970
4971         tg3_disable_ints(tp);
4972
4973         tg3_stop_fw(tp);
4974
4975         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4976
4977         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4978                 err = tg3_abort_hw(tp);
4979                 if (err)
4980                         return err;
4981         }
4982
4983         err = tg3_chip_reset(tp);
4984         if (err)
4985                 return err;
4986
4987         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4988
4989         /* This works around an issue with Athlon chipsets on
4990          * B3 tigon3 silicon.  This bit has no effect on any
4991          * other revision.  But do not set this on PCI Express
4992          * chips.
4993          */
4994         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
4995                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
4996         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4997
4998         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4999             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5000                 val = tr32(TG3PCI_PCISTATE);
5001                 val |= PCISTATE_RETRY_SAME_DMA;
5002                 tw32(TG3PCI_PCISTATE, val);
5003         }
5004
5005         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5006                 /* Enable hardware fix bits for the 5704 BX chip revision.  */
5007                 val = tr32(TG3PCI_MSI_DATA);
5008                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5009                 tw32(TG3PCI_MSI_DATA, val);
5010         }
5011
5012         /* Descriptor ring init may make accesses to the
5013          * NIC SRAM area to setup the TX descriptors, so we
5014          * can only do this after the hardware has been
5015          * successfully reset.
5016          */
5017         tg3_init_rings(tp);
5018
5019         /* This value is determined during the probe time DMA
5020          * engine test, tg3_test_dma.
5021          */
5022         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5023
5024         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5025                           GRC_MODE_4X_NIC_SEND_RINGS |
5026                           GRC_MODE_NO_TX_PHDR_CSUM |
5027                           GRC_MODE_NO_RX_PHDR_CSUM);
5028         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5029         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5030                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5031         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5032                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5033
5034         tw32(GRC_MODE,
5035              tp->grc_mode |
5036              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5037
5038         /* Setup the timer prescaler register.  Clock is always 66 MHz. */
5039         val = tr32(GRC_MISC_CFG);
5040         val &= ~0xff;
5041         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5042         tw32(GRC_MISC_CFG, val);
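        /* A prescaler value of 65 presumably divides the 66 MHz core clock
         * by 66, giving roughly 1 usec timer ticks; the exact divisor
         * semantics are an assumption here rather than taken from a datasheet.
         */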
5043
5044         /* Initialize MBUF/DESC pool. */
5045         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
5046             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
5047                 /* Do nothing.  */
5048         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5049                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5050                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5051                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5052                 else
5053                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5054                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5055                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5056         }
5057 #if TG3_TSO_SUPPORT != 0
5058         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5059                 int fw_len;
5060
5061                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5062                           TG3_TSO5_FW_RODATA_LEN +
5063                           TG3_TSO5_FW_DATA_LEN +
5064                           TG3_TSO5_FW_SBSS_LEN +
5065                           TG3_TSO5_FW_BSS_LEN);
5066                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5067                 tw32(BUFMGR_MB_POOL_ADDR,
5068                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5069                 tw32(BUFMGR_MB_POOL_SIZE,
5070                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5071         }
5072 #endif
5073
5074         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5075                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5076                      tp->bufmgr_config.mbuf_read_dma_low_water);
5077                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5078                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5079                 tw32(BUFMGR_MB_HIGH_WATER,
5080                      tp->bufmgr_config.mbuf_high_water);
5081         } else {
5082                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5083                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5084                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5085                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5086                 tw32(BUFMGR_MB_HIGH_WATER,
5087                      tp->bufmgr_config.mbuf_high_water_jumbo);
5088         }
5089         tw32(BUFMGR_DMA_LOW_WATER,
5090              tp->bufmgr_config.dma_low_water);
5091         tw32(BUFMGR_DMA_HIGH_WATER,
5092              tp->bufmgr_config.dma_high_water);
5093
5094         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5095         for (i = 0; i < 2000; i++) {
5096                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5097                         break;
5098                 udelay(10);
5099         }
5100         if (i >= 2000) {
5101                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5102                        tp->dev->name);
5103                 return -ENODEV;
5104         }
5105
5106         /* Setup replenish threshold. */
5107         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5108
5109         /* Initialize TG3_BDINFO's at:
5110          *  RCVDBDI_STD_BD:     standard eth size rx ring
5111          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5112          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5113          *
5114          * like so:
5115          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5116          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5117          *                              ring attribute flags
5118          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5119          *
5120          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5121          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5122          *
5123          * The size of each ring is fixed in the firmware, but the location is
5124          * configurable.
5125          */
5126         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5127              ((u64) tp->rx_std_mapping >> 32));
5128         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5129              ((u64) tp->rx_std_mapping & 0xffffffff));
5130         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5131              NIC_SRAM_RX_BUFFER_DESC);
5132
5133         /* Don't even try to program the JUMBO/MINI buffer descriptor
5134          * configs on 5705.
5135          */
5136         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5137                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5138                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5139         } else {
5140                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5141                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5142
5143                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5144                      BDINFO_FLAGS_DISABLED);
5145
5146                 /* Setup replenish threshold. */
5147                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5148
5149                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5150                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5151                              ((u64) tp->rx_jumbo_mapping >> 32));
5152                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5153                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5154                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5155                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5156                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5157                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5158                 } else {
5159                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5160                              BDINFO_FLAGS_DISABLED);
5161                 }
5162
5163         }
5164
5165         /* There is only one send ring on 5705/5750, no need to explicitly
5166          * disable the others.
5167          */
5168         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5169                 /* Clear out send RCB ring in SRAM. */
5170                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5171                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5172                                       BDINFO_FLAGS_DISABLED);
5173         }
5174
5175         tp->tx_prod = 0;
5176         tp->tx_cons = 0;
5177         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5178         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5179
5180         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5181                        tp->tx_desc_mapping,
5182                        (TG3_TX_RING_SIZE <<
5183                         BDINFO_FLAGS_MAXLEN_SHIFT),
5184                        NIC_SRAM_TX_BUFFER_DESC);
5185
5186         /* There is only one receive return ring on 5705/5750, no need
5187          * to explicitly disable the others.
5188          */
5189         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5190                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5191                      i += TG3_BDINFO_SIZE) {
5192                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5193                                       BDINFO_FLAGS_DISABLED);
5194                 }
5195         }
5196
5197         tp->rx_rcb_ptr = 0;
5198         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5199
5200         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5201                        tp->rx_rcb_mapping,
5202                        (TG3_RX_RCB_RING_SIZE(tp) <<
5203                         BDINFO_FLAGS_MAXLEN_SHIFT),
5204                        0);
5205
5206         tp->rx_std_ptr = tp->rx_pending;
5207         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5208                      tp->rx_std_ptr);
5209
5210         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5211                                                 tp->rx_jumbo_pending : 0;
5212         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5213                      tp->rx_jumbo_ptr);
5214
5215         /* Initialize MAC address and backoff seed. */
5216         __tg3_set_mac_addr(tp);
5217
5218         /* MTU + ethernet header + FCS + optional VLAN tag */
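        /* (ETH_HLEN is 14, so the extra 8 bytes leave room for the 4-byte
         *  FCS plus an optional 4-byte 802.1Q tag.)
         */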
5219         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5220
5221         /* The slot time is changed by tg3_setup_phy if we
5222          * run at gigabit with half duplex.
5223          */
5224         tw32(MAC_TX_LENGTHS,
5225              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5226              (6 << TX_LENGTHS_IPG_SHIFT) |
5227              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5228
5229         /* Receive rules. */
5230         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5231         tw32(RCVLPC_CONFIG, 0x0181);
5232
5233         /* Calculate RDMAC_MODE setting early, we need it to determine
5234          * the RCVLPC_STATE_ENABLE mask.
5235          */
5236         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5237                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5238                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5239                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5240                       RDMAC_MODE_LNGREAD_ENAB);
5241         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5242                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5243         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5244              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5245             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
5246              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)) {
5247                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5248                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5249                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5250                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5251                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5252                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5253                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5254                 }
5255         }
5256
5257 #if TG3_TSO_SUPPORT != 0
5258         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5259                 rdmac_mode |= (1 << 27);
5260 #endif
5261
5262         /* Receive/send statistics. */
5263         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5264             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5265                 val = tr32(RCVLPC_STATS_ENABLE);
5266                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5267                 tw32(RCVLPC_STATS_ENABLE, val);
5268         } else {
5269                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5270         }
5271         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5272         tw32(SNDDATAI_STATSENAB, 0xffffff);
5273         tw32(SNDDATAI_STATSCTRL,
5274              (SNDDATAI_SCTRL_ENABLE |
5275               SNDDATAI_SCTRL_FASTUPD));
5276
5277         /* Setup host coalescing engine. */
5278         tw32(HOSTCC_MODE, 0);
5279         for (i = 0; i < 2000; i++) {
5280                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5281                         break;
5282                 udelay(10);
5283         }
5284
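        /* Static coalescing defaults: with a zero rx tick threshold and an
         * rx frame threshold of 1, each received frame effectively raises
         * an interrupt.
         */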
5285         tw32(HOSTCC_RXCOL_TICKS, 0);
5286         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5287         tw32(HOSTCC_RXMAX_FRAMES, 1);
5288         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5289         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5290                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5291                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5292         }
5293         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5294         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5295
5296         /* set status block DMA address */
5297         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5298              ((u64) tp->status_mapping >> 32));
5299         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5300              ((u64) tp->status_mapping & 0xffffffff));
5301
5302         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5303                 /* Status/statistics block address.  See tg3_timer,
5304                  * the tg3_periodic_fetch_stats call there, and
5305                  * tg3_get_stats to see how this works for 5705/5750 chips.
5306                  */
5307                 tw32(HOSTCC_STAT_COAL_TICKS,
5308                      DEFAULT_STAT_COAL_TICKS);
5309                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5310                      ((u64) tp->stats_mapping >> 32));
5311                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5312                      ((u64) tp->stats_mapping & 0xffffffff));
5313                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5314                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5315         }
5316
5317         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5318
5319         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5320         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5321         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5322                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5323
5324         /* Clear statistics/status block in chip, and status block in ram. */
5325         for (i = NIC_SRAM_STATS_BLK;
5326              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5327              i += sizeof(u32)) {
5328                 tg3_write_mem(tp, i, 0);
5329                 udelay(40);
5330         }
5331         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5332
5333         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5334                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5335         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5336         udelay(40);
5337
5338         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
5339         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
5340                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5341                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5342         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5343         udelay(100);
5344
5345         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5346         tr32(MAILBOX_INTERRUPT_0);
5347
5348         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5349                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5350                 udelay(40);
5351         }
5352
5353         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5354                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5355                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5356                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5357                WDMAC_MODE_LNGREAD_ENAB);
5358
5359         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5360              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5361             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
5362              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)) {
5363                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5364                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5365                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5366                         /* nothing */
5367                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5368                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5369                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5370                         val |= WDMAC_MODE_RX_ACCEL;
5371                 }
5372         }
5373
5374         tw32_f(WDMAC_MODE, val);
5375         udelay(40);
5376
5377         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5378                 val = tr32(TG3PCI_X_CAPS);
5379                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5380                         val &= ~PCIX_CAPS_BURST_MASK;
5381                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5382                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5383                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5384                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5385                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5386                                 val |= (tp->split_mode_max_reqs <<
5387                                         PCIX_CAPS_SPLIT_SHIFT);
5388                 }
5389                 tw32(TG3PCI_X_CAPS, val);
5390         }
5391
5392         tw32_f(RDMAC_MODE, rdmac_mode);
5393         udelay(40);
5394
5395         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5396         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5397                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5398         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5399         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5400         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5401         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5402         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5403 #if TG3_TSO_SUPPORT != 0
5404         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5405                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5406 #endif
5407         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5408         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5409
5410         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5411                 err = tg3_load_5701_a0_firmware_fix(tp);
5412                 if (err)
5413                         return err;
5414         }
5415
5416 #if TG3_TSO_SUPPORT != 0
5417         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5418                 err = tg3_load_tso_firmware(tp);
5419                 if (err)
5420                         return err;
5421         }
5422 #endif
5423
5424         tp->tx_mode = TX_MODE_ENABLE;
5425         tw32_f(MAC_TX_MODE, tp->tx_mode);
5426         udelay(100);
5427
5428         tp->rx_mode = RX_MODE_ENABLE;
5429         tw32_f(MAC_RX_MODE, tp->rx_mode);
5430         udelay(10);
5431
5432         if (tp->link_config.phy_is_low_power) {
5433                 tp->link_config.phy_is_low_power = 0;
5434                 tp->link_config.speed = tp->link_config.orig_speed;
5435                 tp->link_config.duplex = tp->link_config.orig_duplex;
5436                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5437         }
5438
5439         tp->mi_mode = MAC_MI_MODE_BASE;
5440         tw32_f(MAC_MI_MODE, tp->mi_mode);
5441         udelay(80);
5442
5443         tw32(MAC_LED_CTRL, tp->led_ctrl);
5444
5445         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5446         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5447                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5448                 udelay(10);
5449         }
5450         tw32_f(MAC_RX_MODE, tp->rx_mode);
5451         udelay(10);
5452
5453         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5454                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5455                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5456                         /* Set drive transmission level to 1.2V, but only
5457                          * if the signal pre-emphasis bit is not set.  */
5458                         val = tr32(MAC_SERDES_CFG);
5459                         val &= 0xfffff000;
5460                         val |= 0x880;
5461                         tw32(MAC_SERDES_CFG, val);
5462                 }
5463                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5464                         tw32(MAC_SERDES_CFG, 0x616000);
5465         }
5466
5467         /* Prevent chip from dropping frames when flow control
5468          * is enabled.
5469          */
5470         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5471
5472         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5473             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5474                 /* Use hardware link auto-negotiation */
5475                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5476         }
5477
5478         err = tg3_setup_phy(tp, 1);
5479         if (err)
5480                 return err;
5481
5482         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5483                 u32 tmp;
5484
5485                 /* Clear CRC stats. */
5486                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5487                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5488                         tg3_readphy(tp, 0x14, &tmp);
5489                 }
5490         }
5491
5492         __tg3_set_rx_mode(tp->dev);
5493
5494         /* Initialize receive rules. */
5495         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5496         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5497         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5498         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5499
5500         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5501                 limit = 8;
5502         else
5503                 limit = 16;
5504         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5505                 limit -= 4;
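        /* The fall-through switch below clears every rule register from
         * (limit - 1) down to rule 4.  Rules 0 and 1 were programmed above,
         * rules 2 and 3 are left untouched (their clears are commented out),
         * and when ASF is enabled the top four rules are skipped, presumably
         * because the ASF firmware owns them.
         */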
5506         switch (limit) {
5507         case 16:
5508                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5509         case 15:
5510                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5511         case 14:
5512                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5513         case 13:
5514                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5515         case 12:
5516                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5517         case 11:
5518                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5519         case 10:
5520                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5521         case 9:
5522                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5523         case 8:
5524                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5525         case 7:
5526                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5527         case 6:
5528                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5529         case 5:
5530                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5531         case 4:
5532                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5533         case 3:
5534                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5535         case 2:
5536         case 1:
5537
5538         default:
5539                 break;
5540         }
5541
5542         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5543
5544         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5545                 tg3_enable_ints(tp);
5546
5547         return 0;
5548 }
5549
5550 /* Called at device open time to get the chip ready for
5551  * packet processing.  Invoked with tp->lock held.
5552  */
5553 static int tg3_init_hw(struct tg3 *tp)
5554 {
5555         int err;
5556
5557         /* Force the chip into D0. */
5558         err = tg3_set_power_state(tp, 0);
5559         if (err)
5560                 goto out;
5561
5562         tg3_switch_clocks(tp);
5563
5564         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5565
5566         err = tg3_reset_hw(tp);
5567
5568 out:
5569         return err;
5570 }
5571
5572 #define TG3_STAT_ADD32(PSTAT, REG) \
5573 do {    u32 __val = tr32(REG); \
5574         (PSTAT)->low += __val; \
5575         if ((PSTAT)->low < __val) \
5576                 (PSTAT)->high += 1; \
5577 } while (0)
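/* Fold a 32-bit hardware counter into its 64-bit software copy, carrying
 * into the high word when the low word wraps.
 */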
5578
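/* The 5705-class chips never get their statistics block DMA'd to host
 * memory (tg3_reset_hw only sets that up for the older chips), so the
 * driver periodically folds the 32-bit MAC counters into tp->hw_stats
 * from the timer instead.
 */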
5579 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5580 {
5581         struct tg3_hw_stats *sp = tp->hw_stats;
5582
5583         if (!netif_carrier_ok(tp->dev))
5584                 return;
5585
5586         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5587         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5588         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5589         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5590         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5591         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5592         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5593         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5594         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5595         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5596         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5597         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5598         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5599
5600         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5601         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5602         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5603         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5604         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5605         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5606         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5607         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5608         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5609         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5610         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5611         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5612         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5613         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5614 }
5615
5616 static void tg3_timer(unsigned long __opaque)
5617 {
5618         struct tg3 *tp = (struct tg3 *) __opaque;
5619         unsigned long flags;
5620
5621         spin_lock_irqsave(&tp->lock, flags);
5622         spin_lock(&tp->tx_lock);
5623
5624         /* All of this garbage is because, when using non-tagged
5625          * IRQ status, the mailbox/status_block protocol the chip
5626          * uses with the cpu is race prone.
5627          */
5628         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5629                 tw32(GRC_LOCAL_CTRL,
5630                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5631         } else {
5632                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5633                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5634         }
5635
5636         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5637                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5638                 spin_unlock(&tp->tx_lock);
5639                 spin_unlock_irqrestore(&tp->lock, flags);
5640                 schedule_work(&tp->reset_task);
5641                 return;
5642         }
5643
5644         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5645                 tg3_periodic_fetch_stats(tp);
5646
5647         /* This part only runs once per second. */
5648         if (!--tp->timer_counter) {
5649                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5650                         u32 mac_stat;
5651                         int phy_event;
5652
5653                         mac_stat = tr32(MAC_STATUS);
5654
5655                         phy_event = 0;
5656                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5657                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5658                                         phy_event = 1;
5659                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5660                                 phy_event = 1;
5661
5662                         if (phy_event)
5663                                 tg3_setup_phy(tp, 0);
5664                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5665                         u32 mac_stat = tr32(MAC_STATUS);
5666                         int need_setup = 0;
5667
5668                         if (netif_carrier_ok(tp->dev) &&
5669                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5670                                 need_setup = 1;
5671                         }
5672                         if (!netif_carrier_ok(tp->dev) &&
5673                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5674                                          MAC_STATUS_SIGNAL_DET))) {
5675                                 need_setup = 1;
5676                         }
5677                         if (need_setup) {
5678                                 tw32_f(MAC_MODE,
5679                                      (tp->mac_mode &
5680                                       ~MAC_MODE_PORT_MODE_MASK));
5681                                 udelay(40);
5682                                 tw32_f(MAC_MODE, tp->mac_mode);
5683                                 udelay(40);
5684                                 tg3_setup_phy(tp, 0);
5685                         }
5686                 }
5687
5688                 tp->timer_counter = tp->timer_multiplier;
5689         }
5690
5691         /* Heartbeat is only sent once every 120 seconds.  */
5692         if (!--tp->asf_counter) {
5693                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5694                         u32 val;
5695
5696                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5697                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5698                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5699                         val = tr32(GRC_RX_CPU_EVENT);
5700                         val |= (1 << 14);
5701                         tw32(GRC_RX_CPU_EVENT, val);
5702                 }
5703                 tp->asf_counter = tp->asf_multiplier;
5704         }
5705
5706         spin_unlock(&tp->tx_lock);
5707         spin_unlock_irqrestore(&tp->lock, flags);
5708
5709         tp->timer.expires = jiffies + tp->timer_offset;
5710         add_timer(&tp->timer);
5711 }
5712
5713 static int tg3_open(struct net_device *dev)
5714 {
5715         struct tg3 *tp = netdev_priv(dev);
5716         int err;
5717
5718         spin_lock_irq(&tp->lock);
5719         spin_lock(&tp->tx_lock);
5720
5721         tg3_disable_ints(tp);
5722         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5723
5724         spin_unlock(&tp->tx_lock);
5725         spin_unlock_irq(&tp->lock);
5726
5727         /* The placement of this call is tied
5728          * to the setup and use of Host TX descriptors.
5729          */
5730         err = tg3_alloc_consistent(tp);
5731         if (err)
5732                 return err;
5733
5734         err = request_irq(dev->irq, tg3_interrupt,
5735                           SA_SHIRQ, dev->name, dev);
5736
5737         if (err) {
5738                 tg3_free_consistent(tp);
5739                 return err;
5740         }
5741
5742         spin_lock_irq(&tp->lock);
5743         spin_lock(&tp->tx_lock);
5744
5745         err = tg3_init_hw(tp);
5746         if (err) {
5747                 tg3_halt(tp);
5748                 tg3_free_rings(tp);
5749         } else {
5750                 tp->timer_offset = HZ / 10;
5751                 tp->timer_counter = tp->timer_multiplier = 10;
5752                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
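                /* The timer fires every HZ/10 jiffies, i.e. ten times a
                 * second; a timer_multiplier of 10 gives the once-per-second
                 * link poll, and an asf_multiplier of 10 * 120 gives the 120
                 * second ASF heartbeat described in tg3_timer.
                 */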
5753
5754                 init_timer(&tp->timer);
5755                 tp->timer.expires = jiffies + tp->timer_offset;
5756                 tp->timer.data = (unsigned long) tp;
5757                 tp->timer.function = tg3_timer;
5758                 add_timer(&tp->timer);
5759
5760                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5761         }
5762
5763         spin_unlock(&tp->tx_lock);
5764         spin_unlock_irq(&tp->lock);
5765
5766         if (err) {
5767                 free_irq(dev->irq, dev);
5768                 tg3_free_consistent(tp);
5769                 return err;
5770         }
5771
5772         spin_lock_irq(&tp->lock);
5773         spin_lock(&tp->tx_lock);
5774
5775         tg3_enable_ints(tp);
5776
5777         spin_unlock(&tp->tx_lock);
5778         spin_unlock_irq(&tp->lock);
5779
5780         netif_start_queue(dev);
5781
5782         return 0;
5783 }
5784
5785 #if 0
5786 /*static*/ void tg3_dump_state(struct tg3 *tp)
5787 {
5788         u32 val32, val32_2, val32_3, val32_4, val32_5;
5789         u16 val16;
5790         int i;
5791
5792         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5793         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5794         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5795                val16, val32);
5796
5797         /* MAC block */
5798         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5799                tr32(MAC_MODE), tr32(MAC_STATUS));
5800         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5801                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5802         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5803                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5804         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5805                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5806
5807         /* Send data initiator control block */
5808         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5809                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5810         printk("       SNDDATAI_STATSCTRL[%08x]\n",
5811                tr32(SNDDATAI_STATSCTRL));
5812
5813         /* Send data completion control block */
5814         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5815
5816         /* Send BD ring selector block */
5817         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5818                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5819
5820         /* Send BD initiator control block */
5821         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5822                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5823
5824         /* Send BD completion control block */
5825         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5826
5827         /* Receive list placement control block */
5828         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5829                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5830         printk("       RCVLPC_STATSCTRL[%08x]\n",
5831                tr32(RCVLPC_STATSCTRL));
5832
5833         /* Receive data and receive BD initiator control block */
5834         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5835                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5836
5837         /* Receive data completion control block */
5838         printk("DEBUG: RCVDCC_MODE[%08x]\n",
5839                tr32(RCVDCC_MODE));
5840
5841         /* Receive BD initiator control block */
5842         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5843                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5844
5845         /* Receive BD completion control block */
5846         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5847                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5848
5849         /* Receive list selector control block */
5850         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5851                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5852
5853         /* Mbuf cluster free block */
5854         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5855                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5856
5857         /* Host coalescing control block */
5858         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5859                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5860         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5861                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5862                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5863         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5864                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5865                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5866         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5867                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5868         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5869                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5870
5871         /* Memory arbiter control block */
5872         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5873                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5874
5875         /* Buffer manager control block */
5876         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5877                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5878         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5879                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5880         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5881                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5882                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5883                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5884
5885         /* Read DMA control block */
5886         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5887                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5888
5889         /* Write DMA control block */
5890         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5891                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5892
5893         /* DMA completion block */
5894         printk("DEBUG: DMAC_MODE[%08x]\n",
5895                tr32(DMAC_MODE));
5896
5897         /* GRC block */
5898         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5899                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5900         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5901                tr32(GRC_LOCAL_CTRL));
5902
5903         /* TG3_BDINFOs */
5904         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5905                tr32(RCVDBDI_JUMBO_BD + 0x0),
5906                tr32(RCVDBDI_JUMBO_BD + 0x4),
5907                tr32(RCVDBDI_JUMBO_BD + 0x8),
5908                tr32(RCVDBDI_JUMBO_BD + 0xc));
5909         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5910                tr32(RCVDBDI_STD_BD + 0x0),
5911                tr32(RCVDBDI_STD_BD + 0x4),
5912                tr32(RCVDBDI_STD_BD + 0x8),
5913                tr32(RCVDBDI_STD_BD + 0xc));
5914         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5915                tr32(RCVDBDI_MINI_BD + 0x0),
5916                tr32(RCVDBDI_MINI_BD + 0x4),
5917                tr32(RCVDBDI_MINI_BD + 0x8),
5918                tr32(RCVDBDI_MINI_BD + 0xc));
5919
5920         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5921         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5922         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5923         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5924         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5925                val32, val32_2, val32_3, val32_4);
5926
5927         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5928         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5929         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5930         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5931         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5932                val32, val32_2, val32_3, val32_4);
5933
5934         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5935         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5936         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5937         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5938         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5939         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5940                val32, val32_2, val32_3, val32_4, val32_5);
5941
5942         /* SW status block */
5943         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5944                tp->hw_status->status,
5945                tp->hw_status->status_tag,
5946                tp->hw_status->rx_jumbo_consumer,
5947                tp->hw_status->rx_consumer,
5948                tp->hw_status->rx_mini_consumer,
5949                tp->hw_status->idx[0].rx_producer,
5950                tp->hw_status->idx[0].tx_consumer);
5951
5952         /* SW statistics block */
5953         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5954                ((u32 *)tp->hw_stats)[0],
5955                ((u32 *)tp->hw_stats)[1],
5956                ((u32 *)tp->hw_stats)[2],
5957                ((u32 *)tp->hw_stats)[3]);
5958
5959         /* Mailboxes */
5960         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5961                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5962                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5963                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5964                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5965
5966         /* NIC side send descriptors. */
5967         for (i = 0; i < 6; i++) {
5968                 unsigned long txd;
5969
5970                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5971                         + (i * sizeof(struct tg3_tx_buffer_desc));
5972                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5973                        i,
5974                        readl(txd + 0x0), readl(txd + 0x4),
5975                        readl(txd + 0x8), readl(txd + 0xc));
5976         }
5977
5978         /* NIC side RX descriptors. */
5979         for (i = 0; i < 6; i++) {
5980                 unsigned long rxd;
5981
5982                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5983                         + (i * sizeof(struct tg3_rx_buffer_desc));
5984                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5985                        i,
5986                        readl(rxd + 0x0), readl(rxd + 0x4),
5987                        readl(rxd + 0x8), readl(rxd + 0xc));
5988                 rxd += (4 * sizeof(u32));
5989                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5990                        i,
5991                        readl(rxd + 0x0), readl(rxd + 0x4),
5992                        readl(rxd + 0x8), readl(rxd + 0xc));
5993         }
5994
5995         for (i = 0; i < 6; i++) {
5996                 unsigned long rxd;
5997
5998                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
5999                         + (i * sizeof(struct tg3_rx_buffer_desc));
6000                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6001                        i,
6002                        readl(rxd + 0x0), readl(rxd + 0x4),
6003                        readl(rxd + 0x8), readl(rxd + 0xc));
6004                 rxd += (4 * sizeof(u32));
6005                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6006                        i,
6007                        readl(rxd + 0x0), readl(rxd + 0x4),
6008                        readl(rxd + 0x8), readl(rxd + 0xc));
6009         }
6010 }
6011 #endif
6012
6013 static struct net_device_stats *tg3_get_stats(struct net_device *);
6014 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6015
6016 static int tg3_close(struct net_device *dev)
6017 {
6018         struct tg3 *tp = netdev_priv(dev);
6019
6020         netif_stop_queue(dev);
6021
6022         del_timer_sync(&tp->timer);
6023
6024         spin_lock_irq(&tp->lock);
6025         spin_lock(&tp->tx_lock);
6026 #if 0
6027         tg3_dump_state(tp);
6028 #endif
6029
6030         tg3_disable_ints(tp);
6031
6032         tg3_halt(tp);
6033         tg3_free_rings(tp);
6034         tp->tg3_flags &=
6035                 ~(TG3_FLAG_INIT_COMPLETE |
6036                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6037         netif_carrier_off(tp->dev);
6038
6039         spin_unlock(&tp->tx_lock);
6040         spin_unlock_irq(&tp->lock);
6041
6042         free_irq(dev->irq, dev);
6043
6044         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6045                sizeof(tp->net_stats_prev));
6046         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6047                sizeof(tp->estats_prev));
6048
6049         tg3_free_consistent(tp);
6050
6051         return 0;
6052 }
6053
6054 static inline unsigned long get_stat64(tg3_stat64_t *val)
6055 {
6056         unsigned long ret;
6057
6058 #if (BITS_PER_LONG == 32)
6059         ret = val->low;
6060 #else
6061         ret = ((u64)val->high << 32) | ((u64)val->low);
6062 #endif
6063         return ret;
6064 }
6065
6066 static unsigned long calc_crc_errors(struct tg3 *tp)
6067 {
6068         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6069
6070         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6071             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6072              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6073                 unsigned long flags;
6074                 u32 val;
6075
6076                 spin_lock_irqsave(&tp->lock, flags);
6077                 if (!tg3_readphy(tp, 0x1e, &val)) {
6078                         tg3_writephy(tp, 0x1e, val | 0x8000);
6079                         tg3_readphy(tp, 0x14, &val);
6080                 } else
6081                         val = 0;
6082                 spin_unlock_irqrestore(&tp->lock, flags);
6083
6084                 tp->phy_crc_errors += val;
6085
6086                 return tp->phy_crc_errors;
6087         }
6088
6089         return get_stat64(&hw_stats->rx_fcs_errors);
6090 }
6091
6092 #define ESTAT_ADD(member) \
6093         estats->member =        old_estats->member + \
6094                                 get_stat64(&hw_stats->member)
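/* Each ethtool counter is the running total saved in tp->estats_prev
 * (snapshotted when the device was last closed) plus whatever the hardware
 * has accumulated since the chip was last reset.
 */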
6095
6096 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6097 {
6098         struct tg3_ethtool_stats *estats = &tp->estats;
6099         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6100         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6101
6102         if (!hw_stats)
6103                 return old_estats;
6104
6105         ESTAT_ADD(rx_octets);
6106         ESTAT_ADD(rx_fragments);
6107         ESTAT_ADD(rx_ucast_packets);
6108         ESTAT_ADD(rx_mcast_packets);
6109         ESTAT_ADD(rx_bcast_packets);
6110         ESTAT_ADD(rx_fcs_errors);
6111         ESTAT_ADD(rx_align_errors);
6112         ESTAT_ADD(rx_xon_pause_rcvd);
6113         ESTAT_ADD(rx_xoff_pause_rcvd);
6114         ESTAT_ADD(rx_mac_ctrl_rcvd);
6115         ESTAT_ADD(rx_xoff_entered);
6116         ESTAT_ADD(rx_frame_too_long_errors);
6117         ESTAT_ADD(rx_jabbers);
6118         ESTAT_ADD(rx_undersize_packets);
6119         ESTAT_ADD(rx_in_length_errors);
6120         ESTAT_ADD(rx_out_length_errors);
6121         ESTAT_ADD(rx_64_or_less_octet_packets);
6122         ESTAT_ADD(rx_65_to_127_octet_packets);
6123         ESTAT_ADD(rx_128_to_255_octet_packets);
6124         ESTAT_ADD(rx_256_to_511_octet_packets);
6125         ESTAT_ADD(rx_512_to_1023_octet_packets);
6126         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6127         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6128         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6129         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6130         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6131
6132         ESTAT_ADD(tx_octets);
6133         ESTAT_ADD(tx_collisions);
6134         ESTAT_ADD(tx_xon_sent);
6135         ESTAT_ADD(tx_xoff_sent);
6136         ESTAT_ADD(tx_flow_control);
6137         ESTAT_ADD(tx_mac_errors);
6138         ESTAT_ADD(tx_single_collisions);
6139         ESTAT_ADD(tx_mult_collisions);
6140         ESTAT_ADD(tx_deferred);
6141         ESTAT_ADD(tx_excessive_collisions);
6142         ESTAT_ADD(tx_late_collisions);
6143         ESTAT_ADD(tx_collide_2times);
6144         ESTAT_ADD(tx_collide_3times);
6145         ESTAT_ADD(tx_collide_4times);
6146         ESTAT_ADD(tx_collide_5times);
6147         ESTAT_ADD(tx_collide_6times);
6148         ESTAT_ADD(tx_collide_7times);
6149         ESTAT_ADD(tx_collide_8times);
6150         ESTAT_ADD(tx_collide_9times);
6151         ESTAT_ADD(tx_collide_10times);
6152         ESTAT_ADD(tx_collide_11times);
6153         ESTAT_ADD(tx_collide_12times);
6154         ESTAT_ADD(tx_collide_13times);
6155         ESTAT_ADD(tx_collide_14times);
6156         ESTAT_ADD(tx_collide_15times);
6157         ESTAT_ADD(tx_ucast_packets);
6158         ESTAT_ADD(tx_mcast_packets);
6159         ESTAT_ADD(tx_bcast_packets);
6160         ESTAT_ADD(tx_carrier_sense_errors);
6161         ESTAT_ADD(tx_discards);
6162         ESTAT_ADD(tx_errors);
6163
6164         ESTAT_ADD(dma_writeq_full);
6165         ESTAT_ADD(dma_write_prioq_full);
6166         ESTAT_ADD(rxbds_empty);
6167         ESTAT_ADD(rx_discards);
6168         ESTAT_ADD(rx_errors);
6169         ESTAT_ADD(rx_threshold_hit);
6170
6171         ESTAT_ADD(dma_readq_full);
6172         ESTAT_ADD(dma_read_prioq_full);
6173         ESTAT_ADD(tx_comp_queue_full);
6174
6175         ESTAT_ADD(ring_set_send_prod_index);
6176         ESTAT_ADD(ring_status_update);
6177         ESTAT_ADD(nic_irqs);
6178         ESTAT_ADD(nic_avoided_irqs);
6179         ESTAT_ADD(nic_tx_threshold_hit);
6180
6181         return estats;
6182 }
6183
6184 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6185 {
6186         struct tg3 *tp = netdev_priv(dev);
6187         struct net_device_stats *stats = &tp->net_stats;
6188         struct net_device_stats *old_stats = &tp->net_stats_prev;
6189         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6190
6191         if (!hw_stats)
6192                 return old_stats;
6193
6194         stats->rx_packets = old_stats->rx_packets +
6195                 get_stat64(&hw_stats->rx_ucast_packets) +
6196                 get_stat64(&hw_stats->rx_mcast_packets) +
6197                 get_stat64(&hw_stats->rx_bcast_packets);
6198
6199         stats->tx_packets = old_stats->tx_packets +
6200                 get_stat64(&hw_stats->tx_ucast_packets) +
6201                 get_stat64(&hw_stats->tx_mcast_packets) +
6202                 get_stat64(&hw_stats->tx_bcast_packets);
6203
6204         stats->rx_bytes = old_stats->rx_bytes +
6205                 get_stat64(&hw_stats->rx_octets);
6206         stats->tx_bytes = old_stats->tx_bytes +
6207                 get_stat64(&hw_stats->tx_octets);
6208
6209         stats->rx_errors = old_stats->rx_errors +
6210                 get_stat64(&hw_stats->rx_errors) +
6211                 get_stat64(&hw_stats->rx_discards);
6212         stats->tx_errors = old_stats->tx_errors +
6213                 get_stat64(&hw_stats->tx_errors) +
6214                 get_stat64(&hw_stats->tx_mac_errors) +
6215                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6216                 get_stat64(&hw_stats->tx_discards);
6217
6218         stats->multicast = old_stats->multicast +
6219                 get_stat64(&hw_stats->rx_mcast_packets);
6220         stats->collisions = old_stats->collisions +
6221                 get_stat64(&hw_stats->tx_collisions);
6222
6223         stats->rx_length_errors = old_stats->rx_length_errors +
6224                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6225                 get_stat64(&hw_stats->rx_undersize_packets);
6226
6227         stats->rx_over_errors = old_stats->rx_over_errors +
6228                 get_stat64(&hw_stats->rxbds_empty);
6229         stats->rx_frame_errors = old_stats->rx_frame_errors +
6230                 get_stat64(&hw_stats->rx_align_errors);
6231         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6232                 get_stat64(&hw_stats->tx_discards);
6233         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6234                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6235
6236         stats->rx_crc_errors = old_stats->rx_crc_errors +
6237                 calc_crc_errors(tp);
6238
6239         return stats;
6240 }
6241
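/* Bit-by-bit CRC-32 using the reflected Ethernet polynomial (0xedb88320).
 * The result is used below to hash multicast addresses into the 128-bit
 * MAC hash filter.
 */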
6242 static inline u32 calc_crc(unsigned char *buf, int len)
6243 {
6244         u32 reg;
6245         u32 tmp;
6246         int j, k;
6247
6248         reg = 0xffffffff;
6249
6250         for (j = 0; j < len; j++) {
6251                 reg ^= buf[j];
6252
6253                 for (k = 0; k < 8; k++) {
6254                         tmp = reg & 0x01;
6255
6256                         reg >>= 1;
6257
6258                         if (tmp) {
6259                                 reg ^= 0xedb88320;
6260                         }
6261                 }
6262         }
6263
6264         return ~reg;
6265 }
6266
6267 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6268 {
6269         /* accept or reject all multicast frames */
6270         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6271         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6272         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6273         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6274 }
6275
6276 static void __tg3_set_rx_mode(struct net_device *dev)
6277 {
6278         struct tg3 *tp = netdev_priv(dev);
6279         u32 rx_mode;
6280
6281         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6282                                   RX_MODE_KEEP_VLAN_TAG);
6283
6284         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6285          * flag clear.
6286          */
6287 #if TG3_VLAN_TAG_USED
6288         if (!tp->vlgrp &&
6289             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6290                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6291 #else
6292         /* By definition, VLAN is disabled always in this
6293          * case.
6294          */
6295         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6296                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6297 #endif
6298
6299         if (dev->flags & IFF_PROMISC) {
6300                 /* Promiscuous mode. */
6301                 rx_mode |= RX_MODE_PROMISC;
6302         } else if (dev->flags & IFF_ALLMULTI) {
6303                 /* Accept all multicast. */
6304                 tg3_set_multi (tp, 1);
6305         } else if (dev->mc_count < 1) {
6306                 /* Reject all multicast. */
6307                 tg3_set_multi (tp, 0);
6308         } else {
6309                 /* Accept one or more multicast(s). */
6310                 struct dev_mc_list *mclist;
6311                 unsigned int i;
6312                 u32 mc_filter[4] = { 0, };
6313                 u32 regidx;
6314                 u32 bit;
6315                 u32 crc;
6316
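                /* The low 7 bits of the inverted CRC select one of the 128
                 * hash-filter bits: the top two of those bits pick the
                 * register, the remaining five pick the bit within it.
                 */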
6317                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6318                      i++, mclist = mclist->next) {
6319
6320                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6321                         bit = ~crc & 0x7f;
6322                         regidx = (bit & 0x60) >> 5;
6323                         bit &= 0x1f;
6324                         mc_filter[regidx] |= (1 << bit);
6325                 }
6326
6327                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6328                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6329                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6330                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6331         }
6332
6333         if (rx_mode != tp->rx_mode) {
6334                 tp->rx_mode = rx_mode;
6335                 tw32_f(MAC_RX_MODE, rx_mode);
6336                 udelay(10);
6337         }
6338 }
6339
6340 static void tg3_set_rx_mode(struct net_device *dev)
6341 {
6342         struct tg3 *tp = netdev_priv(dev);
6343
6344         spin_lock_irq(&tp->lock);
6345         spin_lock(&tp->tx_lock);
6346         __tg3_set_rx_mode(dev);
6347         spin_unlock(&tp->tx_lock);
6348         spin_unlock_irq(&tp->lock);
6349 }
6350
6351 #define TG3_REGDUMP_LEN         (32 * 1024)
6352
6353 static int tg3_get_regs_len(struct net_device *dev)
6354 {
6355         return TG3_REGDUMP_LEN;
6356 }
6357
6358 static void tg3_get_regs(struct net_device *dev,
6359                 struct ethtool_regs *regs, void *_p)
6360 {
6361         u32 *p = _p;
6362         struct tg3 *tp = netdev_priv(dev);
6363         u8 *orig_p = _p;
6364         int i;
6365
6366         regs->version = 0;
6367
6368         memset(p, 0, TG3_REGDUMP_LEN);
6369
6370         spin_lock_irq(&tp->lock);
6371         spin_lock(&tp->tx_lock);
6372
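/* Each register value is stored at its own register offset within the dump
 * buffer (orig_p + reg), so the dump mirrors the chip's register map and
 * any ranges that are never read stay zero from the memset() above.
 */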
6373 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6374 #define GET_REG32_LOOP(base,len)                \
6375 do {    p = (u32 *)(orig_p + (base));           \
6376         for (i = 0; i < len; i += 4)            \
6377                 __GET_REG32((base) + i);        \
6378 } while (0)
6379 #define GET_REG32_1(reg)                        \
6380 do {    p = (u32 *)(orig_p + (reg));            \
6381         __GET_REG32((reg));                     \
6382 } while (0)
6383
6384         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6385         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6386         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6387         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6388         GET_REG32_1(SNDDATAC_MODE);
6389         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6390         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6391         GET_REG32_1(SNDBDC_MODE);
6392         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6393         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6394         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6395         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6396         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6397         GET_REG32_1(RCVDCC_MODE);
6398         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6399         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6400         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6401         GET_REG32_1(MBFREE_MODE);
6402         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6403         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6404         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6405         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6406         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6407         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6408         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6409         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6410         GET_REG32_LOOP(FTQ_RESET, 0x120);
6411         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6412         GET_REG32_1(DMAC_MODE);
6413         GET_REG32_LOOP(GRC_MODE, 0x4c);
6414         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6415                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6416
6417 #undef __GET_REG32
6418 #undef GET_REG32_LOOP
6419 #undef GET_REG32_1
6420
6421         spin_unlock(&tp->tx_lock);
6422         spin_unlock_irq(&tp->lock);
6423 }
6424
6425 static int tg3_get_eeprom_len(struct net_device *dev)
6426 {
6427         struct tg3 *tp = netdev_priv(dev);
6428
6429         return tp->nvram_size;
6430 }
6431
6432 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6433
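/* ethtool EEPROM read.  NVRAM is only addressable in 32-bit words, so the
 * request is handled in up to three pieces: a leading partial word to reach
 * 4-byte alignment, the aligned middle, and a trailing partial word.
 */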
6434 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6435 {
6436         struct tg3 *tp = netdev_priv(dev);
6437         int ret;
6438         u8  *pd;
6439         u32 i, offset, len, val, b_offset, b_count;
6440
6441         offset = eeprom->offset;
6442         len = eeprom->len;
6443         eeprom->len = 0;
6444
6445         eeprom->magic = TG3_EEPROM_MAGIC;
6446
6447         if (offset & 3) {
6448                 /* adjustments to start on required 4 byte boundary */
6449                 b_offset = offset & 3;
6450                 b_count = 4 - b_offset;
6451                 if (b_count > len) {
6452                         /* i.e. offset=1 len=2 */
6453                         b_count = len;
6454                 }
6455                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6456                 if (ret)
6457                         return ret;
6458                 val = cpu_to_le32(val);
6459                 memcpy(data, ((char*)&val) + b_offset, b_count);
6460                 len -= b_count;
6461                 offset += b_count;
6462                 eeprom->len += b_count;
6463         }
6464
6465         /* read bytes up to the last 4 byte boundary */
6466         pd = &data[eeprom->len];
6467         for (i = 0; i < (len - (len & 3)); i += 4) {
6468                 ret = tg3_nvram_read(tp, offset + i, &val);
6469                 if (ret) {
6470                         eeprom->len += i;
6471                         return ret;
6472                 }
6473                 val = cpu_to_le32(val);
6474                 memcpy(pd + i, &val, 4);
6475         }
6476         eeprom->len += i;
6477
6478         if (len & 3) {
6479                 /* read last bytes not ending on 4 byte boundary */
6480                 pd = &data[eeprom->len];
6481                 b_count = len & 3;
6482                 b_offset = offset + len - b_count;
6483                 ret = tg3_nvram_read(tp, b_offset, &val);
6484                 if (ret)
6485                         return ret;
6486                 val = cpu_to_le32(val);
6487                 memcpy(pd, ((char*)&val), b_count);
6488                 eeprom->len += b_count;
6489         }
6490         return 0;
6491 }
6492
6493 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
6494
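/* ethtool EEPROM write.  Unaligned requests are widened to whole 32-bit
 * words: the words overlapping the start and/or end are read first and
 * merged with the caller's data in a temporary buffer before the block
 * is written back.
 */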
6495 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6496 {
6497         struct tg3 *tp = netdev_priv(dev);
6498         int ret;
6499         u32 offset, len, b_offset, odd_len, start, end;
6500         u8 *buf;
6501
6502         if (eeprom->magic != TG3_EEPROM_MAGIC)
6503                 return -EINVAL;
6504
6505         offset = eeprom->offset;
6506         len = eeprom->len;
6507
6508         if ((b_offset = (offset & 3))) {
6509                 /* adjustments to start on required 4 byte boundary */
6510                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6511                 if (ret)
6512                         return ret;
6513                 start = cpu_to_le32(start);
6514                 len += b_offset;
6515                 offset &= ~3;
6516         }
6517
6518         odd_len = 0;
6519         if ((len & 3) && ((len > 4) || (b_offset == 0))) {
6520                 /* adjustments to end on required 4 byte boundary */
6521                 odd_len = 1;
6522                 len = (len + 3) & ~3;
6523                 ret = tg3_nvram_read(tp, offset+len-4, &end);
6524                 if (ret)
6525                         return ret;
6526                 end = cpu_to_le32(end);
6527         }
6528
6529         buf = data;
6530         if (b_offset || odd_len) {
6531                 buf = kmalloc(len, GFP_KERNEL);
6532                 if (buf == NULL)
6533                         return -ENOMEM;
6534                 if (b_offset)
6535                         memcpy(buf, &start, 4);
6536                 if (odd_len)
6537                         memcpy(buf+len-4, &end, 4);
6538                 memcpy(buf + b_offset, data, eeprom->len);
6539         }
6540
6541         ret = tg3_nvram_write_block(tp, offset, len, buf);
6542
6543         if (buf != data)
6544                 kfree(buf);
6545
6546         return ret;
6547 }
6548
6549 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6550 {
6551         struct tg3 *tp = netdev_priv(dev);
6552   
6553         cmd->supported = (SUPPORTED_Autoneg);
6554
6555         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6556                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6557                                    SUPPORTED_1000baseT_Full);
6558
6559         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6560                 cmd->supported |= (SUPPORTED_100baseT_Half |
6561                                   SUPPORTED_100baseT_Full |
6562                                   SUPPORTED_10baseT_Half |
6563                                   SUPPORTED_10baseT_Full |
6564                                   SUPPORTED_MII);
6565         else
6566                 cmd->supported |= SUPPORTED_FIBRE;
6567   
6568         cmd->advertising = tp->link_config.advertising;
6569         if (netif_running(dev)) {
6570                 cmd->speed = tp->link_config.active_speed;
6571                 cmd->duplex = tp->link_config.active_duplex;
6572         }
6573         cmd->port = 0;
6574         cmd->phy_address = PHY_ADDR;
6575         cmd->transceiver = 0;
6576         cmd->autoneg = tp->link_config.autoneg;
6577         cmd->maxtxpkt = 0;
6578         cmd->maxrxpkt = 0;
6579         return 0;
6580 }
6581   
6582 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6583 {
6584         struct tg3 *tp = netdev_priv(dev);
6585   
6586         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6587                 /* These are the only valid advertisement bits allowed.  */
6588                 if (cmd->autoneg == AUTONEG_ENABLE &&
6589                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6590                                           ADVERTISED_1000baseT_Full |
6591                                           ADVERTISED_Autoneg |
6592                                           ADVERTISED_FIBRE)))
6593                         return -EINVAL;
6594         }
6595
6596         spin_lock_irq(&tp->lock);
6597         spin_lock(&tp->tx_lock);
6598
6599         tp->link_config.autoneg = cmd->autoneg;
6600         if (cmd->autoneg == AUTONEG_ENABLE) {
6601                 tp->link_config.advertising = cmd->advertising;
6602                 tp->link_config.speed = SPEED_INVALID;
6603                 tp->link_config.duplex = DUPLEX_INVALID;
6604         } else {
6605                 tp->link_config.advertising = 0;
6606                 tp->link_config.speed = cmd->speed;
6607                 tp->link_config.duplex = cmd->duplex;
6608         }
6609   
6610         if (netif_running(dev))
6611                 tg3_setup_phy(tp, 1);
6612
6613         spin_unlock(&tp->tx_lock);
6614         spin_unlock_irq(&tp->lock);
6615   
6616         return 0;
6617 }
6618   
6619 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6620 {
6621         struct tg3 *tp = netdev_priv(dev);
6622   
6623         strcpy(info->driver, DRV_MODULE_NAME);
6624         strcpy(info->version, DRV_MODULE_VERSION);
6625         strcpy(info->bus_info, pci_name(tp->pdev));
6626 }
6627   
6628 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6629 {
6630         struct tg3 *tp = netdev_priv(dev);
6631   
6632         wol->supported = WAKE_MAGIC;
6633         wol->wolopts = 0;
6634         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6635                 wol->wolopts = WAKE_MAGIC;
6636         memset(&wol->sopass, 0, sizeof(wol->sopass));
6637 }
6638   
6639 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6640 {
6641         struct tg3 *tp = netdev_priv(dev);
6642   
6643         if (wol->wolopts & ~WAKE_MAGIC)
6644                 return -EINVAL;
6645         if ((wol->wolopts & WAKE_MAGIC) &&
6646             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
6647             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6648                 return -EINVAL;
6649   
6650         spin_lock_irq(&tp->lock);
6651         if (wol->wolopts & WAKE_MAGIC)
6652                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6653         else
6654                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6655         spin_unlock_irq(&tp->lock);
6656   
6657         return 0;
6658 }
6659   
6660 static u32 tg3_get_msglevel(struct net_device *dev)
6661 {
6662         struct tg3 *tp = netdev_priv(dev);
6663         return tp->msg_enable;
6664 }
6665   
6666 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6667 {
6668         struct tg3 *tp = netdev_priv(dev);
6669         tp->msg_enable = value;
6670 }
6671   
6672 #if TG3_TSO_SUPPORT != 0
6673 static int tg3_set_tso(struct net_device *dev, u32 value)
6674 {
6675         struct tg3 *tp = netdev_priv(dev);
6676
6677         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6678                 if (value)
6679                         return -EINVAL;
6680                 return 0;
6681         }
6682         return ethtool_op_set_tso(dev, value);
6683 }
6684 #endif
6685   
6686 static int tg3_nway_reset(struct net_device *dev)
6687 {
6688         struct tg3 *tp = netdev_priv(dev);
6689         u32 bmcr;
6690         int r;
6691   
6692         if (!netif_running(dev))
6693                 return -EAGAIN;
6694
6695         spin_lock_irq(&tp->lock);
6696         r = -EINVAL;
6697         tg3_readphy(tp, MII_BMCR, &bmcr);
6698         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
6699             (bmcr & BMCR_ANENABLE)) {
6700                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6701                 r = 0;
6702         }
6703         spin_unlock_irq(&tp->lock);
6704   
6705         return r;
6706 }
6707   
6708 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6709 {
6710         struct tg3 *tp = netdev_priv(dev);
6711   
6712         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6713         ering->rx_mini_max_pending = 0;
6714         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6715
6716         ering->rx_pending = tp->rx_pending;
6717         ering->rx_mini_pending = 0;
6718         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6719         ering->tx_pending = tp->tx_pending;
6720 }
6721   
6722 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6723 {
6724         struct tg3 *tp = netdev_priv(dev);
6725   
6726         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6727             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6728             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6729                 return -EINVAL;
6730   
6731         if (netif_running(dev))
6732                 tg3_netif_stop(tp);
6733
6734         spin_lock_irq(&tp->lock);
6735         spin_lock(&tp->tx_lock);
6736   
6737         tp->rx_pending = ering->rx_pending;
6738
6739         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6740             tp->rx_pending > 63)
6741                 tp->rx_pending = 63;
6742         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6743         tp->tx_pending = ering->tx_pending;
6744
6745         if (netif_running(dev)) {
6746                 tg3_halt(tp);
6747                 tg3_init_hw(tp);
6748                 tg3_netif_start(tp);
6749         }
6750
6751         spin_unlock(&tp->tx_lock);
6752         spin_unlock_irq(&tp->lock);
6753   
6754         return 0;
6755 }
6756   
6757 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6758 {
6759         struct tg3 *tp = netdev_priv(dev);
6760   
6761         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6762         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
6763         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
6764 }
6765   
6766 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6767 {
6768         struct tg3 *tp = netdev_priv(dev);
6769   
6770         if (netif_running(dev))
6771                 tg3_netif_stop(tp);
6772
6773         spin_lock_irq(&tp->lock);
6774         spin_lock(&tp->tx_lock);
6775         if (epause->autoneg)
6776                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6777         else
6778                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6779         if (epause->rx_pause)
6780                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
6781         else
6782                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
6783         if (epause->tx_pause)
6784                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
6785         else
6786                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
6787
6788         if (netif_running(dev)) {
6789                 tg3_halt(tp);
6790                 tg3_init_hw(tp);
6791                 tg3_netif_start(tp);
6792         }
6793         spin_unlock(&tp->tx_lock);
6794         spin_unlock_irq(&tp->lock);
6795   
6796         return 0;
6797 }
6798   
6799 static u32 tg3_get_rx_csum(struct net_device *dev)
6800 {
6801         struct tg3 *tp = netdev_priv(dev);
6802         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
6803 }
6804   
6805 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6806 {
6807         struct tg3 *tp = netdev_priv(dev);
6808   
6809         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6810                 if (data != 0)
6811                         return -EINVAL;
6812                 return 0;
6813         }
6814   
6815         spin_lock_irq(&tp->lock);
6816         if (data)
6817                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6818         else
6819                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6820         spin_unlock_irq(&tp->lock);
6821   
6822         return 0;
6823 }
6824   
6825 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6826 {
6827         struct tg3 *tp = netdev_priv(dev);
6828   
6829         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6830                 if (data != 0)
6831                         return -EINVAL;
6832                 return 0;
6833         }
6834   
6835         if (data)
6836                 dev->features |= NETIF_F_IP_CSUM;
6837         else
6838                 dev->features &= ~NETIF_F_IP_CSUM;
6839
6840         return 0;
6841 }
6842
6843 static int tg3_get_stats_count (struct net_device *dev)
6844 {
6845         return TG3_NUM_STATS;
6846 }
6847
6848 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6849 {
6850         switch (stringset) {
6851         case ETH_SS_STATS:
6852                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6853                 break;
6854         default:
6855                 WARN_ON(1);     /* we need a WARN() */
6856                 break;
6857         }
6858 }
6859
6860 static void tg3_get_ethtool_stats (struct net_device *dev,
6861                                    struct ethtool_stats *estats, u64 *tmp_stats)
6862 {
6863         struct tg3 *tp = netdev_priv(dev);
6864         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
6865 }
6866
6867 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6868 {
6869         struct mii_ioctl_data *data = if_mii(ifr);
6870         struct tg3 *tp = netdev_priv(dev);
6871         int err;
6872
6873         switch(cmd) {
6874         case SIOCGMIIPHY:
6875                 data->phy_id = PHY_ADDR;
6876
6877                 /* fallthru */
6878         case SIOCGMIIREG: {
6879                 u32 mii_regval;
6880
6881                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6882                         break;                  /* We have no PHY */
6883
6884                 spin_lock_irq(&tp->lock);
6885                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6886                 spin_unlock_irq(&tp->lock);
6887
6888                 data->val_out = mii_regval;
6889
6890                 return err;
6891         }
6892
6893         case SIOCSMIIREG:
6894                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6895                         break;                  /* We have no PHY */
6896
6897                 if (!capable(CAP_NET_ADMIN))
6898                         return -EPERM;
6899
6900                 spin_lock_irq(&tp->lock);
6901                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6902                 spin_unlock_irq(&tp->lock);
6903
6904                 return err;
6905
6906         default:
6907                 /* do nothing */
6908                 break;
6909         }
6910         return -EOPNOTSUPP;
6911 }
6912
6913 #if TG3_VLAN_TAG_USED
6914 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6915 {
6916         struct tg3 *tp = netdev_priv(dev);
6917
6918         spin_lock_irq(&tp->lock);
6919         spin_lock(&tp->tx_lock);
6920
6921         tp->vlgrp = grp;
6922
6923         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6924         __tg3_set_rx_mode(dev);
6925
6926         spin_unlock(&tp->tx_lock);
6927         spin_unlock_irq(&tp->lock);
6928 }
6929
6930 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6931 {
6932         struct tg3 *tp = netdev_priv(dev);
6933
6934         spin_lock_irq(&tp->lock);
6935         spin_lock(&tp->tx_lock);
6936         if (tp->vlgrp)
6937                 tp->vlgrp->vlan_devices[vid] = NULL;
6938         spin_unlock(&tp->tx_lock);
6939         spin_unlock_irq(&tp->lock);
6940 }
6941 #endif
6942
6943 static struct ethtool_ops tg3_ethtool_ops = {
6944         .get_settings           = tg3_get_settings,
6945         .set_settings           = tg3_set_settings,
6946         .get_drvinfo            = tg3_get_drvinfo,
6947         .get_regs_len           = tg3_get_regs_len,
6948         .get_regs               = tg3_get_regs,
6949         .get_wol                = tg3_get_wol,
6950         .set_wol                = tg3_set_wol,
6951         .get_msglevel           = tg3_get_msglevel,
6952         .set_msglevel           = tg3_set_msglevel,
6953         .nway_reset             = tg3_nway_reset,
6954         .get_link               = ethtool_op_get_link,
6955         .get_eeprom_len         = tg3_get_eeprom_len,
6956         .get_eeprom             = tg3_get_eeprom,
6957         .set_eeprom             = tg3_set_eeprom,
6958         .get_ringparam          = tg3_get_ringparam,
6959         .set_ringparam          = tg3_set_ringparam,
6960         .get_pauseparam         = tg3_get_pauseparam,
6961         .set_pauseparam         = tg3_set_pauseparam,
6962         .get_rx_csum            = tg3_get_rx_csum,
6963         .set_rx_csum            = tg3_set_rx_csum,
6964         .get_tx_csum            = ethtool_op_get_tx_csum,
6965         .set_tx_csum            = tg3_set_tx_csum,
6966         .get_sg                 = ethtool_op_get_sg,
6967         .set_sg                 = ethtool_op_set_sg,
6968 #if TG3_TSO_SUPPORT != 0
6969         .get_tso                = ethtool_op_get_tso,
6970         .set_tso                = tg3_set_tso,
6971 #endif
6972         .get_strings            = tg3_get_strings,
6973         .get_stats_count        = tg3_get_stats_count,
6974         .get_ethtool_stats      = tg3_get_ethtool_stats,
6975 };
6976
6977 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
6978 {
6979         u32 cursize, val;
6980
6981         tp->nvram_size = EEPROM_CHIP_SIZE;
6982
6983         if (tg3_nvram_read(tp, 0, &val) != 0)
6984                 return;
6985
6986         if (swab32(val) != TG3_EEPROM_MAGIC)
6987                 return;
6988
6989         /*
6990          * Size the chip by reading offsets at increasing powers of two.
6991          * When we encounter our validation signature, we know the addressing
6992          * has wrapped around, and thus have our chip size.
6993          */
6994         cursize = 0x800;
6995
6996         while (cursize < tp->nvram_size) {
6997                 if (tg3_nvram_read(tp, cursize, &val) != 0)
6998                         return;
6999
7000                 if (swab32(val) == TG3_EEPROM_MAGIC)
7001                         break;
7002
7003                 cursize <<= 1;
7004         }
7005
7006         tp->nvram_size = cursize;
7007 }
7008                 
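/* If the NVRAM image provides it, the word at offset 0xf0 carries the
 * NVRAM size in KB in its upper 16 bits; otherwise fall back to the
 * 0x20000 byte (128KB) default.
 */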
7009 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
7010 {
7011         u32 val;
7012
7013         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
7014                 if (val != 0) {
7015                         tp->nvram_size = (val >> 16) * 1024;
7016                         return;
7017                 }
7018         }
7019         tp->nvram_size = 0x20000;
7020 }
7021
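/* Decode the strapping in NVRAM_CFG1 to work out which flash or EEPROM
 * part is attached.  Only 5750/5752 expose a vendor field; older chips
 * are assumed to use a buffered Atmel AT45DB0X1B-style part.
 */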
7022 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
7023 {
7024         u32 nvcfg1;
7025
7026         nvcfg1 = tr32(NVRAM_CFG1);
7027         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
7028                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
7029         }
7030         else {
7031                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7032                 tw32(NVRAM_CFG1, nvcfg1);
7033         }
7034
7035         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7036             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
7037                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
7038                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
7039                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7040                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7041                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7042                                 break;
7043                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
7044                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7045                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
7046                                 break;
7047                         case FLASH_VENDOR_ATMEL_EEPROM:
7048                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7049                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7050                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7051                                 break;
7052                         case FLASH_VENDOR_ST:
7053                                 tp->nvram_jedecnum = JEDEC_ST;
7054                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
7055                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7056                                 break;
7057                         case FLASH_VENDOR_SAIFUN:
7058                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
7059                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
7060                                 break;
7061                         case FLASH_VENDOR_SST_SMALL:
7062                         case FLASH_VENDOR_SST_LARGE:
7063                                 tp->nvram_jedecnum = JEDEC_SST;
7064                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
7065                                 break;
7066                 }
7067         }
7068         else {
7069                 tp->nvram_jedecnum = JEDEC_ATMEL;
7070                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7071                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7072         }
7073 }
7074
7075 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
7076 static void __devinit tg3_nvram_init(struct tg3 *tp)
7077 {
7078         int j;
7079
7080         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
7081                 return;
7082
7083         tw32_f(GRC_EEPROM_ADDR,
7084              (EEPROM_ADDR_FSM_RESET |
7085               (EEPROM_DEFAULT_CLOCK_PERIOD <<
7086                EEPROM_ADDR_CLKPERD_SHIFT)));
7087
7088         /* XXX schedule_timeout() ... */
7089         for (j = 0; j < 100; j++)
7090                 udelay(10);
7091
7092         /* Enable seeprom accesses. */
7093         tw32_f(GRC_LOCAL_CTRL,
7094              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
7095         udelay(100);
7096
7097         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7098             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
7099                 tp->tg3_flags |= TG3_FLAG_NVRAM;
7100
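                /* 5750/5752 gate the NVRAM registers behind ACCESS_ENABLE;
                 * open the interface up while probing the NVRAM info and
                 * size, then close it again below.
                 */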
7101                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7102                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
7103                         u32 nvaccess = tr32(NVRAM_ACCESS);
7104
7105                         tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7106                 }
7107
7108                 tg3_get_nvram_info(tp);
7109                 tg3_get_nvram_size(tp);
7110
7111                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7112                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
7113                         u32 nvaccess = tr32(NVRAM_ACCESS);
7114
7115                         tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7116                 }
7117
7118         } else {
7119                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
7120
7121                 tg3_get_eeprom_size(tp);
7122         }
7123 }
7124
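/* Read one 32-bit word through the legacy serial-EEPROM interface
 * (GRC_EEPROM_ADDR/GRC_EEPROM_DATA), polling for EEPROM_ADDR_COMPLETE
 * for up to roughly one second (10000 polls of 100us).
 */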
7125 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
7126                                         u32 offset, u32 *val)
7127 {
7128         u32 tmp;
7129         int i;
7130
7131         if (offset > EEPROM_ADDR_ADDR_MASK ||
7132             (offset % 4) != 0)
7133                 return -EINVAL;
7134
7135         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
7136                                         EEPROM_ADDR_DEVID_MASK |
7137                                         EEPROM_ADDR_READ);
7138         tw32(GRC_EEPROM_ADDR,
7139              tmp |
7140              (0 << EEPROM_ADDR_DEVID_SHIFT) |
7141              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
7142               EEPROM_ADDR_ADDR_MASK) |
7143              EEPROM_ADDR_READ | EEPROM_ADDR_START);
7144
7145         for (i = 0; i < 10000; i++) {
7146                 tmp = tr32(GRC_EEPROM_ADDR);
7147
7148                 if (tmp & EEPROM_ADDR_COMPLETE)
7149                         break;
7150                 udelay(100);
7151         }
7152         if (!(tmp & EEPROM_ADDR_COMPLETE))
7153                 return -EBUSY;
7154
7155         *val = tr32(GRC_EEPROM_DATA);
7156         return 0;
7157 }
7158
7159 #define NVRAM_CMD_TIMEOUT 10000
7160
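/* Kick off an NVRAM command and busy-wait for NVRAM_CMD_DONE, giving up
 * after roughly 100ms (NVRAM_CMD_TIMEOUT polls of 10us).
 */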
7161 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
7162 {
7163         int i;
7164
7165         tw32(NVRAM_CMD, nvram_cmd);
7166         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
7167                 udelay(10);
7168                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
7169                         udelay(10);
7170                         break;
7171                 }
7172         }
7173         if (i == NVRAM_CMD_TIMEOUT) {
7174                 return -EBUSY;
7175         }
7176         return 0;
7177 }
7178
7179 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
7180 {
7181         int ret;
7182
7183         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7184                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
7185                 return -EINVAL;
7186         }
7187
7188         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
7189                 return tg3_nvram_read_using_eeprom(tp, offset, val);
7190
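        /* Buffered Atmel flash is not linearly addressed: the page number
         * goes in the upper address bits (starting at
         * ATMEL_AT45DB0X1B_PAGE_POS) and the byte offset within the page in
         * the lower bits.  With the 264-byte pages these parts use, e.g.,
         * linear offset 300 becomes page 1, byte 36.
         */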
7191         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
7192                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7193                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7194
7195                 offset = ((offset / tp->nvram_pagesize) <<
7196                           ATMEL_AT45DB0X1B_PAGE_POS) +
7197                         (offset % tp->nvram_pagesize);
7198         }
7199
7200         if (offset > NVRAM_ADDR_MSK)
7201                 return -EINVAL;
7202
7203         tg3_nvram_lock(tp);
7204
7205         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7206             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
7207                 u32 nvaccess = tr32(NVRAM_ACCESS);
7208
7209                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7210         }
7211
7212         tw32(NVRAM_ADDR, offset);
7213         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
7214                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
7215
7216         if (ret == 0)
7217                 *val = swab32(tr32(NVRAM_RDDATA));
7218
7219         tg3_nvram_unlock(tp);
7220
7221         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7222             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
7223                 u32 nvaccess = tr32(NVRAM_ACCESS);
7224
7225                 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7226         }
7227
7228         return ret;
7229 }
7230
7231 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
7232                                     u32 offset, u32 len, u8 *buf)
7233 {
7234         int i, j, rc = 0;
7235         u32 val;
7236
7237         for (i = 0; i < len; i += 4) {
7238                 u32 addr, data;
7239
7240                 addr = offset + i;
7241
7242                 memcpy(&data, buf + i, 4);
7243
7244                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
7245
7246                 val = tr32(GRC_EEPROM_ADDR);
7247                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
7248
7249                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
7250                         EEPROM_ADDR_READ);
7251                 tw32(GRC_EEPROM_ADDR, val |
7252                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
7253                         (addr & EEPROM_ADDR_ADDR_MASK) |
7254                         EEPROM_ADDR_START |
7255                         EEPROM_ADDR_WRITE);
7256                 
7257                 for (j = 0; j < 10000; j++) {
7258                         val = tr32(GRC_EEPROM_ADDR);
7259
7260                         if (val & EEPROM_ADDR_COMPLETE)
7261                                 break;
7262                         udelay(100);
7263                 }
7264                 if (!(val & EEPROM_ADDR_COMPLETE)) {
7265                         rc = -EBUSY;
7266                         break;
7267                 }
7268         }
7269
7270         return rc;
7271 }
7272
7273 /* offset and length are dword aligned */
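/* Unbuffered flash can only be programmed a full page at a time, so each
 * affected page is read back, merged with the new data, erased and then
 * rewritten word by word.
 */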
7274 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
7275                 u8 *buf)
7276 {
7277         int ret = 0;
7278         u32 pagesize = tp->nvram_pagesize;
7279         u32 pagemask = pagesize - 1;
7280         u32 nvram_cmd;
7281         u8 *tmp;
7282
7283         tmp = kmalloc(pagesize, GFP_KERNEL);
7284         if (tmp == NULL)
7285                 return -ENOMEM;
7286
7287         while (len) {
7288                 int j;
7289                 u32 phy_addr, page_off, size, nvaccess;
7290
7291                 phy_addr = offset & ~pagemask;
7292         
7293                 for (j = 0; j < pagesize; j += 4) {
7294                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
7295                                                 (u32 *) (tmp + j))))
7296                                 break;
7297                 }
7298                 if (ret)
7299                         break;
7300
7301                 page_off = offset & pagemask;
7302                 size = pagesize;
7303                 if (len < size)
7304                         size = len;
7305
7306                 len -= size;
7307
7308                 memcpy(tmp + page_off, buf, size);
7309
7310                 offset = offset + (pagesize - page_off);
7311
7312                 nvaccess = tr32(NVRAM_ACCESS);
7313                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7314
7315                 /*
7316                  * Before we can erase the flash page, we need
7317                  * to issue a special "write enable" command.
7318                  */
7319                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7320
7321                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7322                         break;
7323
7324                 /* Erase the target page */
7325                 tw32(NVRAM_ADDR, phy_addr);
7326
7327                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
7328                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
7329
7330                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7331                         break;
7332
7333                 /* Issue another write enable to start the write. */
7334                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7335
7336                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7337                         break;
7338
7339                 for (j = 0; j < pagesize; j += 4) {
7340                         u32 data;
7341
7342                         data = *((u32 *) (tmp + j));
7343                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
7344
7345                         tw32(NVRAM_ADDR, phy_addr + j);
7346
7347                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
7348                                 NVRAM_CMD_WR;
7349
7350                         if (j == 0)
7351                                 nvram_cmd |= NVRAM_CMD_FIRST;
7352                         else if (j == (pagesize - 4))
7353                                 nvram_cmd |= NVRAM_CMD_LAST;
7354
7355                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7356                                 break;
7357                 }
7358                 if (ret)
7359                         break;
7360         }
7361
7362         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7363         tg3_nvram_exec_cmd(tp, nvram_cmd);
7364
7365         kfree(tmp);
7366
7367         return ret;
7368 }
7369
7370 /* offset and length are dword aligned */
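/* Buffered flash and EEPROM parts take word writes directly.  Each word is
 * framed with NVRAM_CMD_FIRST/NVRAM_CMD_LAST at page and request boundaries,
 * and ST parts additionally need a write-enable command before each first
 * word of a page.
 */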
7371 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
7372                 u8 *buf)
7373 {
7374         int i, ret = 0;
7375
7376         for (i = 0; i < len; i += 4, offset += 4) {
7377                 u32 data, page_off, phy_addr, nvram_cmd;
7378
7379                 memcpy(&data, buf + i, 4);
7380                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
7381
7382                 page_off = offset % tp->nvram_pagesize;
7383
7384                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7385                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7386
7387                         phy_addr = ((offset / tp->nvram_pagesize) <<
7388                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
7389                 }
7390                 else {
7391                         phy_addr = offset;
7392                 }
7393
7394                 tw32(NVRAM_ADDR, phy_addr);
7395
7396                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
7397
7398                 if ((page_off == 0) || (i == 0))
7399                         nvram_cmd |= NVRAM_CMD_FIRST;
7400                 else if (page_off == (tp->nvram_pagesize - 4))
7401                         nvram_cmd |= NVRAM_CMD_LAST;
7402
7403                 if (i == (len - 4))
7404                         nvram_cmd |= NVRAM_CMD_LAST;
7405
7406                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
7407                         (nvram_cmd & NVRAM_CMD_FIRST)) {
7408
7409                         if ((ret = tg3_nvram_exec_cmd(tp,
7410                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
7411                                 NVRAM_CMD_DONE)))
7412
7413                                 break;
7414                 }
7415                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7416                         /* We always do complete word writes to eeprom. */
7417                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
7418                 }
7419
7420                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7421                         break;
7422         }
7423         return ret;
7424 }
7425
7426 /* offset and length are dword aligned */
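/* Dispatch to the buffered or unbuffered write path.  Boards that route an
 * EEPROM write-protect line to GPIO1 have the pin toggled around the write
 * (presumably de-asserting write protection) and driven back high
 * afterwards.
 */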
7427 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
7428 {
7429         int ret;
7430
7431         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7432                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
7433                 return -EINVAL;
7434         }
7435
7436         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7437                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
7438                        GRC_LCLCTRL_GPIO_OE1);
7439                 udelay(40);
7440         }
7441
7442         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
7443                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
7444         }
7445         else {
7446                 u32 grc_mode;
7447
7448                 tg3_nvram_lock(tp);
7449
7450                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7451                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
7452                         u32 nvaccess = tr32(NVRAM_ACCESS);
7453
7454                         tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7455
7456                         tw32(NVRAM_WRITE1, 0x406);
7457                 }
7458
7459                 grc_mode = tr32(GRC_MODE);
7460                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
7461
7462                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
7463                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7464
7465                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
7466                                 buf);
7467                 }
7468                 else {
7469                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
7470                                 buf);
7471                 }
7472
7473                 grc_mode = tr32(GRC_MODE);
7474                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
7475
7476                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7477                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
7478                         u32 nvaccess = tr32(NVRAM_ACCESS);
7479
7480                         tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7481                 }
7482                 tg3_nvram_unlock(tp);
7483         }
7484
7485         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7486                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
7487                        GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OUTPUT1);
7488                 udelay(40);
7489         }
7490
7491         return ret;
7492 }
7493
7494 struct subsys_tbl_ent {
7495         u16 subsys_vendor, subsys_devid;
7496         u32 phy_id;
7497 };
7498
7499 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
7500         /* Broadcom boards. */
7501         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
7502         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
7503         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
7504         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
7505         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
7506         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
7507         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
7508         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
7509         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
7510         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
7511         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
7512
7513         /* 3com boards. */
7514         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
7515         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
7516         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
7517         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
7518         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
7519
7520         /* DELL boards. */
7521         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
7522         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
7523         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
7524         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
7525
7526         /* Compaq boards. */
7527         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
7528         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
7529         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
7530         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
7531         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
7532
7533         /* IBM boards. */
7534         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
7535 };
7536
7537 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7538 {
7539         int i;
7540
7541         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7542                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7543                      tp->pdev->subsystem_vendor) &&
7544                     (subsys_id_to_phy_id[i].subsys_devid ==
7545                      tp->pdev->subsystem_device))
7546                         return &subsys_id_to_phy_id[i];
7547         }
7548         return NULL;
7549 }
7550
7551 static int __devinit tg3_phy_probe(struct tg3 *tp)
7552 {
7553         u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
7554         u32 hw_phy_id, hw_phy_id_masked;
7555         u32 val;
7556         int eeprom_signature_found, eeprom_phy_serdes, err;
7557
7558         tp->phy_id = PHY_ID_INVALID;
7559         eeprom_phy_id = PHY_ID_INVALID;
7560         eeprom_phy_serdes = 0;
7561         eeprom_signature_found = 0;
7562         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7563         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7564                 u32 nic_cfg, led_cfg;
7565                 u32 nic_phy_id, ver, cfg2 = 0;
7566
7567                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7568                 tp->nic_sram_data_cfg = nic_cfg;
7569
7570                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
7571                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
7572                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7573                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7574                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
7575                     (ver > 0) && (ver < 0x100))
7576                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
7577
7578                 eeprom_signature_found = 1;
7579
7580                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7581                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7582                         eeprom_phy_serdes = 1;
7583
7584                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7585                 if (nic_phy_id != 0) {
7586                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7587                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7588
7589                         eeprom_phy_id  = (id1 >> 16) << 10;
7590                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
7591                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7592                 } else
7593                         eeprom_phy_id = 0;
7594
7595                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7596                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
7597                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7598                                     SHASTA_EXT_LED_MODE_MASK);
7599                 } else
7600                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7601
7602                 switch (led_cfg) {
7603                 default:
7604                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7605                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7606                         break;
7607
7608                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7609                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7610                         break;
7611
7612                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7613                         tp->led_ctrl = LED_CTRL_MODE_MAC;
7614                         break;
7615
7616                 case SHASTA_EXT_LED_SHARED:
7617                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
7618                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7619                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7620                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7621                                                  LED_CTRL_MODE_PHY_2);
7622                         break;
7623
7624                 case SHASTA_EXT_LED_MAC:
7625                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7626                         break;
7627
7628                 case SHASTA_EXT_LED_COMBO:
7629                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
7630                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7631                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7632                                                  LED_CTRL_MODE_PHY_2);
7633                         break;
7634
7635                 }
7636
7637                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7638                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7639                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7640                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7641
7642                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7643                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7644                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7645                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7646
7647                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7648                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7649                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7650                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7651                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7652                 }
7653                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7654                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7655
7656                 if (cfg2 & (1 << 17))
7657                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
7658
7659                 /* SerDes signal pre-emphasis in register 0x590 is set by
7660                  * the bootcode when bit 18 is set. */
7661                 if (cfg2 & (1 << 18))
7662                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
7663         }
7664
7665         /* Reading the PHY ID register can conflict with ASF
7666          * firmware access to the PHY hardware.
7667          */
7668         err = 0;
7669         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7670                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7671         } else {
7672                 /* Now read the physical PHY_ID from the chip and verify
7673                  * that it is sane.  If it doesn't look good, we fall back
7674                  * to the PHY_ID found in the eeprom area, and failing
7675                  * that, to the hard-coded subsystem-ID table.
7676                  */
7677                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7678                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7679
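                /* Pack MII_PHYSID1/2 into the driver's internal PHY_ID
                 * encoding, the same layout used by the PHY_ID_BCM54xx
                 * constants and by the subsystem-ID table above.
                 */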
7680                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7681                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7682                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7683
7684                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7685         }
7686
7687         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7688                 tp->phy_id = hw_phy_id;
7689                 if (hw_phy_id_masked == PHY_ID_BCM8002)
7690                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7691         } else {
7692                 if (eeprom_signature_found) {
7693                         tp->phy_id = eeprom_phy_id;
7694                         if (eeprom_phy_serdes)
7695                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7696                 } else {
7697                         struct subsys_tbl_ent *p;
7698
7699                         /* No eeprom signature?  Try the hardcoded
7700                          * subsys device table.
7701                          */
7702                         p = lookup_by_subsys(tp);
7703                         if (!p)
7704                                 return -ENODEV;
7705
7706                         tp->phy_id = p->phy_id;
7707                         if (!tp->phy_id ||
7708                             tp->phy_id == PHY_ID_BCM8002)
7709                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7710                 }
7711         }
7712
7713         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7714             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7715                 u32 bmsr, adv_reg, tg3_ctrl;
7716
7717                 tg3_readphy(tp, MII_BMSR, &bmsr);
7718                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
7719                     (bmsr & BMSR_LSTATUS))
7720                         goto skip_phy_reset;
7721                     
7722                 err = tg3_phy_reset(tp);
7723                 if (err)
7724                         return err;
7725
7726                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7727                            ADVERTISE_100HALF | ADVERTISE_100FULL |
7728                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7729                 tg3_ctrl = 0;
7730                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7731                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7732                                     MII_TG3_CTRL_ADV_1000_FULL);
7733                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7734                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7735                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7736                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
7737                 }
7738
7739                 if (!tg3_copper_is_advertising_all(tp)) {
7740                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7741
7742                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7743                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7744
7745                         tg3_writephy(tp, MII_BMCR,
7746                                      BMCR_ANENABLE | BMCR_ANRESTART);
7747                 }
7748                 tg3_phy_set_wirespeed(tp);
7749
7750                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7751                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7752                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7753         }
7754
7755 skip_phy_reset:
7756         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7757                 err = tg3_init_5401phy_dsp(tp);
7758                 if (err)
7759                         return err;
7760         }
7761
7762         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7763                 err = tg3_init_5401phy_dsp(tp);
7764         }
7765
7766         if (!eeprom_signature_found)
7767                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7768
7769         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7770                 tp->link_config.advertising =
7771                         (ADVERTISED_1000baseT_Half |
7772                          ADVERTISED_1000baseT_Full |
7773                          ADVERTISED_Autoneg |
7774                          ADVERTISED_FIBRE);
7775         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7776                 tp->link_config.advertising &=
7777                         ~(ADVERTISED_1000baseT_Half |
7778                           ADVERTISED_1000baseT_Full);
7779
7780         return err;
7781 }
7782
7783 static void __devinit tg3_read_partno(struct tg3 *tp)
7784 {
7785         unsigned char vpd_data[256];
7786         int i;
7787
7788         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7789                 /* Sun decided not to put the necessary bits in the
7790                  * NVRAM of their onboard tg3 parts :(
7791                  */
7792                 strcpy(tp->board_part_number, "Sun 570X");
7793                 return;
7794         }
7795
7796         for (i = 0; i < 256; i += 4) {
7797                 u32 tmp;
7798
7799                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7800                         goto out_not_found;
7801
7802                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
7803                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
7804                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7805                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7806         }
7807
7808         /* Now parse and find the part number. */
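        /* Layout assumed by the loop below (PCI 2.2 VPD large-resource
         * format): each block starts with a tag byte -- 0x82 identifier
         * string, 0x90 read-only VPD, 0x91 read-write VPD -- followed by a
         * little-endian 16-bit length.  Within the 0x90 block, each keyword
         * is two ASCII characters ("PN" is the part number) followed by a
         * one-byte field length and the field data.
         */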
7809         for (i = 0; i < 256; ) {
7810                 unsigned char val = vpd_data[i];
7811                 int block_end;
7812
7813                 if (val == 0x82 || val == 0x91) {
7814                         i = (i + 3 +
7815                              (vpd_data[i + 1] +
7816                               (vpd_data[i + 2] << 8)));
7817                         continue;
7818                 }
7819
7820                 if (val != 0x90)
7821                         goto out_not_found;
7822
7823                 block_end = (i + 3 +
7824                              (vpd_data[i + 1] +
7825                               (vpd_data[i + 2] << 8)));
7826                 i += 3;
7827                 while (i < block_end) {
7828                         if (vpd_data[i + 0] == 'P' &&
7829                             vpd_data[i + 1] == 'N') {
7830                                 int partno_len = vpd_data[i + 2];
7831
7832                                 if (partno_len > 24)
7833                                         goto out_not_found;
7834
7835                                 memcpy(tp->board_part_number,
7836                                        &vpd_data[i + 3],
7837                                        partno_len);
7838
7839                                 /* Success. */
7840                                 return;
7841                         }
7842                 }
7843
7844                 /* Part number not found. */
7845                 goto out_not_found;
7846         }
7847
7848 out_not_found:
7849         strcpy(tp->board_part_number, "none");
7850 }
7851
7852 #ifdef CONFIG_SPARC64
7853 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
7854 {
7855         struct pci_dev *pdev = tp->pdev;
7856         struct pcidev_cookie *pcp = pdev->sysdata;
7857
7858         if (pcp != NULL) {
7859                 int node = pcp->prom_node;
7860                 u32 venid;
7861                 int err;
7862
7863                 err = prom_getproperty(node, "subsystem-vendor-id",
7864                                        (char *) &venid, sizeof(venid));
7865                 if (err == 0 || err == -1)
7866                         return 0;
7867                 if (venid == PCI_VENDOR_ID_SUN)
7868                         return 1;
7869         }
7870         return 0;
7871 }
7872 #endif
7873
7874 static int __devinit tg3_get_invariants(struct tg3 *tp)
7875 {
7876         static struct pci_device_id write_reorder_chipsets[] = {
7877                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7878                              PCI_DEVICE_ID_INTEL_82801AA_8) },
7879                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7880                              PCI_DEVICE_ID_INTEL_82801AB_8) },
7881                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7882                              PCI_DEVICE_ID_INTEL_82801BA_11) },
7883                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7884                              PCI_DEVICE_ID_INTEL_82801BA_6) },
7885                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
7886                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
7887                 { },
7888         };
7889         u32 misc_ctrl_reg;
7890         u32 cacheline_sz_reg;
7891         u32 pci_state_reg, grc_misc_cfg;
7892         u32 val;
7893         u16 pci_cmd;
7894         int err;
7895
7896 #ifdef CONFIG_SPARC64
7897         if (tg3_is_sun_570X(tp))
7898                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
7899 #endif
7900
7901         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
7902          * reordering to the mailbox registers done by the host
7903          * controller can cause major trouble.  We read back from
7904          * every mailbox register write to force the writes to be
7905          * posted to the chip in order.
7906          */
7907         if (pci_dev_present(write_reorder_chipsets))
7908                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
7909
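        /* Editor's sketch (illustrative, not part of the driver): the
         * read-back flush described above.  Reading the mailbox register
         * straight after writing it forces the ICH/AMD-762 bridge to flush
         * the posted write to the chip before anything else, so mailbox
         * writes cannot be reordered.  The helper name is an assumption;
         * the driver's real accessors are defined earlier in the file.
         *
         *      static inline void example_mbox_write_flush(void __iomem *mbox,
         *                                                   u32 val)
         *      {
         *              writel(val, mbox);
         *              readl(mbox);
         *      }
         */
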
7910         /* Force memory write invalidate off.  If we leave it on,
7911          * then on 5700_BX chips we have to enable a workaround.
7912          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
7913          * to match the cacheline size.  The Broadcom driver has this
7914          * workaround but turns MWI off all the time, so it never uses
7915          * it.  This seems to suggest that the workaround is insufficient.
7916          */
7917         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7918         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
7919         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7920
7921         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
7922          * has the register indirect write enable bit set before
7923          * we try to access any of the MMIO registers.  It is also
7924          * critical that the PCI-X hw workaround situation is decided
7925          * before that as well.
7926          */
7927         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7928                               &misc_ctrl_reg);
7929
7930         tp->pci_chip_rev_id = (misc_ctrl_reg >>
7931                                MISC_HOST_CTRL_CHIPREV_SHIFT);
7932
7933         /* Initialize misc host control in PCI block. */
7934         tp->misc_host_ctrl |= (misc_ctrl_reg &
7935                                MISC_HOST_CTRL_CHIPREV);
7936         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7937                                tp->misc_host_ctrl);
7938
7939         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7940                               &cacheline_sz_reg);
7941
7942         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
7943         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
7944         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
7945         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
7946
7947         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
7948             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
7949             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752))
7950                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
7951
7952         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7953             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7954                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
7955
7956         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7957             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7958                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
7959
7960         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
7961                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
7962
7963         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7964             tp->pci_lat_timer < 64) {
7965                 tp->pci_lat_timer = 64;
7966
7967                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
7968                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
7969                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
7970                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
7971
7972                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7973                                        cacheline_sz_reg);
7974         }
7975
7976         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7977                               &pci_state_reg);
7978
7979         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
7980                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
7981
7982                 /* If this is a 5700 BX chipset, and we are in PCI-X
7983                  * mode, enable register write workaround.
7984                  *
7985                  * The workaround is to use indirect register accesses
7986                  * for all chip writes not to mailbox registers.
7987                  */
7988                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
7989                         u32 pm_reg;
7990                         u16 pci_cmd;
7991
7992                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7993
7994                         /* The chip can have its power management PCI config
7995                          * space registers clobbered due to this bug.
7996                          * So explicitly force the chip into D0 here.
7997                          */
7998                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7999                                               &pm_reg);
8000                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
8001                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
8002                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8003                                                pm_reg);
8004
8005                         /* Also, force SERR#/PERR# in PCI command. */
8006                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8007                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
8008                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8009                 }
8010         }
8011
8012         /* Back-to-back register writes can cause problems on this chip;
8013          * the workaround is to read back all reg writes except those to
8014          * mailbox regs.  See tg3_write_indirect_reg32().
8015          *
8016          * PCI Express 5750_A0 rev chips need this workaround too.
8017          */
8018         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8019             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
8020              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
8021                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
8022
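        /* Editor's sketch (approximate, not a verbatim copy) of the two
         * register-write workarounds selected above; the real code is
         * tg3_write_indirect_reg32(), defined earlier in this file.  With
         * the PCI-X target bug, writes go through the config-space window;
         * with the 5701/5750-A0 bug, MMIO writes are simply read back.
         *
         *      static void example_reg32_write(struct tg3 *tp, u32 off, u32 val)
         *      {
         *              if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) {
         *                      spin_lock_bh(&tp->indirect_lock);
         *                      pci_write_config_dword(tp->pdev,
         *                                             TG3PCI_REG_BASE_ADDR, off);
         *                      pci_write_config_dword(tp->pdev,
         *                                             TG3PCI_REG_DATA, val);
         *                      spin_unlock_bh(&tp->indirect_lock);
         *              } else {
         *                      writel(val, tp->regs + off);
         *                      if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
         *                              readl(tp->regs + off);
         *              }
         *      }
         */
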
8023         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
8024                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
8025         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
8026                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
8027
8028         /* Chip-specific fixup from Broadcom driver */
8029         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
8030             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
8031                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
8032                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
8033         }
8034
8035         /* Force the chip into D0. */
8036         err = tg3_set_power_state(tp, 0);
8037         if (err) {
8038                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
8039                        pci_name(tp->pdev));
8040                 return err;
8041         }
8042
8043         /* 5700 B0 chips do not support checksumming correctly due
8044          * to hardware bugs.
8045          */
8046         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
8047                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
8048
8049         /* Pseudo-header checksum is done by hardware logic and not
8050          * the offload processors, so make the chip do the pseudo-
8051          * header checksums on receive.  For transmit it is more
8052          * convenient to do the pseudo-header checksum in software
8053          * as Linux does that on transmit for us in all cases.
8054          */
8055         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
8056         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
8057
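        /* Editor's sketch (illustrative variable names) of the software
         * pseudo-header checksum referred to above: on transmit the stack
         * seeds the TCP checksum field with the inverted pseudo-header sum
         * over the addresses, protocol and TCP length ("tcp_len" below), so
         * the checksum engine only has to fold in the header and payload.
         *
         *      struct iphdr  *iph = skb->nh.iph;
         *      struct tcphdr *th  = skb->h.th;
         *
         *      th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
         *                                     tcp_len, IPPROTO_TCP, 0);
         */
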
8058         /* Derive initial jumbo mode from MTU assigned in
8059          * ether_setup() via the alloc_etherdev() call
8060          */
8061         if (tp->dev->mtu > ETH_DATA_LEN)
8062                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
8063
8064         /* Determine WakeOnLan speed to use. */
8065         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8066             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8067             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
8068             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
8069                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
8070         } else {
8071                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
8072         }
8073
8074         /* A few boards don't want Ethernet@WireSpeed phy feature */
8075         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8076             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
8077              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
8078              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
8079                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
8080
8081         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
8082             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
8083                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
8084         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
8085                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
8086
8087         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8088             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8089             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8090                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
8091
8092         /* Only 5701 and later support tagged irq status mode.
8093          * Also, 5788 chips cannot use tagged irq status.
8094          *
8095          * However, since we are using NAPI, avoid tagged irq status
8096          * because the interrupt condition is more difficult to
8097          * fully clear in that mode.
8098          */
8099         tp->coalesce_mode = 0;
8100
8101         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
8102             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
8103                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
8104
8105         /* Initialize MAC MI mode, polling disabled. */
8106         tw32_f(MAC_MI_MODE, tp->mi_mode);
8107         udelay(80);
8108
8109         /* Initialize data/descriptor byte/word swapping. */
8110         val = tr32(GRC_MODE);
8111         val &= GRC_MODE_HOST_STACKUP;
8112         tw32(GRC_MODE, val | tp->grc_mode);
8113
8114         tg3_switch_clocks(tp);
8115
8116         /* Clear this out for sanity. */
8117         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8118
8119         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8120                               &pci_state_reg);
8121         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
8122             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
8123                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
8124
8125                 if (chiprevid == CHIPREV_ID_5701_A0 ||
8126                     chiprevid == CHIPREV_ID_5701_B0 ||
8127                     chiprevid == CHIPREV_ID_5701_B2 ||
8128                     chiprevid == CHIPREV_ID_5701_B5) {
8129                         void __iomem *sram_base;
8130
8131                         /* Write some dummy words into the SRAM status block
8132                          * area, see if it reads back correctly.  If the return
8133                          * value is bad, force enable the PCIX workaround.
8134                          */
8135                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
8136
8137                         writel(0x00000000, sram_base);
8138                         writel(0x00000000, sram_base + 4);
8139                         writel(0xffffffff, sram_base + 4);
8140                         if (readl(sram_base) != 0x00000000)
8141                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8142                 }
8143         }
8144
8145         udelay(50);
8146         tg3_nvram_init(tp);
8147
8148         grc_misc_cfg = tr32(GRC_MISC_CFG);
8149         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
8150
8151         /* Broadcom's driver says that CIOBE multisplit has a bug */
8152 #if 0
8153         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8154             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
8155                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
8156                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
8157         }
8158 #endif
8159         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8160             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
8161              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
8162                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
8163
8164         /* these are limited to 10/100 only */
8165         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8166              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
8167             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8168              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8169              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
8170               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
8171               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
8172             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8173              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
8174               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
8175                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
8176
8177         err = tg3_phy_probe(tp);
8178         if (err) {
8179                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
8180                        pci_name(tp->pdev), err);
8181                 /* ... but do not return immediately ... */
8182         }
8183
8184         tg3_read_partno(tp);
8185
8186         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8187                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8188         } else {
8189                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8190                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
8191                 else
8192                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8193         }
8194
8195         /* 5700 {AX,BX} chips have a broken status block link
8196          * change bit implementation, so we must use the
8197          * status register in those cases.
8198          */
8199         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8200                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
8201         else
8202                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
8203
8204         /* The led_ctrl is set during tg3_phy_probe; here we might
8205          * have to force the link status polling mechanism based
8206          * upon subsystem IDs.
8207          */
8208         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
8209             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8210                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
8211                                   TG3_FLAG_USE_LINKCHG_REG);
8212         }
8213
8214         /* For all SERDES we poll the MAC status register. */
8215         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8216                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
8217         else
8218                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
8219
8220         /* 5700 BX chips need to have their TX producer index mailboxes
8221          * written twice to workaround a bug.
8222          */
8223         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
8224                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
8225         else
8226                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
8227
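        /* Editor's sketch of the doubled TX producer-index mailbox write
         * chosen above; the driver's mailbox helper (defined near the top
         * of the file) does the equivalent of:
         *
         *      writel(val, mbox);
         *      if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
         *              writel(val, mbox);
         *
         * i.e. on 5700 BX parts the producer index is simply written twice.
         */
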
8228         /* It seems all chips can get confused if TX buffers
8229          * straddle the 4GB address boundary in some cases.
8230          */
8231         tp->dev->hard_start_xmit = tg3_start_xmit;
8232
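        /* Editor's sketch of the 4GB-boundary concern above: a TX buffer is
         * a problem when its DMA mapping starts and ends in different 4GB
         * windows.  The actual handling lives in tg3_start_xmit(); this
         * predicate (illustrative name) only expresses the condition.
         *
         *      static inline int example_crosses_4gb(dma_addr_t map, int len)
         *      {
         *              return ((u64)map >> 32) !=
         *                     ((u64)(map + len - 1) >> 32);
         *      }
         */
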
8233         tp->rx_offset = 2;
8234         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
8235             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
8236                 tp->rx_offset = 0;
8237
8238         /* By default, disable wake-on-lan.  User can change this
8239          * using ETHTOOL_SWOL.
8240          */
8241         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8242
8243         return err;
8244 }
8245
8246 #ifdef CONFIG_SPARC64
8247 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
8248 {
8249         struct net_device *dev = tp->dev;
8250         struct pci_dev *pdev = tp->pdev;
8251         struct pcidev_cookie *pcp = pdev->sysdata;
8252
8253         if (pcp != NULL) {
8254                 int node = pcp->prom_node;
8255
8256                 if (prom_getproplen(node, "local-mac-address") == 6) {
8257                         prom_getproperty(node, "local-mac-address",
8258                                          dev->dev_addr, 6);
8259                         return 0;
8260                 }
8261         }
8262         return -ENODEV;
8263 }
8264
8265 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
8266 {
8267         struct net_device *dev = tp->dev;
8268
8269         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
8270         return 0;
8271 }
8272 #endif
8273
8274 static int __devinit tg3_get_device_address(struct tg3 *tp)
8275 {
8276         struct net_device *dev = tp->dev;
8277         u32 hi, lo, mac_offset;
8278
8279 #ifdef CONFIG_SPARC64
8280         if (!tg3_get_macaddr_sparc(tp))
8281                 return 0;
8282 #endif
8283
8284         mac_offset = 0x7c;
8285         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8286             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
8287                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
8288                         mac_offset = 0xcc;
8289                 if (tg3_nvram_lock(tp))
8290                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
8291                 else
8292                         tg3_nvram_unlock(tp);
8293         }
8294
8295         /* First try to get it from MAC address mailbox. */
8296         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
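        /* 0x484b is ASCII "HK", apparently the signature the boot firmware
         * stores ahead of a valid MAC address in this SRAM mailbox (inferred
         * from the constant; the check below depends on it).
         */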
8297         if ((hi >> 16) == 0x484b) {
8298                 dev->dev_addr[0] = (hi >>  8) & 0xff;
8299                 dev->dev_addr[1] = (hi >>  0) & 0xff;
8300
8301                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
8302                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8303                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8304                 dev->dev_addr[4] = (lo >>  8) & 0xff;
8305                 dev->dev_addr[5] = (lo >>  0) & 0xff;
8306         }
8307         /* Next, try NVRAM. */
8308         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
8309                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
8310                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
8311                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
8312                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
8313                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
8314                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
8315                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
8316                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
8317         }
8318         /* Finally just fetch it out of the MAC control regs. */
8319         else {
8320                 hi = tr32(MAC_ADDR_0_HIGH);
8321                 lo = tr32(MAC_ADDR_0_LOW);
8322
8323                 dev->dev_addr[5] = lo & 0xff;
8324                 dev->dev_addr[4] = (lo >> 8) & 0xff;
8325                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8326                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8327                 dev->dev_addr[1] = hi & 0xff;
8328                 dev->dev_addr[0] = (hi >> 8) & 0xff;
8329         }
8330
8331         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8332 #ifdef CONFIG_SPARC64
8333                 if (!tg3_get_default_macaddr_sparc(tp))
8334                         return 0;
8335 #endif
8336                 return -EINVAL;
8337         }
8338         return 0;
8339 }
8340
8341 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
8342 {
8343         struct tg3_internal_buffer_desc test_desc;
8344         u32 sram_dma_descs;
8345         int i, ret;
8346
8347         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
8348
8349         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
8350         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
8351         tw32(RDMAC_STATUS, 0);
8352         tw32(WDMAC_STATUS, 0);
8353
8354         tw32(BUFMGR_MODE, 0);
8355         tw32(FTQ_RESET, 0);
8356
8357         test_desc.addr_hi = ((u64) buf_dma) >> 32;
8358         test_desc.addr_lo = buf_dma & 0xffffffff;
8359         test_desc.nic_mbuf = 0x00002100;
8360         test_desc.len = size;
8361
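        /* nic_mbuf 0x00002100 is the NIC SRAM offset the test DMA targets;
         * the (disabled) validation loop further down reads the data back
         * from 0x2100 via tg3_read_mem().
         */
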
8362         /*
8363          * HP ZX1 systems were seeing test failures for 5701 cards running
8364          * at 33MHz the *second* time the tg3 driver was loaded after an
8365          * initial scan.
8366          *
8367          * Broadcom tells me:
8368          *   ...the DMA engine is connected to the GRC block and a DMA
8369          *   reset may affect the GRC block in some unpredictable way...
8370          *   The behavior of resets to individual blocks has not been tested.
8371          *
8372          * Broadcom noted the GRC reset will also reset all sub-components.
8373          */
8374         if (to_device) {
8375                 test_desc.cqid_sqid = (13 << 8) | 2;
8376
8377                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
8378                 udelay(40);
8379         } else {
8380                 test_desc.cqid_sqid = (16 << 8) | 7;
8381
8382                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
8383                 udelay(40);
8384         }
8385         test_desc.flags = 0x00000005;
8386
8387         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
8388                 u32 val;
8389
8390                 val = *(((u32 *)&test_desc) + i);
8391                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
8392                                        sram_dma_descs + (i * sizeof(u32)));
8393                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
8394         }
8395         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
8396
8397         if (to_device) {
8398                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
8399         } else {
8400                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
8401         }
8402
8403         ret = -ENODEV;
8404         for (i = 0; i < 40; i++) {
8405                 u32 val;
8406
8407                 if (to_device)
8408                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
8409                 else
8410                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
8411                 if ((val & 0xffff) == sram_dma_descs) {
8412                         ret = 0;
8413                         break;
8414                 }
8415
8416                 udelay(100);
8417         }
8418
8419         return ret;
8420 }
8421
8422 #define TEST_BUFFER_SIZE        0x400
8423
8424 static int __devinit tg3_test_dma(struct tg3 *tp)
8425 {
8426         dma_addr_t buf_dma;
8427         u32 *buf;
8428         int ret;
8429
8430         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
8431         if (!buf) {
8432                 ret = -ENOMEM;
8433                 goto out_nofree;
8434         }
8435
8436         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
8437                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
8438
8439 #ifndef CONFIG_X86
8440         {
8441                 u8 byte;
8442                 int cacheline_size;
8443                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8444
8445                 if (byte == 0)
8446                         cacheline_size = 1024;
8447                 else
8448                         cacheline_size = (int) byte * 4;
8449
8450                 switch (cacheline_size) {
8451                 case 16:
8452                 case 32:
8453                 case 64:
8454                 case 128:
8455                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8456                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8457                                 tp->dma_rwctrl |=
8458                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
8459                                 break;
8460                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8461                                 tp->dma_rwctrl &=
8462                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
8463                                 tp->dma_rwctrl |=
8464                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8465                                 break;
8466                         }
8467                         /* fallthrough */
8468                 case 256:
8469                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8470                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8471                                 tp->dma_rwctrl |=
8472                                         DMA_RWCTRL_WRITE_BNDRY_256;
8473                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8474                                 tp->dma_rwctrl |=
8475                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
8476                 };
8477         }
8478 #endif
8479
8480         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8481                 /* DMA read watermark not used on PCIE */
8482                 tp->dma_rwctrl |= 0x00180000;
8483         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
8484                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8485                         tp->dma_rwctrl |= 0x003f0000;
8486                 else
8487                         tp->dma_rwctrl |= 0x003f000f;
8488         } else {
8489                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8490                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8491                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
8492
8493                         if (ccval == 0x6 || ccval == 0x7)
8494                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
8495
8496                         /* Set bit 23 to re-enable the PCIX hw bug fix */
8497                         tp->dma_rwctrl |= 0x009f0000;
8498                 } else {
8499                         tp->dma_rwctrl |= 0x001b000f;
8500                 }
8501         }
8502
8503         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8504             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8505                 tp->dma_rwctrl &= 0xfffffff0;
8506
8507         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8508             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
8509                 /* Remove this if it causes problems for some boards. */
8510                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
8511
8512                 /* On 5700/5701 chips, we need to set this bit.
8513                  * Otherwise the chip will issue cacheline transactions
8514                  * to streamable DMA memory without all of the byte
8515                  * enables turned on.  This is an error on several
8516                  * RISC PCI controllers, in particular sparc64.
8517                  *
8518                  * On 5703/5704 chips, this bit has been reassigned
8519                  * a different meaning.  In particular, it is used
8520                  * on those chips to enable a PCI-X workaround.
8521                  */
8522                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
8523         }
8524
8525         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8526
8527 #if 0
8528         /* Unneeded, already done by tg3_get_invariants.  */
8529         tg3_switch_clocks(tp);
8530 #endif
8531
8532         ret = 0;
8533         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8534             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
8535                 goto out;
8536
8537         while (1) {
8538                 u32 *p = buf, i;
8539
8540                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
8541                         p[i] = i;
8542
8543                 /* Send the buffer to the chip. */
8544                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
8545                 if (ret) {
8546                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
8547                         break;
8548                 }
8549
8550 #if 0
8551                 /* validate data reached card RAM correctly. */
8552                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8553                         u32 val;
8554                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
8555                         if (le32_to_cpu(val) != p[i]) {
8556                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
8557                                 /* ret = -ENODEV here? */
8558                         }
8559                         p[i] = 0;
8560                 }
8561 #endif
8562                 /* Now read it back. */
8563                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
8564                 if (ret) {
8565                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
8566
8567                         break;
8568                 }
8569
8570                 /* Verify it. */
8571                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8572                         if (p[i] == i)
8573                                 continue;
8574
8575                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
8576                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
8577                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8578                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8579                                 break;
8580                         } else {
8581                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
8582                                 ret = -ENODEV;
8583                                 goto out;
8584                         }
8585                 }
8586
8587                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8588                         /* Success. */
8589                         ret = 0;
8590                         break;
8591                 }
8592         }
8593
8594 out:
8595         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
8596 out_nofree:
8597         return ret;
8598 }
8599
8600 static void __devinit tg3_init_link_config(struct tg3 *tp)
8601 {
8602         tp->link_config.advertising =
8603                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8604                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8605                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8606                  ADVERTISED_Autoneg | ADVERTISED_MII);
8607         tp->link_config.speed = SPEED_INVALID;
8608         tp->link_config.duplex = DUPLEX_INVALID;
8609         tp->link_config.autoneg = AUTONEG_ENABLE;
8610         netif_carrier_off(tp->dev);
8611         tp->link_config.active_speed = SPEED_INVALID;
8612         tp->link_config.active_duplex = DUPLEX_INVALID;
8613         tp->link_config.phy_is_low_power = 0;
8614         tp->link_config.orig_speed = SPEED_INVALID;
8615         tp->link_config.orig_duplex = DUPLEX_INVALID;
8616         tp->link_config.orig_autoneg = AUTONEG_INVALID;
8617 }
8618
8619 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8620 {
8621         tp->bufmgr_config.mbuf_read_dma_low_water =
8622                 DEFAULT_MB_RDMA_LOW_WATER;
8623         tp->bufmgr_config.mbuf_mac_rx_low_water =
8624                 DEFAULT_MB_MACRX_LOW_WATER;
8625         tp->bufmgr_config.mbuf_high_water =
8626                 DEFAULT_MB_HIGH_WATER;
8627
8628         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8629                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8630         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8631                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8632         tp->bufmgr_config.mbuf_high_water_jumbo =
8633                 DEFAULT_MB_HIGH_WATER_JUMBO;
8634
8635         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8636         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8637 }
8638
8639 static char * __devinit tg3_phy_string(struct tg3 *tp)
8640 {
8641         switch (tp->phy_id & PHY_ID_MASK) {
8642         case PHY_ID_BCM5400:    return "5400";
8643         case PHY_ID_BCM5401:    return "5401";
8644         case PHY_ID_BCM5411:    return "5411";
8645         case PHY_ID_BCM5701:    return "5701";
8646         case PHY_ID_BCM5703:    return "5703";
8647         case PHY_ID_BCM5704:    return "5704";
8648         case PHY_ID_BCM5705:    return "5705";
8649         case PHY_ID_BCM5750:    return "5750";
8650         case PHY_ID_BCM8002:    return "8002/serdes";
8651         case 0:                 return "serdes";
8652         default:                return "unknown";
8653         };
8654 }
8655
8656 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8657 {
8658         struct pci_dev *peer;
8659         unsigned int func, devnr = tp->pdev->devfn & ~7;
8660
8661         for (func = 0; func < 8; func++) {
8662                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8663                 if (peer && peer != tp->pdev)
8664                         break;
8665                 pci_dev_put(peer);
8666         }
8667         if (!peer || peer == tp->pdev)
8668                 BUG();
8669
8670         /*
8671          * We don't need to keep the refcount elevated; there's no way
8672          * to remove one half of this device without removing the other
8673          */
8674         pci_dev_put(peer);
8675
8676         return peer;
8677 }
8678
8679 static int __devinit tg3_init_one(struct pci_dev *pdev,
8680                                   const struct pci_device_id *ent)
8681 {
8682         static int tg3_version_printed = 0;
8683         unsigned long tg3reg_base, tg3reg_len;
8684         struct net_device *dev;
8685         struct tg3 *tp;
8686         int i, err, pci_using_dac, pm_cap;
8687
8688         if (tg3_version_printed++ == 0)
8689                 printk(KERN_INFO "%s", version);
8690
8691         err = pci_enable_device(pdev);
8692         if (err) {
8693                 printk(KERN_ERR PFX "Cannot enable PCI device, "
8694                        "aborting.\n");
8695                 return err;
8696         }
8697
8698         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8699                 printk(KERN_ERR PFX "Cannot find proper PCI device "
8700                        "base address, aborting.\n");
8701                 err = -ENODEV;
8702                 goto err_out_disable_pdev;
8703         }
8704
8705         err = pci_request_regions(pdev, DRV_MODULE_NAME);
8706         if (err) {
8707                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8708                        "aborting.\n");
8709                 goto err_out_disable_pdev;
8710         }
8711
8712         pci_set_master(pdev);
8713
8714         /* Find power-management capability. */
8715         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8716         if (pm_cap == 0) {
8717                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8718                        "aborting.\n");
8719                 err = -EIO;
8720                 goto err_out_free_res;
8721         }
8722
8723         /* Configure DMA attributes. */
8724         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8725         if (!err) {
8726                 pci_using_dac = 1;
8727                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8728                 if (err < 0) {
8729                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8730                                "for consistent allocations\n");
8731                         goto err_out_free_res;
8732                 }
8733         } else {
8734                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8735                 if (err) {
8736                         printk(KERN_ERR PFX "No usable DMA configuration, "
8737                                "aborting.\n");
8738                         goto err_out_free_res;
8739                 }
8740                 pci_using_dac = 0;
8741         }
8742
8743         tg3reg_base = pci_resource_start(pdev, 0);
8744         tg3reg_len = pci_resource_len(pdev, 0);
8745
8746         dev = alloc_etherdev(sizeof(*tp));
8747         if (!dev) {
8748                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8749                 err = -ENOMEM;
8750                 goto err_out_free_res;
8751         }
8752
8753         SET_MODULE_OWNER(dev);
8754         SET_NETDEV_DEV(dev, &pdev->dev);
8755
8756         if (pci_using_dac)
8757                 dev->features |= NETIF_F_HIGHDMA;
8758         dev->features |= NETIF_F_LLTX;
8759 #if TG3_VLAN_TAG_USED
8760         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8761         dev->vlan_rx_register = tg3_vlan_rx_register;
8762         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8763 #endif
8764
8765         tp = netdev_priv(dev);
8766         tp->pdev = pdev;
8767         tp->dev = dev;
8768         tp->pm_cap = pm_cap;
8769         tp->mac_mode = TG3_DEF_MAC_MODE;
8770         tp->rx_mode = TG3_DEF_RX_MODE;
8771         tp->tx_mode = TG3_DEF_TX_MODE;
8772         tp->mi_mode = MAC_MI_MODE_BASE;
8773         if (tg3_debug > 0)
8774                 tp->msg_enable = tg3_debug;
8775         else
8776                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8777
8778         /* The word/byte swap controls here control register access byte
8779          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
8780          * setting below.
8781          */
8782         tp->misc_host_ctrl =
8783                 MISC_HOST_CTRL_MASK_PCI_INT |
8784                 MISC_HOST_CTRL_WORD_SWAP |
8785                 MISC_HOST_CTRL_INDIR_ACCESS |
8786                 MISC_HOST_CTRL_PCISTATE_RW;
8787
8788         /* The NONFRM (non-frame) byte/word swap controls take effect
8789          * on descriptor entries, anything which isn't packet data.
8790          *
8791          * The StrongARM chips on the board (one for tx, one for rx)
8792          * are running in big-endian mode.
8793          */
8794         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8795                         GRC_MODE_WSWAP_NONFRM_DATA);
8796 #ifdef __BIG_ENDIAN
8797         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8798 #endif
8799         spin_lock_init(&tp->lock);
8800         spin_lock_init(&tp->tx_lock);
8801         spin_lock_init(&tp->indirect_lock);
8802         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8803
8804         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
8805         if (tp->regs == 0UL) {
8806                 printk(KERN_ERR PFX "Cannot map device registers, "
8807                        "aborting.\n");
8808                 err = -ENOMEM;
8809                 goto err_out_free_dev;
8810         }
8811
8812         tg3_init_link_config(tp);
8813
8814         tg3_init_bufmgr_config(tp);
8815
8816         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8817         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8818         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
8819
8820         dev->open = tg3_open;
8821         dev->stop = tg3_close;
8822         dev->get_stats = tg3_get_stats;
8823         dev->set_multicast_list = tg3_set_rx_mode;
8824         dev->set_mac_address = tg3_set_mac_addr;
8825         dev->do_ioctl = tg3_ioctl;
8826         dev->tx_timeout = tg3_tx_timeout;
8827         dev->poll = tg3_poll;
8828         dev->ethtool_ops = &tg3_ethtool_ops;
8829         dev->weight = 64;
8830         dev->watchdog_timeo = TG3_TX_TIMEOUT;
8831         dev->change_mtu = tg3_change_mtu;
8832         dev->irq = pdev->irq;
8833 #ifdef CONFIG_NET_POLL_CONTROLLER
8834         dev->poll_controller = tg3_poll_controller;
8835 #endif
8836
8837         err = tg3_get_invariants(tp);
8838         if (err) {
8839                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8840                        "aborting.\n");
8841                 goto err_out_iounmap;
8842         }
8843
8844         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8845                 tp->bufmgr_config.mbuf_read_dma_low_water =
8846                         DEFAULT_MB_RDMA_LOW_WATER_5705;
8847                 tp->bufmgr_config.mbuf_mac_rx_low_water =
8848                         DEFAULT_MB_MACRX_LOW_WATER_5705;
8849                 tp->bufmgr_config.mbuf_high_water =
8850                         DEFAULT_MB_HIGH_WATER_5705;
8851         }
8852
8853 #if TG3_TSO_SUPPORT != 0
8854         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
8855                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8856         }
8857         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8858             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8859             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8860             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
8861                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8862         } else {
8863                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8864         }
8865
8866         /* TSO is off by default, user can enable using ethtool.  */
8867 #if 0
8868         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8869                 dev->features |= NETIF_F_TSO;
8870 #endif
8871
8872 #endif
8873
8874         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8875             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8876             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8877                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8878                 tp->rx_pending = 63;
8879         }
8880
8881         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8882                 tp->pdev_peer = tg3_find_5704_peer(tp);
8883
8884         err = tg3_get_device_address(tp);
8885         if (err) {
8886                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8887                        "aborting.\n");
8888                 goto err_out_iounmap;
8889         }
8890
8891         /*
8892          * Reset the chip in case a UNDI or EFI driver did not shut it
8893          * down cleanly; otherwise the DMA self test will enable WDMAC
8894          * and we'll see (spurious) pending DMA on the PCI bus at that point.
8895          */
8896         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8897             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8898                 pci_save_state(tp->pdev);
8899                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8900                 tg3_halt(tp);
8901         }
8902
8903         err = tg3_test_dma(tp);
8904         if (err) {
8905                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8906                 goto err_out_iounmap;
8907         }
8908
8909         /* Tigon3 can do ipv4 only... and some chips have buggy
8910          * checksumming.
8911          */
8912         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8913                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8914                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8915         } else
8916                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8917
8918         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8919                 dev->features &= ~NETIF_F_HIGHDMA;
8920
8921         /* flow control autonegotiation is default behavior */
8922         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8923
8924         err = register_netdev(dev);
8925         if (err) {
8926                 printk(KERN_ERR PFX "Cannot register net device, "
8927                        "aborting.\n");
8928                 goto err_out_iounmap;
8929         }
8930
8931         pci_set_drvdata(pdev, dev);
8932
8933         /* Now that we have fully setup the chip, save away a snapshot
8934          * of the PCI config space.  We need to restore this after
8935          * GRC_MISC_CFG core clock resets and some resume events.
8936          */
8937         pci_save_state(tp->pdev);
8938
8939         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8940                dev->name,
8941                tp->board_part_number,
8942                tp->pci_chip_rev_id,
8943                tg3_phy_string(tp),
8944                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8945                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8946                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8947                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8948                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8949                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8950
8951         for (i = 0; i < 6; i++)
8952                 printk("%2.2x%c", dev->dev_addr[i],
8953                        i == 5 ? '\n' : ':');
8954
8955         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
8956                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8957                "TSOcap[%d] \n",
8958                dev->name,
8959                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8960                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8961                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8962                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8963                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8964                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8965                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
8966
8967         return 0;
8968
8969 err_out_iounmap:
8970         iounmap(tp->regs);
8971
8972 err_out_free_dev:
8973         free_netdev(dev);
8974
8975 err_out_free_res:
8976         pci_release_regions(pdev);
8977
8978 err_out_disable_pdev:
8979         pci_disable_device(pdev);
8980         pci_set_drvdata(pdev, NULL);
8981         return err;
8982 }
8983
8984 static void __devexit tg3_remove_one(struct pci_dev *pdev)
8985 {
8986         struct net_device *dev = pci_get_drvdata(pdev);
8987
8988         if (dev) {
8989                 struct tg3 *tp = netdev_priv(dev);
8990
8991                 unregister_netdev(dev);
8992                 iounmap(tp->regs);
8993                 free_netdev(dev);
8994                 pci_release_regions(pdev);
8995                 pci_disable_device(pdev);
8996                 pci_set_drvdata(pdev, NULL);
8997         }
8998 }
8999
9000 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
9001 {
9002         struct net_device *dev = pci_get_drvdata(pdev);
9003         struct tg3 *tp = netdev_priv(dev);
9004         int err;
9005
9006         if (!netif_running(dev))
9007                 return 0;
9008
9009         tg3_netif_stop(tp);
9010
9011         del_timer_sync(&tp->timer);
9012
9013         spin_lock_irq(&tp->lock);
9014         spin_lock(&tp->tx_lock);
9015         tg3_disable_ints(tp);
9016         spin_unlock(&tp->tx_lock);
9017         spin_unlock_irq(&tp->lock);
9018
9019         netif_device_detach(dev);
9020
9021         spin_lock_irq(&tp->lock);
9022         spin_lock(&tp->tx_lock);
9023         tg3_halt(tp);
9024         spin_unlock(&tp->tx_lock);
9025         spin_unlock_irq(&tp->lock);
9026
9027         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
9028         if (err) {
9029                 spin_lock_irq(&tp->lock);
9030                 spin_lock(&tp->tx_lock);
9031
9032                 tg3_init_hw(tp);
9033
9034                 tp->timer.expires = jiffies + tp->timer_offset;
9035                 add_timer(&tp->timer);
9036
9037                 netif_device_attach(dev);
9038                 tg3_netif_start(tp);
9039
9040                 spin_unlock(&tp->tx_lock);
9041                 spin_unlock_irq(&tp->lock);
9042         }
9043
9044         return err;
9045 }
9046
9047 static int tg3_resume(struct pci_dev *pdev)
9048 {
9049         struct net_device *dev = pci_get_drvdata(pdev);
9050         struct tg3 *tp = netdev_priv(dev);
9051         int err;
9052
9053         if (!netif_running(dev))
9054                 return 0;
9055
9056         pci_restore_state(tp->pdev);
9057
9058         err = tg3_set_power_state(tp, 0);
9059         if (err)
9060                 return err;
9061
9062         netif_device_attach(dev);
9063
9064         spin_lock_irq(&tp->lock);
9065         spin_lock(&tp->tx_lock);
9066
9067         tg3_init_hw(tp);
9068
9069         tp->timer.expires = jiffies + tp->timer_offset;
9070         add_timer(&tp->timer);
9071
9072         tg3_enable_ints(tp);
9073
9074         tg3_netif_start(tp);
9075
9076         spin_unlock(&tp->tx_lock);
9077         spin_unlock_irq(&tp->lock);
9078
9079         return 0;
9080 }
9081
9082 static struct pci_driver tg3_driver = {
9083         .name           = DRV_MODULE_NAME,
9084         .id_table       = tg3_pci_tbl,
9085         .probe          = tg3_init_one,
9086         .remove         = __devexit_p(tg3_remove_one),
9087         .suspend        = tg3_suspend,
9088         .resume         = tg3_resume
9089 };
9090
9091 static int __init tg3_init(void)
9092 {
9093         return pci_module_init(&tg3_driver);
9094 }
9095
9096 static void __exit tg3_cleanup(void)
9097 {
9098         pci_unregister_driver(&tg3_driver);
9099 }
9100
9101 module_init(tg3_init);
9102 module_exit(tg3_cleanup);