[TG3]: Fix the polarity bit.
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
65 #define DRV_MODULE_NAME         "tg3"
66 #define PFX DRV_MODULE_NAME     ": "
67 #define DRV_MODULE_VERSION      "3.77"
68 #define DRV_MODULE_RELDATE      "May 31, 2007"
69
70 #define TG3_DEF_MAC_MODE        0
71 #define TG3_DEF_RX_MODE         0
72 #define TG3_DEF_TX_MODE         0
73 #define TG3_DEF_MSG_ENABLE        \
74         (NETIF_MSG_DRV          | \
75          NETIF_MSG_PROBE        | \
76          NETIF_MSG_LINK         | \
77          NETIF_MSG_TIMER        | \
78          NETIF_MSG_IFDOWN       | \
79          NETIF_MSG_IFUP         | \
80          NETIF_MSG_RX_ERR       | \
81          NETIF_MSG_TX_ERR)
82
83 /* length of time before we decide the hardware is borked,
84  * and dev->tx_timeout() should be called to fix the problem
85  */
86 #define TG3_TX_TIMEOUT                  (5 * HZ)
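/* (5 * HZ) is five seconds' worth of jiffies, independent of the configured
 * HZ tick rate.
 */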
87
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU                     60
90 #define TG3_MAX_MTU(tp) \
91         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
92
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94  * You can't change the ring sizes, but you can change where you place
95  * them in the NIC onboard memory.
96  */
97 #define TG3_RX_RING_SIZE                512
98 #define TG3_DEF_RX_RING_PENDING         200
99 #define TG3_RX_JUMBO_RING_SIZE          256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
101
102 /* Do not place this n-ring entries value into the tp struct itself,
103  * we really want to expose these constants to GCC so that modulo et
104  * al.  operations are done with shifts and masks instead of with
105  * hw multiply/modulo instructions.  Another solution would be to
106  * replace things like '% foo' with '& (foo - 1)'.
107  */
108 #define TG3_RX_RCB_RING_SIZE(tp)        \
109         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
110
111 #define TG3_TX_RING_SIZE                512
112 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
113
114 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
115                                  TG3_RX_RING_SIZE)
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119                                    TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
121                                  TG3_TX_RING_SIZE)
122 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
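/* Because TG3_TX_RING_SIZE (512) is a power of two, the AND above behaves
 * like a modulo: for example NEXT_TX(511) == (512 & 511) == 0, wrapping back
 * to the start of the ring without a divide instruction.
 */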
123
124 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
126
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
129
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
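/* The ethtool statistics block is a sequence of u64 counters, so dividing
 * its size by sizeof(u64) yields the number of entries; the
 * ethtool_stats_keys[] table below is sized to match.
 */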
132
133 #define TG3_NUM_TEST            6
134
135 static char version[] __devinitdata =
136         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140 MODULE_LICENSE("GPL");
141 MODULE_VERSION(DRV_MODULE_VERSION);
142
143 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
144 module_param(tg3_debug, int, 0);
145 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147 static struct pci_device_id tg3_pci_tbl[] = {
148         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
202         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
203         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
204         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
205         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
206         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
207         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
208         {}
209 };
210
211 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
212
213 static const struct {
214         const char string[ETH_GSTRING_LEN];
215 } ethtool_stats_keys[TG3_NUM_STATS] = {
216         { "rx_octets" },
217         { "rx_fragments" },
218         { "rx_ucast_packets" },
219         { "rx_mcast_packets" },
220         { "rx_bcast_packets" },
221         { "rx_fcs_errors" },
222         { "rx_align_errors" },
223         { "rx_xon_pause_rcvd" },
224         { "rx_xoff_pause_rcvd" },
225         { "rx_mac_ctrl_rcvd" },
226         { "rx_xoff_entered" },
227         { "rx_frame_too_long_errors" },
228         { "rx_jabbers" },
229         { "rx_undersize_packets" },
230         { "rx_in_length_errors" },
231         { "rx_out_length_errors" },
232         { "rx_64_or_less_octet_packets" },
233         { "rx_65_to_127_octet_packets" },
234         { "rx_128_to_255_octet_packets" },
235         { "rx_256_to_511_octet_packets" },
236         { "rx_512_to_1023_octet_packets" },
237         { "rx_1024_to_1522_octet_packets" },
238         { "rx_1523_to_2047_octet_packets" },
239         { "rx_2048_to_4095_octet_packets" },
240         { "rx_4096_to_8191_octet_packets" },
241         { "rx_8192_to_9022_octet_packets" },
242
243         { "tx_octets" },
244         { "tx_collisions" },
245
246         { "tx_xon_sent" },
247         { "tx_xoff_sent" },
248         { "tx_flow_control" },
249         { "tx_mac_errors" },
250         { "tx_single_collisions" },
251         { "tx_mult_collisions" },
252         { "tx_deferred" },
253         { "tx_excessive_collisions" },
254         { "tx_late_collisions" },
255         { "tx_collide_2times" },
256         { "tx_collide_3times" },
257         { "tx_collide_4times" },
258         { "tx_collide_5times" },
259         { "tx_collide_6times" },
260         { "tx_collide_7times" },
261         { "tx_collide_8times" },
262         { "tx_collide_9times" },
263         { "tx_collide_10times" },
264         { "tx_collide_11times" },
265         { "tx_collide_12times" },
266         { "tx_collide_13times" },
267         { "tx_collide_14times" },
268         { "tx_collide_15times" },
269         { "tx_ucast_packets" },
270         { "tx_mcast_packets" },
271         { "tx_bcast_packets" },
272         { "tx_carrier_sense_errors" },
273         { "tx_discards" },
274         { "tx_errors" },
275
276         { "dma_writeq_full" },
277         { "dma_write_prioq_full" },
278         { "rxbds_empty" },
279         { "rx_discards" },
280         { "rx_errors" },
281         { "rx_threshold_hit" },
282
283         { "dma_readq_full" },
284         { "dma_read_prioq_full" },
285         { "tx_comp_queue_full" },
286
287         { "ring_set_send_prod_index" },
288         { "ring_status_update" },
289         { "nic_irqs" },
290         { "nic_avoided_irqs" },
291         { "nic_tx_threshold_hit" }
292 };
293
294 static const struct {
295         const char string[ETH_GSTRING_LEN];
296 } ethtool_test_keys[TG3_NUM_TEST] = {
297         { "nvram test     (online) " },
298         { "link test      (online) " },
299         { "register test  (offline)" },
300         { "memory test    (offline)" },
301         { "loopback test  (offline)" },
302         { "interrupt test (offline)" },
303 };
304
305 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
306 {
307         writel(val, tp->regs + off);
308 }
309
310 static u32 tg3_read32(struct tg3 *tp, u32 off)
311 {
312         return (readl(tp->regs + off));
313 }
314
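/* Indirect register access: the target offset is written to the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the data is then
 * moved through TG3PCI_REG_DATA, with indirect_lock held so the two
 * config-space accesses stay paired.
 */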
315 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
316 {
317         unsigned long flags;
318
319         spin_lock_irqsave(&tp->indirect_lock, flags);
320         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
321         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
322         spin_unlock_irqrestore(&tp->indirect_lock, flags);
323 }
324
325 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
326 {
327         writel(val, tp->regs + off);
328         readl(tp->regs + off);
329 }
330
331 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
332 {
333         unsigned long flags;
334         u32 val;
335
336         spin_lock_irqsave(&tp->indirect_lock, flags);
337         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
338         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
339         spin_unlock_irqrestore(&tp->indirect_lock, flags);
340         return val;
341 }
342
343 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
344 {
345         unsigned long flags;
346
347         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
348                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
349                                        TG3_64BIT_REG_LOW, val);
350                 return;
351         }
352         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
353                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
354                                        TG3_64BIT_REG_LOW, val);
355                 return;
356         }
357
358         spin_lock_irqsave(&tp->indirect_lock, flags);
359         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
360         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
361         spin_unlock_irqrestore(&tp->indirect_lock, flags);
362
363         /* In indirect mode when disabling interrupts, we also need
364          * to clear the interrupt bit in the GRC local ctrl register.
365          */
366         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
367             (val == 0x1)) {
368                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
369                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
370         }
371 }
372
373 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
374 {
375         unsigned long flags;
376         u32 val;
377
378         spin_lock_irqsave(&tp->indirect_lock, flags);
379         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
380         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
381         spin_unlock_irqrestore(&tp->indirect_lock, flags);
382         return val;
383 }
384
385 /* usec_wait specifies the wait time in usec when writing to certain registers
386  * where it is unsafe to read back the register without some delay.
387  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
388  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
389  */
390 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
391 {
392         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
393             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
394                 /* Non-posted methods */
395                 tp->write32(tp, off, val);
396         else {
397                 /* Posted method */
398                 tg3_write32(tp, off, val);
399                 if (usec_wait)
400                         udelay(usec_wait);
401                 tp->read32(tp, off);
402         }
403         /* Wait again after the read for the posted method to guarantee that
404          * the wait time is met.
405          */
406         if (usec_wait)
407                 udelay(usec_wait);
408 }
409
410 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
411 {
412         tp->write32_mbox(tp, off, val);
413         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
414             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
415                 tp->read32_mbox(tp, off);
416 }
417
418 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
419 {
420         void __iomem *mbox = tp->regs + off;
421         writel(val, mbox);
422         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
423                 writel(val, mbox);
424         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
425                 readl(mbox);
426 }
427
428 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
429 {
430         return (readl(tp->regs + off + GRCMBOX_BASE));
431 }
432
433 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
434 {
435         writel(val, tp->regs + off + GRCMBOX_BASE);
436 }
437
438 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
439 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
440 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
441 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
442 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
443
444 #define tw32(reg,val)           tp->write32(tp, reg, val)
445 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
446 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
447 #define tr32(reg)               tp->read32(tp, reg)
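/* These helpers all rely on a 'struct tg3 *tp' being in scope at the call
 * site.  For example, tw32_f(MAC_MODE, mac_mode) expands to
 * _tw32_flush(tp, MAC_MODE, mac_mode, 0), which writes the register and
 * flushes it with a read-back, while tw32_wait_f() additionally passes a
 * usec delay that _tw32_flush() applies around the read-back.
 */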
448
449 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
450 {
451         unsigned long flags;
452
453         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
454             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
455                 return;
456
457         spin_lock_irqsave(&tp->indirect_lock, flags);
458         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
459                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
460                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
461
462                 /* Always leave this as zero. */
463                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
464         } else {
465                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
466                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
467
468                 /* Always leave this as zero. */
469                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
470         }
471         spin_unlock_irqrestore(&tp->indirect_lock, flags);
472 }
473
474 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
475 {
476         unsigned long flags;
477
478         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
479             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
480                 *val = 0;
481                 return;
482         }
483
484         spin_lock_irqsave(&tp->indirect_lock, flags);
485         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
486                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
487                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
488
489                 /* Always leave this as zero. */
490                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
491         } else {
492                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
493                 *val = tr32(TG3PCI_MEM_WIN_DATA);
494
495                 /* Always leave this as zero. */
496                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
497         }
498         spin_unlock_irqrestore(&tp->indirect_lock, flags);
499 }
500
501 static void tg3_disable_ints(struct tg3 *tp)
502 {
503         tw32(TG3PCI_MISC_HOST_CTRL,
504              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
505         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
506 }
507
508 static inline void tg3_cond_int(struct tg3 *tp)
509 {
510         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
511             (tp->hw_status->status & SD_STATUS_UPDATED))
512                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
513         else
514                 tw32(HOSTCC_MODE, tp->coalesce_mode |
515                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
516 }
517
518 static void tg3_enable_ints(struct tg3 *tp)
519 {
520         tp->irq_sync = 0;
521         wmb();
522
523         tw32(TG3PCI_MISC_HOST_CTRL,
524              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
525         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
526                        (tp->last_tag << 24));
527         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
528                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
529                                (tp->last_tag << 24));
530         tg3_cond_int(tp);
531 }
532
533 static inline unsigned int tg3_has_work(struct tg3 *tp)
534 {
535         struct tg3_hw_status *sblk = tp->hw_status;
536         unsigned int work_exists = 0;
537
538         /* check for phy events */
539         if (!(tp->tg3_flags &
540               (TG3_FLAG_USE_LINKCHG_REG |
541                TG3_FLAG_POLL_SERDES))) {
542                 if (sblk->status & SD_STATUS_LINK_CHG)
543                         work_exists = 1;
544         }
545         /* check for RX/TX work to do */
546         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
547             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
548                 work_exists = 1;
549
550         return work_exists;
551 }
552
553 /* tg3_restart_ints
554  *  similar to tg3_enable_ints, but it accurately determines whether there
555  *  is new work pending and can return without flushing the PIO write
556  *  which reenables interrupts
557  */
558 static void tg3_restart_ints(struct tg3 *tp)
559 {
560         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
561                      tp->last_tag << 24);
562         mmiowb();
563
564         /* When doing tagged status, this work check is unnecessary.
565          * The last_tag we write above tells the chip which piece of
566          * work we've completed.
567          */
568         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
569             tg3_has_work(tp))
570                 tw32(HOSTCC_MODE, tp->coalesce_mode |
571                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
572 }
573
574 static inline void tg3_netif_stop(struct tg3 *tp)
575 {
576         tp->dev->trans_start = jiffies; /* prevent tx timeout */
577         netif_poll_disable(tp->dev);
578         netif_tx_disable(tp->dev);
579 }
580
581 static inline void tg3_netif_start(struct tg3 *tp)
582 {
583         netif_wake_queue(tp->dev);
584         /* NOTE: unconditional netif_wake_queue is only appropriate
585          * so long as all callers are assured to have free tx slots
586          * (such as after tg3_init_hw)
587          */
588         netif_poll_enable(tp->dev);
589         tp->hw_status->status |= SD_STATUS_UPDATED;
590         tg3_enable_ints(tp);
591 }
592
593 static void tg3_switch_clocks(struct tg3 *tp)
594 {
595         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
596         u32 orig_clock_ctrl;
597
598         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
599                 return;
600
601         orig_clock_ctrl = clock_ctrl;
602         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
603                        CLOCK_CTRL_CLKRUN_OENABLE |
604                        0x1f);
605         tp->pci_clock_ctrl = clock_ctrl;
606
607         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
608                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
609                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
610                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
611                 }
612         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
613                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
614                             clock_ctrl |
615                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
616                             40);
617                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
618                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
619                             40);
620         }
621         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
622 }
623
624 #define PHY_BUSY_LOOPS  5000
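/* tg3_readphy()/tg3_writephy() below poll MI_COM_BUSY with a 10 usec delay
 * per iteration, so this limit bounds a single MDIO transaction to roughly
 * 5000 * 10 usec = 50 ms before -EBUSY is returned.
 */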
625
626 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
627 {
628         u32 frame_val;
629         unsigned int loops;
630         int ret;
631
632         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
633                 tw32_f(MAC_MI_MODE,
634                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
635                 udelay(80);
636         }
637
638         *val = 0x0;
639
640         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
641                       MI_COM_PHY_ADDR_MASK);
642         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
643                       MI_COM_REG_ADDR_MASK);
644         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
645
646         tw32_f(MAC_MI_COM, frame_val);
647
648         loops = PHY_BUSY_LOOPS;
649         while (loops != 0) {
650                 udelay(10);
651                 frame_val = tr32(MAC_MI_COM);
652
653                 if ((frame_val & MI_COM_BUSY) == 0) {
654                         udelay(5);
655                         frame_val = tr32(MAC_MI_COM);
656                         break;
657                 }
658                 loops -= 1;
659         }
660
661         ret = -EBUSY;
662         if (loops != 0) {
663                 *val = frame_val & MI_COM_DATA_MASK;
664                 ret = 0;
665         }
666
667         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
668                 tw32_f(MAC_MI_MODE, tp->mi_mode);
669                 udelay(80);
670         }
671
672         return ret;
673 }
674
675 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
676 {
677         u32 frame_val;
678         unsigned int loops;
679         int ret;
680
681         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
682             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
683                 return 0;
684
685         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
686                 tw32_f(MAC_MI_MODE,
687                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
688                 udelay(80);
689         }
690
691         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
692                       MI_COM_PHY_ADDR_MASK);
693         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
694                       MI_COM_REG_ADDR_MASK);
695         frame_val |= (val & MI_COM_DATA_MASK);
696         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
697
698         tw32_f(MAC_MI_COM, frame_val);
699
700         loops = PHY_BUSY_LOOPS;
701         while (loops != 0) {
702                 udelay(10);
703                 frame_val = tr32(MAC_MI_COM);
704                 if ((frame_val & MI_COM_BUSY) == 0) {
705                         udelay(5);
706                         frame_val = tr32(MAC_MI_COM);
707                         break;
708                 }
709                 loops -= 1;
710         }
711
712         ret = -EBUSY;
713         if (loops != 0)
714                 ret = 0;
715
716         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
717                 tw32_f(MAC_MI_MODE, tp->mi_mode);
718                 udelay(80);
719         }
720
721         return ret;
722 }
723
724 static void tg3_phy_set_wirespeed(struct tg3 *tp)
725 {
726         u32 val;
727
728         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
729                 return;
730
731         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
732             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
733                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
734                              (val | (1 << 15) | (1 << 4)));
735 }
736
737 static int tg3_bmcr_reset(struct tg3 *tp)
738 {
739         u32 phy_control;
740         int limit, err;
741
742         /* OK, reset it, and poll the BMCR_RESET bit until it
743          * clears or we time out.
744          */
745         phy_control = BMCR_RESET;
746         err = tg3_writephy(tp, MII_BMCR, phy_control);
747         if (err != 0)
748                 return -EBUSY;
749
750         limit = 5000;
751         while (limit--) {
752                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
753                 if (err != 0)
754                         return -EBUSY;
755
756                 if ((phy_control & BMCR_RESET) == 0) {
757                         udelay(40);
758                         break;
759                 }
760                 udelay(10);
761         }
762         if (limit <= 0)
763                 return -EBUSY;
764
765         return 0;
766 }
767
768 static int tg3_wait_macro_done(struct tg3 *tp)
769 {
770         int limit = 100;
771
772         while (limit--) {
773                 u32 tmp32;
774
775                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
776                         if ((tmp32 & 0x1000) == 0)
777                                 break;
778                 }
779         }
780         if (limit <= 0)
781                 return -EBUSY;
782
783         return 0;
784 }
785
786 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
787 {
788         static const u32 test_pat[4][6] = {
789         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
790         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
791         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
792         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
793         };
794         int chan;
795
796         for (chan = 0; chan < 4; chan++) {
797                 int i;
798
799                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
800                              (chan * 0x2000) | 0x0200);
801                 tg3_writephy(tp, 0x16, 0x0002);
802
803                 for (i = 0; i < 6; i++)
804                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
805                                      test_pat[chan][i]);
806
807                 tg3_writephy(tp, 0x16, 0x0202);
808                 if (tg3_wait_macro_done(tp)) {
809                         *resetp = 1;
810                         return -EBUSY;
811                 }
812
813                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
814                              (chan * 0x2000) | 0x0200);
815                 tg3_writephy(tp, 0x16, 0x0082);
816                 if (tg3_wait_macro_done(tp)) {
817                         *resetp = 1;
818                         return -EBUSY;
819                 }
820
821                 tg3_writephy(tp, 0x16, 0x0802);
822                 if (tg3_wait_macro_done(tp)) {
823                         *resetp = 1;
824                         return -EBUSY;
825                 }
826
827                 for (i = 0; i < 6; i += 2) {
828                         u32 low, high;
829
830                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
831                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
832                             tg3_wait_macro_done(tp)) {
833                                 *resetp = 1;
834                                 return -EBUSY;
835                         }
836                         low &= 0x7fff;
837                         high &= 0x000f;
838                         if (low != test_pat[chan][i] ||
839                             high != test_pat[chan][i+1]) {
840                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
841                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
842                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
843
844                                 return -EBUSY;
845                         }
846                 }
847         }
848
849         return 0;
850 }
851
852 static int tg3_phy_reset_chanpat(struct tg3 *tp)
853 {
854         int chan;
855
856         for (chan = 0; chan < 4; chan++) {
857                 int i;
858
859                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
860                              (chan * 0x2000) | 0x0200);
861                 tg3_writephy(tp, 0x16, 0x0002);
862                 for (i = 0; i < 6; i++)
863                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
864                 tg3_writephy(tp, 0x16, 0x0202);
865                 if (tg3_wait_macro_done(tp))
866                         return -EBUSY;
867         }
868
869         return 0;
870 }
871
872 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
873 {
874         u32 reg32, phy9_orig;
875         int retries, do_phy_reset, err;
876
877         retries = 10;
878         do_phy_reset = 1;
879         do {
880                 if (do_phy_reset) {
881                         err = tg3_bmcr_reset(tp);
882                         if (err)
883                                 return err;
884                         do_phy_reset = 0;
885                 }
886
887                 /* Disable transmitter and interrupt.  */
888                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
889                         continue;
890
891                 reg32 |= 0x3000;
892                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
893
894                 /* Set full-duplex, 1000 mbps.  */
895                 tg3_writephy(tp, MII_BMCR,
896                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
897
898                 /* Set to master mode.  */
899                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
900                         continue;
901
902                 tg3_writephy(tp, MII_TG3_CTRL,
903                              (MII_TG3_CTRL_AS_MASTER |
904                               MII_TG3_CTRL_ENABLE_AS_MASTER));
905
906                 /* Enable SM_DSP_CLOCK and 6dB.  */
907                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
908
909                 /* Block the PHY control access.  */
910                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
911                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
912
913                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
914                 if (!err)
915                         break;
916         } while (--retries);
917
918         err = tg3_phy_reset_chanpat(tp);
919         if (err)
920                 return err;
921
922         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
923         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
924
925         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
926         tg3_writephy(tp, 0x16, 0x0000);
927
928         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
929             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
930                 /* Set Extended packet length bit for jumbo frames */
931                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
932         }
933         else {
934                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
935         }
936
937         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
938
939         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
940                 reg32 &= ~0x3000;
941                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
942         } else if (!err)
943                 err = -EBUSY;
944
945         return err;
946 }
947
948 static void tg3_link_report(struct tg3 *);
949
950 /* Reset the tigon3 PHY and apply any chip- and PHY-specific
951  * workarounds needed afterwards.
952  */
953 static int tg3_phy_reset(struct tg3 *tp)
954 {
955         u32 phy_status;
956         int err;
957
958         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
959                 u32 val;
960
961                 val = tr32(GRC_MISC_CFG);
962                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
963                 udelay(40);
964         }
965         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
966         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
967         if (err != 0)
968                 return -EBUSY;
969
970         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
971                 netif_carrier_off(tp->dev);
972                 tg3_link_report(tp);
973         }
974
975         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
976             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
977             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
978                 err = tg3_phy_reset_5703_4_5(tp);
979                 if (err)
980                         return err;
981                 goto out;
982         }
983
984         err = tg3_bmcr_reset(tp);
985         if (err)
986                 return err;
987
988 out:
989         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
990                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
991                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
992                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
993                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
994                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
995                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
996         }
997         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
998                 tg3_writephy(tp, 0x1c, 0x8d68);
999                 tg3_writephy(tp, 0x1c, 0x8d68);
1000         }
1001         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1002                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1003                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1004                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1005                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1006                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1007                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1008                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1009                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1010         }
1011         else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1012                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1013                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1014                 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1015                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1016                         tg3_writephy(tp, MII_TG3_TEST1,
1017                                      MII_TG3_TEST1_TRIM_EN | 0x4);
1018                 } else
1019                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1020                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1021         }
1022         /* Set Extended packet length bit (bit 14) on all chips that */
1023         /* support jumbo frames */
1024         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1025                 /* Cannot do read-modify-write on 5401 */
1026                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1027         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1028                 u32 phy_reg;
1029
1030                 /* Set bit 14 with read-modify-write to preserve other bits */
1031                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1032                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1033                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1034         }
1035
1036         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1037          * jumbo frames transmission.
1038          */
1039         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1040                 u32 phy_reg;
1041
1042                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1043                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1044                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1045         }
1046
1047         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1048                 u32 phy_reg;
1049
1050                 /* adjust output voltage */
1051                 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1052
1053                 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phy_reg)) {
1054                         u32 phy_reg2;
1055
1056                         tg3_writephy(tp, MII_TG3_EPHY_TEST,
1057                                      phy_reg | MII_TG3_EPHY_SHADOW_EN);
1058                         /* Enable auto-MDIX */
1059                         if (!tg3_readphy(tp, 0x10, &phy_reg2))
1060                                 tg3_writephy(tp, 0x10, phy_reg2 | 0x4000);
1061                         tg3_writephy(tp, MII_TG3_EPHY_TEST, phy_reg);
1062                 }
1063         }
1064
1065         tg3_phy_set_wirespeed(tp);
1066         return 0;
1067 }
1068
1069 static void tg3_frob_aux_power(struct tg3 *tp)
1070 {
1071         struct tg3 *tp_peer = tp;
1072
1073         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1074                 return;
1075
1076         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1077             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1078                 struct net_device *dev_peer;
1079
1080                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1081                 /* remove_one() may have been run on the peer. */
1082                 if (!dev_peer)
1083                         tp_peer = tp;
1084                 else
1085                         tp_peer = netdev_priv(dev_peer);
1086         }
1087
1088         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1089             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1090             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1091             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1092                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1093                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1094                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1095                                     (GRC_LCLCTRL_GPIO_OE0 |
1096                                      GRC_LCLCTRL_GPIO_OE1 |
1097                                      GRC_LCLCTRL_GPIO_OE2 |
1098                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1099                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1100                                     100);
1101                 } else {
1102                         u32 no_gpio2;
1103                         u32 grc_local_ctrl = 0;
1104
1105                         if (tp_peer != tp &&
1106                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1107                                 return;
1108
1109                         /* Workaround to prevent overdrawing Amps. */
1110                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1111                             ASIC_REV_5714) {
1112                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1113                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1114                                             grc_local_ctrl, 100);
1115                         }
1116
1117                         /* On 5753 and variants, GPIO2 cannot be used. */
1118                         no_gpio2 = tp->nic_sram_data_cfg &
1119                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1120
1121                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1122                                          GRC_LCLCTRL_GPIO_OE1 |
1123                                          GRC_LCLCTRL_GPIO_OE2 |
1124                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1125                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1126                         if (no_gpio2) {
1127                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1128                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1129                         }
1130                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1131                                                     grc_local_ctrl, 100);
1132
1133                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1134
1135                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1136                                                     grc_local_ctrl, 100);
1137
1138                         if (!no_gpio2) {
1139                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1140                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1141                                             grc_local_ctrl, 100);
1142                         }
1143                 }
1144         } else {
1145                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1146                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1147                         if (tp_peer != tp &&
1148                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1149                                 return;
1150
1151                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1152                                     (GRC_LCLCTRL_GPIO_OE1 |
1153                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1154
1155                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1156                                     GRC_LCLCTRL_GPIO_OE1, 100);
1157
1158                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1159                                     (GRC_LCLCTRL_GPIO_OE1 |
1160                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1161                 }
1162         }
1163 }
1164
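/* Decide whether MAC_MODE_LINK_POLARITY should be set on 5700-class chips:
 * PHY_2 LED mode always wants it, a BCM5411 PHY wants it at every speed
 * except 10 Mbps, and all other PHYs want it only at 10 Mbps.
 */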
1165 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1166 {
1167         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1168                 return 1;
1169         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1170                 if (speed != SPEED_10)
1171                         return 1;
1172         } else if (speed == SPEED_10)
1173                 return 1;
1174
1175         return 0;
1176 }
1177
1178 static int tg3_setup_phy(struct tg3 *, int);
1179
1180 #define RESET_KIND_SHUTDOWN     0
1181 #define RESET_KIND_INIT         1
1182 #define RESET_KIND_SUSPEND      2
1183
1184 static void tg3_write_sig_post_reset(struct tg3 *, int);
1185 static int tg3_halt_cpu(struct tg3 *, u32);
1186 static int tg3_nvram_lock(struct tg3 *);
1187 static void tg3_nvram_unlock(struct tg3 *);
1188
1189 static void tg3_power_down_phy(struct tg3 *tp)
1190 {
1191         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1192                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1193                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1194                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1195
1196                         sg_dig_ctrl |=
1197                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1198                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
1199                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1200                 }
1201                 return;
1202         }
1203
1204         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1205                 u32 val;
1206
1207                 tg3_bmcr_reset(tp);
1208                 val = tr32(GRC_MISC_CFG);
1209                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1210                 udelay(40);
1211                 return;
1212         } else {
1213                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1214                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1215                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1216         }
1217
1218         /* The PHY should not be powered down on some chips because
1219          * of bugs.
1220          */
1221         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1222             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1223             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1224              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1225                 return;
1226         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1227 }
1228
1229 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1230 {
1231         u32 misc_host_ctrl;
1232         u16 power_control, power_caps;
1233         int pm = tp->pm_cap;
1234
1235         /* Make sure register accesses (indirect or otherwise)
1236          * will function correctly.
1237          */
1238         pci_write_config_dword(tp->pdev,
1239                                TG3PCI_MISC_HOST_CTRL,
1240                                tp->misc_host_ctrl);
1241
1242         pci_read_config_word(tp->pdev,
1243                              pm + PCI_PM_CTRL,
1244                              &power_control);
1245         power_control |= PCI_PM_CTRL_PME_STATUS;
1246         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1247         switch (state) {
1248         case PCI_D0:
1249                 power_control |= 0;
1250                 pci_write_config_word(tp->pdev,
1251                                       pm + PCI_PM_CTRL,
1252                                       power_control);
1253                 udelay(100);    /* Delay after power state change */
1254
1255                 /* Switch out of Vaux if it is a NIC */
1256                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1257                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1258
1259                 return 0;
1260
1261         case PCI_D1:
1262                 power_control |= 1;
1263                 break;
1264
1265         case PCI_D2:
1266                 power_control |= 2;
1267                 break;
1268
1269         case PCI_D3hot:
1270                 power_control |= 3;
1271                 break;
1272
1273         default:
1274                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1275                        "requested.\n",
1276                        tp->dev->name, state);
1277                 return -EINVAL;
1278         }
1279
1280         power_control |= PCI_PM_CTRL_PME_ENABLE;
1281
1282         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1283         tw32(TG3PCI_MISC_HOST_CTRL,
1284              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1285
1286         if (tp->link_config.phy_is_low_power == 0) {
1287                 tp->link_config.phy_is_low_power = 1;
1288                 tp->link_config.orig_speed = tp->link_config.speed;
1289                 tp->link_config.orig_duplex = tp->link_config.duplex;
1290                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1291         }
1292
1293         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1294                 tp->link_config.speed = SPEED_10;
1295                 tp->link_config.duplex = DUPLEX_HALF;
1296                 tp->link_config.autoneg = AUTONEG_ENABLE;
1297                 tg3_setup_phy(tp, 0);
1298         }
1299
1300         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1301                 u32 val;
1302
1303                 val = tr32(GRC_VCPU_EXT_CTRL);
1304                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1305         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1306                 int i;
1307                 u32 val;
1308
1309                 for (i = 0; i < 200; i++) {
1310                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1311                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1312                                 break;
1313                         msleep(1);
1314                 }
1315         }
1316         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1317                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1318                                                      WOL_DRV_STATE_SHUTDOWN |
1319                                                      WOL_DRV_WOL |
1320                                                      WOL_SET_MAGIC_PKT);
1321
1322         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1323
1324         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1325                 u32 mac_mode;
1326
1327                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1328                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1329                         udelay(40);
1330
1331                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1332                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1333                         else
1334                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1335
1336                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1337                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1338                             ASIC_REV_5700) {
1339                                 u32 speed = (tp->tg3_flags &
1340                                              TG3_FLAG_WOL_SPEED_100MB) ?
1341                                              SPEED_100 : SPEED_10;
1342                                 if (tg3_5700_link_polarity(tp, speed))
1343                                         mac_mode |= MAC_MODE_LINK_POLARITY;
1344                                 else
1345                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
1346                         }
1347                 } else {
1348                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1349                 }
1350
1351                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1352                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1353
1354                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1355                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1356                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1357
1358                 tw32_f(MAC_MODE, mac_mode);
1359                 udelay(100);
1360
1361                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1362                 udelay(10);
1363         }
1364
1365         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1366             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1367              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1368                 u32 base_val;
1369
1370                 base_val = tp->pci_clock_ctrl;
1371                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1372                              CLOCK_CTRL_TXCLK_DISABLE);
1373
1374                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1375                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1376         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1377                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1378                 /* do nothing */
1379         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1380                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1381                 u32 newbits1, newbits2;
1382
1383                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1384                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1385                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1386                                     CLOCK_CTRL_TXCLK_DISABLE |
1387                                     CLOCK_CTRL_ALTCLK);
1388                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1389                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1390                         newbits1 = CLOCK_CTRL_625_CORE;
1391                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1392                 } else {
1393                         newbits1 = CLOCK_CTRL_ALTCLK;
1394                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1395                 }
1396
1397                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1398                             40);
1399
1400                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1401                             40);
1402
1403                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1404                         u32 newbits3;
1405
1406                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1407                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1408                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1409                                             CLOCK_CTRL_TXCLK_DISABLE |
1410                                             CLOCK_CTRL_44MHZ_CORE);
1411                         } else {
1412                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1413                         }
1414
1415                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1416                                     tp->pci_clock_ctrl | newbits3, 40);
1417                 }
1418         }
1419
1420         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1421             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1422                 tg3_power_down_phy(tp);
1423
1424         tg3_frob_aux_power(tp);
1425
1426         /* Workaround for unstable PLL clock */
1427         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1428             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1429                 u32 val = tr32(0x7d00);
1430
1431                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1432                 tw32(0x7d00, val);
1433                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1434                         int err;
1435
1436                         err = tg3_nvram_lock(tp);
1437                         tg3_halt_cpu(tp, RX_CPU_BASE);
1438                         if (!err)
1439                                 tg3_nvram_unlock(tp);
1440                 }
1441         }
1442
1443         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1444
1445         /* Finally, set the new power state. */
1446         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1447         udelay(100);    /* Delay after power state change */
1448
1449         return 0;
1450 }
1451
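     /* Log link transitions: speed, duplex and the negotiated TX/RX
      * flow-control state, gated by the NETIF_MSG_LINK message level.
      */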
1452 static void tg3_link_report(struct tg3 *tp)
1453 {
1454         if (!netif_carrier_ok(tp->dev)) {
1455                 if (netif_msg_link(tp))
1456                         printk(KERN_INFO PFX "%s: Link is down.\n",
1457                                tp->dev->name);
1458         } else if (netif_msg_link(tp)) {
1459                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1460                        tp->dev->name,
1461                        (tp->link_config.active_speed == SPEED_1000 ?
1462                         1000 :
1463                         (tp->link_config.active_speed == SPEED_100 ?
1464                          100 : 10)),
1465                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1466                         "full" : "half"));
1467
1468                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1469                        "%s for RX.\n",
1470                        tp->dev->name,
1471                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1472                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1473         }
1474 }
1475
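     /* Resolve TX/RX flow control from the local and remote pause
      * advertisements, in the usual IEEE 802.3 (Annex 28B style)
      * priority-resolution manner.  For MII SerDes devices the
      * 1000BASE-X pause bits are first translated into their
      * 1000BASE-T positions.  The outcome, as implemented below:
      *
      *   local cap/asym   remote cap/asym   result
      *        1/x              1/x          RX + TX pause
      *        1/1              0/1          RX pause only
      *        0/1              1/1          TX pause only
      *        otherwise                     no pause
      *
      * MAC_RX_MODE/MAC_TX_MODE are rewritten only if the setting changed.
      */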
1476 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1477 {
1478         u32 new_tg3_flags = 0;
1479         u32 old_rx_mode = tp->rx_mode;
1480         u32 old_tx_mode = tp->tx_mode;
1481
1482         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1483
1484                 /* Convert 1000BaseX flow control bits to 1000BaseT
1485                  * bits before resolving flow control.
1486                  */
1487                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1488                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1489                                        ADVERTISE_PAUSE_ASYM);
1490                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1491
1492                         if (local_adv & ADVERTISE_1000XPAUSE)
1493                                 local_adv |= ADVERTISE_PAUSE_CAP;
1494                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1495                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1496                         if (remote_adv & LPA_1000XPAUSE)
1497                                 remote_adv |= LPA_PAUSE_CAP;
1498                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1499                                 remote_adv |= LPA_PAUSE_ASYM;
1500                 }
1501
1502                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1503                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1504                                 if (remote_adv & LPA_PAUSE_CAP)
1505                                         new_tg3_flags |=
1506                                                 (TG3_FLAG_RX_PAUSE |
1507                                                 TG3_FLAG_TX_PAUSE);
1508                                 else if (remote_adv & LPA_PAUSE_ASYM)
1509                                         new_tg3_flags |=
1510                                                 (TG3_FLAG_RX_PAUSE);
1511                         } else {
1512                                 if (remote_adv & LPA_PAUSE_CAP)
1513                                         new_tg3_flags |=
1514                                                 (TG3_FLAG_RX_PAUSE |
1515                                                 TG3_FLAG_TX_PAUSE);
1516                         }
1517                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1518                         if ((remote_adv & LPA_PAUSE_CAP) &&
1519                             (remote_adv & LPA_PAUSE_ASYM))
1520                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1521                 }
1522
1523                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1524                 tp->tg3_flags |= new_tg3_flags;
1525         } else {
1526                 new_tg3_flags = tp->tg3_flags;
1527         }
1528
1529         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1530                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1531         else
1532                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1533
1534         if (old_rx_mode != tp->rx_mode) {
1535                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1536         }
1537
1538         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1539                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1540         else
1541                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1542
1543         if (old_tx_mode != tp->tx_mode) {
1544                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1545         }
1546 }
1547
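     /* Decode the speed/duplex field of the Broadcom AUX STAT register
      * into SPEED_xxx/DUPLEX_xxx values.  The 5906 uses a different
      * encoding, handled in the default case.
      */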
1548 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1549 {
1550         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1551         case MII_TG3_AUX_STAT_10HALF:
1552                 *speed = SPEED_10;
1553                 *duplex = DUPLEX_HALF;
1554                 break;
1555
1556         case MII_TG3_AUX_STAT_10FULL:
1557                 *speed = SPEED_10;
1558                 *duplex = DUPLEX_FULL;
1559                 break;
1560
1561         case MII_TG3_AUX_STAT_100HALF:
1562                 *speed = SPEED_100;
1563                 *duplex = DUPLEX_HALF;
1564                 break;
1565
1566         case MII_TG3_AUX_STAT_100FULL:
1567                 *speed = SPEED_100;
1568                 *duplex = DUPLEX_FULL;
1569                 break;
1570
1571         case MII_TG3_AUX_STAT_1000HALF:
1572                 *speed = SPEED_1000;
1573                 *duplex = DUPLEX_HALF;
1574                 break;
1575
1576         case MII_TG3_AUX_STAT_1000FULL:
1577                 *speed = SPEED_1000;
1578                 *duplex = DUPLEX_FULL;
1579                 break;
1580
1581         default:
1582                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1583                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1584                                  SPEED_10;
1585                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1586                                   DUPLEX_HALF;
1587                         break;
1588                 }
1589                 *speed = SPEED_INVALID;
1590                 *duplex = DUPLEX_INVALID;
1591                 break;
1592         }
1593 }
1594
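     /* Program the copper PHY advertisement registers and start (or
      * force) a link.  In low-power mode only 10Mb (plus 100Mb when WOL
      * needs it) is advertised.  With autoneg disabled the requested
      * speed/duplex is written to BMCR directly, after briefly dropping
      * the link via loopback so the change takes effect cleanly.
      */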
1595 static void tg3_phy_copper_begin(struct tg3 *tp)
1596 {
1597         u32 new_adv;
1598         int i;
1599
1600         if (tp->link_config.phy_is_low_power) {
1601                 /* Entering low power mode.  Disable gigabit and
1602                  * 100baseT advertisements.
1603                  */
1604                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1605
1606                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1607                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1608                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1609                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1610
1611                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1612         } else if (tp->link_config.speed == SPEED_INVALID) {
1613                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1614                         tp->link_config.advertising &=
1615                                 ~(ADVERTISED_1000baseT_Half |
1616                                   ADVERTISED_1000baseT_Full);
1617
1618                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1619                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1620                         new_adv |= ADVERTISE_10HALF;
1621                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1622                         new_adv |= ADVERTISE_10FULL;
1623                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1624                         new_adv |= ADVERTISE_100HALF;
1625                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1626                         new_adv |= ADVERTISE_100FULL;
1627                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1628
1629                 if (tp->link_config.advertising &
1630                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1631                         new_adv = 0;
1632                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1633                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1634                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1635                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1636                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1637                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1638                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1639                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1640                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1641                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1642                 } else {
1643                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1644                 }
1645         } else {
1646                 /* Asking for a specific link mode. */
1647                 if (tp->link_config.speed == SPEED_1000) {
1648                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1649                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1650
1651                         if (tp->link_config.duplex == DUPLEX_FULL)
1652                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1653                         else
1654                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1655                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1656                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1657                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1658                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1659                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1660                 } else {
1661                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1662
1663                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1664                         if (tp->link_config.speed == SPEED_100) {
1665                                 if (tp->link_config.duplex == DUPLEX_FULL)
1666                                         new_adv |= ADVERTISE_100FULL;
1667                                 else
1668                                         new_adv |= ADVERTISE_100HALF;
1669                         } else {
1670                                 if (tp->link_config.duplex == DUPLEX_FULL)
1671                                         new_adv |= ADVERTISE_10FULL;
1672                                 else
1673                                         new_adv |= ADVERTISE_10HALF;
1674                         }
1675                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1676                 }
1677         }
1678
1679         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1680             tp->link_config.speed != SPEED_INVALID) {
1681                 u32 bmcr, orig_bmcr;
1682
1683                 tp->link_config.active_speed = tp->link_config.speed;
1684                 tp->link_config.active_duplex = tp->link_config.duplex;
1685
1686                 bmcr = 0;
1687                 switch (tp->link_config.speed) {
1688                 default:
1689                 case SPEED_10:
1690                         break;
1691
1692                 case SPEED_100:
1693                         bmcr |= BMCR_SPEED100;
1694                         break;
1695
1696                 case SPEED_1000:
1697                         bmcr |= TG3_BMCR_SPEED1000;
1698                         break;
1699         }
1700
1701                 if (tp->link_config.duplex == DUPLEX_FULL)
1702                         bmcr |= BMCR_FULLDPLX;
1703
1704                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1705                     (bmcr != orig_bmcr)) {
1706                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1707                         for (i = 0; i < 1500; i++) {
1708                                 u32 tmp;
1709
1710                                 udelay(10);
1711                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1712                                     tg3_readphy(tp, MII_BMSR, &tmp))
1713                                         continue;
1714                                 if (!(tmp & BMSR_LSTATUS)) {
1715                                         udelay(40);
1716                                         break;
1717                                 }
1718                         }
1719                         tg3_writephy(tp, MII_BMCR, bmcr);
1720                         udelay(40);
1721                 }
1722         } else {
1723                 tg3_writephy(tp, MII_BMCR,
1724                              BMCR_ANENABLE | BMCR_ANRESTART);
1725         }
1726 }
1727
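     /* Load the DSP fixups the BCM5401 needs (tap power management off,
      * extended packet length on).  The address/value pairs are opaque
      * constants, presumably taken from vendor documentation.
      */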
1728 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1729 {
1730         int err;
1731
1732         /* Turn off tap power management. */
1733         /* Set Extended packet length bit */
1734         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1735
1736         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1737         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1738
1739         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1740         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1741
1742         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1743         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1744
1745         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1746         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1747
1748         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1749         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1750
1751         udelay(40);
1752
1753         return err;
1754 }
1755
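     /* Return 1 only if the PHY advertisement registers already contain
      * every mode requested in @mask; used to decide whether autoneg has
      * to be restarted (e.g. after leaving low-power mode).
      */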
1756 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1757 {
1758         u32 adv_reg, all_mask = 0;
1759
1760         if (mask & ADVERTISED_10baseT_Half)
1761                 all_mask |= ADVERTISE_10HALF;
1762         if (mask & ADVERTISED_10baseT_Full)
1763                 all_mask |= ADVERTISE_10FULL;
1764         if (mask & ADVERTISED_100baseT_Half)
1765                 all_mask |= ADVERTISE_100HALF;
1766         if (mask & ADVERTISED_100baseT_Full)
1767                 all_mask |= ADVERTISE_100FULL;
1768
1769         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1770                 return 0;
1771
1772         if ((adv_reg & all_mask) != all_mask)
1773                 return 0;
1774         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1775                 u32 tg3_ctrl;
1776
1777                 all_mask = 0;
1778                 if (mask & ADVERTISED_1000baseT_Half)
1779                         all_mask |= ADVERTISE_1000HALF;
1780                 if (mask & ADVERTISED_1000baseT_Full)
1781                         all_mask |= ADVERTISE_1000FULL;
1782
1783                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1784                         return 0;
1785
1786                 if ((tg3_ctrl & all_mask) != all_mask)
1787                         return 0;
1788         }
1789         return 1;
1790 }
1791
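     /* Main link handling for copper PHYs: clear stale MAC/PHY status,
      * apply per-chip PHY workarounds, poll BMSR for link, derive
      * speed/duplex from AUX STAT, resolve flow control, then program
      * MAC_MODE and report any carrier change.
      */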
1792 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1793 {
1794         int current_link_up;
1795         u32 bmsr, dummy;
1796         u16 current_speed;
1797         u8 current_duplex;
1798         int i, err;
1799
1800         tw32(MAC_EVENT, 0);
1801
1802         tw32_f(MAC_STATUS,
1803              (MAC_STATUS_SYNC_CHANGED |
1804               MAC_STATUS_CFG_CHANGED |
1805               MAC_STATUS_MI_COMPLETION |
1806               MAC_STATUS_LNKSTATE_CHANGED));
1807         udelay(40);
1808
1809         tp->mi_mode = MAC_MI_MODE_BASE;
1810         tw32_f(MAC_MI_MODE, tp->mi_mode);
1811         udelay(80);
1812
1813         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1814
1815         /* Some third-party PHYs need to be reset on link going
1816          * down.
1817          */
1818         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1819              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1820              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1821             netif_carrier_ok(tp->dev)) {
1822                 tg3_readphy(tp, MII_BMSR, &bmsr);
1823                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1824                     !(bmsr & BMSR_LSTATUS))
1825                         force_reset = 1;
1826         }
1827         if (force_reset)
1828                 tg3_phy_reset(tp);
1829
1830         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1831                 tg3_readphy(tp, MII_BMSR, &bmsr);
1832                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1833                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1834                         bmsr = 0;
1835
1836                 if (!(bmsr & BMSR_LSTATUS)) {
1837                         err = tg3_init_5401phy_dsp(tp);
1838                         if (err)
1839                                 return err;
1840
1841                         tg3_readphy(tp, MII_BMSR, &bmsr);
1842                         for (i = 0; i < 1000; i++) {
1843                                 udelay(10);
1844                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1845                                     (bmsr & BMSR_LSTATUS)) {
1846                                         udelay(40);
1847                                         break;
1848                                 }
1849                         }
1850
1851                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1852                             !(bmsr & BMSR_LSTATUS) &&
1853                             tp->link_config.active_speed == SPEED_1000) {
1854                                 err = tg3_phy_reset(tp);
1855                                 if (!err)
1856                                         err = tg3_init_5401phy_dsp(tp);
1857                                 if (err)
1858                                         return err;
1859                         }
1860                 }
1861         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1862                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1863                 /* 5701 {A0,B0} CRC bug workaround */
1864                 tg3_writephy(tp, 0x15, 0x0a75);
1865                 tg3_writephy(tp, 0x1c, 0x8c68);
1866                 tg3_writephy(tp, 0x1c, 0x8d68);
1867                 tg3_writephy(tp, 0x1c, 0x8c68);
1868         }
1869
1870         /* Clear pending interrupts... */
1871         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1872         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1873
1874         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1875                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1876         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
1877                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1878
1879         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1880             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1881                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1882                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1883                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1884                 else
1885                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1886         }
1887
1888         current_link_up = 0;
1889         current_speed = SPEED_INVALID;
1890         current_duplex = DUPLEX_INVALID;
1891
1892         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1893                 u32 val;
1894
1895                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1896                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1897                 if (!(val & (1 << 10))) {
1898                         val |= (1 << 10);
1899                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1900                         goto relink;
1901                 }
1902         }
1903
1904         bmsr = 0;
1905         for (i = 0; i < 100; i++) {
1906                 tg3_readphy(tp, MII_BMSR, &bmsr);
1907                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1908                     (bmsr & BMSR_LSTATUS))
1909                         break;
1910                 udelay(40);
1911         }
1912
1913         if (bmsr & BMSR_LSTATUS) {
1914                 u32 aux_stat, bmcr;
1915
1916                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1917                 for (i = 0; i < 2000; i++) {
1918                         udelay(10);
1919                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1920                             aux_stat)
1921                                 break;
1922                 }
1923
1924                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1925                                              &current_speed,
1926                                              &current_duplex);
1927
1928                 bmcr = 0;
1929                 for (i = 0; i < 200; i++) {
1930                         tg3_readphy(tp, MII_BMCR, &bmcr);
1931                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1932                                 continue;
1933                         if (bmcr && bmcr != 0x7fff)
1934                                 break;
1935                         udelay(10);
1936                 }
1937
1938                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1939                         if (bmcr & BMCR_ANENABLE) {
1940                                 current_link_up = 1;
1941
1942                                 /* Force autoneg restart if we are exiting
1943                                  * low power mode.
1944                                  */
1945                                 if (!tg3_copper_is_advertising_all(tp,
1946                                                 tp->link_config.advertising))
1947                                         current_link_up = 0;
1948                         } else {
1949                                 current_link_up = 0;
1950                         }
1951                 } else {
1952                         if (!(bmcr & BMCR_ANENABLE) &&
1953                             tp->link_config.speed == current_speed &&
1954                             tp->link_config.duplex == current_duplex) {
1955                                 current_link_up = 1;
1956                         } else {
1957                                 current_link_up = 0;
1958                         }
1959                 }
1960
1961                 tp->link_config.active_speed = current_speed;
1962                 tp->link_config.active_duplex = current_duplex;
1963         }
1964
1965         if (current_link_up == 1 &&
1966             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1967             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1968                 u32 local_adv, remote_adv;
1969
1970                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1971                         local_adv = 0;
1972                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1973
1974                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1975                         remote_adv = 0;
1976
1977                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1978
1979                 /* If we are not advertising full pause capability,
1980                  * something is wrong.  Bring the link down and reconfigure.
1981                  */
1982                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1983                         current_link_up = 0;
1984                 } else {
1985                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1986                 }
1987         }
1988 relink:
1989         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1990                 u32 tmp;
1991
1992                 tg3_phy_copper_begin(tp);
1993
1994                 tg3_readphy(tp, MII_BMSR, &tmp);
1995                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1996                     (tmp & BMSR_LSTATUS))
1997                         current_link_up = 1;
1998         }
1999
2000         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2001         if (current_link_up == 1) {
2002                 if (tp->link_config.active_speed == SPEED_100 ||
2003                     tp->link_config.active_speed == SPEED_10)
2004                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2005                 else
2006                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2007         } else
2008                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2009
2010         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2011         if (tp->link_config.active_duplex == DUPLEX_HALF)
2012                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2013
2014         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2015                 if (current_link_up == 1 &&
2016                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2017                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2018                 else
2019                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2020         }
2021
2022         /* ??? Without this setting Netgear GA302T PHY does not
2023          * ??? send/receive packets...
2024          */
2025         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2026             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2027                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2028                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2029                 udelay(80);
2030         }
2031
2032         tw32_f(MAC_MODE, tp->mac_mode);
2033         udelay(40);
2034
2035         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2036                 /* Polled via timer. */
2037                 tw32_f(MAC_EVENT, 0);
2038         } else {
2039                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2040         }
2041         udelay(40);
2042
2043         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2044             current_link_up == 1 &&
2045             tp->link_config.active_speed == SPEED_1000 &&
2046             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2047              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2048                 udelay(120);
2049                 tw32_f(MAC_STATUS,
2050                      (MAC_STATUS_SYNC_CHANGED |
2051                       MAC_STATUS_CFG_CHANGED));
2052                 udelay(40);
2053                 tg3_write_mem(tp,
2054                               NIC_SRAM_FIRMWARE_MBOX,
2055                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2056         }
2057
2058         if (current_link_up != netif_carrier_ok(tp->dev)) {
2059                 if (current_link_up)
2060                         netif_carrier_on(tp->dev);
2061                 else
2062                         netif_carrier_off(tp->dev);
2063                 tg3_link_report(tp);
2064         }
2065
2066         return 0;
2067 }
2068
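     /* Software autonegotiation state for 1000BASE-X (fiber) links,
      * apparently modelled on the IEEE 802.3 Clause 37 arbitration
      * state machine; the MR_* flags mirror the management register
      * bits defined there.
      */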
2069 struct tg3_fiber_aneginfo {
2070         int state;
2071 #define ANEG_STATE_UNKNOWN              0
2072 #define ANEG_STATE_AN_ENABLE            1
2073 #define ANEG_STATE_RESTART_INIT         2
2074 #define ANEG_STATE_RESTART              3
2075 #define ANEG_STATE_DISABLE_LINK_OK      4
2076 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2077 #define ANEG_STATE_ABILITY_DETECT       6
2078 #define ANEG_STATE_ACK_DETECT_INIT      7
2079 #define ANEG_STATE_ACK_DETECT           8
2080 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2081 #define ANEG_STATE_COMPLETE_ACK         10
2082 #define ANEG_STATE_IDLE_DETECT_INIT     11
2083 #define ANEG_STATE_IDLE_DETECT          12
2084 #define ANEG_STATE_LINK_OK              13
2085 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2086 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2087
2088         u32 flags;
2089 #define MR_AN_ENABLE            0x00000001
2090 #define MR_RESTART_AN           0x00000002
2091 #define MR_AN_COMPLETE          0x00000004
2092 #define MR_PAGE_RX              0x00000008
2093 #define MR_NP_LOADED            0x00000010
2094 #define MR_TOGGLE_TX            0x00000020
2095 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2096 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2097 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2098 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2099 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2100 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2101 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2102 #define MR_TOGGLE_RX            0x00002000
2103 #define MR_NP_RX                0x00004000
2104
2105 #define MR_LINK_OK              0x80000000
2106
2107         unsigned long link_time, cur_time;
2108
2109         u32 ability_match_cfg;
2110         int ability_match_count;
2111
2112         char ability_match, idle_match, ack_match;
2113
2114         u32 txconfig, rxconfig;
2115 #define ANEG_CFG_NP             0x00000080
2116 #define ANEG_CFG_ACK            0x00000040
2117 #define ANEG_CFG_RF2            0x00000020
2118 #define ANEG_CFG_RF1            0x00000010
2119 #define ANEG_CFG_PS2            0x00000001
2120 #define ANEG_CFG_PS1            0x00008000
2121 #define ANEG_CFG_HD             0x00004000
2122 #define ANEG_CFG_FD             0x00002000
2123 #define ANEG_CFG_INVAL          0x00001f06
2124
2125 };
2126 #define ANEG_OK         0
2127 #define ANEG_DONE       1
2128 #define ANEG_TIMER_ENAB 2
2129 #define ANEG_FAILED     -1
2130
2131 #define ANEG_STATE_SETTLE_TIME  10000
2132
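     /* Advance the software autoneg state machine by one tick: sample
      * the received config word from MAC_RX_AUTO_NEG, update the
      * ability/ack match tracking and run one step of the state table.
      * Returns ANEG_OK, ANEG_TIMER_ENAB, ANEG_DONE or ANEG_FAILED.
      */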
2133 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2134                                    struct tg3_fiber_aneginfo *ap)
2135 {
2136         unsigned long delta;
2137         u32 rx_cfg_reg;
2138         int ret;
2139
2140         if (ap->state == ANEG_STATE_UNKNOWN) {
2141                 ap->rxconfig = 0;
2142                 ap->link_time = 0;
2143                 ap->cur_time = 0;
2144                 ap->ability_match_cfg = 0;
2145                 ap->ability_match_count = 0;
2146                 ap->ability_match = 0;
2147                 ap->idle_match = 0;
2148                 ap->ack_match = 0;
2149         }
2150         ap->cur_time++;
2151
2152         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2153                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2154
2155                 if (rx_cfg_reg != ap->ability_match_cfg) {
2156                         ap->ability_match_cfg = rx_cfg_reg;
2157                         ap->ability_match = 0;
2158                         ap->ability_match_count = 0;
2159                 } else {
2160                         if (++ap->ability_match_count > 1) {
2161                                 ap->ability_match = 1;
2162                                 ap->ability_match_cfg = rx_cfg_reg;
2163                         }
2164                 }
2165                 if (rx_cfg_reg & ANEG_CFG_ACK)
2166                         ap->ack_match = 1;
2167                 else
2168                         ap->ack_match = 0;
2169
2170                 ap->idle_match = 0;
2171         } else {
2172                 ap->idle_match = 1;
2173                 ap->ability_match_cfg = 0;
2174                 ap->ability_match_count = 0;
2175                 ap->ability_match = 0;
2176                 ap->ack_match = 0;
2177
2178                 rx_cfg_reg = 0;
2179         }
2180
2181         ap->rxconfig = rx_cfg_reg;
2182         ret = ANEG_OK;
2183
2184         switch (ap->state) {
2185         case ANEG_STATE_UNKNOWN:
2186                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2187                         ap->state = ANEG_STATE_AN_ENABLE;
2188
2189                 /* fallthru */
2190         case ANEG_STATE_AN_ENABLE:
2191                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2192                 if (ap->flags & MR_AN_ENABLE) {
2193                         ap->link_time = 0;
2194                         ap->cur_time = 0;
2195                         ap->ability_match_cfg = 0;
2196                         ap->ability_match_count = 0;
2197                         ap->ability_match = 0;
2198                         ap->idle_match = 0;
2199                         ap->ack_match = 0;
2200
2201                         ap->state = ANEG_STATE_RESTART_INIT;
2202                 } else {
2203                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2204                 }
2205                 break;
2206
2207         case ANEG_STATE_RESTART_INIT:
2208                 ap->link_time = ap->cur_time;
2209                 ap->flags &= ~(MR_NP_LOADED);
2210                 ap->txconfig = 0;
2211                 tw32(MAC_TX_AUTO_NEG, 0);
2212                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2213                 tw32_f(MAC_MODE, tp->mac_mode);
2214                 udelay(40);
2215
2216                 ret = ANEG_TIMER_ENAB;
2217                 ap->state = ANEG_STATE_RESTART;
2218
2219                 /* fallthru */
2220         case ANEG_STATE_RESTART:
2221                 delta = ap->cur_time - ap->link_time;
2222                 if (delta > ANEG_STATE_SETTLE_TIME) {
2223                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2224                 } else {
2225                         ret = ANEG_TIMER_ENAB;
2226                 }
2227                 break;
2228
2229         case ANEG_STATE_DISABLE_LINK_OK:
2230                 ret = ANEG_DONE;
2231                 break;
2232
2233         case ANEG_STATE_ABILITY_DETECT_INIT:
2234                 ap->flags &= ~(MR_TOGGLE_TX);
2235                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2236                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2237                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2238                 tw32_f(MAC_MODE, tp->mac_mode);
2239                 udelay(40);
2240
2241                 ap->state = ANEG_STATE_ABILITY_DETECT;
2242                 break;
2243
2244         case ANEG_STATE_ABILITY_DETECT:
2245                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2246                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2247                 }
2248                 break;
2249
2250         case ANEG_STATE_ACK_DETECT_INIT:
2251                 ap->txconfig |= ANEG_CFG_ACK;
2252                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2253                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2254                 tw32_f(MAC_MODE, tp->mac_mode);
2255                 udelay(40);
2256
2257                 ap->state = ANEG_STATE_ACK_DETECT;
2258
2259                 /* fallthru */
2260         case ANEG_STATE_ACK_DETECT:
2261                 if (ap->ack_match != 0) {
2262                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2263                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2264                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2265                         } else {
2266                                 ap->state = ANEG_STATE_AN_ENABLE;
2267                         }
2268                 } else if (ap->ability_match != 0 &&
2269                            ap->rxconfig == 0) {
2270                         ap->state = ANEG_STATE_AN_ENABLE;
2271                 }
2272                 break;
2273
2274         case ANEG_STATE_COMPLETE_ACK_INIT:
2275                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2276                         ret = ANEG_FAILED;
2277                         break;
2278                 }
2279                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2280                                MR_LP_ADV_HALF_DUPLEX |
2281                                MR_LP_ADV_SYM_PAUSE |
2282                                MR_LP_ADV_ASYM_PAUSE |
2283                                MR_LP_ADV_REMOTE_FAULT1 |
2284                                MR_LP_ADV_REMOTE_FAULT2 |
2285                                MR_LP_ADV_NEXT_PAGE |
2286                                MR_TOGGLE_RX |
2287                                MR_NP_RX);
2288                 if (ap->rxconfig & ANEG_CFG_FD)
2289                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2290                 if (ap->rxconfig & ANEG_CFG_HD)
2291                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2292                 if (ap->rxconfig & ANEG_CFG_PS1)
2293                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2294                 if (ap->rxconfig & ANEG_CFG_PS2)
2295                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2296                 if (ap->rxconfig & ANEG_CFG_RF1)
2297                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2298                 if (ap->rxconfig & ANEG_CFG_RF2)
2299                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2300                 if (ap->rxconfig & ANEG_CFG_NP)
2301                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2302
2303                 ap->link_time = ap->cur_time;
2304
2305                 ap->flags ^= (MR_TOGGLE_TX);
2306                 if (ap->rxconfig & 0x0008)
2307                         ap->flags |= MR_TOGGLE_RX;
2308                 if (ap->rxconfig & ANEG_CFG_NP)
2309                         ap->flags |= MR_NP_RX;
2310                 ap->flags |= MR_PAGE_RX;
2311
2312                 ap->state = ANEG_STATE_COMPLETE_ACK;
2313                 ret = ANEG_TIMER_ENAB;
2314                 break;
2315
2316         case ANEG_STATE_COMPLETE_ACK:
2317                 if (ap->ability_match != 0 &&
2318                     ap->rxconfig == 0) {
2319                         ap->state = ANEG_STATE_AN_ENABLE;
2320                         break;
2321                 }
2322                 delta = ap->cur_time - ap->link_time;
2323                 if (delta > ANEG_STATE_SETTLE_TIME) {
2324                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2325                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2326                         } else {
2327                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2328                                     !(ap->flags & MR_NP_RX)) {
2329                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2330                                 } else {
2331                                         ret = ANEG_FAILED;
2332                                 }
2333                         }
2334                 }
2335                 break;
2336
2337         case ANEG_STATE_IDLE_DETECT_INIT:
2338                 ap->link_time = ap->cur_time;
2339                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2340                 tw32_f(MAC_MODE, tp->mac_mode);
2341                 udelay(40);
2342
2343                 ap->state = ANEG_STATE_IDLE_DETECT;
2344                 ret = ANEG_TIMER_ENAB;
2345                 break;
2346
2347         case ANEG_STATE_IDLE_DETECT:
2348                 if (ap->ability_match != 0 &&
2349                     ap->rxconfig == 0) {
2350                         ap->state = ANEG_STATE_AN_ENABLE;
2351                         break;
2352                 }
2353                 delta = ap->cur_time - ap->link_time;
2354                 if (delta > ANEG_STATE_SETTLE_TIME) {
2355                         /* XXX another gem from the Broadcom driver :( */
2356                         ap->state = ANEG_STATE_LINK_OK;
2357                 }
2358                 break;
2359
2360         case ANEG_STATE_LINK_OK:
2361                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2362                 ret = ANEG_DONE;
2363                 break;
2364
2365         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2366                 /* ??? unimplemented */
2367                 break;
2368
2369         case ANEG_STATE_NEXT_PAGE_WAIT:
2370                 /* ??? unimplemented */
2371                 break;
2372
2373         default:
2374                 ret = ANEG_FAILED;
2375                 break;
2376         }
2377
2378         return ret;
2379 }
2380
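     /* Run the software autoneg state machine to completion, stepping
      * it roughly once per microsecond for up to 195000 ticks.  Returns
      * nonzero when negotiation finished with the link/completion flags
      * set; the raw MR_* flags are passed back through *flags.
      */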
2381 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2382 {
2383         int res = 0;
2384         struct tg3_fiber_aneginfo aninfo;
2385         int status = ANEG_FAILED;
2386         unsigned int tick;
2387         u32 tmp;
2388
2389         tw32_f(MAC_TX_AUTO_NEG, 0);
2390
2391         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2392         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2393         udelay(40);
2394
2395         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2396         udelay(40);
2397
2398         memset(&aninfo, 0, sizeof(aninfo));
2399         aninfo.flags |= MR_AN_ENABLE;
2400         aninfo.state = ANEG_STATE_UNKNOWN;
2401         aninfo.cur_time = 0;
2402         tick = 0;
2403         while (++tick < 195000) {
2404                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2405                 if (status == ANEG_DONE || status == ANEG_FAILED)
2406                         break;
2407
2408                 udelay(1);
2409         }
2410
2411         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2412         tw32_f(MAC_MODE, tp->mac_mode);
2413         udelay(40);
2414
2415         *flags = aninfo.flags;
2416
2417         if (status == ANEG_DONE &&
2418             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2419                              MR_LP_ADV_FULL_DUPLEX)))
2420                 res = 1;
2421
2422         return res;
2423 }
2424
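     /* Bring-up sequence for the external BCM8002 PHY: set the PLL lock
      * range, soft reset, toggle POR and wait for the signal to settle.
      * The register addresses and values are opaque vendor constants.
      */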
2425 static void tg3_init_bcm8002(struct tg3 *tp)
2426 {
2427         u32 mac_status = tr32(MAC_STATUS);
2428         int i;
2429
2430         /* Reset when initializing the first time or when we have a link. */
2431         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2432             !(mac_status & MAC_STATUS_PCS_SYNCED))
2433                 return;
2434
2435         /* Set PLL lock range. */
2436         tg3_writephy(tp, 0x16, 0x8007);
2437
2438         /* SW reset */
2439         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2440
2441         /* Wait for reset to complete. */
2442         /* XXX schedule_timeout() ... */
2443         for (i = 0; i < 500; i++)
2444                 udelay(10);
2445
2446         /* Config mode; select PMA/Ch 1 regs. */
2447         tg3_writephy(tp, 0x10, 0x8411);
2448
2449         /* Enable auto-lock and comdet, select txclk for tx. */
2450         tg3_writephy(tp, 0x11, 0x0a10);
2451
2452         tg3_writephy(tp, 0x18, 0x00a0);
2453         tg3_writephy(tp, 0x16, 0x41ff);
2454
2455         /* Assert and deassert POR. */
2456         tg3_writephy(tp, 0x13, 0x0400);
2457         udelay(40);
2458         tg3_writephy(tp, 0x13, 0x0000);
2459
2460         tg3_writephy(tp, 0x11, 0x0a50);
2461         udelay(40);
2462         tg3_writephy(tp, 0x11, 0x0a10);
2463
2464         /* Wait for signal to stabilize */
2465         /* XXX schedule_timeout() ... */
2466         for (i = 0; i < 15000; i++)
2467                 udelay(10);
2468
2469         /* Deselect the channel register so we can read the PHYID
2470          * later.
2471          */
2472         tg3_writephy(tp, 0x10, 0x8011);
2473 }
2474
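     /* Autonegotiation through the SG_DIG (SerDes digital) block.  With
      * autoneg disabled the SG_DIG config is torn down and link follows
      * PCS sync alone; otherwise the expected control word (pause bits
      * included) is programmed, a MAC_SERDES_CFG workaround is applied
      * on everything but 5704 A0/A1, and parallel detection is used as
      * a fallback when the partner sends no config words.  Returns 1
      * when the link should be considered up.
      */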
2475 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2476 {
2477         u32 sg_dig_ctrl, sg_dig_status;
2478         u32 serdes_cfg, expected_sg_dig_ctrl;
2479         int workaround, port_a;
2480         int current_link_up;
2481
2482         serdes_cfg = 0;
2483         expected_sg_dig_ctrl = 0;
2484         workaround = 0;
2485         port_a = 1;
2486         current_link_up = 0;
2487
2488         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2489             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2490                 workaround = 1;
2491                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2492                         port_a = 0;
2493
2494                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2495                 /* preserve bits 20-23 for voltage regulator */
2496                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2497         }
2498
2499         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2500
2501         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2502                 if (sg_dig_ctrl & (1 << 31)) {
2503                         if (workaround) {
2504                                 u32 val = serdes_cfg;
2505
2506                                 if (port_a)
2507                                         val |= 0xc010000;
2508                                 else
2509                                         val |= 0x4010000;
2510                                 tw32_f(MAC_SERDES_CFG, val);
2511                         }
2512                         tw32_f(SG_DIG_CTRL, 0x01388400);
2513                 }
2514                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2515                         tg3_setup_flow_control(tp, 0, 0);
2516                         current_link_up = 1;
2517                 }
2518                 goto out;
2519         }
2520
2521         /* Want auto-negotiation.  */
2522         expected_sg_dig_ctrl = 0x81388400;
2523
2524         /* Pause capability */
2525         expected_sg_dig_ctrl |= (1 << 11);
2526
2527         /* Asymmetric pause */
2528         expected_sg_dig_ctrl |= (1 << 12);
2529
2530         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2531                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2532                     tp->serdes_counter &&
2533                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2534                                     MAC_STATUS_RCVD_CFG)) ==
2535                      MAC_STATUS_PCS_SYNCED)) {
2536                         tp->serdes_counter--;
2537                         current_link_up = 1;
2538                         goto out;
2539                 }
2540 restart_autoneg:
2541                 if (workaround)
2542                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2543                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2544                 udelay(5);
2545                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2546
2547                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2548                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2549         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2550                                  MAC_STATUS_SIGNAL_DET)) {
2551                 sg_dig_status = tr32(SG_DIG_STATUS);
2552                 mac_status = tr32(MAC_STATUS);
2553
2554                 if ((sg_dig_status & (1 << 1)) &&
2555                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2556                         u32 local_adv, remote_adv;
2557
2558                         local_adv = ADVERTISE_PAUSE_CAP;
2559                         remote_adv = 0;
2560                         if (sg_dig_status & (1 << 19))
2561                                 remote_adv |= LPA_PAUSE_CAP;
2562                         if (sg_dig_status & (1 << 20))
2563                                 remote_adv |= LPA_PAUSE_ASYM;
2564
2565                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2566                         current_link_up = 1;
2567                         tp->serdes_counter = 0;
2568                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2569                 } else if (!(sg_dig_status & (1 << 1))) {
2570                         if (tp->serdes_counter)
2571                                 tp->serdes_counter--;
2572                         else {
2573                                 if (workaround) {
2574                                         u32 val = serdes_cfg;
2575
2576                                         if (port_a)
2577                                                 val |= 0xc010000;
2578                                         else
2579                                                 val |= 0x4010000;
2580
2581                                         tw32_f(MAC_SERDES_CFG, val);
2582                                 }
2583
2584                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2585                                 udelay(40);
2586
2587                                 /* Link parallel detection - link is up
2588                                  * only if we have PCS_SYNC and are not
2589                                  * receiving config code words. */
2590                                 mac_status = tr32(MAC_STATUS);
2591                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2592                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2593                                         tg3_setup_flow_control(tp, 0, 0);
2594                                         current_link_up = 1;
2595                                         tp->tg3_flags2 |=
2596                                                 TG3_FLG2_PARALLEL_DETECT;
2597                                         tp->serdes_counter =
2598                                                 SERDES_PARALLEL_DET_TIMEOUT;
2599                                 } else
2600                                         goto restart_autoneg;
2601                         }
2602                 }
2603         } else {
2604                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2605                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2606         }
2607
2608 out:
2609         return current_link_up;
2610 }
2611
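     /* Software autoneg path, used when TG3_FLG2_HW_AUTONEG is not set:
      * run fiber_autoneg() when autoneg is enabled, otherwise force a
      * 1000-full link.  A synced PCS with no incoming config words is
      * also accepted as link up (parallel detection).
      */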
2612 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2613 {
2614         int current_link_up = 0;
2615
2616         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2617                 goto out;
2618
2619         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2620                 u32 flags;
2621                 int i;
2622
2623                 if (fiber_autoneg(tp, &flags)) {
2624                         u32 local_adv, remote_adv;
2625
2626                         local_adv = ADVERTISE_PAUSE_CAP;
2627                         remote_adv = 0;
2628                         if (flags & MR_LP_ADV_SYM_PAUSE)
2629                                 remote_adv |= LPA_PAUSE_CAP;
2630                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2631                                 remote_adv |= LPA_PAUSE_ASYM;
2632
2633                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2634
2635                         current_link_up = 1;
2636                 }
2637                 for (i = 0; i < 30; i++) {
2638                         udelay(20);
2639                         tw32_f(MAC_STATUS,
2640                                (MAC_STATUS_SYNC_CHANGED |
2641                                 MAC_STATUS_CFG_CHANGED));
2642                         udelay(40);
2643                         if ((tr32(MAC_STATUS) &
2644                              (MAC_STATUS_SYNC_CHANGED |
2645                               MAC_STATUS_CFG_CHANGED)) == 0)
2646                                 break;
2647                 }
2648
2649                 mac_status = tr32(MAC_STATUS);
2650                 if (current_link_up == 0 &&
2651                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2652                     !(mac_status & MAC_STATUS_RCVD_CFG))
2653                         current_link_up = 1;
2654         } else {
2655                 /* Forcing 1000FD link up. */
2656                 current_link_up = 1;
2657
2658                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2659                 udelay(40);
2660
2661                 tw32_f(MAC_MODE, tp->mac_mode);
2662                 udelay(40);
2663         }
2664
2665 out:
2666         return current_link_up;
2667 }
2668
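     /* Top-level link setup for TBI/fiber ports: pick the hardware
      * (SG_DIG) or software autoneg path, settle the MAC status change
      * bits, drive the link LED override and report carrier or
      * flow-control changes.  Returns early when the link is already up
      * and nothing has changed.
      */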
2669 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2670 {
2671         u32 orig_pause_cfg;
2672         u16 orig_active_speed;
2673         u8 orig_active_duplex;
2674         u32 mac_status;
2675         int current_link_up;
2676         int i;
2677
2678         orig_pause_cfg =
2679                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2680                                   TG3_FLAG_TX_PAUSE));
2681         orig_active_speed = tp->link_config.active_speed;
2682         orig_active_duplex = tp->link_config.active_duplex;
2683
2684         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2685             netif_carrier_ok(tp->dev) &&
2686             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2687                 mac_status = tr32(MAC_STATUS);
2688                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2689                                MAC_STATUS_SIGNAL_DET |
2690                                MAC_STATUS_CFG_CHANGED |
2691                                MAC_STATUS_RCVD_CFG);
2692                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2693                                    MAC_STATUS_SIGNAL_DET)) {
2694                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2695                                             MAC_STATUS_CFG_CHANGED));
2696                         return 0;
2697                 }
2698         }
2699
2700         tw32_f(MAC_TX_AUTO_NEG, 0);
2701
2702         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2703         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2704         tw32_f(MAC_MODE, tp->mac_mode);
2705         udelay(40);
2706
2707         if (tp->phy_id == PHY_ID_BCM8002)
2708                 tg3_init_bcm8002(tp);
2709
2710         /* Enable link change event even when serdes polling.  */
2711         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2712         udelay(40);
2713
2714         current_link_up = 0;
2715         mac_status = tr32(MAC_STATUS);
2716
2717         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2718                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2719         else
2720                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2721
2722         tp->hw_status->status =
2723                 (SD_STATUS_UPDATED |
2724                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2725
2726         for (i = 0; i < 100; i++) {
2727                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2728                                     MAC_STATUS_CFG_CHANGED));
2729                 udelay(5);
2730                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2731                                          MAC_STATUS_CFG_CHANGED |
2732                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
2733                         break;
2734         }
2735
2736         mac_status = tr32(MAC_STATUS);
2737         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2738                 current_link_up = 0;
2739                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2740                     tp->serdes_counter == 0) {
2741                         tw32_f(MAC_MODE, (tp->mac_mode |
2742                                           MAC_MODE_SEND_CONFIGS));
2743                         udelay(1);
2744                         tw32_f(MAC_MODE, tp->mac_mode);
2745                 }
2746         }
2747
2748         if (current_link_up == 1) {
2749                 tp->link_config.active_speed = SPEED_1000;
2750                 tp->link_config.active_duplex = DUPLEX_FULL;
2751                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2752                                     LED_CTRL_LNKLED_OVERRIDE |
2753                                     LED_CTRL_1000MBPS_ON));
2754         } else {
2755                 tp->link_config.active_speed = SPEED_INVALID;
2756                 tp->link_config.active_duplex = DUPLEX_INVALID;
2757                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2758                                     LED_CTRL_LNKLED_OVERRIDE |
2759                                     LED_CTRL_TRAFFIC_OVERRIDE));
2760         }
2761
2762         if (current_link_up != netif_carrier_ok(tp->dev)) {
2763                 if (current_link_up)
2764                         netif_carrier_on(tp->dev);
2765                 else
2766                         netif_carrier_off(tp->dev);
2767                 tg3_link_report(tp);
2768         } else {
2769                 u32 now_pause_cfg =
2770                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2771                                          TG3_FLAG_TX_PAUSE);
2772                 if (orig_pause_cfg != now_pause_cfg ||
2773                     orig_active_speed != tp->link_config.active_speed ||
2774                     orig_active_duplex != tp->link_config.active_duplex)
2775                         tg3_link_report(tp);
2776         }
2777
2778         return 0;
2779 }
2780
2781 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2782 {
2783         int current_link_up, err = 0;
2784         u32 bmsr, bmcr;
2785         u16 current_speed;
2786         u8 current_duplex;
2787
2788         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2789         tw32_f(MAC_MODE, tp->mac_mode);
2790         udelay(40);
2791
2792         tw32(MAC_EVENT, 0);
2793
2794         tw32_f(MAC_STATUS,
2795              (MAC_STATUS_SYNC_CHANGED |
2796               MAC_STATUS_CFG_CHANGED |
2797               MAC_STATUS_MI_COMPLETION |
2798               MAC_STATUS_LNKSTATE_CHANGED));
2799         udelay(40);
2800
2801         if (force_reset)
2802                 tg3_phy_reset(tp);
2803
2804         current_link_up = 0;
2805         current_speed = SPEED_INVALID;
2806         current_duplex = DUPLEX_INVALID;
2807
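             /* The BMSR link-status bit is latched low; read the register
              * twice so the second read reflects the current link state.
              */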
2808         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2809         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2810         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2811                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2812                         bmsr |= BMSR_LSTATUS;
2813                 else
2814                         bmsr &= ~BMSR_LSTATUS;
2815         }
2816
2817         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2818
2819         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2820             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2821                 /* do nothing, just check for link up at the end */
2822         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2823                 u32 adv, new_adv;
2824
2825                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2826                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2827                                   ADVERTISE_1000XPAUSE |
2828                                   ADVERTISE_1000XPSE_ASYM |
2829                                   ADVERTISE_SLCT);
2830
2831                 /* Always advertise symmetric PAUSE just like copper */
2832                 new_adv |= ADVERTISE_1000XPAUSE;
2833
2834                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2835                         new_adv |= ADVERTISE_1000XHALF;
2836                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2837                         new_adv |= ADVERTISE_1000XFULL;
2838
2839                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2840                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2841                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2842                         tg3_writephy(tp, MII_BMCR, bmcr);
2843
2844                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2845                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
2846                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2847
2848                         return err;
2849                 }
2850         } else {
2851                 u32 new_bmcr;
2852
2853                 bmcr &= ~BMCR_SPEED1000;
2854                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2855
2856                 if (tp->link_config.duplex == DUPLEX_FULL)
2857                         new_bmcr |= BMCR_FULLDPLX;
2858
2859                 if (new_bmcr != bmcr) {
2860                         /* BMCR_SPEED1000 is a reserved bit that needs
2861                          * to be set on write.
2862                          */
2863                         new_bmcr |= BMCR_SPEED1000;
2864
2865                         /* Force a linkdown */
2866                         if (netif_carrier_ok(tp->dev)) {
2867                                 u32 adv;
2868
2869                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2870                                 adv &= ~(ADVERTISE_1000XFULL |
2871                                          ADVERTISE_1000XHALF |
2872                                          ADVERTISE_SLCT);
2873                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2874                                 tg3_writephy(tp, MII_BMCR, bmcr |
2875                                                            BMCR_ANRESTART |
2876                                                            BMCR_ANENABLE);
2877                                 udelay(10);
2878                                 netif_carrier_off(tp->dev);
2879                         }
2880                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2881                         bmcr = new_bmcr;
2882                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2883                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2884                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2885                             ASIC_REV_5714) {
2886                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2887                                         bmsr |= BMSR_LSTATUS;
2888                                 else
2889                                         bmsr &= ~BMSR_LSTATUS;
2890                         }
2891                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2892                 }
2893         }
2894
2895         if (bmsr & BMSR_LSTATUS) {
2896                 current_speed = SPEED_1000;
2897                 current_link_up = 1;
2898                 if (bmcr & BMCR_FULLDPLX)
2899                         current_duplex = DUPLEX_FULL;
2900                 else
2901                         current_duplex = DUPLEX_HALF;
2902
2903                 if (bmcr & BMCR_ANENABLE) {
2904                         u32 local_adv, remote_adv, common;
2905
2906                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2907                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2908                         common = local_adv & remote_adv;
2909                         if (common & (ADVERTISE_1000XHALF |
2910                                       ADVERTISE_1000XFULL)) {
2911                                 if (common & ADVERTISE_1000XFULL)
2912                                         current_duplex = DUPLEX_FULL;
2913                                 else
2914                                         current_duplex = DUPLEX_HALF;
2915
2916                                 tg3_setup_flow_control(tp, local_adv,
2917                                                        remote_adv);
2918                         }
2919                         else
2920                                 current_link_up = 0;
2921                 }
2922         }
2923
2924         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2925         if (tp->link_config.active_duplex == DUPLEX_HALF)
2926                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2927
2928         tw32_f(MAC_MODE, tp->mac_mode);
2929         udelay(40);
2930
2931         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2932
2933         tp->link_config.active_speed = current_speed;
2934         tp->link_config.active_duplex = current_duplex;
2935
2936         if (current_link_up != netif_carrier_ok(tp->dev)) {
2937                 if (current_link_up)
2938                         netif_carrier_on(tp->dev);
2939                 else {
2940                         netif_carrier_off(tp->dev);
2941                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2942                 }
2943                 tg3_link_report(tp);
2944         }
2945         return err;
2946 }
2947
2948 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2949 {
2950         if (tp->serdes_counter) {
2951                 /* Give autoneg time to complete. */
2952                 tp->serdes_counter--;
2953                 return;
2954         }
2955         if (!netif_carrier_ok(tp->dev) &&
2956             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2957                 u32 bmcr;
2958
2959                 tg3_readphy(tp, MII_BMCR, &bmcr);
2960                 if (bmcr & BMCR_ANENABLE) {
2961                         u32 phy1, phy2;
2962
2963                         /* Select shadow register 0x1f */
2964                         tg3_writephy(tp, 0x1c, 0x7c00);
2965                         tg3_readphy(tp, 0x1c, &phy1);
2966
2967                         /* Select expansion interrupt status register */
2968                         tg3_writephy(tp, 0x17, 0x0f01);
2969                         tg3_readphy(tp, 0x15, &phy2);
2970                         tg3_readphy(tp, 0x15, &phy2);
2971
2972                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2973                                 /* We have signal detect and are not receiving
2974                                  * config code words, so the link is up by
2975                                  * parallel detection.
2976                                  */
2977
2978                                 bmcr &= ~BMCR_ANENABLE;
2979                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2980                                 tg3_writephy(tp, MII_BMCR, bmcr);
2981                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2982                         }
2983                 }
2984         }
2985         else if (netif_carrier_ok(tp->dev) &&
2986                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2987                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2988                 u32 phy2;
2989
2990                 /* Select expansion interrupt status register */
2991                 tg3_writephy(tp, 0x17, 0x0f01);
2992                 tg3_readphy(tp, 0x15, &phy2);
2993                 if (phy2 & 0x20) {
2994                         u32 bmcr;
2995
2996                         /* Config code words received, turn on autoneg. */
2997                         tg3_readphy(tp, MII_BMCR, &bmcr);
2998                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2999
3000                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3001
3002                 }
3003         }
3004 }
3005
3006 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3007 {
3008         int err;
3009
3010         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3011                 err = tg3_setup_fiber_phy(tp, force_reset);
3012         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3013                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3014         } else {
3015                 err = tg3_setup_copper_phy(tp, force_reset);
3016         }
3017
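             /* Gigabit half-duplex uses carrier extension, so it needs a much
              * larger slot time than the other speed/duplex combinations.
              */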
3018         if (tp->link_config.active_speed == SPEED_1000 &&
3019             tp->link_config.active_duplex == DUPLEX_HALF)
3020                 tw32(MAC_TX_LENGTHS,
3021                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3022                       (6 << TX_LENGTHS_IPG_SHIFT) |
3023                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3024         else
3025                 tw32(MAC_TX_LENGTHS,
3026                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3027                       (6 << TX_LENGTHS_IPG_SHIFT) |
3028                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3029
3030         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3031                 if (netif_carrier_ok(tp->dev)) {
3032                         tw32(HOSTCC_STAT_COAL_TICKS,
3033                              tp->coal.stats_block_coalesce_usecs);
3034                 } else {
3035                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3036                 }
3037         }
3038
3039         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
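                     /* ASPM workaround: program the saved L1 entry threshold
                      * while the link is down, and max out the threshold field
                      * while the link is up.
                      */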
3040                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3041                 if (!netif_carrier_ok(tp->dev))
3042                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3043                               tp->pwrmgmt_thresh;
3044                 else
3045                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3046                 tw32(PCIE_PWR_MGMT_THRESH, val);
3047         }
3048
3049         return err;
3050 }
3051
3052 /* This is called whenever we suspect that the system chipset is re-
3053  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3054  * is bogus tx completions. We try to recover by setting the
3055  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3056  * in the workqueue.
3057  */
3058 static void tg3_tx_recover(struct tg3 *tp)
3059 {
3060         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3061                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3062
3063         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3064                "mapped I/O cycles to the network device, attempting to "
3065                "recover. Please report the problem to the driver maintainer "
3066                "and include system chipset information.\n", tp->dev->name);
3067
3068         spin_lock(&tp->lock);
3069         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3070         spin_unlock(&tp->lock);
3071 }
3072
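     /* Number of free TX descriptors: the configured pending count minus the
      * descriptors currently queued between the producer and consumer indices.
      * The barrier pairs with the one in tg3_tx() so a stale tx_cons is not
      * used here.
      */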
3073 static inline u32 tg3_tx_avail(struct tg3 *tp)
3074 {
3075         smp_mb();
3076         return (tp->tx_pending -
3077                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3078 }
3079
3080 /* Tigon3 never reports partial packet sends.  So we do not
3081  * need special logic to handle SKBs that have not had all
3082  * of their frags sent yet, like SunGEM does.
3083  */
3084 static void tg3_tx(struct tg3 *tp)
3085 {
3086         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3087         u32 sw_idx = tp->tx_cons;
3088
3089         while (sw_idx != hw_idx) {
3090                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3091                 struct sk_buff *skb = ri->skb;
3092                 int i, tx_bug = 0;
3093
3094                 if (unlikely(skb == NULL)) {
3095                         tg3_tx_recover(tp);
3096                         return;
3097                 }
3098
3099                 pci_unmap_single(tp->pdev,
3100                                  pci_unmap_addr(ri, mapping),
3101                                  skb_headlen(skb),
3102                                  PCI_DMA_TODEVICE);
3103
3104                 ri->skb = NULL;
3105
3106                 sw_idx = NEXT_TX(sw_idx);
3107
3108                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3109                         ri = &tp->tx_buffers[sw_idx];
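                             /* Fragment slots never own an skb, and the
                              * hardware consumer index should never land in
                              * the middle of a packet; either case means the
                              * completions are bogus.
                              */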
3110                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3111                                 tx_bug = 1;
3112
3113                         pci_unmap_page(tp->pdev,
3114                                        pci_unmap_addr(ri, mapping),
3115                                        skb_shinfo(skb)->frags[i].size,
3116                                        PCI_DMA_TODEVICE);
3117
3118                         sw_idx = NEXT_TX(sw_idx);
3119                 }
3120
3121                 dev_kfree_skb(skb);
3122
3123                 if (unlikely(tx_bug)) {
3124                         tg3_tx_recover(tp);
3125                         return;
3126                 }
3127         }
3128
3129         tp->tx_cons = sw_idx;
3130
3131         /* Need to make the tx_cons update visible to tg3_start_xmit()
3132          * before checking for netif_queue_stopped().  Without the
3133          * memory barrier, there is a small possibility that tg3_start_xmit()
3134          * will miss it and cause the queue to be stopped forever.
3135          */
3136         smp_mb();
3137
3138         if (unlikely(netif_queue_stopped(tp->dev) &&
3139                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3140                 netif_tx_lock(tp->dev);
3141                 if (netif_queue_stopped(tp->dev) &&
3142                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3143                         netif_wake_queue(tp->dev);
3144                 netif_tx_unlock(tp->dev);
3145         }
3146 }
3147
3148 /* Returns size of skb allocated or < 0 on error.
3149  *
3150  * We only need to fill in the address because the other members
3151  * of the RX descriptor are invariant, see tg3_init_rings.
3152  *
3153  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3154  * posting buffers we only dirty the first cache line of the RX
3155  * descriptor (containing the address).  Whereas for the RX status
3156  * buffers the cpu only reads the last cacheline of the RX descriptor
3157  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3158  */
3159 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3160                             int src_idx, u32 dest_idx_unmasked)
3161 {
3162         struct tg3_rx_buffer_desc *desc;
3163         struct ring_info *map, *src_map;
3164         struct sk_buff *skb;
3165         dma_addr_t mapping;
3166         int skb_size, dest_idx;
3167
3168         src_map = NULL;
3169         switch (opaque_key) {
3170         case RXD_OPAQUE_RING_STD:
3171                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3172                 desc = &tp->rx_std[dest_idx];
3173                 map = &tp->rx_std_buffers[dest_idx];
3174                 if (src_idx >= 0)
3175                         src_map = &tp->rx_std_buffers[src_idx];
3176                 skb_size = tp->rx_pkt_buf_sz;
3177                 break;
3178
3179         case RXD_OPAQUE_RING_JUMBO:
3180                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3181                 desc = &tp->rx_jumbo[dest_idx];
3182                 map = &tp->rx_jumbo_buffers[dest_idx];
3183                 if (src_idx >= 0)
3184                         src_map = &tp->rx_jumbo_buffers[src_idx];
3185                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3186                 break;
3187
3188         default:
3189                 return -EINVAL;
3190         }
3191
3192         /* Do not overwrite any of the map or rp information
3193          * until we are sure we can commit to a new buffer.
3194          *
3195          * Callers depend upon this behavior and assume that
3196          * we leave everything unchanged if we fail.
3197          */
3198         skb = netdev_alloc_skb(tp->dev, skb_size);
3199         if (skb == NULL)
3200                 return -ENOMEM;
3201
3202         skb_reserve(skb, tp->rx_offset);
3203
3204         mapping = pci_map_single(tp->pdev, skb->data,
3205                                  skb_size - tp->rx_offset,
3206                                  PCI_DMA_FROMDEVICE);
3207
3208         map->skb = skb;
3209         pci_unmap_addr_set(map, mapping, mapping);
3210
3211         if (src_map != NULL)
3212                 src_map->skb = NULL;
3213
3214         desc->addr_hi = ((u64)mapping >> 32);
3215         desc->addr_lo = ((u64)mapping & 0xffffffff);
3216
3217         return skb_size;
3218 }
3219
3220 /* We only need to copy over the address because the other
3221  * members of the RX descriptor are invariant.  See notes above
3222  * tg3_alloc_rx_skb for full details.
3223  */
3224 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3225                            int src_idx, u32 dest_idx_unmasked)
3226 {
3227         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3228         struct ring_info *src_map, *dest_map;
3229         int dest_idx;
3230
3231         switch (opaque_key) {
3232         case RXD_OPAQUE_RING_STD:
3233                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3234                 dest_desc = &tp->rx_std[dest_idx];
3235                 dest_map = &tp->rx_std_buffers[dest_idx];
3236                 src_desc = &tp->rx_std[src_idx];
3237                 src_map = &tp->rx_std_buffers[src_idx];
3238                 break;
3239
3240         case RXD_OPAQUE_RING_JUMBO:
3241                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3242                 dest_desc = &tp->rx_jumbo[dest_idx];
3243                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3244                 src_desc = &tp->rx_jumbo[src_idx];
3245                 src_map = &tp->rx_jumbo_buffers[src_idx];
3246                 break;
3247
3248         default:
3249                 return;
3250         }
3251
3252         dest_map->skb = src_map->skb;
3253         pci_unmap_addr_set(dest_map, mapping,
3254                            pci_unmap_addr(src_map, mapping));
3255         dest_desc->addr_hi = src_desc->addr_hi;
3256         dest_desc->addr_lo = src_desc->addr_lo;
3257
3258         src_map->skb = NULL;
3259 }
3260
3261 #if TG3_VLAN_TAG_USED
3262 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3263 {
3264         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3265 }
3266 #endif
3267
3268 /* The RX ring scheme is composed of multiple rings which post fresh
3269  * buffers to the chip, and one special ring the chip uses to report
3270  * status back to the host.
3271  *
3272  * The special ring reports the status of received packets to the
3273  * host.  The chip does not write into the original descriptor the
3274  * RX buffer was obtained from.  The chip simply takes the original
3275  * descriptor as provided by the host, updates the status and length
3276  * field, then writes this into the next status ring entry.
3277  *
3278  * Each ring the host uses to post buffers to the chip is described
3279  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3280  * it is first placed into the on-chip ram.  When the packet's length
3281  * is known, it walks down the TG3_BDINFO entries to select the ring.
3282  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3283  * whose MAXLEN covers the new packet's length is chosen.
3284  *
3285  * The "separate ring for rx status" scheme may sound queer, but it makes
3286  * sense from a cache coherency perspective.  If only the host writes
3287  * to the buffer post rings, and only the chip writes to the rx status
3288  * rings, then cache lines never move beyond shared-modified state.
3289  * If both the host and chip were to write into the same ring, cache line
3290  * eviction could occur since both entities want it in an exclusive state.
3291  */
3292 static int tg3_rx(struct tg3 *tp, int budget)
3293 {
3294         u32 work_mask, rx_std_posted = 0;
3295         u32 sw_idx = tp->rx_rcb_ptr;
3296         u16 hw_idx;
3297         int received;
3298
3299         hw_idx = tp->hw_status->idx[0].rx_producer;
3300         /*
3301          * We need to order the read of hw_idx and the read of
3302          * the opaque cookie.
3303          */
3304         rmb();
3305         work_mask = 0;
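             /* work_mask collects the opaque ring keys (std/jumbo) of the
              * rings we consume buffers from, so the refill code at the bottom
              * knows which producer mailboxes to update.
              */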
3306         received = 0;
3307         while (sw_idx != hw_idx && budget > 0) {
3308                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3309                 unsigned int len;
3310                 struct sk_buff *skb;
3311                 dma_addr_t dma_addr;
3312                 u32 opaque_key, desc_idx, *post_ptr;
3313
3314                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3315                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3316                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3317                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3318                                                   mapping);
3319                         skb = tp->rx_std_buffers[desc_idx].skb;
3320                         post_ptr = &tp->rx_std_ptr;
3321                         rx_std_posted++;
3322                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3323                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3324                                                   mapping);
3325                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3326                         post_ptr = &tp->rx_jumbo_ptr;
3327                 }
3328                 else {
3329                         goto next_pkt_nopost;
3330                 }
3331
3332                 work_mask |= opaque_key;
3333
3334                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3335                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3336                 drop_it:
3337                         tg3_recycle_rx(tp, opaque_key,
3338                                        desc_idx, *post_ptr);
3339                 drop_it_no_recycle:
3340                         /* Other statistics are kept track of by the card. */
3341                         tp->net_stats.rx_dropped++;
3342                         goto next_pkt;
3343                 }
3344
3345                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3346
3347                 if (len > RX_COPY_THRESHOLD
3348                         && tp->rx_offset == 2
3349                         /* rx_offset != 2 iff this is a 5701 card running
3350                          * in PCI-X mode [see tg3_get_invariants()] */
3351                 ) {
3352                         int skb_size;
3353
3354                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3355                                                     desc_idx, *post_ptr);
3356                         if (skb_size < 0)
3357                                 goto drop_it;
3358
3359                         pci_unmap_single(tp->pdev, dma_addr,
3360                                          skb_size - tp->rx_offset,
3361                                          PCI_DMA_FROMDEVICE);
3362
3363                         skb_put(skb, len);
3364                 } else {
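                             /* Copy path (small packets, or a 5701 in PCI-X
                              * mode): leave the original, already-mapped
                              * buffer on the ring and hand a fresh copy up
                              * the stack.
                              */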
3365                         struct sk_buff *copy_skb;
3366
3367                         tg3_recycle_rx(tp, opaque_key,
3368                                        desc_idx, *post_ptr);
3369
3370                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3371                         if (copy_skb == NULL)
3372                                 goto drop_it_no_recycle;
3373
3374                         skb_reserve(copy_skb, 2);
3375                         skb_put(copy_skb, len);
3376                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3377                         skb_copy_from_linear_data(skb, copy_skb->data, len);
3378                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3379
3380                         /* We'll reuse the original ring buffer. */
3381                         skb = copy_skb;
3382                 }
3383
3384                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3385                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3386                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3387                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3388                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3389                 else
3390                         skb->ip_summed = CHECKSUM_NONE;
3391
3392                 skb->protocol = eth_type_trans(skb, tp->dev);
3393 #if TG3_VLAN_TAG_USED
3394                 if (tp->vlgrp != NULL &&
3395                     desc->type_flags & RXD_FLAG_VLAN) {
3396                         tg3_vlan_rx(tp, skb,
3397                                     desc->err_vlan & RXD_VLAN_MASK);
3398                 } else
3399 #endif
3400                         netif_receive_skb(skb);
3401
3402                 tp->dev->last_rx = jiffies;
3403                 received++;
3404                 budget--;
3405
3406 next_pkt:
3407                 (*post_ptr)++;
3408
3409                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3410                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3411
3412                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3413                                      TG3_64BIT_REG_LOW, idx);
3414                         work_mask &= ~RXD_OPAQUE_RING_STD;
3415                         rx_std_posted = 0;
3416                 }
3417 next_pkt_nopost:
3418                 sw_idx++;
3419                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
3420
3421                 /* Refresh hw_idx to see if there is new work */
3422                 if (sw_idx == hw_idx) {
3423                         hw_idx = tp->hw_status->idx[0].rx_producer;
3424                         rmb();
3425                 }
3426         }
3427
3428         /* ACK the status ring. */
3429         tp->rx_rcb_ptr = sw_idx;
3430         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3431
3432         /* Refill RX ring(s). */
3433         if (work_mask & RXD_OPAQUE_RING_STD) {
3434                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3435                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3436                              sw_idx);
3437         }
3438         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3439                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3440                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3441                              sw_idx);
3442         }
3443         mmiowb();
3444
3445         return received;
3446 }
3447
3448 static int tg3_poll(struct net_device *netdev, int *budget)
3449 {
3450         struct tg3 *tp = netdev_priv(netdev);
3451         struct tg3_hw_status *sblk = tp->hw_status;
3452         int done;
3453
3454         /* handle link change and other phy events */
3455         if (!(tp->tg3_flags &
3456               (TG3_FLAG_USE_LINKCHG_REG |
3457                TG3_FLAG_POLL_SERDES))) {
3458                 if (sblk->status & SD_STATUS_LINK_CHG) {
3459                         sblk->status = SD_STATUS_UPDATED |
3460                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3461                         spin_lock(&tp->lock);
3462                         tg3_setup_phy(tp, 0);
3463                         spin_unlock(&tp->lock);
3464                 }
3465         }
3466
3467         /* run TX completion thread */
3468         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3469                 tg3_tx(tp);
3470                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3471                         netif_rx_complete(netdev);
3472                         schedule_work(&tp->reset_task);
3473                         return 0;
3474                 }
3475         }
3476
3477         /* run RX thread, within the bounds set by NAPI.
3478          * All RX "locking" is done by ensuring outside
3479          * code synchronizes with dev->poll()
3480          */
3481         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3482                 int orig_budget = *budget;
3483                 int work_done;
3484
3485                 if (orig_budget > netdev->quota)
3486                         orig_budget = netdev->quota;
3487
3488                 work_done = tg3_rx(tp, orig_budget);
3489
3490                 *budget -= work_done;
3491                 netdev->quota -= work_done;
3492         }
3493
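             /* With tagged status blocks, remember the tag we just serviced;
              * tg3_restart_ints() writes it back to the interrupt mailbox to
              * acknowledge exactly this much work.
              */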
3494         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3495                 tp->last_tag = sblk->status_tag;
3496                 rmb();
3497         } else
3498                 sblk->status &= ~SD_STATUS_UPDATED;
3499
3500         /* if no more work, tell net stack and NIC we're done */
3501         done = !tg3_has_work(tp);
3502         if (done) {
3503                 netif_rx_complete(netdev);
3504                 tg3_restart_ints(tp);
3505         }
3506
3507         return (done ? 0 : 1);
3508 }
3509
3510 static void tg3_irq_quiesce(struct tg3 *tp)
3511 {
3512         BUG_ON(tp->irq_sync);
3513
3514         tp->irq_sync = 1;
3515         smp_mb();
3516
3517         synchronize_irq(tp->pdev->irq);
3518 }
3519
3520 static inline int tg3_irq_sync(struct tg3 *tp)
3521 {
3522         return tp->irq_sync;
3523 }
3524
3525 /* Fully shut down all tg3 driver activity elsewhere in the system.
3526  * If irq_sync is non-zero, then we also synchronize with the IRQ
3527  * handler.  Most of the time, this is not necessary except when
3528  * shutting down the device.
3529  */
3530 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3531 {
3532         spin_lock_bh(&tp->lock);
3533         if (irq_sync)
3534                 tg3_irq_quiesce(tp);
3535 }
3536
3537 static inline void tg3_full_unlock(struct tg3 *tp)
3538 {
3539         spin_unlock_bh(&tp->lock);
3540 }
3541
3542 /* One-shot MSI handler - the chip automatically disables the interrupt
3543  * after sending the MSI, so the driver doesn't have to do it.
3544  */
3545 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3546 {
3547         struct net_device *dev = dev_id;
3548         struct tg3 *tp = netdev_priv(dev);
3549
3550         prefetch(tp->hw_status);
3551         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3552
3553         if (likely(!tg3_irq_sync(tp)))
3554                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3555
3556         return IRQ_HANDLED;
3557 }
3558
3559 /* MSI ISR - No need to check for interrupt sharing and no need to
3560  * flush status block and interrupt mailbox. PCI ordering rules
3561  * guarantee that MSI will arrive after the status block.
3562  */
3563 static irqreturn_t tg3_msi(int irq, void *dev_id)
3564 {
3565         struct net_device *dev = dev_id;
3566         struct tg3 *tp = netdev_priv(dev);
3567
3568         prefetch(tp->hw_status);
3569         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3570         /*
3571          * Writing any value to intr-mbox-0 clears PCI INTA# and
3572          * chip-internal interrupt pending events.
3573          * Writing non-zero to intr-mbox-0 additionally tells the
3574          * NIC to stop sending us irqs, engaging "in-intr-handler"
3575          * event coalescing.
3576          */
3577         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3578         if (likely(!tg3_irq_sync(tp)))
3579                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3580
3581         return IRQ_RETVAL(1);
3582 }
3583
3584 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
3585 {
3586         struct net_device *dev = dev_id;
3587         struct tg3 *tp = netdev_priv(dev);
3588         struct tg3_hw_status *sblk = tp->hw_status;
3589         unsigned int handled = 1;
3590
3591         /* In INTx mode, it is possible for the interrupt to arrive at
3592          * the CPU before the status block that was posted prior to it.
3593          * Reading the PCI State register will confirm whether the
3594          * interrupt is ours and will flush the status block.
3595          */
3596         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
3597                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3598                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3599                         handled = 0;
3600                         goto out;
3601                 }
3602         }
3603
3604         /*
3605          * Writing any value to intr-mbox-0 clears PCI INTA# and
3606          * chip-internal interrupt pending events.
3607          * Writing non-zero to intr-mbox-0 additionally tells the
3608          * NIC to stop sending us irqs, engaging "in-intr-handler"
3609          * event coalescing.
3610          *
3611          * Flush the mailbox to de-assert the IRQ immediately to prevent
3612          * spurious interrupts.  The flush impacts performance but
3613          * excessive spurious interrupts can be worse in some cases.
3614          */
3615         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3616         if (tg3_irq_sync(tp))
3617                 goto out;
3618         sblk->status &= ~SD_STATUS_UPDATED;
3619         if (likely(tg3_has_work(tp))) {
3620                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3621                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3622         } else {
3623                 /* No work, shared interrupt perhaps?  Re-enable
3624                  * interrupts, and flush that PCI write.
3625                  */
3626                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3627                                0x00000000);
3628         }
3629 out:
3630         return IRQ_RETVAL(handled);
3631 }
3632
3633 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
3634 {
3635         struct net_device *dev = dev_id;
3636         struct tg3 *tp = netdev_priv(dev);
3637         struct tg3_hw_status *sblk = tp->hw_status;
3638         unsigned int handled = 1;
3639
3640         /* In INTx mode, it is possible for the interrupt to arrive at
3641          * the CPU before the status block that was posted prior to it.
3642          * Reading the PCI State register will confirm whether the
3643          * interrupt is ours and will flush the status block.
3644          */
3645         if (unlikely(sblk->status_tag == tp->last_tag)) {
3646                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3647                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3648                         handled = 0;
3649                         goto out;
3650                 }
3651         }
3652
3653         /*
3654          * Writing any value to intr-mbox-0 clears PCI INTA# and
3655          * chip-internal interrupt pending events.
3656          * Writing non-zero to intr-mbox-0 additionally tells the
3657          * NIC to stop sending us irqs, engaging "in-intr-handler"
3658          * event coalescing.
3659          *
3660          * Flush the mailbox to de-assert the IRQ immediately to prevent
3661          * spurious interrupts.  The flush impacts performance but
3662          * excessive spurious interrupts can be worse in some cases.
3663          */
3664         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3665         if (tg3_irq_sync(tp))
3666                 goto out;
3667         if (netif_rx_schedule_prep(dev)) {
3668                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3669                 /* Update last_tag to mark that this status has been
3670                  * seen. Because the interrupt may be shared, we may be
3671                  * racing with tg3_poll(), so only update last_tag
3672                  * if tg3_poll() is not scheduled.
3673                  */
3674                 tp->last_tag = sblk->status_tag;
3675                 __netif_rx_schedule(dev);
3676         }
3677 out:
3678         return IRQ_RETVAL(handled);
3679 }
3680
3681 /* ISR for interrupt test */
3682 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3683 {
3684         struct net_device *dev = dev_id;
3685         struct tg3 *tp = netdev_priv(dev);
3686         struct tg3_hw_status *sblk = tp->hw_status;
3687
3688         if ((sblk->status & SD_STATUS_UPDATED) ||
3689             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3690                 tg3_disable_ints(tp);
3691                 return IRQ_RETVAL(1);
3692         }
3693         return IRQ_RETVAL(0);
3694 }
3695
3696 static int tg3_init_hw(struct tg3 *, int);
3697 static int tg3_halt(struct tg3 *, int, int);
3698
3699 /* Restart hardware after configuration changes, self-test, etc.
3700  * Invoked with tp->lock held.
3701  */
3702 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3703 {
3704         int err;
3705
3706         err = tg3_init_hw(tp, reset_phy);
3707         if (err) {
3708                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3709                        "aborting.\n", tp->dev->name);
3710                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3711                 tg3_full_unlock(tp);
3712                 del_timer_sync(&tp->timer);
3713                 tp->irq_sync = 0;
3714                 netif_poll_enable(tp->dev);
3715                 dev_close(tp->dev);
3716                 tg3_full_lock(tp, 0);
3717         }
3718         return err;
3719 }
3720
3721 #ifdef CONFIG_NET_POLL_CONTROLLER
3722 static void tg3_poll_controller(struct net_device *dev)
3723 {
3724         struct tg3 *tp = netdev_priv(dev);
3725
3726         tg3_interrupt(tp->pdev->irq, dev);
3727 }
3728 #endif
3729
3730 static void tg3_reset_task(struct work_struct *work)
3731 {
3732         struct tg3 *tp = container_of(work, struct tg3, reset_task);
3733         unsigned int restart_timer;
3734
3735         tg3_full_lock(tp, 0);
3736
3737         if (!netif_running(tp->dev)) {
3738                 tg3_full_unlock(tp);
3739                 return;
3740         }
3741
3742         tg3_full_unlock(tp);
3743
3744         tg3_netif_stop(tp);
3745
3746         tg3_full_lock(tp, 1);
3747
3748         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3749         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3750
3751         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3752                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3753                 tp->write32_rx_mbox = tg3_write_flush_reg32;
3754                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3755                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3756         }
3757
3758         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3759         if (tg3_init_hw(tp, 1))
3760                 goto out;
3761
3762         tg3_netif_start(tp);
3763
3764         if (restart_timer)
3765                 mod_timer(&tp->timer, jiffies + 1);
3766
3767 out:
3768         tg3_full_unlock(tp);
3769 }
3770
3771 static void tg3_dump_short_state(struct tg3 *tp)
3772 {
3773         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
3774                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
3775         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
3776                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
3777 }
3778
3779 static void tg3_tx_timeout(struct net_device *dev)
3780 {
3781         struct tg3 *tp = netdev_priv(dev);
3782
3783         if (netif_msg_tx_err(tp)) {
3784                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3785                        dev->name);
3786                 tg3_dump_short_state(tp);
3787         }
3788
3789         schedule_work(&tp->reset_task);
3790 }
3791
3792 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3793 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3794 {
3795         u32 base = (u32) mapping & 0xffffffff;
3796
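             /* (base + len + 8) wrapping below base means the buffer runs into
              * a 4GB boundary; the first comparison is just a cheap early-out
              * for buffers that start well below a boundary.
              */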
3797         return ((base > 0xffffdcc0) &&
3798                 (base + len + 8 < base));
3799 }
3800
3801 /* Test for DMA addresses > 40-bit */
3802 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3803                                           int len)
3804 {
3805 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3806         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3807                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3808         return 0;
3809 #else
3810         return 0;
3811 #endif
3812 }
3813
3814 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3815
3816 /* Work around the 4GB and 40-bit hardware DMA bugs. */
3817 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3818                                        u32 last_plus_one, u32 *start,
3819                                        u32 base_flags, u32 mss)
3820 {
3821         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3822         dma_addr_t new_addr = 0;
3823         u32 entry = *start;
3824         int i, ret = 0;
3825
3826         if (!new_skb) {
3827                 ret = -1;
3828         } else {
3829                 /* New SKB is guaranteed to be linear. */
3830                 entry = *start;
3831                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3832                                           PCI_DMA_TODEVICE);
3833                 /* Make sure new skb does not cross any 4G boundaries.
3834                  * Drop the packet if it does.
3835                  */
3836                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3837                         ret = -1;
3838                         dev_kfree_skb(new_skb);
3839                         new_skb = NULL;
3840                 } else {
3841                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3842                                     base_flags, 1 | (mss << 1));
3843                         *start = NEXT_TX(entry);
3844                 }
3845         }
3846
3847         /* Now clean up the sw ring entries. */
3848         i = 0;
3849         while (entry != last_plus_one) {
3850                 int len;
3851
3852                 if (i == 0)
3853                         len = skb_headlen(skb);
3854                 else
3855                         len = skb_shinfo(skb)->frags[i-1].size;
3856                 pci_unmap_single(tp->pdev,
3857                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3858                                  len, PCI_DMA_TODEVICE);
3859                 if (i == 0) {
3860                         tp->tx_buffers[entry].skb = new_skb;
3861                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3862                 } else {
3863                         tp->tx_buffers[entry].skb = NULL;
3864                 }
3865                 entry = NEXT_TX(entry);
3866                 i++;
3867         }
3868
3869         dev_kfree_skb(skb);
3870
3871         return ret;
3872 }
3873
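     /* Fill in one hardware TX descriptor.  The low bit of mss_and_is_end
      * marks the last descriptor of a packet; the remaining bits carry the
      * MSS, which shares the descriptor's vlan_tag word.
      */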
3874 static void tg3_set_txd(struct tg3 *tp, int entry,
3875                         dma_addr_t mapping, int len, u32 flags,
3876                         u32 mss_and_is_end)
3877 {
3878         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3879         int is_end = (mss_and_is_end & 0x1);
3880         u32 mss = (mss_and_is_end >> 1);
3881         u32 vlan_tag = 0;
3882
3883         if (is_end)
3884                 flags |= TXD_FLAG_END;
3885         if (flags & TXD_FLAG_VLAN) {
3886                 vlan_tag = flags >> 16;
3887                 flags &= 0xffff;
3888         }
3889         vlan_tag |= (mss << TXD_MSS_SHIFT);
3890
3891         txd->addr_hi = ((u64) mapping >> 32);
3892         txd->addr_lo = ((u64) mapping & 0xffffffff);
3893         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3894         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3895 }
3896
3897 /* hard_start_xmit for devices that don't have any bugs and
3898  * support TG3_FLG2_HW_TSO_2 only.
3899  */
3900 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3901 {
3902         struct tg3 *tp = netdev_priv(dev);
3903         dma_addr_t mapping;
3904         u32 len, entry, base_flags, mss;
3905
3906         len = skb_headlen(skb);
3907
3908         /* We are running in BH disabled context with netif_tx_lock
3909          * and TX reclaim runs via tp->poll inside of a software
3910          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3911          * no IRQ context deadlocks to worry about either.  Rejoice!
3912          */
3913         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3914                 if (!netif_queue_stopped(dev)) {
3915                         netif_stop_queue(dev);
3916
3917                         /* This is a hard error, log it. */
3918                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3919                                "queue awake!\n", dev->name);
3920                 }
3921                 return NETDEV_TX_BUSY;
3922         }
3923
3924         entry = tp->tx_prod;
3925         base_flags = 0;
3926         mss = 0;
3927         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
3928                 int tcp_opt_len, ip_tcp_len;
3929
3930                 if (skb_header_cloned(skb) &&
3931                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3932                         dev_kfree_skb(skb);
3933                         goto out_unlock;
3934                 }
3935
3936                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
3937                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
3938                 else {
3939                         struct iphdr *iph = ip_hdr(skb);
3940
3941                         tcp_opt_len = tcp_optlen(skb);
3942                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
3943
3944                         iph->check = 0;
3945                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3946                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
3947                 }
3948
3949                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3950                                TXD_FLAG_CPU_POST_DMA);
3951
3952                 tcp_hdr(skb)->check = 0;
3953
3954         }
3955         else if (skb->ip_summed == CHECKSUM_PARTIAL)
3956                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3957 #if TG3_VLAN_TAG_USED
3958         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3959                 base_flags |= (TXD_FLAG_VLAN |
3960                                (vlan_tx_tag_get(skb) << 16));
3961 #endif
3962
3963         /* Queue skb data, a.k.a. the main skb fragment. */
3964         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3965
3966         tp->tx_buffers[entry].skb = skb;
3967         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3968
3969         tg3_set_txd(tp, entry, mapping, len, base_flags,
3970                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3971
3972         entry = NEXT_TX(entry);
3973
3974         /* Now loop through additional data fragments, and queue them. */
3975         if (skb_shinfo(skb)->nr_frags > 0) {
3976                 unsigned int i, last;
3977
3978                 last = skb_shinfo(skb)->nr_frags - 1;
3979                 for (i = 0; i <= last; i++) {
3980                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3981
3982                         len = frag->size;
3983                         mapping = pci_map_page(tp->pdev,
3984                                                frag->page,
3985                                                frag->page_offset,
3986                                                len, PCI_DMA_TODEVICE);
3987
3988                         tp->tx_buffers[entry].skb = NULL;
3989                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3990
3991                         tg3_set_txd(tp, entry, mapping, len,
3992                                     base_flags, (i == last) | (mss << 1));
3993
3994                         entry = NEXT_TX(entry);
3995                 }
3996         }
3997
3998         /* Packets are ready, update Tx producer idx local and on card. */
3999         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4000
4001         tp->tx_prod = entry;
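             /* Stop the queue when the ring is nearly full, then re-check in
              * case tg3_tx() freed enough descriptors in the meantime.
              */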
4002         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4003                 netif_stop_queue(dev);
4004                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4005                         netif_wake_queue(tp->dev);
4006         }
4007
4008 out_unlock:
4009         mmiowb();
4010
4011         dev->trans_start = jiffies;
4012
4013         return NETDEV_TX_OK;
4014 }
4015
4016 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4017
4018 /* Use GSO to work around a rare TSO bug that may be triggered when the
4019  * TSO header is greater than 80 bytes.
4020  */
4021 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4022 {
4023         struct sk_buff *segs, *nskb;
4024
4025         /* Estimate the number of fragments in the worst case */
4026         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4027                 netif_stop_queue(tp->dev);
4028                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4029                         return NETDEV_TX_BUSY;
4030
4031                 netif_wake_queue(tp->dev);
4032         }
4033
4034         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4035         if (unlikely(IS_ERR(segs)))
4036                 goto tg3_tso_bug_end;
4037
4038         do {
4039                 nskb = segs;
4040                 segs = segs->next;
4041                 nskb->next = NULL;
4042                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4043         } while (segs);
4044
4045 tg3_tso_bug_end:
4046         dev_kfree_skb(skb);
4047
4048         return NETDEV_TX_OK;
4049 }
4050
4051 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4052  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4053  */
4054 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4055 {
4056         struct tg3 *tp = netdev_priv(dev);
4057         dma_addr_t mapping;
4058         u32 len, entry, base_flags, mss;
4059         int would_hit_hwbug;
4060
4061         len = skb_headlen(skb);
4062
4063         /* We are running in BH disabled context with netif_tx_lock
4064          * and TX reclaim runs via tp->poll inside of a software
4065          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4066          * no IRQ context deadlocks to worry about either.  Rejoice!
4067          */
4068         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4069                 if (!netif_queue_stopped(dev)) {
4070                         netif_stop_queue(dev);
4071
4072                         /* This is a hard error, log it. */
4073                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4074                                "queue awake!\n", dev->name);
4075                 }
4076                 return NETDEV_TX_BUSY;
4077         }
4078
4079         entry = tp->tx_prod;
4080         base_flags = 0;
4081         if (skb->ip_summed == CHECKSUM_PARTIAL)
4082                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4083         mss = 0;
4084         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4085                 struct iphdr *iph;
4086                 int tcp_opt_len, ip_tcp_len, hdr_len;
4087
4088                 if (skb_header_cloned(skb) &&
4089                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4090                         dev_kfree_skb(skb);
4091                         goto out_unlock;
4092                 }
4093
4094                 tcp_opt_len = tcp_optlen(skb);
4095                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4096
4097                 hdr_len = ip_tcp_len + tcp_opt_len;
4098                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4099                              (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4100                         return tg3_tso_bug(tp, skb);
4101
4102                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4103                                TXD_FLAG_CPU_POST_DMA);
4104
4105                 iph = ip_hdr(skb);
4106                 iph->check = 0;
4107                 iph->tot_len = htons(mss + hdr_len);
4108                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4109                         tcp_hdr(skb)->check = 0;
4110                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4111                 } else
4112                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4113                                                                  iph->daddr, 0,
4114                                                                  IPPROTO_TCP,
4115                                                                  0);
4116
4117                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4118                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4119                         if (tcp_opt_len || iph->ihl > 5) {
4120                                 int tsflags;
4121
4122                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4123                                 mss |= (tsflags << 11);
4124                         }
4125                 } else {
4126                         if (tcp_opt_len || iph->ihl > 5) {
4127                                 int tsflags;
4128
4129                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4130                                 base_flags |= tsflags << 12;
4131                         }
4132                 }
4133         }
4134 #if TG3_VLAN_TAG_USED
4135         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4136                 base_flags |= (TXD_FLAG_VLAN |
4137                                (vlan_tx_tag_get(skb) << 16));
4138 #endif
4139
4140         /* Queue skb data, a.k.a. the main skb fragment. */
4141         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4142
4143         tp->tx_buffers[entry].skb = skb;
4144         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4145
4146         would_hit_hwbug = 0;
4147
4148         if (tg3_4g_overflow_test(mapping, len))
4149                 would_hit_hwbug = 1;
4150
4151         tg3_set_txd(tp, entry, mapping, len, base_flags,
4152                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4153
4154         entry = NEXT_TX(entry);
4155
4156         /* Now loop through additional data fragments, and queue them. */
4157         if (skb_shinfo(skb)->nr_frags > 0) {
4158                 unsigned int i, last;
4159
4160                 last = skb_shinfo(skb)->nr_frags - 1;
4161                 for (i = 0; i <= last; i++) {
4162                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4163
4164                         len = frag->size;
4165                         mapping = pci_map_page(tp->pdev,
4166                                                frag->page,
4167                                                frag->page_offset,
4168                                                len, PCI_DMA_TODEVICE);
4169
4170                         tp->tx_buffers[entry].skb = NULL;
4171                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4172
4173                         if (tg3_4g_overflow_test(mapping, len))
4174                                 would_hit_hwbug = 1;
4175
4176                         if (tg3_40bit_overflow_test(tp, mapping, len))
4177                                 would_hit_hwbug = 1;
4178
4179                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4180                                 tg3_set_txd(tp, entry, mapping, len,
4181                                             base_flags, (i == last)|(mss << 1));
4182                         else
4183                                 tg3_set_txd(tp, entry, mapping, len,
4184                                             base_flags, (i == last));
4185
4186                         entry = NEXT_TX(entry);
4187                 }
4188         }
4189
4190         if (would_hit_hwbug) {
4191                 u32 last_plus_one = entry;
4192                 u32 start;
4193
4194                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4195                 start &= (TG3_TX_RING_SIZE - 1);
4196
4197                 /* If the workaround fails due to memory/mapping
4198                  * failure, silently drop this packet.
4199                  */
4200                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4201                                                 &start, base_flags, mss))
4202                         goto out_unlock;
4203
4204                 entry = start;
4205         }
4206
4207         /* Packets are ready, update the Tx producer idx locally and on the card. */
4208         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4209
4210         tp->tx_prod = entry;
4211         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4212                 netif_stop_queue(dev);
4213                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4214                         netif_wake_queue(tp->dev);
4215         }
4216
4217 out_unlock:
4218         mmiowb();
4219
4220         dev->trans_start = jiffies;
4221
4222         return NETDEV_TX_OK;
4223 }
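/*
 * The would_hit_hwbug path above exists because some tg3 chips cannot
 * DMA a buffer whose bus address range crosses a 4GB boundary (and the
 * 40-bit variant has a similar limit).  tg3_4g_overflow_test() and
 * tigon3_dma_hwbug_workaround() are defined elsewhere in this file; the
 * sketch below only illustrates the idea of the 4GB test and may differ
 * from the real helper in detail.
 */
#if 0	/* illustrative sketch, not part of the driver */
static inline int example_4g_crossing(dma_addr_t mapping, int len)
{
	u32 base = (u32) (mapping & 0xffffffff);

	/* If the low 32 bits wrap, the buffer straddles a 4GB boundary. */
	return (base + len) < base;
}
#endif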
4224
4225 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4226                                int new_mtu)
4227 {
4228         dev->mtu = new_mtu;
4229
4230         if (new_mtu > ETH_DATA_LEN) {
4231                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4232                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4233                         ethtool_op_set_tso(dev, 0);
4234                 }
4235                 else
4236                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4237         } else {
4238                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4239                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4240                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4241         }
4242 }
4243
4244 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4245 {
4246         struct tg3 *tp = netdev_priv(dev);
4247         int err;
4248
4249         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4250                 return -EINVAL;
4251
4252         if (!netif_running(dev)) {
4253                 /* We'll just catch it later when the
4254                  * device is brought up.
4255                  */
4256                 tg3_set_mtu(dev, tp, new_mtu);
4257                 return 0;
4258         }
4259
4260         tg3_netif_stop(tp);
4261
4262         tg3_full_lock(tp, 1);
4263
4264         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4265
4266         tg3_set_mtu(dev, tp, new_mtu);
4267
4268         err = tg3_restart_hw(tp, 0);
4269
4270         if (!err)
4271                 tg3_netif_start(tp);
4272
4273         tg3_full_unlock(tp);
4274
4275         return err;
4276 }
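/*
 * Note: changing the MTU on a running interface is a full restart.
 * tg3_netif_stop() quiesces the data path, tg3_halt() shuts the chip
 * down, tg3_set_mtu() flips the jumbo-ring and (on 5780-class parts)
 * TSO-capable flags, and tg3_restart_hw() brings the chip back up with
 * the new buffer sizing.  If the interface is down, only the software
 * state is updated and the new value takes effect at the next open.
 */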
4277
4278 /* Free up pending packets in all rx/tx rings.
4279  *
4280  * The chip has been shut down and the driver detached from
4281  * the networking, so no interrupts or new tx packets will
4282  * end up in the driver.  tp->{tx,}lock is not held and we are not
4283  * in an interrupt context and thus may sleep.
4284  */
4285 static void tg3_free_rings(struct tg3 *tp)
4286 {
4287         struct ring_info *rxp;
4288         int i;
4289
4290         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4291                 rxp = &tp->rx_std_buffers[i];
4292
4293                 if (rxp->skb == NULL)
4294                         continue;
4295                 pci_unmap_single(tp->pdev,
4296                                  pci_unmap_addr(rxp, mapping),
4297                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4298                                  PCI_DMA_FROMDEVICE);
4299                 dev_kfree_skb_any(rxp->skb);
4300                 rxp->skb = NULL;
4301         }
4302
4303         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4304                 rxp = &tp->rx_jumbo_buffers[i];
4305
4306                 if (rxp->skb == NULL)
4307                         continue;
4308                 pci_unmap_single(tp->pdev,
4309                                  pci_unmap_addr(rxp, mapping),
4310                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4311                                  PCI_DMA_FROMDEVICE);
4312                 dev_kfree_skb_any(rxp->skb);
4313                 rxp->skb = NULL;
4314         }
4315
4316         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4317                 struct tx_ring_info *txp;
4318                 struct sk_buff *skb;
4319                 int j;
4320
4321                 txp = &tp->tx_buffers[i];
4322                 skb = txp->skb;
4323
4324                 if (skb == NULL) {
4325                         i++;
4326                         continue;
4327                 }
4328
4329                 pci_unmap_single(tp->pdev,
4330                                  pci_unmap_addr(txp, mapping),
4331                                  skb_headlen(skb),
4332                                  PCI_DMA_TODEVICE);
4333                 txp->skb = NULL;
4334
4335                 i++;
4336
4337                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4338                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4339                         pci_unmap_page(tp->pdev,
4340                                        pci_unmap_addr(txp, mapping),
4341                                        skb_shinfo(skb)->frags[j].size,
4342                                        PCI_DMA_TODEVICE);
4343                         i++;
4344                 }
4345
4346                 dev_kfree_skb_any(skb);
4347         }
4348 }
4349
4350 /* Initialize tx/rx rings for packet processing.
4351  *
4352  * The chip has been shut down and the driver detached from
4353  * the networking, so no interrupts or new tx packets will
4354  * the networking stack, so no interrupts or new tx packets will
4355  * we may not sleep.
4356  */
4357 static int tg3_init_rings(struct tg3 *tp)
4358 {
4359         u32 i;
4360
4361         /* Free up all the SKBs. */
4362         tg3_free_rings(tp);
4363
4364         /* Zero out all descriptors. */
4365         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4366         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4367         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4368         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4369
4370         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4371         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4372             (tp->dev->mtu > ETH_DATA_LEN))
4373                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4374
4375         /* Initialize invariants of the rings; we only set this
4376          * stuff once.  This works because the card does not
4377          * write into the rx buffer posting rings.
4378          */
4379         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4380                 struct tg3_rx_buffer_desc *rxd;
4381
4382                 rxd = &tp->rx_std[i];
4383                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4384                         << RXD_LEN_SHIFT;
4385                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4386                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4387                                (i << RXD_OPAQUE_INDEX_SHIFT));
4388         }
4389
4390         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4391                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4392                         struct tg3_rx_buffer_desc *rxd;
4393
4394                         rxd = &tp->rx_jumbo[i];
4395                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4396                                 << RXD_LEN_SHIFT;
4397                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4398                                 RXD_FLAG_JUMBO;
4399                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4400                                (i << RXD_OPAQUE_INDEX_SHIFT));
4401                 }
4402         }
4403
4404         /* Now allocate fresh SKBs for each rx ring. */
4405         for (i = 0; i < tp->rx_pending; i++) {
4406                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4407                         printk(KERN_WARNING PFX
4408                                "%s: Using a smaller RX standard ring, "
4409                                "only %d out of %d buffers were allocated "
4410                                "successfully.\n",
4411                                tp->dev->name, i, tp->rx_pending);
4412                         if (i == 0)
4413                                 return -ENOMEM;
4414                         tp->rx_pending = i;
4415                         break;
4416                 }
4417         }
4418
4419         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4420                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4421                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4422                                              -1, i) < 0) {
4423                                 printk(KERN_WARNING PFX
4424                                        "%s: Using a smaller RX jumbo ring, "
4425                                        "only %d out of %d buffers were "
4426                                        "allocated successfully.\n",
4427                                        tp->dev->name, i, tp->rx_jumbo_pending);
4428                                 if (i == 0) {
4429                                         tg3_free_rings(tp);
4430                                         return -ENOMEM;
4431                                 }
4432                                 tp->rx_jumbo_pending = i;
4433                                 break;
4434                         }
4435                 }
4436         }
4437         return 0;
4438 }
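/*
 * Note: only the "invariant" descriptor fields (length, flags and the
 * opaque ring/index cookie) are written here, because the hardware
 * never writes into the RX producer rings.  The actual buffers come
 * from tg3_alloc_rx_skb(); if that runs out of memory partway through,
 * the pending count is simply shrunk to what was allocated, and the
 * call only fails outright when not a single buffer could be posted.
 */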
4439
4440 /*
4441  * Must not be invoked with interrupt sources disabled and
4442  * the hardware shut down.
4443  */
4444 static void tg3_free_consistent(struct tg3 *tp)
4445 {
4446         kfree(tp->rx_std_buffers);
4447         tp->rx_std_buffers = NULL;
4448         if (tp->rx_std) {
4449                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4450                                     tp->rx_std, tp->rx_std_mapping);
4451                 tp->rx_std = NULL;
4452         }
4453         if (tp->rx_jumbo) {
4454                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4455                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4456                 tp->rx_jumbo = NULL;
4457         }
4458         if (tp->rx_rcb) {
4459                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4460                                     tp->rx_rcb, tp->rx_rcb_mapping);
4461                 tp->rx_rcb = NULL;
4462         }
4463         if (tp->tx_ring) {
4464                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4465                         tp->tx_ring, tp->tx_desc_mapping);
4466                 tp->tx_ring = NULL;
4467         }
4468         if (tp->hw_status) {
4469                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4470                                     tp->hw_status, tp->status_mapping);
4471                 tp->hw_status = NULL;
4472         }
4473         if (tp->hw_stats) {
4474                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4475                                     tp->hw_stats, tp->stats_mapping);
4476                 tp->hw_stats = NULL;
4477         }
4478 }
4479
4480 /*
4481  * Must not be invoked with interrupt sources disabled and
4482  * the hardware shut down.  Can sleep.
4483  */
4484 static int tg3_alloc_consistent(struct tg3 *tp)
4485 {
4486         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4487                                       (TG3_RX_RING_SIZE +
4488                                        TG3_RX_JUMBO_RING_SIZE)) +
4489                                      (sizeof(struct tx_ring_info) *
4490                                       TG3_TX_RING_SIZE),
4491                                      GFP_KERNEL);
4492         if (!tp->rx_std_buffers)
4493                 return -ENOMEM;
4494
4495         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4496         tp->tx_buffers = (struct tx_ring_info *)
4497                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4498
4499         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4500                                           &tp->rx_std_mapping);
4501         if (!tp->rx_std)
4502                 goto err_out;
4503
4504         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4505                                             &tp->rx_jumbo_mapping);
4506
4507         if (!tp->rx_jumbo)
4508                 goto err_out;
4509
4510         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4511                                           &tp->rx_rcb_mapping);
4512         if (!tp->rx_rcb)
4513                 goto err_out;
4514
4515         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4516                                            &tp->tx_desc_mapping);
4517         if (!tp->tx_ring)
4518                 goto err_out;
4519
4520         tp->hw_status = pci_alloc_consistent(tp->pdev,
4521                                              TG3_HW_STATUS_SIZE,
4522                                              &tp->status_mapping);
4523         if (!tp->hw_status)
4524                 goto err_out;
4525
4526         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4527                                             sizeof(struct tg3_hw_stats),
4528                                             &tp->stats_mapping);
4529         if (!tp->hw_stats)
4530                 goto err_out;
4531
4532         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4533         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4534
4535         return 0;
4536
4537 err_out:
4538         tg3_free_consistent(tp);
4539         return -ENOMEM;
4540 }
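/*
 * Note: one kzalloc() provides all of the software bookkeeping
 * (rx_std_buffers, rx_jumbo_buffers and tx_buffers are carved out of
 * the same allocation), while the descriptor rings, the status block
 * and the statistics block each get their own DMA-coherent allocation.
 * Any failure unwinds through tg3_free_consistent(), which skips
 * whatever was never allocated.
 */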
4541
4542 #define MAX_WAIT_CNT 1000
4543
4544 /* To stop a block, clear the enable bit and poll till it
4545  * clears.  tp->lock is held.
4546  */
4547 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4548 {
4549         unsigned int i;
4550         u32 val;
4551
4552         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4553                 switch (ofs) {
4554                 case RCVLSC_MODE:
4555                 case DMAC_MODE:
4556                 case MBFREE_MODE:
4557                 case BUFMGR_MODE:
4558                 case MEMARB_MODE:
4559                         /* We can't enable/disable these bits of the
4560                          * 5705/5750, so just report success.
4561                          */
4562                         return 0;
4563
4564                 default:
4565                         break;
4566                 }
4567         }
4568
4569         val = tr32(ofs);
4570         val &= ~enable_bit;
4571         tw32_f(ofs, val);
4572
4573         for (i = 0; i < MAX_WAIT_CNT; i++) {
4574                 udelay(100);
4575                 val = tr32(ofs);
4576                 if ((val & enable_bit) == 0)
4577                         break;
4578         }
4579
4580         if (i == MAX_WAIT_CNT && !silent) {
4581                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4582                        "ofs=%lx enable_bit=%x\n",
4583                        ofs, enable_bit);
4584                 return -ENODEV;
4585         }
4586
4587         return 0;
4588 }
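/*
 * Note on timing: the poll above checks every 100us for up to
 * MAX_WAIT_CNT (1000) iterations, i.e. roughly 100ms per block, before
 * declaring -ENODEV.  On 5705-and-newer parts a handful of blocks
 * cannot be individually disabled at all, so the function just reports
 * success for those.
 */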
4589
4590 /* tp->lock is held. */
4591 static int tg3_abort_hw(struct tg3 *tp, int silent)
4592 {
4593         int i, err;
4594
4595         tg3_disable_ints(tp);
4596
4597         tp->rx_mode &= ~RX_MODE_ENABLE;
4598         tw32_f(MAC_RX_MODE, tp->rx_mode);
4599         udelay(10);
4600
4601         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4602         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4603         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4604         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4605         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4606         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4607
4608         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4609         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4610         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4611         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4612         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4613         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4614         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4615
4616         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4617         tw32_f(MAC_MODE, tp->mac_mode);
4618         udelay(40);
4619
4620         tp->tx_mode &= ~TX_MODE_ENABLE;
4621         tw32_f(MAC_TX_MODE, tp->tx_mode);
4622
4623         for (i = 0; i < MAX_WAIT_CNT; i++) {
4624                 udelay(100);
4625                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4626                         break;
4627         }
4628         if (i >= MAX_WAIT_CNT) {
4629                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4630                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4631                        tp->dev->name, tr32(MAC_TX_MODE));
4632                 err |= -ENODEV;
4633         }
4634
4635         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4636         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4637         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4638
4639         tw32(FTQ_RESET, 0xffffffff);
4640         tw32(FTQ_RESET, 0x00000000);
4641
4642         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4643         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4644
4645         if (tp->hw_status)
4646                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4647         if (tp->hw_stats)
4648                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4649
4650         return err;
4651 }
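/*
 * Note: tg3_abort_hw() tears the data path down in dependency order:
 * the receive-side blocks first, then the send and DMA engines, then
 * the MAC transmitter (with its own polled wait), and finally the host
 * coalescing engine, write DMA, buffer manager and memory arbiter.
 * Individual stop failures are OR-ed into err, so a caller passing
 * "silent" can ignore blocks that refuse to stop during a shutdown.
 */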
4652
4653 /* tp->lock is held. */
4654 static int tg3_nvram_lock(struct tg3 *tp)
4655 {
4656         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4657                 int i;
4658
4659                 if (tp->nvram_lock_cnt == 0) {
4660                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4661                         for (i = 0; i < 8000; i++) {
4662                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4663                                         break;
4664                                 udelay(20);
4665                         }
4666                         if (i == 8000) {
4667                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4668                                 return -ENODEV;
4669                         }
4670                 }
4671                 tp->nvram_lock_cnt++;
4672         }
4673         return 0;
4674 }
4675
4676 /* tp->lock is held. */
4677 static void tg3_nvram_unlock(struct tg3 *tp)
4678 {
4679         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4680                 if (tp->nvram_lock_cnt > 0)
4681                         tp->nvram_lock_cnt--;
4682                 if (tp->nvram_lock_cnt == 0)
4683                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4684         }
4685 }
4686
4687 /* tp->lock is held. */
4688 static void tg3_enable_nvram_access(struct tg3 *tp)
4689 {
4690         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4691             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4692                 u32 nvaccess = tr32(NVRAM_ACCESS);
4693
4694                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4695         }
4696 }
4697
4698 /* tp->lock is held. */
4699 static void tg3_disable_nvram_access(struct tg3 *tp)
4700 {
4701         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4702             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4703                 u32 nvaccess = tr32(NVRAM_ACCESS);
4704
4705                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4706         }
4707 }
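/*
 * The four helpers above form the NVRAM access protocol: the SWARB
 * register implements a hardware request/grant semaphore, and
 * nvram_lock_cnt lets nested lock/unlock pairs touch the hardware only
 * on the outermost calls.  A hypothetical caller (holding tp->lock)
 * would use them roughly as sketched below.
 */
#if 0	/* usage sketch only; not part of the driver */
	if (tg3_nvram_lock(tp) == 0) {
		tg3_enable_nvram_access(tp);
		/* ... issue NVRAM commands / read data words here ... */
		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}
#endif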
4708
4709 /* tp->lock is held. */
4710 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4711 {
4712         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4713                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4714
4715         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4716                 switch (kind) {
4717                 case RESET_KIND_INIT:
4718                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4719                                       DRV_STATE_START);
4720                         break;
4721
4722                 case RESET_KIND_SHUTDOWN:
4723                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4724                                       DRV_STATE_UNLOAD);
4725                         break;
4726
4727                 case RESET_KIND_SUSPEND:
4728                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4729                                       DRV_STATE_SUSPEND);
4730                         break;
4731
4732                 default:
4733                         break;
4734                 }
4735         }
4736 }
4737
4738 /* tp->lock is held. */
4739 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4740 {
4741         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4742                 switch (kind) {
4743                 case RESET_KIND_INIT:
4744                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4745                                       DRV_STATE_START_DONE);
4746                         break;
4747
4748                 case RESET_KIND_SHUTDOWN:
4749                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4750                                       DRV_STATE_UNLOAD_DONE);
4751                         break;
4752
4753                 default:
4754                         break;
4755                 }
4756         }
4757 }
4758
4759 /* tp->lock is held. */
4760 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4761 {
4762         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4763                 switch (kind) {
4764                 case RESET_KIND_INIT:
4765                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4766                                       DRV_STATE_START);
4767                         break;
4768
4769                 case RESET_KIND_SHUTDOWN:
4770                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4771                                       DRV_STATE_UNLOAD);
4772                         break;
4773
4774                 case RESET_KIND_SUSPEND:
4775                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4776                                       DRV_STATE_SUSPEND);
4777                         break;
4778
4779                 default:
4780                         break;
4781                 }
4782         }
4783 }
4784
4785 static int tg3_poll_fw(struct tg3 *tp)
4786 {
4787         int i;
4788         u32 val;
4789
4790         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4791                 /* Wait up to 20ms for init done. */
4792                 for (i = 0; i < 200; i++) {
4793                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
4794                                 return 0;
4795                         udelay(100);
4796                 }
4797                 return -ENODEV;
4798         }
4799
4800         /* Wait for firmware initialization to complete. */
4801         for (i = 0; i < 100000; i++) {
4802                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4803                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4804                         break;
4805                 udelay(10);
4806         }
4807
4808         /* Chip might not be fitted with firmware.  Some Sun onboard
4809          * parts are configured like that.  So don't signal the timeout
4810          * of the above loop as an error, but do report the lack of
4811          * running firmware once.
4812          */
4813         if (i >= 100000 &&
4814             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4815                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4816
4817                 printk(KERN_INFO PFX "%s: No firmware running.\n",
4818                        tp->dev->name);
4819         }
4820
4821         return 0;
4822 }
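/*
 * Note: this is the other half of the handshake started in
 * tg3_write_sig_pre_reset().  The driver writes MAGIC1 into the
 * firmware mailbox before the reset and the bootcode writes back its
 * one's complement when it has finished, so the loop above waits up to
 * 100000 * 10us = 1 second for that value.  The 5906 uses a VCPU
 * status bit instead (up to 20ms).  A missing firmware, as on some Sun
 * onboard parts, is reported once but is not treated as an error.
 */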
4823
4824 static void tg3_stop_fw(struct tg3 *);
4825
4826 /* tp->lock is held. */
4827 static int tg3_chip_reset(struct tg3 *tp)
4828 {
4829         u32 val;
4830         void (*write_op)(struct tg3 *, u32, u32);
4831         int err;
4832
4833         tg3_nvram_lock(tp);
4834
4835         /* No matching tg3_nvram_unlock() after this because
4836          * chip reset below will undo the nvram lock.
4837          */
4838         tp->nvram_lock_cnt = 0;
4839
4840         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4841             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4842             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4843                 tw32(GRC_FASTBOOT_PC, 0);
4844
4845         /*
4846          * We must avoid the readl() that normally takes place.
4847          * It locks machines, causes machine checks, and does other
4848          * fun things.  So, temporarily disable the 5701
4849          * hardware workaround while we do the reset.
4850          */
4851         write_op = tp->write32;
4852         if (write_op == tg3_write_flush_reg32)
4853                 tp->write32 = tg3_write32;
4854
4855         /* Prevent the irq handler from reading or writing PCI registers
4856          * during chip reset when the memory enable bit in the PCI command
4857          * register may be cleared.  The chip does not generate interrupt
4858          * at this time, but the irq handler may still be called due to irq
4859          * sharing or irqpoll.
4860          */
4861         tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
4862         if (tp->hw_status) {
4863                 tp->hw_status->status = 0;
4864                 tp->hw_status->status_tag = 0;
4865         }
4866         tp->last_tag = 0;
4867         smp_mb();
4868         synchronize_irq(tp->pdev->irq);
4869
4870         /* do the reset */
4871         val = GRC_MISC_CFG_CORECLK_RESET;
4872
4873         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4874                 if (tr32(0x7e2c) == 0x60) {
4875                         tw32(0x7e2c, 0x20);
4876                 }
4877                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4878                         tw32(GRC_MISC_CFG, (1 << 29));
4879                         val |= (1 << 29);
4880                 }
4881         }
4882
4883         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4884                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
4885                 tw32(GRC_VCPU_EXT_CTRL,
4886                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
4887         }
4888
4889         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4890                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4891         tw32(GRC_MISC_CFG, val);
4892
4893         /* restore 5701 hardware bug workaround write method */
4894         tp->write32 = write_op;
4895
4896         /* Unfortunately, we have to delay before the PCI read back.
4897          * Some 575X chips will not even respond to a PCI cfg access
4898          * when the reset command is given to the chip.
4899          *
4900          * How do these hardware designers expect things to work
4901          * properly if the PCI write is posted for a long period
4902          * of time?  It is always necessary to have some method by
4903          * which a register read back can occur to push out the
4904          * write that does the reset.
4905          *
4906          * For most tg3 variants the trick below works.
4907          * Ho hum...
4908          */
4909         udelay(120);
4910
4911         /* Flush PCI posted writes.  The normal MMIO registers
4912          * are inaccessible at this time so this is the only
4913          * way to do this reliably (actually, this is no longer
4914          * the case, see above).  I tried to use indirect
4915          * register read/write but this upset some 5701 variants.
4916          */
4917         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4918
4919         udelay(120);
4920
4921         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4922                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4923                         int i;
4924                         u32 cfg_val;
4925
4926                         /* Wait for link training to complete.  */
4927                         for (i = 0; i < 5000; i++)
4928                                 udelay(100);
4929
4930                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4931                         pci_write_config_dword(tp->pdev, 0xc4,
4932                                                cfg_val | (1 << 15));
4933                 }
4934                 /* Set PCIE max payload size and clear error status.  */
4935                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4936         }
4937
4938         /* Re-enable indirect register accesses. */
4939         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4940                                tp->misc_host_ctrl);
4941
4942         /* Set MAX PCI retry to zero. */
4943         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4944         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4945             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4946                 val |= PCISTATE_RETRY_SAME_DMA;
4947         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4948
4949         pci_restore_state(tp->pdev);
4950
4951         tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
4952
4953         /* Make sure PCI-X relaxed ordering bit is clear. */
4954         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4955         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4956         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4957
4958         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4959                 u32 val;
4960
4961                 /* Chip reset on 5780 will reset MSI enable bit,
4962                  * so we need to restore it.
4963                  */
4964                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4965                         u16 ctrl;
4966
4967                         pci_read_config_word(tp->pdev,
4968                                              tp->msi_cap + PCI_MSI_FLAGS,
4969                                              &ctrl);
4970                         pci_write_config_word(tp->pdev,
4971                                               tp->msi_cap + PCI_MSI_FLAGS,
4972                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4973                         val = tr32(MSGINT_MODE);
4974                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4975                 }
4976
4977                 val = tr32(MEMARB_MODE);
4978                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4979
4980         } else
4981                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4982
4983         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4984                 tg3_stop_fw(tp);
4985                 tw32(0x5000, 0x400);
4986         }
4987
4988         tw32(GRC_MODE, tp->grc_mode);
4989
4990         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4991                 u32 val = tr32(0xc4);
4992
4993                 tw32(0xc4, val | (1 << 15));
4994         }
4995
4996         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4997             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4998                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4999                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5000                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5001                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5002         }
5003
5004         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5005                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5006                 tw32_f(MAC_MODE, tp->mac_mode);
5007         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5008                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5009                 tw32_f(MAC_MODE, tp->mac_mode);
5010         } else
5011                 tw32_f(MAC_MODE, 0);
5012         udelay(40);
5013
5014         err = tg3_poll_fw(tp);
5015         if (err)
5016                 return err;
5017
5018         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5019             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5020                 u32 val = tr32(0x7c00);
5021
5022                 tw32(0x7c00, val | (1 << 25));
5023         }
5024
5025         /* Reprobe ASF enable state.  */
5026         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5027         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5028         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5029         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5030                 u32 nic_cfg;
5031
5032                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5033                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5034                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5035                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5036                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5037                 }
5038         }
5039
5040         return 0;
5041 }
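/*
 * Rough shape of the reset sequence above: take the NVRAM lock (the
 * reset itself releases it), temporarily drop the 5701 write-flush
 * workaround so the core-clock reset write is not read back, fence the
 * irq handler off with TG3_FLAG_CHIP_RESETTING, issue the
 * GRC_MISC_CFG_CORECLK_RESET, wait and do a PCI config read to push
 * the posted write, then restore PCI/MSI/memory-arbiter state, wait
 * for bootcode via tg3_poll_fw() and re-probe the ASF configuration
 * from NVRAM-shadowed SRAM.
 */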
5042
5043 /* tp->lock is held. */
5044 static void tg3_stop_fw(struct tg3 *tp)
5045 {
5046         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5047                 u32 val;
5048                 int i;
5049
5050                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5051                 val = tr32(GRC_RX_CPU_EVENT);
5052                 val |= (1 << 14);
5053                 tw32(GRC_RX_CPU_EVENT, val);
5054
5055                 /* Wait for RX cpu to ACK the event.  */
5056                 for (i = 0; i < 100; i++) {
5057                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5058                                 break;
5059                         udelay(1);
5060                 }
5061         }
5062 }
5063
5064 /* tp->lock is held. */
5065 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5066 {
5067         int err;
5068
5069         tg3_stop_fw(tp);
5070
5071         tg3_write_sig_pre_reset(tp, kind);
5072
5073         tg3_abort_hw(tp, silent);
5074         err = tg3_chip_reset(tp);
5075
5076         tg3_write_sig_legacy(tp, kind);
5077         tg3_write_sig_post_reset(tp, kind);
5078
5079         if (err)
5080                 return err;
5081
5082         return 0;
5083 }
5084
5085 #define TG3_FW_RELEASE_MAJOR    0x0
5086 #define TG3_FW_RELASE_MINOR     0x0
5087 #define TG3_FW_RELEASE_FIX      0x0
5088 #define TG3_FW_START_ADDR       0x08000000
5089 #define TG3_FW_TEXT_ADDR        0x08000000
5090 #define TG3_FW_TEXT_LEN         0x9c0
5091 #define TG3_FW_RODATA_ADDR      0x080009c0
5092 #define TG3_FW_RODATA_LEN       0x60
5093 #define TG3_FW_DATA_ADDR        0x08000a40
5094 #define TG3_FW_DATA_LEN         0x20
5095 #define TG3_FW_SBSS_ADDR        0x08000a60
5096 #define TG3_FW_SBSS_LEN         0xc
5097 #define TG3_FW_BSS_ADDR         0x08000a70
5098 #define TG3_FW_BSS_LEN          0x10
5099
5100 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5101         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5102         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5103         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5104         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5105         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5106         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5107         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5108         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5109         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5110         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5111         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5112         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5113         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5114         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5115         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5116         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5117         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5118         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5119         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5120         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5121         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5122         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5123         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5124         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5125         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5126         0, 0, 0, 0, 0, 0,
5127         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5128         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5129         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5130         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5131         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5132         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5133         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5134         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5135         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5136         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5137         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5138         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5139         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5140         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5141         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5142         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5143         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5144         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5145         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5146         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5147         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5148         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5149         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5150         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5151         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5152         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5153         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5154         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5155         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5156         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5157         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5158         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5159         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5160         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5161         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5162         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5163         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5164         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5165         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5166         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5167         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5168         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5169         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5170         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5171         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5172         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5173         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5174         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5175         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5176         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5177         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5178         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5179         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5180         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5181         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5182         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5183         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5184         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5185         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5186         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5187         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5188         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5189         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5190         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5191         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5192 };
5193
5194 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5195         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5196         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5197         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5198         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5199         0x00000000
5200 };
5201
5202 #if 0 /* All zeros, don't eat up space with it. */
5203 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5204         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5205         0x00000000, 0x00000000, 0x00000000, 0x00000000
5206 };
5207 #endif
5208
5209 #define RX_CPU_SCRATCH_BASE     0x30000
5210 #define RX_CPU_SCRATCH_SIZE     0x04000
5211 #define TX_CPU_SCRATCH_BASE     0x34000
5212 #define TX_CPU_SCRATCH_SIZE     0x04000
5213
5214 /* tp->lock is held. */
5215 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5216 {
5217         int i;
5218
5219         BUG_ON(offset == TX_CPU_BASE &&
5220             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5221
5222         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5223                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5224
5225                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5226                 return 0;
5227         }
5228         if (offset == RX_CPU_BASE) {
5229                 for (i = 0; i < 10000; i++) {
5230                         tw32(offset + CPU_STATE, 0xffffffff);
5231                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5232                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5233                                 break;
5234                 }
5235
5236                 tw32(offset + CPU_STATE, 0xffffffff);
5237                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5238                 udelay(10);
5239         } else {
5240                 for (i = 0; i < 10000; i++) {
5241                         tw32(offset + CPU_STATE, 0xffffffff);
5242                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5243                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5244                                 break;
5245                 }
5246         }
5247
5248         if (i >= 10000) {
5249                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
5250                        "%s CPU\n",
5251                        tp->dev->name,
5252                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5253                 return -ENODEV;
5254         }
5255
5256         /* Clear firmware's nvram arbitration. */
5257         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5258                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5259         return 0;
5260 }
5261
5262 struct fw_info {
5263         unsigned int text_base;
5264         unsigned int text_len;
5265         const u32 *text_data;
5266         unsigned int rodata_base;
5267         unsigned int rodata_len;
5268         const u32 *rodata_data;
5269         unsigned int data_base;
5270         unsigned int data_len;
5271         const u32 *data_data;
5272 };
5273
5274 /* tp->lock is held. */
5275 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5276                                  int cpu_scratch_size, struct fw_info *info)
5277 {
5278         int err, lock_err, i;
5279         void (*write_op)(struct tg3 *, u32, u32);
5280
5281         if (cpu_base == TX_CPU_BASE &&
5282             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5283                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5284                        "TX cpu firmware on %s which is 5705.\n",
5285                        tp->dev->name);
5286                 return -EINVAL;
5287         }
5288
5289         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5290                 write_op = tg3_write_mem;
5291         else
5292                 write_op = tg3_write_indirect_reg32;
5293
5294         /* It is possible that bootcode is still loading at this point.
5295          * Get the nvram lock before halting the cpu.
5296          */
5297         lock_err = tg3_nvram_lock(tp);
5298         err = tg3_halt_cpu(tp, cpu_base);
5299         if (!lock_err)
5300                 tg3_nvram_unlock(tp);
5301         if (err)
5302                 goto out;
5303
5304         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5305                 write_op(tp, cpu_scratch_base + i, 0);
5306         tw32(cpu_base + CPU_STATE, 0xffffffff);
5307         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5308         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5309                 write_op(tp, (cpu_scratch_base +
5310                               (info->text_base & 0xffff) +
5311                               (i * sizeof(u32))),
5312                          (info->text_data ?
5313                           info->text_data[i] : 0));
5314         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5315                 write_op(tp, (cpu_scratch_base +
5316                               (info->rodata_base & 0xffff) +
5317                               (i * sizeof(u32))),
5318                          (info->rodata_data ?
5319                           info->rodata_data[i] : 0));
5320         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5321                 write_op(tp, (cpu_scratch_base +
5322                               (info->data_base & 0xffff) +
5323                               (i * sizeof(u32))),
5324                          (info->data_data ?
5325                           info->data_data[i] : 0));
5326
5327         err = 0;
5328
5329 out:
5330         return err;
5331 }
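/*
 * Note on the address arithmetic above: each section is placed at
 * cpu_scratch_base + (section_base & 0xffff).  With the 5701 fix
 * firmware, TG3_FW_TEXT_ADDR = 0x08000000 lands at offset 0 of the RX
 * CPU scratch area (0x30000), rodata (0x080009c0) at 0x309c0 and data
 * (0x08000a40) at 0x30a40.  A NULL data pointer means "zero fill", and
 * the CPU is halted under the NVRAM lock first because bootcode may
 * still be running from NVRAM when this is called.
 */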
5332
5333 /* tp->lock is held. */
5334 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5335 {
5336         struct fw_info info;
5337         int err, i;
5338
5339         info.text_base = TG3_FW_TEXT_ADDR;
5340         info.text_len = TG3_FW_TEXT_LEN;
5341         info.text_data = &tg3FwText[0];
5342         info.rodata_base = TG3_FW_RODATA_ADDR;
5343         info.rodata_len = TG3_FW_RODATA_LEN;
5344         info.rodata_data = &tg3FwRodata[0];
5345         info.data_base = TG3_FW_DATA_ADDR;
5346         info.data_len = TG3_FW_DATA_LEN;
5347         info.data_data = NULL;
5348
5349         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5350                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5351                                     &info);
5352         if (err)
5353                 return err;
5354
5355         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5356                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5357                                     &info);
5358         if (err)
5359                 return err;
5360
5361         /* Now start up only the RX cpu. */
5362         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5363         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5364
5365         for (i = 0; i < 5; i++) {
5366                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5367                         break;
5368                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5369                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5370                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5371                 udelay(1000);
5372         }
5373         if (i >= 5) {
5374                 printk(KERN_ERR PFX "tg3_load_firmware failed to set "
5375                        "RX CPU PC on %s, is %08x, should be %08x\n",
5376                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5377                        TG3_FW_TEXT_ADDR);
5378                 return -ENODEV;
5379         }
5380         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5381         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5382
5383         return 0;
5384 }
5385
5386
5387 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5388 #define TG3_TSO_FW_RELASE_MINOR         0x6
5389 #define TG3_TSO_FW_RELEASE_FIX          0x0
5390 #define TG3_TSO_FW_START_ADDR           0x08000000
5391 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5392 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5393 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5394 #define TG3_TSO_FW_RODATA_LEN           0x60
5395 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5396 #define TG3_TSO_FW_DATA_LEN             0x30
5397 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5398 #define TG3_TSO_FW_SBSS_LEN             0x2c
5399 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5400 #define TG3_TSO_FW_BSS_LEN              0x894
5401
5402 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5403         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5404         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5405         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5406         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5407         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5408         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5409         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5410         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5411         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5412         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5413         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5414         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5415         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5416         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5417         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5418         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5419         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5420         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5421         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5422         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5423         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5424         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5425         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5426         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5427         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5428         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5429         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5430         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5431         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5432         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5433         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5434         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5435         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5436         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5437         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5438         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5439         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5440         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5441         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5442         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5443         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5444         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5445         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5446         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5447         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5448         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5449         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5450         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5451         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5452         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5453         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5454         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5455         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5456         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5457         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5458         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5459         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5460         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5461         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5462         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5463         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5464         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5465         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5466         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5467         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5468         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5469         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5470         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5471         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5472         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5473         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5474         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5475         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5476         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5477         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5478         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5479         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5480         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5481         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5482         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5483         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5484         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5485         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5486         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5487         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5488         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5489         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5490         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5491         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5492         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5493         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5494         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5495         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5496         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5497         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5498         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5499         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5500         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5501         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5502         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5503         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5504         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5505         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5506         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5507         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5508         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5509         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5510         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5511         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5512         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5513         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5514         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5515         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5516         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5517         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5518         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5519         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5520         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5521         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5522         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5523         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5524         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5525         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5526         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5527         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5528         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5529         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5530         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5531         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5532         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5533         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5534         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5535         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5536         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5537         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5538         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5539         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5540         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5541         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5542         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5543         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5544         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5545         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5546         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5547         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5548         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5549         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5550         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5551         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5552         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5553         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5554         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5555         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5556         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5557         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5558         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5559         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5560         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5561         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5562         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5563         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5564         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5565         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5566         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5567         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5568         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5569         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5570         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5571         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5572         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5573         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5574         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5575         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5576         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5577         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5578         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5579         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5580         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5581         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5582         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5583         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5584         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5585         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5586         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5587         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5588         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5589         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5590         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5591         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5592         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5593         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5594         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5595         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5596         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5597         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5598         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5599         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5600         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5601         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5602         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5603         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5604         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5605         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5606         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5607         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5608         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5609         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5610         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5611         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5612         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5613         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5614         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5615         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5616         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5617         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5618         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5619         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5620         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5621         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5622         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5623         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5624         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5625         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5626         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5627         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5628         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5629         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5630         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5631         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5632         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5633         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5634         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5635         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5636         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5637         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5638         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5639         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5640         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5641         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5642         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5643         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5644         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5645         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5646         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5647         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5648         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5649         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5650         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5651         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5652         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5653         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5654         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5655         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5656         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5657         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5658         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5659         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5660         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5661         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5662         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5663         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5664         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5665         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5666         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5667         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5668         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5669         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5670         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5671         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5672         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5673         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5674         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5675         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5676         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5677         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5678         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5679         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5680         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5681         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5682         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5683         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5684         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5685         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5686         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5687 };
5688
5689 static const u32 tg3TsoFwRodata[] = {
5690         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5691         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5692         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5693         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5694         0x00000000,
5695 };
5696
5697 static const u32 tg3TsoFwData[] = {
5698         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5699         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5700         0x00000000,
5701 };
5702
5703 /* 5705 needs a special version of the TSO firmware.  */
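/* Unlike the other TSO-capable chips, which run the TSO image on the
 * TX CPU's scratch area, the 5705 build below is loaded onto the RX CPU
 * and staged at the start of the NIC's MBUF pool SRAM (see
 * tg3_load_tso_firmware() and the BUFMGR_MB_POOL_ADDR adjustment in
 * tg3_reset_hw()).
 */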
5704 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5705 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5706 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5707 #define TG3_TSO5_FW_START_ADDR          0x00010000
5708 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5709 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5710 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5711 #define TG3_TSO5_FW_RODATA_LEN          0x50
5712 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5713 #define TG3_TSO5_FW_DATA_LEN            0x20
5714 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5715 #define TG3_TSO5_FW_SBSS_LEN            0x28
5716 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5717 #define TG3_TSO5_FW_BSS_LEN             0x88
5718
5719 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5720         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5721         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5722         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5723         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5724         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5725         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5726         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5727         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5728         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5729         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5730         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5731         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5732         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5733         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5734         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5735         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5736         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5737         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5738         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5739         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5740         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5741         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5742         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5743         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5744         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5745         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5746         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5747         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5748         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5749         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5750         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5751         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5752         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5753         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5754         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5755         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5756         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5757         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5758         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5759         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5760         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5761         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5762         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5763         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5764         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5765         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5766         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5767         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5768         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5769         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5770         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5771         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5772         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5773         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5774         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5775         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5776         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5777         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5778         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5779         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5780         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5781         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5782         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5783         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5784         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5785         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5786         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5787         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5788         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5789         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5790         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5791         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5792         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5793         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5794         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5795         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5796         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5797         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5798         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5799         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5800         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5801         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5802         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5803         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5804         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5805         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5806         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5807         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5808         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5809         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5810         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5811         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5812         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5813         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5814         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5815         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5816         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5817         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5818         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5819         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5820         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5821         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5822         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5823         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5824         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5825         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5826         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5827         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5828         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5829         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5830         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5831         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5832         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5833         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5834         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5835         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5836         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5837         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5838         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5839         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5840         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5841         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5842         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5843         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5844         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5845         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5846         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5847         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5848         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5849         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5850         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5851         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5852         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5853         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5854         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5855         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5856         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5857         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5858         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5859         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5860         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5861         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5862         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5863         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5864         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5865         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5866         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5867         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5868         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5869         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5870         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5871         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5872         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5873         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5874         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5875         0x00000000, 0x00000000, 0x00000000,
5876 };
5877
5878 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5879         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5880         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5881         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5882         0x00000000, 0x00000000, 0x00000000,
5883 };
5884
5885 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5886         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5887         0x00000000, 0x00000000, 0x00000000,
5888 };
5889
5890 /* tp->lock is held. */
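/* Pick the TSO firmware image that matches the chip (the 5705-specific
 * build or the generic one), load it into the appropriate CPU scratch
 * space and start that CPU.  Chips with TG3_FLG2_HW_TSO do TSO in
 * hardware, so no firmware download is needed at all.
 */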
5891 static int tg3_load_tso_firmware(struct tg3 *tp)
5892 {
5893         struct fw_info info;
5894         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5895         int err, i;
5896
5897         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5898                 return 0;
5899
5900         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5901                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5902                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5903                 info.text_data = &tg3Tso5FwText[0];
5904                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5905                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5906                 info.rodata_data = &tg3Tso5FwRodata[0];
5907                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5908                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5909                 info.data_data = &tg3Tso5FwData[0];
5910                 cpu_base = RX_CPU_BASE;
5911                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5912                 cpu_scratch_size = (info.text_len +
5913                                     info.rodata_len +
5914                                     info.data_len +
5915                                     TG3_TSO5_FW_SBSS_LEN +
5916                                     TG3_TSO5_FW_BSS_LEN);
5917         } else {
5918                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5919                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5920                 info.text_data = &tg3TsoFwText[0];
5921                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5922                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5923                 info.rodata_data = &tg3TsoFwRodata[0];
5924                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5925                 info.data_len = TG3_TSO_FW_DATA_LEN;
5926                 info.data_data = &tg3TsoFwData[0];
5927                 cpu_base = TX_CPU_BASE;
5928                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5929                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5930         }
5931
5932         err = tg3_load_firmware_cpu(tp, cpu_base,
5933                                     cpu_scratch_base, cpu_scratch_size,
5934                                     &info);
5935         if (err)
5936                 return err;
5937
5938         /* Now start up the CPU. */
5939         tw32(cpu_base + CPU_STATE, 0xffffffff);
5940         tw32_f(cpu_base + CPU_PC,    info.text_base);
5941
5942         for (i = 0; i < 5; i++) {
5943                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5944                         break;
5945                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5946                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5947                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5948                 udelay(1000);
5949         }
5950         if (i >= 5) {
5951                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5952                        "to set CPU PC, is %08x should be %08x\n",
5953                        tp->dev->name, tr32(cpu_base + CPU_PC),
5954                        info.text_base);
5955                 return -ENODEV;
5956         }
5957         tw32(cpu_base + CPU_STATE, 0xffffffff);
5958         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5959         return 0;
5960 }
5961
5962
5963 /* tp->lock is held. */
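/* Program the station address into the MAC address slots (optionally
 * skipping slot 1 when ASF firmware owns it) and derive the TX backoff
 * seed from the sum of the address bytes.
 */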
5964 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
5965 {
5966         u32 addr_high, addr_low;
5967         int i;
5968
5969         addr_high = ((tp->dev->dev_addr[0] << 8) |
5970                      tp->dev->dev_addr[1]);
5971         addr_low = ((tp->dev->dev_addr[2] << 24) |
5972                     (tp->dev->dev_addr[3] << 16) |
5973                     (tp->dev->dev_addr[4] <<  8) |
5974                     (tp->dev->dev_addr[5] <<  0));
5975         for (i = 0; i < 4; i++) {
5976                 if (i == 1 && skip_mac_1)
5977                         continue;
5978                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5979                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5980         }
5981
5982         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5983             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5984                 for (i = 0; i < 12; i++) {
5985                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5986                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5987                 }
5988         }
5989
5990         addr_high = (tp->dev->dev_addr[0] +
5991                      tp->dev->dev_addr[1] +
5992                      tp->dev->dev_addr[2] +
5993                      tp->dev->dev_addr[3] +
5994                      tp->dev->dev_addr[4] +
5995                      tp->dev->dev_addr[5]) &
5996                 TX_BACKOFF_SEED_MASK;
5997         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5998 }
5999
6000 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6001 {
6002         struct tg3 *tp = netdev_priv(dev);
6003         struct sockaddr *addr = p;
6004         int err = 0, skip_mac_1 = 0;
6005
6006         if (!is_valid_ether_addr(addr->sa_data))
6007                 return -EINVAL;
6008
6009         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6010
6011         if (!netif_running(dev))
6012                 return 0;
6013
6014         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6015                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6016
6017                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6018                 addr0_low = tr32(MAC_ADDR_0_LOW);
6019                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6020                 addr1_low = tr32(MAC_ADDR_1_LOW);
6021
6022                 /* Skip MAC addr 1 if ASF is using it. */
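                /* (i.e. MAC address 1 is non-zero and already differs
                 * from address 0, presumably because the management
                 * firmware has programmed its own address there.)
                 */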
6023                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6024                     !(addr1_high == 0 && addr1_low == 0))
6025                         skip_mac_1 = 1;
6026         }
6027         spin_lock_bh(&tp->lock);
6028         __tg3_set_mac_addr(tp, skip_mac_1);
6029         spin_unlock_bh(&tp->lock);
6030
6031         return err;
6032 }
6033
6034 /* tp->lock is held. */
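/* Write one TG3_BDINFO block into NIC SRAM: the 64-bit host DMA address
 * (split into high/low halves), the maxlen/flags word and, on pre-5705
 * parts, the NIC-side descriptor address.  See the ring setup comment
 * in tg3_reset_hw() for the overall layout.
 */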
6035 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6036                            dma_addr_t mapping, u32 maxlen_flags,
6037                            u32 nic_addr)
6038 {
6039         tg3_write_mem(tp,
6040                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6041                       ((u64) mapping >> 32));
6042         tg3_write_mem(tp,
6043                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6044                       ((u64) mapping & 0xffffffff));
6045         tg3_write_mem(tp,
6046                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6047                        maxlen_flags);
6048
6049         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6050                 tg3_write_mem(tp,
6051                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6052                               nic_addr);
6053 }
6054
6055 static void __tg3_set_rx_mode(struct net_device *);
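/* Program the host coalescing engine from the ethtool_coalesce
 * parameters.  The per-interrupt tick values and the statistics block
 * timer only exist on pre-5705 parts, and the stats timer is forced to
 * zero while the link is down.
 */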
6056 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6057 {
6058         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6059         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6060         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6061         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6062         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6063                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6064                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6065         }
6066         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6067         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6068         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6069                 u32 val = ec->stats_block_coalesce_usecs;
6070
6071                 if (!netif_carrier_ok(tp->dev))
6072                         val = 0;
6073
6074                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6075         }
6076 }
6077
6078 /* tp->lock is held. */
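/* Bring the device back to a known state: stop the firmware, quiesce
 * the hardware if it was already initialized, reset the chip (and
 * optionally the PHY), then reprogram the rings, buffer manager
 * watermarks and MAC address from scratch.
 */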
6079 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6080 {
6081         u32 val, rdmac_mode;
6082         int i, err, limit;
6083
6084         tg3_disable_ints(tp);
6085
6086         tg3_stop_fw(tp);
6087
6088         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6089
6090         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6091                 tg3_abort_hw(tp, 1);
6092         }
6093
6094         if (reset_phy)
6095                 tg3_phy_reset(tp);
6096
6097         err = tg3_chip_reset(tp);
6098         if (err)
6099                 return err;
6100
6101         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6102
6103         /* This works around an issue with Athlon chipsets on
6104          * B3 tigon3 silicon.  This bit has no effect on any
6105          * other revision.  But do not set this on PCI Express
6106          * chips.
6107          */
6108         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6109                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6110         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6111
6112         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6113             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6114                 val = tr32(TG3PCI_PCISTATE);
6115                 val |= PCISTATE_RETRY_SAME_DMA;
6116                 tw32(TG3PCI_PCISTATE, val);
6117         }
6118
6119         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6120                 /* Enable some hw fixes.  */
6121                 val = tr32(TG3PCI_MSI_DATA);
6122                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6123                 tw32(TG3PCI_MSI_DATA, val);
6124         }
6125
6126         /* Descriptor ring init may make accesses to the
6127          * NIC SRAM area to set up the TX descriptors, so we
6128          * can only do this after the hardware has been
6129          * successfully reset.
6130          */
6131         err = tg3_init_rings(tp);
6132         if (err)
6133                 return err;
6134
6135         /* This value is determined during the probe time DMA
6136          * engine test, tg3_test_dma.
6137          */
6138         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6139
6140         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6141                           GRC_MODE_4X_NIC_SEND_RINGS |
6142                           GRC_MODE_NO_TX_PHDR_CSUM |
6143                           GRC_MODE_NO_RX_PHDR_CSUM);
6144         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6145
6146         /* Pseudo-header checksum is done by hardware logic and not
6147          * the offload processors, so make the chip do the pseudo-
6148          * header checksums on receive.  For transmit it is more
6149          * convenient to do the pseudo-header checksum in software
6150          * as Linux does that on transmit for us in all cases.
6151          */
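        /* In GRC_MODE terms: leave NO_RX_PHDR_CSUM clear so the chip fills
         * in the receive pseudo-header sum, but set NO_TX_PHDR_CSUM since
         * the stack has already provided it on transmit.
         */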
6152         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6153
6154         tw32(GRC_MODE,
6155              tp->grc_mode |
6156              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6157
6158         /* Setup the timer prescaler register.  Clock is always 66 MHz. */
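        /* With a 66 MHz clock, a prescaler value of 65 presumably yields
         * ~1 usec timer ticks (value + 1 as the divisor); the exact
         * semantics come from the chip documentation.
         */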
6159         val = tr32(GRC_MISC_CFG);
6160         val &= ~0xff;
6161         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6162         tw32(GRC_MISC_CFG, val);
6163
6164         /* Initialize MBUF/DESC pool. */
6165         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6166                 /* Do nothing.  */
6167         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6168                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6169                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6170                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6171                 else
6172                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6173                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6174                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6175         }
6176         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6177                 int fw_len;
6178
6179                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6180                           TG3_TSO5_FW_RODATA_LEN +
6181                           TG3_TSO5_FW_DATA_LEN +
6182                           TG3_TSO5_FW_SBSS_LEN +
6183                           TG3_TSO5_FW_BSS_LEN);
6184                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6185                 tw32(BUFMGR_MB_POOL_ADDR,
6186                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6187                 tw32(BUFMGR_MB_POOL_SIZE,
6188                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6189         }
6190
6191         if (tp->dev->mtu <= ETH_DATA_LEN) {
6192                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6193                      tp->bufmgr_config.mbuf_read_dma_low_water);
6194                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6195                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6196                 tw32(BUFMGR_MB_HIGH_WATER,
6197                      tp->bufmgr_config.mbuf_high_water);
6198         } else {
6199                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6200                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6201                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6202                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6203                 tw32(BUFMGR_MB_HIGH_WATER,
6204                      tp->bufmgr_config.mbuf_high_water_jumbo);
6205         }
6206         tw32(BUFMGR_DMA_LOW_WATER,
6207              tp->bufmgr_config.dma_low_water);
6208         tw32(BUFMGR_DMA_HIGH_WATER,
6209              tp->bufmgr_config.dma_high_water);
6210
6211         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6212         for (i = 0; i < 2000; i++) {
6213                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6214                         break;
6215                 udelay(10);
6216         }
6217         if (i >= 2000) {
6218                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6219                        tp->dev->name);
6220                 return -ENODEV;
6221         }
6222
6223         /* Setup replenish threshold. */
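        /* Refill the standard RX producer ring once the NIC has consumed
         * roughly 1/8 of the posted buffers, clamped to at least one buffer
         * and at most rx_std_max_post.
         */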
6224         val = tp->rx_pending / 8;
6225         if (val == 0)
6226                 val = 1;
6227         else if (val > tp->rx_std_max_post)
6228                 val = tp->rx_std_max_post;
6229         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6230                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6231                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6232
6233                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6234                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6235         }
6236
6237         tw32(RCVBDI_STD_THRESH, val);
6238
6239         /* Initialize TG3_BDINFO's at:
6240          *  RCVDBDI_STD_BD:     standard eth size rx ring
6241          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6242          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6243          *
6244          * like so:
6245          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6246          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6247          *                              ring attribute flags
6248          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6249          *
6250          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6251          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6252          *
6253          * The size of each ring is fixed in the firmware, but the location is
6254          * configurable.
6255          */
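        /* The standard ring programmed below is the worked example of the
         * layout above: host address split into high/low 32-bit registers,
         * the NIC SRAM descriptor location, and a maxlen/flags word whose
         * maximum buffer size depends on whether this is a 5705+ part.
         */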
6256         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6257              ((u64) tp->rx_std_mapping >> 32));
6258         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6259              ((u64) tp->rx_std_mapping & 0xffffffff));
6260         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6261              NIC_SRAM_RX_BUFFER_DESC);
6262
6263         /* Don't even try to program the JUMBO/MINI buffer descriptor
6264          * configs on 5705.
6265          */
6266         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6267                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6268                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6269         } else {
6270                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6271                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6272
6273                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6274                      BDINFO_FLAGS_DISABLED);
6275
6276                 /* Setup replenish threshold. */
6277                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6278
6279                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6280                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6281                              ((u64) tp->rx_jumbo_mapping >> 32));
6282                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6283                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6284                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6285                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6286                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6287                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6288                 } else {
6289                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6290                              BDINFO_FLAGS_DISABLED);
6291                 }
6292
6293         }
6294
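        /* Note: the receive BD rings above keep their TG3_BDINFO blocks in
         * register space, so they are programmed with direct tw32() writes.
         * The send and receive-return rings below live at NIC SRAM offsets
         * (NIC_SRAM_SEND_RCB, NIC_SRAM_RCV_RET_RCB), which is why they are
         * cleared with tg3_write_mem() and filled in via tg3_set_bdinfo().
         */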
6295         /* There is only one send ring on 5705/5750, no need to explicitly
6296          * disable the others.
6297          */
6298         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6299                 /* Clear out send RCB ring in SRAM. */
6300                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6301                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6302                                       BDINFO_FLAGS_DISABLED);
6303         }
6304
6305         tp->tx_prod = 0;
6306         tp->tx_cons = 0;
6307         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6308         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6309
6310         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6311                        tp->tx_desc_mapping,
6312                        (TG3_TX_RING_SIZE <<
6313                         BDINFO_FLAGS_MAXLEN_SHIFT),
6314                        NIC_SRAM_TX_BUFFER_DESC);
6315
6316         /* There is only one receive return ring on 5705/5750, no need
6317          * to explicitly disable the others.
6318          */
6319         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6320                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6321                      i += TG3_BDINFO_SIZE) {
6322                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6323                                       BDINFO_FLAGS_DISABLED);
6324                 }
6325         }
6326
6327         tp->rx_rcb_ptr = 0;
6328         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6329
6330         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6331                        tp->rx_rcb_mapping,
6332                        (TG3_RX_RCB_RING_SIZE(tp) <<
6333                         BDINFO_FLAGS_MAXLEN_SHIFT),
6334                        0);
6335
6336         tp->rx_std_ptr = tp->rx_pending;
6337         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6338                      tp->rx_std_ptr);
6339
6340         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6341                                                 tp->rx_jumbo_pending : 0;
6342         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6343                      tp->rx_jumbo_ptr);
6344
6345         /* Initialize MAC address and backoff seed. */
6346         __tg3_set_mac_addr(tp, 0);
6347
6348         /* MTU + ethernet header + 4-byte FCS + optional 4-byte VLAN tag */
6349         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6350
6351         /* The slot time is changed by tg3_setup_phy if we
6352          * run at gigabit with half duplex.
6353          */
6354         tw32(MAC_TX_LENGTHS,
6355              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6356              (6 << TX_LENGTHS_IPG_SHIFT) |
6357              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6358
6359         /* Receive rules. */
6360         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6361         tw32(RCVLPC_CONFIG, 0x0181);
6362
6363         /* Calculate RDMAC_MODE setting early, we need it to determine
6364          * the RCVLPC_STATE_ENABLE mask.
6365          */
6366         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6367                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6368                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6369                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6370                       RDMAC_MODE_LNGREAD_ENAB);
6371
6372         /* If statement applies to 5705 and 5750 PCI devices only */
6373         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6374              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6375             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6376                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6377                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6378                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6379                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6380                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6381                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6382                 }
6383         }
6384
6385         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6386                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6387
6388         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6389                 rdmac_mode |= (1 << 27);
6390
6391         /* Receive/send statistics. */
6392         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6393                 val = tr32(RCVLPC_STATS_ENABLE);
6394                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6395                 tw32(RCVLPC_STATS_ENABLE, val);
6396         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6397                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6398                 val = tr32(RCVLPC_STATS_ENABLE);
6399                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6400                 tw32(RCVLPC_STATS_ENABLE, val);
6401         } else {
6402                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6403         }
6404         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6405         tw32(SNDDATAI_STATSENAB, 0xffffff);
6406         tw32(SNDDATAI_STATSCTRL,
6407              (SNDDATAI_SCTRL_ENABLE |
6408               SNDDATAI_SCTRL_FASTUPD));
6409
6410         /* Setup host coalescing engine. */
6411         tw32(HOSTCC_MODE, 0);
6412         for (i = 0; i < 2000; i++) {
6413                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6414                         break;
6415                 udelay(10);
6416         }
6417
6418         __tg3_set_coalesce(tp, &tp->coal);
6419
6420         /* set status block DMA address */
6421         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6422              ((u64) tp->status_mapping >> 32));
6423         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6424              ((u64) tp->status_mapping & 0xffffffff));
6425
6426         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6427                 /* Status/statistics block address.  See tg3_timer,
6428                  * the tg3_periodic_fetch_stats call there, and
6429                  * tg3_get_stats to see how this works for 5705/5750 chips.
6430                  */
6431                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6432                      ((u64) tp->stats_mapping >> 32));
6433                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6434                      ((u64) tp->stats_mapping & 0xffffffff));
6435                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6436                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6437         }
6438
6439         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6440
6441         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6442         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6443         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6444                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6445
6446         /* Clear statistics/status block in chip, and status block in ram. */
6447         for (i = NIC_SRAM_STATS_BLK;
6448              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6449              i += sizeof(u32)) {
6450                 tg3_write_mem(tp, i, 0);
6451                 udelay(40);
6452         }
6453         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6454
6455         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6456                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6457                 /* reset to prevent losing 1st rx packet intermittently */
6458                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6459                 udelay(10);
6460         }
6461
6462         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6463                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6464         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6465             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6466             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6467                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6468         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6469         udelay(40);
6470
6471         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6472          * If TG3_FLG2_IS_NIC is zero, we should read the
6473          * register to preserve the GPIO settings for LOMs. The GPIOs,
6474          * whether used as inputs or outputs, are set by boot code after
6475          * reset.
6476          */
6477         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6478                 u32 gpio_mask;
6479
6480                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6481                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6482                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6483
6484                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6485                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6486                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6487
6488                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6489                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6490
6491                 tp->grc_local_ctrl &= ~gpio_mask;
6492                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6493
6494                 /* GPIO1 must be driven high for eeprom write protect */
6495                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6496                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6497                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6498         }
6499         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6500         udelay(100);
6501
6502         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6503         tp->last_tag = 0;
6504
6505         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6506                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6507                 udelay(40);
6508         }
6509
6510         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6511                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6512                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6513                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6514                WDMAC_MODE_LNGREAD_ENAB);
6515
6516         /* If statement applies to 5705 and 5750 PCI devices only */
6517         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6518              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6519             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6520                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6521                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6522                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6523                         /* nothing */
6524                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6525                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6526                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6527                         val |= WDMAC_MODE_RX_ACCEL;
6528                 }
6529         }
6530
6531         /* Enable host coalescing bug fix */
6532         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6533             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6534                 val |= (1 << 29);
6535
6536         tw32_f(WDMAC_MODE, val);
6537         udelay(40);
6538
6539         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6540                 val = tr32(TG3PCI_X_CAPS);
6541                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6542                         val &= ~PCIX_CAPS_BURST_MASK;
6543                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6544                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6545                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6546                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6547                 }
6548                 tw32(TG3PCI_X_CAPS, val);
6549         }
6550
6551         tw32_f(RDMAC_MODE, rdmac_mode);
6552         udelay(40);
6553
6554         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6555         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6556                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6557         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6558         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6559         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6560         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6561         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6562         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6563                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6564         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6565         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6566
6567         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6568                 err = tg3_load_5701_a0_firmware_fix(tp);
6569                 if (err)
6570                         return err;
6571         }
6572
6573         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6574                 err = tg3_load_tso_firmware(tp);
6575                 if (err)
6576                         return err;
6577         }
6578
6579         tp->tx_mode = TX_MODE_ENABLE;
6580         tw32_f(MAC_TX_MODE, tp->tx_mode);
6581         udelay(100);
6582
6583         tp->rx_mode = RX_MODE_ENABLE;
6584         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6585                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6586
6587         tw32_f(MAC_RX_MODE, tp->rx_mode);
6588         udelay(10);
6589
6590         if (tp->link_config.phy_is_low_power) {
6591                 tp->link_config.phy_is_low_power = 0;
6592                 tp->link_config.speed = tp->link_config.orig_speed;
6593                 tp->link_config.duplex = tp->link_config.orig_duplex;
6594                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6595         }
6596
6597         tp->mi_mode = MAC_MI_MODE_BASE;
6598         tw32_f(MAC_MI_MODE, tp->mi_mode);
6599         udelay(80);
6600
6601         tw32(MAC_LED_CTRL, tp->led_ctrl);
6602
6603         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6604         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6605                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6606                 udelay(10);
6607         }
6608         tw32_f(MAC_RX_MODE, tp->rx_mode);
6609         udelay(10);
6610
6611         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6612                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6613                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6614                         /* Set drive transmission level to 1.2V, but only
6615                          * if the signal pre-emphasis bit is not set.  */
6616                         val = tr32(MAC_SERDES_CFG);
6617                         val &= 0xfffff000;
6618                         val |= 0x880;
6619                         tw32(MAC_SERDES_CFG, val);
6620                 }
6621                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6622                         tw32(MAC_SERDES_CFG, 0x616000);
6623         }
6624
6625         /* Prevent chip from dropping frames when flow control
6626          * is enabled.
6627          */
6628         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6629
6630         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6631             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6632                 /* Use hardware link auto-negotiation */
6633                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6634         }
6635
6636         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6637             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6638                 u32 tmp;
6639
6640                 tmp = tr32(SERDES_RX_CTRL);
6641                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6642                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6643                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6644                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6645         }
6646
6647         err = tg3_setup_phy(tp, 0);
6648         if (err)
6649                 return err;
6650
6651         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6652             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6653                 u32 tmp;
6654
6655                 /* Clear CRC stats. */
6656                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6657                         tg3_writephy(tp, MII_TG3_TEST1,
6658                                      tmp | MII_TG3_TEST1_CRC_EN);
6659                         tg3_readphy(tp, 0x14, &tmp);
6660                 }
6661         }
6662
6663         __tg3_set_rx_mode(tp->dev);
6664
6665         /* Initialize receive rules. */
6666         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6667         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6668         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6669         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6670
6671         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6672             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6673                 limit = 8;
6674         else
6675                 limit = 16;
6676         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6677                 limit -= 4;
6678         switch (limit) {
6679         case 16:
6680                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6681         case 15:
6682                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6683         case 14:
6684                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6685         case 13:
6686                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6687         case 12:
6688                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6689         case 11:
6690                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6691         case 10:
6692                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6693         case 9:
6694                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6695         case 8:
6696                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6697         case 7:
6698                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6699         case 6:
6700                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6701         case 5:
6702                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6703         case 4:
6704                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6705         case 3:
6706                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6707         case 2:
6708         case 1:
6709
6710         default:
6711                 break;
6712         }
6713
6714         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6715
6716         return 0;
6717 }
6718
6719 /* Called at device open time to get the chip ready for
6720  * packet processing.  Invoked with tp->lock held.
6721  */
6722 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6723 {
6724         int err;
6725
6726         /* Force the chip into D0. */
6727         err = tg3_set_power_state(tp, PCI_D0);
6728         if (err)
6729                 goto out;
6730
6731         tg3_switch_clocks(tp);
6732
6733         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6734
6735         err = tg3_reset_hw(tp, reset_phy);
6736
6737 out:
6738         return err;
6739 }
6740
6741 #define TG3_STAT_ADD32(PSTAT, REG) \
6742 do {    u32 __val = tr32(REG); \
6743         (PSTAT)->low += __val; \
6744         if ((PSTAT)->low < __val) \
6745                 (PSTAT)->high += 1; \
6746 } while (0)
6747
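/* TG3_STAT_ADD32 folds a 32-bit hardware counter into a 64-bit software
 * accumulator.  The carry test works because an unsigned 32-bit addition
 * wrapped exactly when the result is smaller than the value just added
 * (e.g. low = 0xfffffff0, __val = 0x20 gives low = 0x10 < 0x20).  A
 * stand-alone sketch of the same idea (illustrative names, not driver code):
 */
#if 0
static void stat_add32(tg3_stat64_t *pstat, u32 val)
{
	pstat->low += val;
	if (pstat->low < val)		/* 32-bit sum wrapped */
		pstat->high += 1;
}
#endif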
6748 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6749 {
6750         struct tg3_hw_stats *sp = tp->hw_stats;
6751
6752         if (!netif_carrier_ok(tp->dev))
6753                 return;
6754
6755         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6756         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6757         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6758         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6759         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6760         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6761         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6762         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6763         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6764         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6765         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6766         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6767         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6768
6769         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6770         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6771         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6772         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6773         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6774         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6775         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6776         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6777         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6778         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6779         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6780         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6781         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6782         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6783
6784         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
6785         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
6786         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
6787 }
6788
6789 static void tg3_timer(unsigned long __opaque)
6790 {
6791         struct tg3 *tp = (struct tg3 *) __opaque;
6792
6793         if (tp->irq_sync)
6794                 goto restart_timer;
6795
6796         spin_lock(&tp->lock);
6797
6798         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6799                 /* All of this garbage is necessary because, with
6800                  * non-tagged IRQ status, the mailbox/status_block
6801                  * protocol the chip uses with the cpu is race prone.
6802                  */
6803                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6804                         tw32(GRC_LOCAL_CTRL,
6805                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6806                 } else {
6807                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6808                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6809                 }
6810
6811                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6812                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6813                         spin_unlock(&tp->lock);
6814                         schedule_work(&tp->reset_task);
6815                         return;
6816                 }
6817         }
6818
6819         /* This part only runs once per second. */
6820         if (!--tp->timer_counter) {
6821                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6822                         tg3_periodic_fetch_stats(tp);
6823
6824                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6825                         u32 mac_stat;
6826                         int phy_event;
6827
6828                         mac_stat = tr32(MAC_STATUS);
6829
6830                         phy_event = 0;
6831                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6832                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6833                                         phy_event = 1;
6834                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6835                                 phy_event = 1;
6836
6837                         if (phy_event)
6838                                 tg3_setup_phy(tp, 0);
6839                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6840                         u32 mac_stat = tr32(MAC_STATUS);
6841                         int need_setup = 0;
6842
6843                         if (netif_carrier_ok(tp->dev) &&
6844                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6845                                 need_setup = 1;
6846                         }
6847                         if (!netif_carrier_ok(tp->dev) &&
6848                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6849                                          MAC_STATUS_SIGNAL_DET))) {
6850                                 need_setup = 1;
6851                         }
6852                         if (need_setup) {
6853                                 if (!tp->serdes_counter) {
6854                                         tw32_f(MAC_MODE,
6855                                              (tp->mac_mode &
6856                                               ~MAC_MODE_PORT_MODE_MASK));
6857                                         udelay(40);
6858                                         tw32_f(MAC_MODE, tp->mac_mode);
6859                                         udelay(40);
6860                                 }
6861                                 tg3_setup_phy(tp, 0);
6862                         }
6863                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6864                         tg3_serdes_parallel_detect(tp);
6865
6866                 tp->timer_counter = tp->timer_multiplier;
6867         }
6868
6869         /* Heartbeat is only sent once every 2 seconds.
6870          *
6871          * The heartbeat is to tell the ASF firmware that the host
6872          * driver is still alive.  In the event that the OS crashes,
6873          * ASF needs to reset the hardware to free up the FIFO space
6874          * that may be filled with rx packets destined for the host.
6875          * If the FIFO is full, ASF will no longer function properly.
6876          *
6877          * Unintended resets have been reported on real time kernels
6878          * where the timer doesn't run on time.  Netpoll will also
6879          * have the same problem.
6880          *
6881          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
6882          * to check the ring condition when the heartbeat is expiring
6883          * before doing the reset.  This will prevent most unintended
6884          * resets.
6885          */
6886         if (!--tp->asf_counter) {
6887                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6888                         u32 val;
6889
6890                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6891                                       FWCMD_NICDRV_ALIVE3);
6892                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6893                         /* 5 seconds timeout */
6894                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6895                         val = tr32(GRC_RX_CPU_EVENT);
6896                         val |= (1 << 14);
6897                         tw32(GRC_RX_CPU_EVENT, val);
6898                 }
6899                 tp->asf_counter = tp->asf_multiplier;
6900         }
6901
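        /* Both counters above reload from values set up in tg3_open():
         * timer_multiplier is HZ / timer_offset (once per second) and
         * asf_multiplier is twice that, which is what produces the
         * two-second heartbeat interval described above.
         */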
6902         spin_unlock(&tp->lock);
6903
6904 restart_timer:
6905         tp->timer.expires = jiffies + tp->timer_offset;
6906         add_timer(&tp->timer);
6907 }
6908
6909 static int tg3_request_irq(struct tg3 *tp)
6910 {
6911         irq_handler_t fn;
6912         unsigned long flags;
6913         struct net_device *dev = tp->dev;
6914
6915         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6916                 fn = tg3_msi;
6917                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6918                         fn = tg3_msi_1shot;
6919                 flags = IRQF_SAMPLE_RANDOM;
6920         } else {
6921                 fn = tg3_interrupt;
6922                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6923                         fn = tg3_interrupt_tagged;
6924                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6925         }
6926         return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
6927 }
6928
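/* Verify that the device can actually deliver an interrupt: temporarily
 * install tg3_test_isr, force the host coalescing engine to fire via
 * HOSTCC_MODE_NOW, then poll the interrupt mailbox and
 * MISC_HOST_CTRL_MASK_PCI_INT for up to ~50ms before restoring the
 * normal handler with tg3_request_irq().
 */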
6929 static int tg3_test_interrupt(struct tg3 *tp)
6930 {
6931         struct net_device *dev = tp->dev;
6932         int err, i, intr_ok = 0;
6933
6934         if (!netif_running(dev))
6935                 return -ENODEV;
6936
6937         tg3_disable_ints(tp);
6938
6939         free_irq(tp->pdev->irq, dev);
6940
6941         err = request_irq(tp->pdev->irq, tg3_test_isr,
6942                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
6943         if (err)
6944                 return err;
6945
6946         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6947         tg3_enable_ints(tp);
6948
6949         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6950                HOSTCC_MODE_NOW);
6951
6952         for (i = 0; i < 5; i++) {
6953                 u32 int_mbox, misc_host_ctrl;
6954
6955                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6956                                         TG3_64BIT_REG_LOW);
6957                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
6958
6959                 if ((int_mbox != 0) ||
6960                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
6961                         intr_ok = 1;
6962                         break;
6963                 }
6964
6965                 msleep(10);
6966         }
6967
6968         tg3_disable_ints(tp);
6969
6970         free_irq(tp->pdev->irq, dev);
6971
6972         err = tg3_request_irq(tp);
6973
6974         if (err)
6975                 return err;
6976
6977         if (intr_ok)
6978                 return 0;
6979
6980         return -EIO;
6981 }
6982
6983 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
6984  * mode is successfully restored.
6985  */
6986 static int tg3_test_msi(struct tg3 *tp)
6987 {
6988         struct net_device *dev = tp->dev;
6989         int err;
6990         u16 pci_cmd;
6991
6992         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6993                 return 0;
6994
6995         /* Turn off SERR reporting in case MSI terminates with Master
6996          * Abort.
6997          */
6998         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6999         pci_write_config_word(tp->pdev, PCI_COMMAND,
7000                               pci_cmd & ~PCI_COMMAND_SERR);
7001
7002         err = tg3_test_interrupt(tp);
7003
7004         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7005
7006         if (!err)
7007                 return 0;
7008
7009         /* other failures */
7010         if (err != -EIO)
7011                 return err;
7012
7013         /* MSI test failed, go back to INTx mode */
7014         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7015                "switching to INTx mode. Please report this failure to "
7016                "the PCI maintainer and include system chipset information.\n",
7017                        tp->dev->name);
7018
7019         free_irq(tp->pdev->irq, dev);
7020         pci_disable_msi(tp->pdev);
7021
7022         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7023
7024         err = tg3_request_irq(tp);
7025         if (err)
7026                 return err;
7027
7028         /* Need to reset the chip because the MSI cycle may have terminated
7029          * with Master Abort.
7030          */
7031         tg3_full_lock(tp, 1);
7032
7033         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7034         err = tg3_init_hw(tp, 1);
7035
7036         tg3_full_unlock(tp);
7037
7038         if (err)
7039                 free_irq(tp->pdev->irq, dev);
7040
7041         return err;
7042 }
7043
7044 static int tg3_open(struct net_device *dev)
7045 {
7046         struct tg3 *tp = netdev_priv(dev);
7047         int err;
7048
7049         netif_carrier_off(tp->dev);
7050
7051         tg3_full_lock(tp, 0);
7052
7053         err = tg3_set_power_state(tp, PCI_D0);
7054         if (err) {
7055                 tg3_full_unlock(tp);
7056                 return err;
7057         }
7058
7059         tg3_disable_ints(tp);
7060         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7061
7062         tg3_full_unlock(tp);
7063
7064         /* The placement of this call is tied
7065          * to the setup and use of Host TX descriptors.
7066          */
7067         err = tg3_alloc_consistent(tp);
7068         if (err)
7069                 return err;
7070
7071         if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
7072                 /* All MSI supporting chips should support tagged
7073                  * status.  Warn and skip MSI if that is not the case.
7074                  */
7075                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7076                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7077                                "Not using MSI.\n", tp->dev->name);
7078                 } else if (pci_enable_msi(tp->pdev) == 0) {
7079                         u32 msi_mode;
7080
7081                         msi_mode = tr32(MSGINT_MODE);
7082                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7083                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7084                 }
7085         }
7086         err = tg3_request_irq(tp);
7087
7088         if (err) {
7089                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7090                         pci_disable_msi(tp->pdev);
7091                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7092                 }
7093                 tg3_free_consistent(tp);
7094                 return err;
7095         }
7096
7097         tg3_full_lock(tp, 0);
7098
7099         err = tg3_init_hw(tp, 1);
7100         if (err) {
7101                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7102                 tg3_free_rings(tp);
7103         } else {
7104                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7105                         tp->timer_offset = HZ;
7106                 else
7107                         tp->timer_offset = HZ / 10;
7108
7109                 BUG_ON(tp->timer_offset > HZ);
7110                 tp->timer_counter = tp->timer_multiplier =
7111                         (HZ / tp->timer_offset);
7112                 tp->asf_counter = tp->asf_multiplier =
7113                         ((HZ / tp->timer_offset) * 2);
7114
7115                 init_timer(&tp->timer);
7116                 tp->timer.expires = jiffies + tp->timer_offset;
7117                 tp->timer.data = (unsigned long) tp;
7118                 tp->timer.function = tg3_timer;
7119         }
7120
7121         tg3_full_unlock(tp);
7122
7123         if (err) {
7124                 free_irq(tp->pdev->irq, dev);
7125                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7126                         pci_disable_msi(tp->pdev);
7127                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7128                 }
7129                 tg3_free_consistent(tp);
7130                 return err;
7131         }
7132
7133         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7134                 err = tg3_test_msi(tp);
7135
7136                 if (err) {
7137                         tg3_full_lock(tp, 0);
7138
7139                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7140                                 pci_disable_msi(tp->pdev);
7141                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7142                         }
7143                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7144                         tg3_free_rings(tp);
7145                         tg3_free_consistent(tp);
7146
7147                         tg3_full_unlock(tp);
7148
7149                         return err;
7150                 }
7151
7152                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7153                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
7154                                 u32 val = tr32(PCIE_TRANSACTION_CFG);
7155
7156                                 tw32(PCIE_TRANSACTION_CFG,
7157                                      val | PCIE_TRANS_CFG_1SHOT_MSI);
7158                         }
7159                 }
7160         }
7161
7162         tg3_full_lock(tp, 0);
7163
7164         add_timer(&tp->timer);
7165         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7166         tg3_enable_ints(tp);
7167
7168         tg3_full_unlock(tp);
7169
7170         netif_start_queue(dev);
7171
7172         return 0;
7173 }
7174
7175 #if 0
7176 /*static*/ void tg3_dump_state(struct tg3 *tp)
7177 {
7178         u32 val32, val32_2, val32_3, val32_4, val32_5;
7179         u16 val16;
7180         int i;
7181
7182         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7183         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7184         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7185                val16, val32);
7186
7187         /* MAC block */
7188         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7189                tr32(MAC_MODE), tr32(MAC_STATUS));
7190         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7191                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7192         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7193                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7194         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7195                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7196
7197         /* Send data initiator control block */
7198         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7199                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7200         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7201                tr32(SNDDATAI_STATSCTRL));
7202
7203         /* Send data completion control block */
7204         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7205
7206         /* Send BD ring selector block */
7207         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7208                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7209
7210         /* Send BD initiator control block */
7211         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7212                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7213
7214         /* Send BD completion control block */
7215         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7216
7217         /* Receive list placement control block */
7218         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7219                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7220         printk("       RCVLPC_STATSCTRL[%08x]\n",
7221                tr32(RCVLPC_STATSCTRL));
7222
7223         /* Receive data and receive BD initiator control block */
7224         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7225                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7226
7227         /* Receive data completion control block */
7228         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7229                tr32(RCVDCC_MODE));
7230
7231         /* Receive BD initiator control block */
7232         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7233                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7234
7235         /* Receive BD completion control block */
7236         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7237                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7238
7239         /* Receive list selector control block */
7240         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7241                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7242
7243         /* Mbuf cluster free block */
7244         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7245                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7246
7247         /* Host coalescing control block */
7248         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7249                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7250         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7251                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7252                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7253         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7254                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7255                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7256         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7257                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7258         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7259                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7260
7261         /* Memory arbiter control block */
7262         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7263                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7264
7265         /* Buffer manager control block */
7266         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7267                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7268         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7269                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7270         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7271                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7272                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7273                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7274
7275         /* Read DMA control block */
7276         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7277                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7278
7279         /* Write DMA control block */
7280         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7281                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7282
7283         /* DMA completion block */
7284         printk("DEBUG: DMAC_MODE[%08x]\n",
7285                tr32(DMAC_MODE));
7286
7287         /* GRC block */
7288         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7289                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7290         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7291                tr32(GRC_LOCAL_CTRL));
7292
7293         /* TG3_BDINFOs */
7294         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7295                tr32(RCVDBDI_JUMBO_BD + 0x0),
7296                tr32(RCVDBDI_JUMBO_BD + 0x4),
7297                tr32(RCVDBDI_JUMBO_BD + 0x8),
7298                tr32(RCVDBDI_JUMBO_BD + 0xc));
7299         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7300                tr32(RCVDBDI_STD_BD + 0x0),
7301                tr32(RCVDBDI_STD_BD + 0x4),
7302                tr32(RCVDBDI_STD_BD + 0x8),
7303                tr32(RCVDBDI_STD_BD + 0xc));
7304         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7305                tr32(RCVDBDI_MINI_BD + 0x0),
7306                tr32(RCVDBDI_MINI_BD + 0x4),
7307                tr32(RCVDBDI_MINI_BD + 0x8),
7308                tr32(RCVDBDI_MINI_BD + 0xc));
7309
7310         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7311         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7312         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7313         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7314         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7315                val32, val32_2, val32_3, val32_4);
7316
7317         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7318         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7319         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7320         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7321         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7322                val32, val32_2, val32_3, val32_4);
7323
7324         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7325         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7326         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7327         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7328         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7329         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7330                val32, val32_2, val32_3, val32_4, val32_5);
7331
7332         /* SW status block */
7333         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7334                tp->hw_status->status,
7335                tp->hw_status->status_tag,
7336                tp->hw_status->rx_jumbo_consumer,
7337                tp->hw_status->rx_consumer,
7338                tp->hw_status->rx_mini_consumer,
7339                tp->hw_status->idx[0].rx_producer,
7340                tp->hw_status->idx[0].tx_consumer);
7341
7342         /* SW statistics block */
7343         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7344                ((u32 *)tp->hw_stats)[0],
7345                ((u32 *)tp->hw_stats)[1],
7346                ((u32 *)tp->hw_stats)[2],
7347                ((u32 *)tp->hw_stats)[3]);
7348
7349         /* Mailboxes */
7350         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7351                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7352                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7353                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7354                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7355
7356         /* NIC side send descriptors. */
7357         for (i = 0; i < 6; i++) {
7358                 unsigned long txd;
7359
7360                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7361                         + (i * sizeof(struct tg3_tx_buffer_desc));
7362                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7363                        i,
7364                        readl(txd + 0x0), readl(txd + 0x4),
7365                        readl(txd + 0x8), readl(txd + 0xc));
7366         }
7367
7368         /* NIC side RX descriptors. */
7369         for (i = 0; i < 6; i++) {
7370                 unsigned long rxd;
7371
7372                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7373                         + (i * sizeof(struct tg3_rx_buffer_desc));
7374                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7375                        i,
7376                        readl(rxd + 0x0), readl(rxd + 0x4),
7377                        readl(rxd + 0x8), readl(rxd + 0xc));
7378                 rxd += (4 * sizeof(u32));
7379                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7380                        i,
7381                        readl(rxd + 0x0), readl(rxd + 0x4),
7382                        readl(rxd + 0x8), readl(rxd + 0xc));
7383         }
7384
7385         for (i = 0; i < 6; i++) {
7386                 unsigned long rxd;
7387
7388                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7389                         + (i * sizeof(struct tg3_rx_buffer_desc));
7390                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7391                        i,
7392                        readl(rxd + 0x0), readl(rxd + 0x4),
7393                        readl(rxd + 0x8), readl(rxd + 0xc));
7394                 rxd += (4 * sizeof(u32));
7395                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7396                        i,
7397                        readl(rxd + 0x0), readl(rxd + 0x4),
7398                        readl(rxd + 0x8), readl(rxd + 0xc));
7399         }
7400 }
7401 #endif
7402
7403 static struct net_device_stats *tg3_get_stats(struct net_device *);
7404 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7405
7406 static int tg3_close(struct net_device *dev)
7407 {
7408         struct tg3 *tp = netdev_priv(dev);
7409
7410         cancel_work_sync(&tp->reset_task);
7411
7412         netif_stop_queue(dev);
7413
7414         del_timer_sync(&tp->timer);
7415
7416         tg3_full_lock(tp, 1);
7417 #if 0
7418         tg3_dump_state(tp);
7419 #endif
7420
7421         tg3_disable_ints(tp);
7422
7423         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7424         tg3_free_rings(tp);
7425         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7426
7427         tg3_full_unlock(tp);
7428
7429         free_irq(tp->pdev->irq, dev);
7430         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7431                 pci_disable_msi(tp->pdev);
7432                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7433         }
7434
7435         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7436                sizeof(tp->net_stats_prev));
7437         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7438                sizeof(tp->estats_prev));
7439
7440         tg3_free_consistent(tp);
7441
7442         tg3_set_power_state(tp, PCI_D3hot);
7443
7444         netif_carrier_off(tp->dev);
7445
7446         return 0;
7447 }
7448
7449 static inline unsigned long get_stat64(tg3_stat64_t *val)
7450 {
7451         unsigned long ret;
7452
7453 #if (BITS_PER_LONG == 32)
7454         ret = val->low;
7455 #else
7456         ret = ((u64)val->high << 32) | ((u64)val->low);
7457 #endif
7458         return ret;
7459 }
7460
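/* On 5700/5701 copper devices the CRC error count is read from the PHY
 * (MII_TG3_TEST1 enables the counter, register 0x14 returns it) and
 * accumulated in tp->phy_crc_errors; all other devices report it through
 * the rx_fcs_errors hardware statistic.
 */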
7461 static unsigned long calc_crc_errors(struct tg3 *tp)
7462 {
7463         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7464
7465         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7466             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7467              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7468                 u32 val;
7469
7470                 spin_lock_bh(&tp->lock);
7471                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
7472                         tg3_writephy(tp, MII_TG3_TEST1,
7473                                      val | MII_TG3_TEST1_CRC_EN);
7474                         tg3_readphy(tp, 0x14, &val);
7475                 } else
7476                         val = 0;
7477                 spin_unlock_bh(&tp->lock);
7478
7479                 tp->phy_crc_errors += val;
7480
7481                 return tp->phy_crc_errors;
7482         }
7483
7484         return get_stat64(&hw_stats->rx_fcs_errors);
7485 }
7486
7487 #define ESTAT_ADD(member) \
7488         estats->member =        old_estats->member + \
7489                                 get_stat64(&hw_stats->member)
7490
7491 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7492 {
7493         struct tg3_ethtool_stats *estats = &tp->estats;
7494         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7495         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7496
7497         if (!hw_stats)
7498                 return old_estats;
7499
7500         ESTAT_ADD(rx_octets);
7501         ESTAT_ADD(rx_fragments);
7502         ESTAT_ADD(rx_ucast_packets);
7503         ESTAT_ADD(rx_mcast_packets);
7504         ESTAT_ADD(rx_bcast_packets);
7505         ESTAT_ADD(rx_fcs_errors);
7506         ESTAT_ADD(rx_align_errors);
7507         ESTAT_ADD(rx_xon_pause_rcvd);
7508         ESTAT_ADD(rx_xoff_pause_rcvd);
7509         ESTAT_ADD(rx_mac_ctrl_rcvd);
7510         ESTAT_ADD(rx_xoff_entered);
7511         ESTAT_ADD(rx_frame_too_long_errors);
7512         ESTAT_ADD(rx_jabbers);
7513         ESTAT_ADD(rx_undersize_packets);
7514         ESTAT_ADD(rx_in_length_errors);
7515         ESTAT_ADD(rx_out_length_errors);
7516         ESTAT_ADD(rx_64_or_less_octet_packets);
7517         ESTAT_ADD(rx_65_to_127_octet_packets);
7518         ESTAT_ADD(rx_128_to_255_octet_packets);
7519         ESTAT_ADD(rx_256_to_511_octet_packets);
7520         ESTAT_ADD(rx_512_to_1023_octet_packets);
7521         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7522         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7523         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7524         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7525         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7526
7527         ESTAT_ADD(tx_octets);
7528         ESTAT_ADD(tx_collisions);
7529         ESTAT_ADD(tx_xon_sent);
7530         ESTAT_ADD(tx_xoff_sent);
7531         ESTAT_ADD(tx_flow_control);
7532         ESTAT_ADD(tx_mac_errors);
7533         ESTAT_ADD(tx_single_collisions);
7534         ESTAT_ADD(tx_mult_collisions);
7535         ESTAT_ADD(tx_deferred);
7536         ESTAT_ADD(tx_excessive_collisions);
7537         ESTAT_ADD(tx_late_collisions);
7538         ESTAT_ADD(tx_collide_2times);
7539         ESTAT_ADD(tx_collide_3times);
7540         ESTAT_ADD(tx_collide_4times);
7541         ESTAT_ADD(tx_collide_5times);
7542         ESTAT_ADD(tx_collide_6times);
7543         ESTAT_ADD(tx_collide_7times);
7544         ESTAT_ADD(tx_collide_8times);
7545         ESTAT_ADD(tx_collide_9times);
7546         ESTAT_ADD(tx_collide_10times);
7547         ESTAT_ADD(tx_collide_11times);
7548         ESTAT_ADD(tx_collide_12times);
7549         ESTAT_ADD(tx_collide_13times);
7550         ESTAT_ADD(tx_collide_14times);
7551         ESTAT_ADD(tx_collide_15times);
7552         ESTAT_ADD(tx_ucast_packets);
7553         ESTAT_ADD(tx_mcast_packets);
7554         ESTAT_ADD(tx_bcast_packets);
7555         ESTAT_ADD(tx_carrier_sense_errors);
7556         ESTAT_ADD(tx_discards);
7557         ESTAT_ADD(tx_errors);
7558
7559         ESTAT_ADD(dma_writeq_full);
7560         ESTAT_ADD(dma_write_prioq_full);
7561         ESTAT_ADD(rxbds_empty);
7562         ESTAT_ADD(rx_discards);
7563         ESTAT_ADD(rx_errors);
7564         ESTAT_ADD(rx_threshold_hit);
7565
7566         ESTAT_ADD(dma_readq_full);
7567         ESTAT_ADD(dma_read_prioq_full);
7568         ESTAT_ADD(tx_comp_queue_full);
7569
7570         ESTAT_ADD(ring_set_send_prod_index);
7571         ESTAT_ADD(ring_status_update);
7572         ESTAT_ADD(nic_irqs);
7573         ESTAT_ADD(nic_avoided_irqs);
7574         ESTAT_ADD(nic_tx_threshold_hit);
7575
7576         return estats;
7577 }
7578
7579 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7580 {
7581         struct tg3 *tp = netdev_priv(dev);
7582         struct net_device_stats *stats = &tp->net_stats;
7583         struct net_device_stats *old_stats = &tp->net_stats_prev;
7584         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7585
7586         if (!hw_stats)
7587                 return old_stats;
7588
7589         stats->rx_packets = old_stats->rx_packets +
7590                 get_stat64(&hw_stats->rx_ucast_packets) +
7591                 get_stat64(&hw_stats->rx_mcast_packets) +
7592                 get_stat64(&hw_stats->rx_bcast_packets);
7593
7594         stats->tx_packets = old_stats->tx_packets +
7595                 get_stat64(&hw_stats->tx_ucast_packets) +
7596                 get_stat64(&hw_stats->tx_mcast_packets) +
7597                 get_stat64(&hw_stats->tx_bcast_packets);
7598
7599         stats->rx_bytes = old_stats->rx_bytes +
7600                 get_stat64(&hw_stats->rx_octets);
7601         stats->tx_bytes = old_stats->tx_bytes +
7602                 get_stat64(&hw_stats->tx_octets);
7603
7604         stats->rx_errors = old_stats->rx_errors +
7605                 get_stat64(&hw_stats->rx_errors);
7606         stats->tx_errors = old_stats->tx_errors +
7607                 get_stat64(&hw_stats->tx_errors) +
7608                 get_stat64(&hw_stats->tx_mac_errors) +
7609                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7610                 get_stat64(&hw_stats->tx_discards);
7611
7612         stats->multicast = old_stats->multicast +
7613                 get_stat64(&hw_stats->rx_mcast_packets);
7614         stats->collisions = old_stats->collisions +
7615                 get_stat64(&hw_stats->tx_collisions);
7616
7617         stats->rx_length_errors = old_stats->rx_length_errors +
7618                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7619                 get_stat64(&hw_stats->rx_undersize_packets);
7620
7621         stats->rx_over_errors = old_stats->rx_over_errors +
7622                 get_stat64(&hw_stats->rxbds_empty);
7623         stats->rx_frame_errors = old_stats->rx_frame_errors +
7624                 get_stat64(&hw_stats->rx_align_errors);
7625         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7626                 get_stat64(&hw_stats->tx_discards);
7627         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7628                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7629
7630         stats->rx_crc_errors = old_stats->rx_crc_errors +
7631                 calc_crc_errors(tp);
7632
7633         stats->rx_missed_errors = old_stats->rx_missed_errors +
7634                 get_stat64(&hw_stats->rx_discards);
7635
7636         return stats;
7637 }
7638
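/* Software CRC-32 using the bit-reflected Ethernet polynomial
 * (0xedb88320).  __tg3_set_rx_mode() below hashes multicast addresses
 * with it to pick bits in the MAC hash filter registers.
 */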
7639 static inline u32 calc_crc(unsigned char *buf, int len)
7640 {
7641         u32 reg;
7642         u32 tmp;
7643         int j, k;
7644
7645         reg = 0xffffffff;
7646
7647         for (j = 0; j < len; j++) {
7648                 reg ^= buf[j];
7649
7650                 for (k = 0; k < 8; k++) {
7651                         tmp = reg & 0x01;
7652
7653                         reg >>= 1;
7654
7655                         if (tmp) {
7656                                 reg ^= 0xedb88320;
7657                         }
7658                 }
7659         }
7660
7661         return ~reg;
7662 }
7663
7664 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7665 {
7666         /* accept or reject all multicast frames */
7667         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7668         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7669         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7670         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7671 }
7672
7673 static void __tg3_set_rx_mode(struct net_device *dev)
7674 {
7675         struct tg3 *tp = netdev_priv(dev);
7676         u32 rx_mode;
7677
7678         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7679                                   RX_MODE_KEEP_VLAN_TAG);
7680
7681         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7682          * flag clear.
7683          */
7684 #if TG3_VLAN_TAG_USED
7685         if (!tp->vlgrp &&
7686             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7687                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7688 #else
7689         /* By definition, VLAN is always disabled in this
7690          * case.
7691          */
7692         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7693                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7694 #endif
7695
7696         if (dev->flags & IFF_PROMISC) {
7697                 /* Promiscuous mode. */
7698                 rx_mode |= RX_MODE_PROMISC;
7699         } else if (dev->flags & IFF_ALLMULTI) {
7700                 /* Accept all multicast. */
7701                 tg3_set_multi(tp, 1);
7702         } else if (dev->mc_count < 1) {
7703                 /* Reject all multicast. */
7704                 tg3_set_multi(tp, 0);
7705         } else {
7706                 /* Accept one or more multicast(s). */
7707                 struct dev_mc_list *mclist;
7708                 unsigned int i;
7709                 u32 mc_filter[4] = { 0, };
7710                 u32 regidx;
7711                 u32 bit;
7712                 u32 crc;
7713
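                /* Hash each address: the low 7 bits of the inverted CRC
                 * select one of 128 filter bits, spread over the four
                 * 32-bit MAC_HASH registers (bits 6:5 pick the register,
                 * bits 4:0 the bit within it).
                 */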
7714                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7715                      i++, mclist = mclist->next) {
7716
7717                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
7718                         bit = ~crc & 0x7f;
7719                         regidx = (bit & 0x60) >> 5;
7720                         bit &= 0x1f;
7721                         mc_filter[regidx] |= (1 << bit);
7722                 }
7723
7724                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7725                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7726                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7727                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7728         }
7729
7730         if (rx_mode != tp->rx_mode) {
7731                 tp->rx_mode = rx_mode;
7732                 tw32_f(MAC_RX_MODE, rx_mode);
7733                 udelay(10);
7734         }
7735 }
7736
7737 static void tg3_set_rx_mode(struct net_device *dev)
7738 {
7739         struct tg3 *tp = netdev_priv(dev);
7740
7741         if (!netif_running(dev))
7742                 return;
7743
7744         tg3_full_lock(tp, 0);
7745         __tg3_set_rx_mode(dev);
7746         tg3_full_unlock(tp);
7747 }
7748
7749 #define TG3_REGDUMP_LEN         (32 * 1024)
7750
7751 static int tg3_get_regs_len(struct net_device *dev)
7752 {
7753         return TG3_REGDUMP_LEN;
7754 }
7755
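/* Backs the "ethtool -d" register dump: fill a zeroed 32 KB snapshot
 * with the commonly used register blocks, each copied at its native
 * chip offset so the dump lines up with the register map.  Skipped
 * while the PHY is in a low-power state.
 */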
7756 static void tg3_get_regs(struct net_device *dev,
7757                 struct ethtool_regs *regs, void *_p)
7758 {
7759         u32 *p = _p;
7760         struct tg3 *tp = netdev_priv(dev);
7761         u8 *orig_p = _p;
7762         int i;
7763
7764         regs->version = 0;
7765
7766         memset(p, 0, TG3_REGDUMP_LEN);
7767
7768         if (tp->link_config.phy_is_low_power)
7769                 return;
7770
7771         tg3_full_lock(tp, 0);
7772
7773 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7774 #define GET_REG32_LOOP(base,len)                \
7775 do {    p = (u32 *)(orig_p + (base));           \
7776         for (i = 0; i < len; i += 4)            \
7777                 __GET_REG32((base) + i);        \
7778 } while (0)
7779 #define GET_REG32_1(reg)                        \
7780 do {    p = (u32 *)(orig_p + (reg));            \
7781         __GET_REG32((reg));                     \
7782 } while (0)
7783
7784         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7785         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7786         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7787         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7788         GET_REG32_1(SNDDATAC_MODE);
7789         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7790         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7791         GET_REG32_1(SNDBDC_MODE);
7792         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7793         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7794         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7795         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7796         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7797         GET_REG32_1(RCVDCC_MODE);
7798         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7799         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7800         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7801         GET_REG32_1(MBFREE_MODE);
7802         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7803         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7804         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7805         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7806         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7807         GET_REG32_1(RX_CPU_MODE);
7808         GET_REG32_1(RX_CPU_STATE);
7809         GET_REG32_1(RX_CPU_PGMCTR);
7810         GET_REG32_1(RX_CPU_HWBKPT);
7811         GET_REG32_1(TX_CPU_MODE);
7812         GET_REG32_1(TX_CPU_STATE);
7813         GET_REG32_1(TX_CPU_PGMCTR);
7814         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7815         GET_REG32_LOOP(FTQ_RESET, 0x120);
7816         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7817         GET_REG32_1(DMAC_MODE);
7818         GET_REG32_LOOP(GRC_MODE, 0x4c);
7819         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7820                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7821
7822 #undef __GET_REG32
7823 #undef GET_REG32_LOOP
7824 #undef GET_REG32_1
7825
7826         tg3_full_unlock(tp);
7827 }
7828
7829 static int tg3_get_eeprom_len(struct net_device *dev)
7830 {
7831         struct tg3 *tp = netdev_priv(dev);
7832
7833         return tp->nvram_size;
7834 }
7835
7836 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7837 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7838
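/* Backs "ethtool -e".  NVRAM is read one 32-bit word at a time, so an
 * unaligned head and tail are bounced through a temporary word while
 * the aligned middle is copied directly; eeprom->len reports how many
 * bytes were actually returned if a read fails part way through.
 */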
7839 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7840 {
7841         struct tg3 *tp = netdev_priv(dev);
7842         int ret;
7843         u8  *pd;
7844         u32 i, offset, len, val, b_offset, b_count;
7845
7846         if (tp->link_config.phy_is_low_power)
7847                 return -EAGAIN;
7848
7849         offset = eeprom->offset;
7850         len = eeprom->len;
7851         eeprom->len = 0;
7852
7853         eeprom->magic = TG3_EEPROM_MAGIC;
7854
7855         if (offset & 3) {
7856                 /* adjustments to start on required 4 byte boundary */
7857                 b_offset = offset & 3;
7858                 b_count = 4 - b_offset;
7859                 if (b_count > len) {
7860                         /* i.e. offset=1 len=2 */
7861                         b_count = len;
7862                 }
7863                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7864                 if (ret)
7865                         return ret;
7866                 val = cpu_to_le32(val);
7867                 memcpy(data, ((char*)&val) + b_offset, b_count);
7868                 len -= b_count;
7869                 offset += b_count;
7870                 eeprom->len += b_count;
7871         }
7872
7873         /* read bytes up to the last 4 byte boundary */
7874         pd = &data[eeprom->len];
7875         for (i = 0; i < (len - (len & 3)); i += 4) {
7876                 ret = tg3_nvram_read(tp, offset + i, &val);
7877                 if (ret) {
7878                         eeprom->len += i;
7879                         return ret;
7880                 }
7881                 val = cpu_to_le32(val);
7882                 memcpy(pd + i, &val, 4);
7883         }
7884         eeprom->len += i;
7885
7886         if (len & 3) {
7887                 /* read last bytes not ending on 4 byte boundary */
7888                 pd = &data[eeprom->len];
7889                 b_count = len & 3;
7890                 b_offset = offset + len - b_count;
7891                 ret = tg3_nvram_read(tp, b_offset, &val);
7892                 if (ret)
7893                         return ret;
7894                 val = cpu_to_le32(val);
7895                 memcpy(pd, ((char*)&val), b_count);
7896                 eeprom->len += b_count;
7897         }
7898         return 0;
7899 }
7900
7901 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7902
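/* Backs "ethtool -E".  Writes must cover whole 32-bit words, so a
 * partial first or last word is read back, merged with the new data in
 * a temporary buffer, and the padded span is handed to
 * tg3_nvram_write_block().
 */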
7903 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7904 {
7905         struct tg3 *tp = netdev_priv(dev);
7906         int ret;
7907         u32 offset, len, b_offset, odd_len, start, end;
7908         u8 *buf;
7909
7910         if (tp->link_config.phy_is_low_power)
7911                 return -EAGAIN;
7912
7913         if (eeprom->magic != TG3_EEPROM_MAGIC)
7914                 return -EINVAL;
7915
7916         offset = eeprom->offset;
7917         len = eeprom->len;
7918
7919         if ((b_offset = (offset & 3))) {
7920                 /* adjustments to start on required 4 byte boundary */
7921                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7922                 if (ret)
7923                         return ret;
7924                 start = cpu_to_le32(start);
7925                 len += b_offset;
7926                 offset &= ~3;
7927                 if (len < 4)
7928                         len = 4;
7929         }
7930
7931         odd_len = 0;
7932         if (len & 3) {
7933                 /* adjustments to end on required 4 byte boundary */
7934                 odd_len = 1;
7935                 len = (len + 3) & ~3;
7936                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7937                 if (ret)
7938                         return ret;
7939                 end = cpu_to_le32(end);
7940         }
7941
7942         buf = data;
7943         if (b_offset || odd_len) {
7944                 buf = kmalloc(len, GFP_KERNEL);
7945                 if (!buf)
7946                         return -ENOMEM;
7947                 if (b_offset)
7948                         memcpy(buf, &start, 4);
7949                 if (odd_len)
7950                         memcpy(buf+len-4, &end, 4);
7951                 memcpy(buf + b_offset, data, eeprom->len);
7952         }
7953
7954         ret = tg3_nvram_write_block(tp, offset, len, buf);
7955
7956         if (buf != data)
7957                 kfree(buf);
7958
7959         return ret;
7960 }
7961
7962 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7963 {
7964         struct tg3 *tp = netdev_priv(dev);
7965
7966         cmd->supported = (SUPPORTED_Autoneg);
7967
7968         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7969                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7970                                    SUPPORTED_1000baseT_Full);
7971
7972         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7973                 cmd->supported |= (SUPPORTED_100baseT_Half |
7974                                   SUPPORTED_100baseT_Full |
7975                                   SUPPORTED_10baseT_Half |
7976                                   SUPPORTED_10baseT_Full |
7977                                   SUPPORTED_MII);
7978                 cmd->port = PORT_TP;
7979         } else {
7980                 cmd->supported |= SUPPORTED_FIBRE;
7981                 cmd->port = PORT_FIBRE;
7982         }
7983
7984         cmd->advertising = tp->link_config.advertising;
7985         if (netif_running(dev)) {
7986                 cmd->speed = tp->link_config.active_speed;
7987                 cmd->duplex = tp->link_config.active_duplex;
7988         }
7989         cmd->phy_address = PHY_ADDR;
7990         cmd->transceiver = XCVR_INTERNAL;
7991         cmd->autoneg = tp->link_config.autoneg;
7992         cmd->maxtxpkt = 0;
7993         cmd->maxrxpkt = 0;
7994         return 0;
7995 }
7996
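/* Backs "ethtool -s": validate and apply speed/duplex/autoneg settings.
 * SerDes (fibre) ports may only advertise or force 1000 Mb/s, copper
 * ports cannot force 1000 Mb/s (autoneg is required for gigabit), and
 * 10/100-only devices reject gigabit outright.  When the interface is
 * up the PHY is reprogrammed immediately via tg3_setup_phy().
 */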
7997 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7998 {
7999         struct tg3 *tp = netdev_priv(dev);
8000
8001         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8002                 /* These are the only valid advertisement bits allowed.  */
8003                 if (cmd->autoneg == AUTONEG_ENABLE &&
8004                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8005                                           ADVERTISED_1000baseT_Full |
8006                                           ADVERTISED_Autoneg |
8007                                           ADVERTISED_FIBRE)))
8008                         return -EINVAL;
8009                 /* Fiber can only do SPEED_1000.  */
8010                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8011                          (cmd->speed != SPEED_1000))
8012                         return -EINVAL;
8013         /* Copper cannot force SPEED_1000.  */
8014         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8015                    (cmd->speed == SPEED_1000))
8016                 return -EINVAL;
8017         else if ((cmd->speed == SPEED_1000) &&
8018                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8019                 return -EINVAL;
8020
8021         tg3_full_lock(tp, 0);
8022
8023         tp->link_config.autoneg = cmd->autoneg;
8024         if (cmd->autoneg == AUTONEG_ENABLE) {
8025                 tp->link_config.advertising = cmd->advertising;
8026                 tp->link_config.speed = SPEED_INVALID;
8027                 tp->link_config.duplex = DUPLEX_INVALID;
8028         } else {
8029                 tp->link_config.advertising = 0;
8030                 tp->link_config.speed = cmd->speed;
8031                 tp->link_config.duplex = cmd->duplex;
8032         }
8033
8034         tp->link_config.orig_speed = tp->link_config.speed;
8035         tp->link_config.orig_duplex = tp->link_config.duplex;
8036         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8037
8038         if (netif_running(dev))
8039                 tg3_setup_phy(tp, 1);
8040
8041         tg3_full_unlock(tp);
8042
8043         return 0;
8044 }
8045
8046 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8047 {
8048         struct tg3 *tp = netdev_priv(dev);
8049
8050         strcpy(info->driver, DRV_MODULE_NAME);
8051         strcpy(info->version, DRV_MODULE_VERSION);
8052         strcpy(info->fw_version, tp->fw_ver);
8053         strcpy(info->bus_info, pci_name(tp->pdev));
8054 }
8055
8056 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8057 {
8058         struct tg3 *tp = netdev_priv(dev);
8059
8060         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8061                 wol->supported = WAKE_MAGIC;
8062         else
8063                 wol->supported = 0;
8064         wol->wolopts = 0;
8065         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8066                 wol->wolopts = WAKE_MAGIC;
8067         memset(&wol->sopass, 0, sizeof(wol->sopass));
8068 }
8069
8070 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8071 {
8072         struct tg3 *tp = netdev_priv(dev);
8073
8074         if (wol->wolopts & ~WAKE_MAGIC)
8075                 return -EINVAL;
8076         if ((wol->wolopts & WAKE_MAGIC) &&
8077             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8078                 return -EINVAL;
8079
8080         spin_lock_bh(&tp->lock);
8081         if (wol->wolopts & WAKE_MAGIC)
8082                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8083         else
8084                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8085         spin_unlock_bh(&tp->lock);
8086
8087         return 0;
8088 }
8089
8090 static u32 tg3_get_msglevel(struct net_device *dev)
8091 {
8092         struct tg3 *tp = netdev_priv(dev);
8093         return tp->msg_enable;
8094 }
8095
8096 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8097 {
8098         struct tg3 *tp = netdev_priv(dev);
8099         tp->msg_enable = value;
8100 }
8101
8102 static int tg3_set_tso(struct net_device *dev, u32 value)
8103 {
8104         struct tg3 *tp = netdev_priv(dev);
8105
8106         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8107                 if (value)
8108                         return -EINVAL;
8109                 return 0;
8110         }
8111         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8112             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8113                 if (value)
8114                         dev->features |= NETIF_F_TSO6;
8115                 else
8116                         dev->features &= ~NETIF_F_TSO6;
8117         }
8118         return ethtool_op_set_tso(dev, value);
8119 }
8120
8121 static int tg3_nway_reset(struct net_device *dev)
8122 {
8123         struct tg3 *tp = netdev_priv(dev);
8124         u32 bmcr;
8125         int r;
8126
8127         if (!netif_running(dev))
8128                 return -EAGAIN;
8129
8130         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8131                 return -EINVAL;
8132
8133         spin_lock_bh(&tp->lock);
8134         r = -EINVAL;
8135         tg3_readphy(tp, MII_BMCR, &bmcr);
8136         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8137             ((bmcr & BMCR_ANENABLE) ||
8138              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8139                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8140                                            BMCR_ANENABLE);
8141                 r = 0;
8142         }
8143         spin_unlock_bh(&tp->lock);
8144
8145         return r;
8146 }
8147
8148 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8149 {
8150         struct tg3 *tp = netdev_priv(dev);
8151
8152         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8153         ering->rx_mini_max_pending = 0;
8154         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8155                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8156         else
8157                 ering->rx_jumbo_max_pending = 0;
8158
8159         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8160
8161         ering->rx_pending = tp->rx_pending;
8162         ering->rx_mini_pending = 0;
8163         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8164                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8165         else
8166                 ering->rx_jumbo_pending = 0;
8167
8168         ering->tx_pending = tp->tx_pending;
8169 }
8170
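/* Backs "ethtool -G": check the requested ring sizes against the fixed
 * hardware ring limits (the tx ring must also leave room for a fully
 * fragmented skb, three of them on TSO-workaround chips), then halt and
 * restart the chip, if it is running, so the new sizes take effect.
 */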
8171 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8172 {
8173         struct tg3 *tp = netdev_priv(dev);
8174         int irq_sync = 0, err = 0;
8175
8176         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8177             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8178             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8179             (ering->tx_pending <= MAX_SKB_FRAGS) ||
8180             ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
8181              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
8182                 return -EINVAL;
8183
8184         if (netif_running(dev)) {
8185                 tg3_netif_stop(tp);
8186                 irq_sync = 1;
8187         }
8188
8189         tg3_full_lock(tp, irq_sync);
8190
8191         tp->rx_pending = ering->rx_pending;
8192
8193         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8194             tp->rx_pending > 63)
8195                 tp->rx_pending = 63;
8196         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8197         tp->tx_pending = ering->tx_pending;
8198
8199         if (netif_running(dev)) {
8200                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8201                 err = tg3_restart_hw(tp, 1);
8202                 if (!err)
8203                         tg3_netif_start(tp);
8204         }
8205
8206         tg3_full_unlock(tp);
8207
8208         return err;
8209 }
8210
8211 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8212 {
8213         struct tg3 *tp = netdev_priv(dev);
8214
8215         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8216         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8217         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8218 }
8219
8220 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8221 {
8222         struct tg3 *tp = netdev_priv(dev);
8223         int irq_sync = 0, err = 0;
8224
8225         if (netif_running(dev)) {
8226                 tg3_netif_stop(tp);
8227                 irq_sync = 1;
8228         }
8229
8230         tg3_full_lock(tp, irq_sync);
8231
8232         if (epause->autoneg)
8233                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8234         else
8235                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8236         if (epause->rx_pause)
8237                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8238         else
8239                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8240         if (epause->tx_pause)
8241                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8242         else
8243                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8244
8245         if (netif_running(dev)) {
8246                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8247                 err = tg3_restart_hw(tp, 1);
8248                 if (!err)
8249                         tg3_netif_start(tp);
8250         }
8251
8252         tg3_full_unlock(tp);
8253
8254         return err;
8255 }
8256
8257 static u32 tg3_get_rx_csum(struct net_device *dev)
8258 {
8259         struct tg3 *tp = netdev_priv(dev);
8260         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8261 }
8262
8263 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8264 {
8265         struct tg3 *tp = netdev_priv(dev);
8266
8267         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8268                 if (data != 0)
8269                         return -EINVAL;
8270                 return 0;
8271         }
8272
8273         spin_lock_bh(&tp->lock);
8274         if (data)
8275                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8276         else
8277                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8278         spin_unlock_bh(&tp->lock);
8279
8280         return 0;
8281 }
8282
8283 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8284 {
8285         struct tg3 *tp = netdev_priv(dev);
8286
8287         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8288                 if (data != 0)
8289                         return -EINVAL;
8290                 return 0;
8291         }
8292
8293         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8294             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8295                 ethtool_op_set_tx_hw_csum(dev, data);
8296         else
8297                 ethtool_op_set_tx_csum(dev, data);
8298
8299         return 0;
8300 }
8301
8302 static int tg3_get_stats_count(struct net_device *dev)
8303 {
8304         return TG3_NUM_STATS;
8305 }
8306
8307 static int tg3_get_test_count(struct net_device *dev)
8308 {
8309         return TG3_NUM_TEST;
8310 }
8311
8312 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8313 {
8314         switch (stringset) {
8315         case ETH_SS_STATS:
8316                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8317                 break;
8318         case ETH_SS_TEST:
8319                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8320                 break;
8321         default:
8322                 WARN_ON(1);     /* we need a WARN() */
8323                 break;
8324         }
8325 }
8326
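/* Backs "ethtool -p": blink the LEDs by toggling the override bits in
 * MAC_LED_CTRL every 500 ms for roughly 'data' seconds (two seconds if
 * no duration was given), then restore the saved LED configuration.
 */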
8327 static int tg3_phys_id(struct net_device *dev, u32 data)
8328 {
8329         struct tg3 *tp = netdev_priv(dev);
8330         int i;
8331
8332         if (!netif_running(tp->dev))
8333                 return -EAGAIN;
8334
8335         if (data == 0)
8336                 data = 2;
8337
8338         for (i = 0; i < (data * 2); i++) {
8339                 if ((i % 2) == 0)
8340                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8341                                            LED_CTRL_1000MBPS_ON |
8342                                            LED_CTRL_100MBPS_ON |
8343                                            LED_CTRL_10MBPS_ON |
8344                                            LED_CTRL_TRAFFIC_OVERRIDE |
8345                                            LED_CTRL_TRAFFIC_BLINK |
8346                                            LED_CTRL_TRAFFIC_LED);
8347
8348                 else
8349                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8350                                            LED_CTRL_TRAFFIC_OVERRIDE);
8351
8352                 if (msleep_interruptible(500))
8353                         break;
8354         }
8355         tw32(MAC_LED_CTRL, tp->led_ctrl);
8356         return 0;
8357 }
8358
8359 static void tg3_get_ethtool_stats(struct net_device *dev,
8360                                   struct ethtool_stats *estats, u64 *tmp_stats)
8361 {
8362         struct tg3 *tp = netdev_priv(dev);
8363         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8364 }
8365
8366 #define NVRAM_TEST_SIZE 0x100
8367 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8368 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8369 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8370
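/* NVRAM self-test.  Three image layouts are recognized: the legacy
 * format (CRC-32 of the bootstrap header checked at offset 0x10, CRC-32
 * of the manufacturing block at 0xfc), selfboot format 1 (the bytes of
 * the image must sum to zero), and the hardware selfboot format (each
 * data byte is checked against its stored parity bit).
 */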
8371 static int tg3_test_nvram(struct tg3 *tp)
8372 {
8373         u32 *buf, csum, magic;
8374         int i, j, err = 0, size;
8375
8376         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8377                 return -EIO;
8378
8379         if (magic == TG3_EEPROM_MAGIC)
8380                 size = NVRAM_TEST_SIZE;
8381         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
8382                 if ((magic & 0xe00000) == 0x200000)
8383                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8384                 else
8385                         return 0;
8386         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
8387                 size = NVRAM_SELFBOOT_HW_SIZE;
8388         else
8389                 return -EIO;
8390
8391         buf = kmalloc(size, GFP_KERNEL);
8392         if (buf == NULL)
8393                 return -ENOMEM;
8394
8395         err = -EIO;
8396         for (i = 0, j = 0; i < size; i += 4, j++) {
8397                 u32 val;
8398
8399                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8400                         break;
8401                 buf[j] = cpu_to_le32(val);
8402         }
8403         if (i < size)
8404                 goto out;
8405
8406         /* Selfboot format */
8407         if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
8408             TG3_EEPROM_MAGIC_FW) {
8409                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8410
8411                 for (i = 0; i < size; i++)
8412                         csum8 += buf8[i];
8413
8414                 if (csum8 == 0) {
8415                         err = 0;
8416                         goto out;
8417                 }
8418
8419                 err = -EIO;
8420                 goto out;
8421         }
8422
8423         if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
8424             TG3_EEPROM_MAGIC_HW) {
8425                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
8426                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
8427                 u8 *buf8 = (u8 *) buf;
8428                 int j, k;
8429
8430                 /* Separate the parity bits and the data bytes.  */
8431                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
8432                         if ((i == 0) || (i == 8)) {
8433                                 int l;
8434                                 u8 msk;
8435
8436                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
8437                                         parity[k++] = buf8[i] & msk;
8438                                 i++;
8439                         }
8440                         else if (i == 16) {
8441                                 int l;
8442                                 u8 msk;
8443
8444                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
8445                                         parity[k++] = buf8[i] & msk;
8446                                 i++;
8447
8448                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
8449                                         parity[k++] = buf8[i] & msk;
8450                                 i++;
8451                         }
8452                         data[j++] = buf8[i];
8453                 }
8454
8455                 err = -EIO;
8456                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
8457                         u8 hw8 = hweight8(data[i]);
8458
8459                         if ((hw8 & 0x1) && parity[i])
8460                                 goto out;
8461                         else if (!(hw8 & 0x1) && !parity[i])
8462                                 goto out;
8463                 }
8464                 err = 0;
8465                 goto out;
8466         }
8467
8468         /* Bootstrap checksum at offset 0x10 */
8469         csum = calc_crc((unsigned char *) buf, 0x10);
8470         if (csum != cpu_to_le32(buf[0x10/4]))
8471                 goto out;
8472
8473         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8474         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8475         if (csum != cpu_to_le32(buf[0xfc/4]))
8476                 goto out;
8477
8478         err = 0;
8479
8480 out:
8481         kfree(buf);
8482         return err;
8483 }
8484
8485 #define TG3_SERDES_TIMEOUT_SEC  2
8486 #define TG3_COPPER_TIMEOUT_SEC  6
8487
8488 static int tg3_test_link(struct tg3 *tp)
8489 {
8490         int i, max;
8491
8492         if (!netif_running(tp->dev))
8493                 return -ENODEV;
8494
8495         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8496                 max = TG3_SERDES_TIMEOUT_SEC;
8497         else
8498                 max = TG3_COPPER_TIMEOUT_SEC;
8499
8500         for (i = 0; i < max; i++) {
8501                 if (netif_carrier_ok(tp->dev))
8502                         return 0;
8503
8504                 if (msleep_interruptible(1000))
8505                         break;
8506         }
8507
8508         return -EIO;
8509 }
8510
8511 /* Only test the commonly used registers */
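/* Each table entry is exercised by writing all-zeros and then all-ones
 * to the writable bits, checking that read-only bits never change and
 * that read/write bits take both values; the TG3_FL_* flags gate
 * entries by ASIC generation.  The original value is restored after
 * each register is tested.
 */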
8512 static int tg3_test_registers(struct tg3 *tp)
8513 {
8514         int i, is_5705, is_5750;
8515         u32 offset, read_mask, write_mask, val, save_val, read_val;
8516         static struct {
8517                 u16 offset;
8518                 u16 flags;
8519 #define TG3_FL_5705     0x1
8520 #define TG3_FL_NOT_5705 0x2
8521 #define TG3_FL_NOT_5788 0x4
8522 #define TG3_FL_NOT_5750 0x8
8523                 u32 read_mask;
8524                 u32 write_mask;
8525         } reg_tbl[] = {
8526                 /* MAC Control Registers */
8527                 { MAC_MODE, TG3_FL_NOT_5705,
8528                         0x00000000, 0x00ef6f8c },
8529                 { MAC_MODE, TG3_FL_5705,
8530                         0x00000000, 0x01ef6b8c },
8531                 { MAC_STATUS, TG3_FL_NOT_5705,
8532                         0x03800107, 0x00000000 },
8533                 { MAC_STATUS, TG3_FL_5705,
8534                         0x03800100, 0x00000000 },
8535                 { MAC_ADDR_0_HIGH, 0x0000,
8536                         0x00000000, 0x0000ffff },
8537                 { MAC_ADDR_0_LOW, 0x0000,
8538                         0x00000000, 0xffffffff },
8539                 { MAC_RX_MTU_SIZE, 0x0000,
8540                         0x00000000, 0x0000ffff },
8541                 { MAC_TX_MODE, 0x0000,
8542                         0x00000000, 0x00000070 },
8543                 { MAC_TX_LENGTHS, 0x0000,
8544                         0x00000000, 0x00003fff },
8545                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8546                         0x00000000, 0x000007fc },
8547                 { MAC_RX_MODE, TG3_FL_5705,
8548                         0x00000000, 0x000007dc },
8549                 { MAC_HASH_REG_0, 0x0000,
8550                         0x00000000, 0xffffffff },
8551                 { MAC_HASH_REG_1, 0x0000,
8552                         0x00000000, 0xffffffff },
8553                 { MAC_HASH_REG_2, 0x0000,
8554                         0x00000000, 0xffffffff },
8555                 { MAC_HASH_REG_3, 0x0000,
8556                         0x00000000, 0xffffffff },
8557
8558                 /* Receive Data and Receive BD Initiator Control Registers. */
8559                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8560                         0x00000000, 0xffffffff },
8561                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8562                         0x00000000, 0xffffffff },
8563                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8564                         0x00000000, 0x00000003 },
8565                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8566                         0x00000000, 0xffffffff },
8567                 { RCVDBDI_STD_BD+0, 0x0000,
8568                         0x00000000, 0xffffffff },
8569                 { RCVDBDI_STD_BD+4, 0x0000,
8570                         0x00000000, 0xffffffff },
8571                 { RCVDBDI_STD_BD+8, 0x0000,
8572                         0x00000000, 0xffff0002 },
8573                 { RCVDBDI_STD_BD+0xc, 0x0000,
8574                         0x00000000, 0xffffffff },
8575
8576                 /* Receive BD Initiator Control Registers. */
8577                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8578                         0x00000000, 0xffffffff },
8579                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8580                         0x00000000, 0x000003ff },
8581                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8582                         0x00000000, 0xffffffff },
8583
8584                 /* Host Coalescing Control Registers. */
8585                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8586                         0x00000000, 0x00000004 },
8587                 { HOSTCC_MODE, TG3_FL_5705,
8588                         0x00000000, 0x000000f6 },
8589                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8590                         0x00000000, 0xffffffff },
8591                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8592                         0x00000000, 0x000003ff },
8593                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8594                         0x00000000, 0xffffffff },
8595                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8596                         0x00000000, 0x000003ff },
8597                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8598                         0x00000000, 0xffffffff },
8599                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8600                         0x00000000, 0x000000ff },
8601                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8602                         0x00000000, 0xffffffff },
8603                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8604                         0x00000000, 0x000000ff },
8605                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8606                         0x00000000, 0xffffffff },
8607                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8608                         0x00000000, 0xffffffff },
8609                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8610                         0x00000000, 0xffffffff },
8611                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8612                         0x00000000, 0x000000ff },
8613                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8614                         0x00000000, 0xffffffff },
8615                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8616                         0x00000000, 0x000000ff },
8617                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8618                         0x00000000, 0xffffffff },
8619                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8620                         0x00000000, 0xffffffff },
8621                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8622                         0x00000000, 0xffffffff },
8623                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8624                         0x00000000, 0xffffffff },
8625                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8626                         0x00000000, 0xffffffff },
8627                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8628                         0xffffffff, 0x00000000 },
8629                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8630                         0xffffffff, 0x00000000 },
8631
8632                 /* Buffer Manager Control Registers. */
8633                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
8634                         0x00000000, 0x007fff80 },
8635                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
8636                         0x00000000, 0x007fffff },
8637                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8638                         0x00000000, 0x0000003f },
8639                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8640                         0x00000000, 0x000001ff },
8641                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8642                         0x00000000, 0x000001ff },
8643                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8644                         0xffffffff, 0x00000000 },
8645                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8646                         0xffffffff, 0x00000000 },
8647
8648                 /* Mailbox Registers */
8649                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8650                         0x00000000, 0x000001ff },
8651                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8652                         0x00000000, 0x000001ff },
8653                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8654                         0x00000000, 0x000007ff },
8655                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8656                         0x00000000, 0x000001ff },
8657
8658                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8659         };
8660
8661         is_5705 = is_5750 = 0;
8662         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8663                 is_5705 = 1;
8664                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8665                         is_5750 = 1;
8666         }
8667
8668         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8669                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8670                         continue;
8671
8672                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8673                         continue;
8674
8675                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8676                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8677                         continue;
8678
8679                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
8680                         continue;
8681
8682                 offset = (u32) reg_tbl[i].offset;
8683                 read_mask = reg_tbl[i].read_mask;
8684                 write_mask = reg_tbl[i].write_mask;
8685
8686                 /* Save the original register content */
8687                 save_val = tr32(offset);
8688
8689                 /* Determine the read-only value. */
8690                 read_val = save_val & read_mask;
8691
8692                 /* Write zero to the register, then make sure the read-only bits
8693                  * are not changed and the read/write bits are all zeros.
8694                  */
8695                 tw32(offset, 0);
8696
8697                 val = tr32(offset);
8698
8699                 /* Test the read-only and read/write bits. */
8700                 if (((val & read_mask) != read_val) || (val & write_mask))
8701                         goto out;
8702
8703                 /* Write ones to all the bits defined by RdMask and WrMask, then
8704                  * make sure the read-only bits are not changed and the
8705                  * read/write bits are all ones.
8706                  */
8707                 tw32(offset, read_mask | write_mask);
8708
8709                 val = tr32(offset);
8710
8711                 /* Test the read-only bits. */
8712                 if ((val & read_mask) != read_val)
8713                         goto out;
8714
8715                 /* Test the read/write bits. */
8716                 if ((val & write_mask) != write_mask)
8717                         goto out;
8718
8719                 tw32(offset, save_val);
8720         }
8721
8722         return 0;
8723
8724 out:
8725         if (netif_msg_hw(tp))
8726                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
8727                        offset);
8728         tw32(offset, save_val);
8729         return -EIO;
8730 }
8731
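/* Walk an on-chip memory range with three test patterns (all zeros,
 * all ones, 0xaa55a55a), writing and reading back each 32-bit word
 * through the memory window helpers.
 */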
8732 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8733 {
8734         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8735         int i;
8736         u32 j;
8737
8738         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
8739                 for (j = 0; j < len; j += 4) {
8740                         u32 val;
8741
8742                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8743                         tg3_read_mem(tp, offset + j, &val);
8744                         if (val != test_pattern[i])
8745                                 return -EIO;
8746                 }
8747         }
8748         return 0;
8749 }
8750
8751 static int tg3_test_memory(struct tg3 *tp)
8752 {
8753         static struct mem_entry {
8754                 u32 offset;
8755                 u32 len;
8756         } mem_tbl_570x[] = {
8757                 { 0x00000000, 0x00b50},
8758                 { 0x00002000, 0x1c000},
8759                 { 0xffffffff, 0x00000}
8760         }, mem_tbl_5705[] = {
8761                 { 0x00000100, 0x0000c},
8762                 { 0x00000200, 0x00008},
8763                 { 0x00004000, 0x00800},
8764                 { 0x00006000, 0x01000},
8765                 { 0x00008000, 0x02000},
8766                 { 0x00010000, 0x0e000},
8767                 { 0xffffffff, 0x00000}
8768         }, mem_tbl_5755[] = {
8769                 { 0x00000200, 0x00008},
8770                 { 0x00004000, 0x00800},
8771                 { 0x00006000, 0x00800},
8772                 { 0x00008000, 0x02000},
8773                 { 0x00010000, 0x0c000},
8774                 { 0xffffffff, 0x00000}
8775         }, mem_tbl_5906[] = {
8776                 { 0x00000200, 0x00008},
8777                 { 0x00004000, 0x00400},
8778                 { 0x00006000, 0x00400},
8779                 { 0x00008000, 0x01000},
8780                 { 0x00010000, 0x01000},
8781                 { 0xffffffff, 0x00000}
8782         };
8783         struct mem_entry *mem_tbl;
8784         int err = 0;
8785         int i;
8786
8787         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8788                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8789                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8790                         mem_tbl = mem_tbl_5755;
8791                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8792                         mem_tbl = mem_tbl_5906;
8793                 else
8794                         mem_tbl = mem_tbl_5705;
8795         } else
8796                 mem_tbl = mem_tbl_570x;
8797
8798         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8799                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8800                     mem_tbl[i].len)) != 0)
8801                         break;
8802         }
8803
8804         return err;
8805 }
8806
8807 #define TG3_MAC_LOOPBACK        0
8808 #define TG3_PHY_LOOPBACK        1
8809
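/* Single-packet loopback test.  A 1514-byte frame addressed to the
 * device's own MAC is queued on the send ring, host coalescing is
 * forced so the status block updates, and the test then polls for the
 * frame to show up on the standard receive return ring with an intact
 * payload.  MAC loopback wraps the frame inside the MAC; PHY loopback
 * sets BMCR_LOOPBACK instead (with 5906 and 5700-specific fixups).
 */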
8810 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8811 {
8812         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8813         u32 desc_idx;
8814         struct sk_buff *skb, *rx_skb;
8815         u8 *tx_data;
8816         dma_addr_t map;
8817         int num_pkts, tx_len, rx_len, i, err;
8818         struct tg3_rx_buffer_desc *desc;
8819
8820         if (loopback_mode == TG3_MAC_LOOPBACK) {
8821                 /* HW errata - mac loopback fails in some cases on 5780.
8822                  * Normal traffic and PHY loopback are not affected by
8823                  * this erratum.
8824                  */
8825                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8826                         return 0;
8827
8828                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8829                            MAC_MODE_PORT_INT_LPBACK;
8830                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8831                         mac_mode |= MAC_MODE_LINK_POLARITY;
8832                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8833                         mac_mode |= MAC_MODE_PORT_MODE_MII;
8834                 else
8835                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
8836                 tw32(MAC_MODE, mac_mode);
8837         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8838                 u32 val;
8839
8840                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8841                         u32 phytest;
8842
8843                         if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
8844                                 u32 phy;
8845
8846                                 tg3_writephy(tp, MII_TG3_EPHY_TEST,
8847                                              phytest | MII_TG3_EPHY_SHADOW_EN);
8848                                 if (!tg3_readphy(tp, 0x1b, &phy))
8849                                         tg3_writephy(tp, 0x1b, phy & ~0x20);
8850                                 if (!tg3_readphy(tp, 0x10, &phy))
8851                                         tg3_writephy(tp, 0x10, phy & ~0x4000);
8852                                 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
8853                         }
8854                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
8855                 } else
8856                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
8857
8858                 tg3_writephy(tp, MII_BMCR, val);
8859                 udelay(40);
8860
8861                 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
8862                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8863                         tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
8864                         mac_mode |= MAC_MODE_PORT_MODE_MII;
8865                 } else
8866                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
8867
8868                 /* reset to prevent losing 1st rx packet intermittently */
8869                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8870                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8871                         udelay(10);
8872                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8873                 }
8874                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
8875                         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
8876                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8877                         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
8878                                 mac_mode |= MAC_MODE_LINK_POLARITY;
8879                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
8880                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8881                 }
8882                 tw32(MAC_MODE, mac_mode);
8883         }
8884         else
8885                 return -EINVAL;
8886
8887         err = -EIO;
8888
8889         tx_len = 1514;
8890         skb = netdev_alloc_skb(tp->dev, tx_len);
8891         if (!skb)
8892                 return -ENOMEM;
8893
8894         tx_data = skb_put(skb, tx_len);
8895         memcpy(tx_data, tp->dev->dev_addr, 6);
8896         memset(tx_data + 6, 0x0, 8);
8897
8898         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8899
8900         for (i = 14; i < tx_len; i++)
8901                 tx_data[i] = (u8) (i & 0xff);
8902
8903         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8904
8905         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8906              HOSTCC_MODE_NOW);
8907
8908         udelay(10);
8909
8910         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8911
8912         num_pkts = 0;
8913
8914         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8915
8916         tp->tx_prod++;
8917         num_pkts++;
8918
8919         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8920                      tp->tx_prod);
8921         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8922
8923         udelay(10);
8924
8925         /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
8926         for (i = 0; i < 25; i++) {
8927                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8928                        HOSTCC_MODE_NOW);
8929
8930                 udelay(10);
8931
8932                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8933                 rx_idx = tp->hw_status->idx[0].rx_producer;
8934                 if ((tx_idx == tp->tx_prod) &&
8935                     (rx_idx == (rx_start_idx + num_pkts)))
8936                         break;
8937         }
8938
8939         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8940         dev_kfree_skb(skb);
8941
8942         if (tx_idx != tp->tx_prod)
8943                 goto out;
8944
8945         if (rx_idx != rx_start_idx + num_pkts)
8946                 goto out;
8947
8948         desc = &tp->rx_rcb[rx_start_idx];
8949         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8950         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8951         if (opaque_key != RXD_OPAQUE_RING_STD)
8952                 goto out;
8953
8954         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8955             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8956                 goto out;
8957
8958         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8959         if (rx_len != tx_len)
8960                 goto out;
8961
8962         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8963
8964         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8965         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8966
8967         for (i = 14; i < tx_len; i++) {
8968                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8969                         goto out;
8970         }
8971         err = 0;
8972
8973         /* tg3_free_rings will unmap and free the rx_skb */
8974 out:
8975         return err;
8976 }
8977
8978 #define TG3_MAC_LOOPBACK_FAILED         1
8979 #define TG3_PHY_LOOPBACK_FAILED         2
8980 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8981                                          TG3_PHY_LOOPBACK_FAILED)
8982
8983 static int tg3_test_loopback(struct tg3 *tp)
8984 {
8985         int err = 0;
8986
8987         if (!netif_running(tp->dev))
8988                 return TG3_LOOPBACK_FAILED;
8989
8990         err = tg3_reset_hw(tp, 1);
8991         if (err)
8992                 return TG3_LOOPBACK_FAILED;
8993
8994         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8995                 err |= TG3_MAC_LOOPBACK_FAILED;
8996         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8997                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8998                         err |= TG3_PHY_LOOPBACK_FAILED;
8999         }
9000
9001         return err;
9002 }
9003
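/* Backs "ethtool -t".  Result slots: data[0] NVRAM, data[1] link,
 * data[2] registers, data[3] memory, data[4] loopback, data[5]
 * interrupt.  The register, memory, loopback and interrupt tests are
 * offline only: the chip is halted for them and, if the interface was
 * up, restarted once they finish.
 */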
9004 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9005                           u64 *data)
9006 {
9007         struct tg3 *tp = netdev_priv(dev);
9008
9009         if (tp->link_config.phy_is_low_power)
9010                 tg3_set_power_state(tp, PCI_D0);
9011
9012         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
9013
9014         if (tg3_test_nvram(tp) != 0) {
9015                 etest->flags |= ETH_TEST_FL_FAILED;
9016                 data[0] = 1;
9017         }
9018         if (tg3_test_link(tp) != 0) {
9019                 etest->flags |= ETH_TEST_FL_FAILED;
9020                 data[1] = 1;
9021         }
9022         if (etest->flags & ETH_TEST_FL_OFFLINE) {
9023                 int err, irq_sync = 0;
9024
9025                 if (netif_running(dev)) {
9026                         tg3_netif_stop(tp);
9027                         irq_sync = 1;
9028                 }
9029
9030                 tg3_full_lock(tp, irq_sync);
9031
9032                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
9033                 err = tg3_nvram_lock(tp);
9034                 tg3_halt_cpu(tp, RX_CPU_BASE);
9035                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9036                         tg3_halt_cpu(tp, TX_CPU_BASE);
9037                 if (!err)
9038                         tg3_nvram_unlock(tp);
9039
9040                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
9041                         tg3_phy_reset(tp);
9042
9043                 if (tg3_test_registers(tp) != 0) {
9044                         etest->flags |= ETH_TEST_FL_FAILED;
9045                         data[2] = 1;
9046                 }
9047                 if (tg3_test_memory(tp) != 0) {
9048                         etest->flags |= ETH_TEST_FL_FAILED;
9049                         data[3] = 1;
9050                 }
9051                 if ((data[4] = tg3_test_loopback(tp)) != 0)
9052                         etest->flags |= ETH_TEST_FL_FAILED;
9053
9054                 tg3_full_unlock(tp);
9055
9056                 if (tg3_test_interrupt(tp) != 0) {
9057                         etest->flags |= ETH_TEST_FL_FAILED;
9058                         data[5] = 1;
9059                 }
9060
9061                 tg3_full_lock(tp, 0);
9062
9063                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9064                 if (netif_running(dev)) {
9065                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9066                         if (!tg3_restart_hw(tp, 1))
9067                                 tg3_netif_start(tp);
9068                 }
9069
9070                 tg3_full_unlock(tp);
9071         }
9072         if (tp->link_config.phy_is_low_power)
9073                 tg3_set_power_state(tp, PCI_D3hot);
9074
9075 }
9076
9077 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9078 {
9079         struct mii_ioctl_data *data = if_mii(ifr);
9080         struct tg3 *tp = netdev_priv(dev);
9081         int err;
9082
9083         switch(cmd) {
9084         case SIOCGMIIPHY:
9085                 data->phy_id = PHY_ADDR;
9086
9087                 /* fallthru */
9088         case SIOCGMIIREG: {
9089                 u32 mii_regval;
9090
9091                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9092                         break;                  /* We have no PHY */
9093
9094                 if (tp->link_config.phy_is_low_power)
9095                         return -EAGAIN;
9096
9097                 spin_lock_bh(&tp->lock);
9098                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9099                 spin_unlock_bh(&tp->lock);
9100
9101                 data->val_out = mii_regval;
9102
9103                 return err;
9104         }
9105
9106         case SIOCSMIIREG:
9107                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9108                         break;                  /* We have no PHY */
9109
9110                 if (!capable(CAP_NET_ADMIN))
9111                         return -EPERM;
9112
9113                 if (tp->link_config.phy_is_low_power)
9114                         return -EAGAIN;
9115
9116                 spin_lock_bh(&tp->lock);
9117                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9118                 spin_unlock_bh(&tp->lock);
9119
9120                 return err;
9121
9122         default:
9123                 /* do nothing */
9124                 break;
9125         }
9126         return -EOPNOTSUPP;
9127 }
9128
9129 #if TG3_VLAN_TAG_USED
9130 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9131 {
9132         struct tg3 *tp = netdev_priv(dev);
9133
9134         if (netif_running(dev))
9135                 tg3_netif_stop(tp);
9136
9137         tg3_full_lock(tp, 0);
9138
9139         tp->vlgrp = grp;
9140
9141         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9142         __tg3_set_rx_mode(dev);
9143
9144         if (netif_running(dev))
9145                 tg3_netif_start(tp);
9146
9147         tg3_full_unlock(tp);
9148 }
9149 #endif
9150
9151 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9152 {
9153         struct tg3 *tp = netdev_priv(dev);
9154
9155         memcpy(ec, &tp->coal, sizeof(*ec));
9156         return 0;
9157 }
9158
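/* Validate and apply ethtool interrupt-coalescing parameters.  The
 * "_irq" and statistics-block limits are only meaningful on pre-5705
 * hardware; on 5705+ chips the corresponding maximums stay at zero, so
 * any nonzero request for those fields is rejected with -EINVAL.
 */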
9159 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9160 {
9161         struct tg3 *tp = netdev_priv(dev);
9162         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9163         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9164
9165         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9166                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9167                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9168                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9169                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9170         }
9171
9172         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9173             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9174             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9175             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9176             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9177             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9178             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9179             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9180             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9181             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9182                 return -EINVAL;
9183
9184         /* No rx interrupts will be generated if both are zero */
9185         if ((ec->rx_coalesce_usecs == 0) &&
9186             (ec->rx_max_coalesced_frames == 0))
9187                 return -EINVAL;
9188
9189         /* No tx interrupts will be generated if both are zero */
9190         if ((ec->tx_coalesce_usecs == 0) &&
9191             (ec->tx_max_coalesced_frames == 0))
9192                 return -EINVAL;
9193
9194         /* Only copy relevant parameters, ignore all others. */
9195         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9196         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9197         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9198         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9199         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9200         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9201         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9202         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9203         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9204
9205         if (netif_running(dev)) {
9206                 tg3_full_lock(tp, 0);
9207                 __tg3_set_coalesce(tp, &tp->coal);
9208                 tg3_full_unlock(tp);
9209         }
9210         return 0;
9211 }
9212
9213 static const struct ethtool_ops tg3_ethtool_ops = {
9214         .get_settings           = tg3_get_settings,
9215         .set_settings           = tg3_set_settings,
9216         .get_drvinfo            = tg3_get_drvinfo,
9217         .get_regs_len           = tg3_get_regs_len,
9218         .get_regs               = tg3_get_regs,
9219         .get_wol                = tg3_get_wol,
9220         .set_wol                = tg3_set_wol,
9221         .get_msglevel           = tg3_get_msglevel,
9222         .set_msglevel           = tg3_set_msglevel,
9223         .nway_reset             = tg3_nway_reset,
9224         .get_link               = ethtool_op_get_link,
9225         .get_eeprom_len         = tg3_get_eeprom_len,
9226         .get_eeprom             = tg3_get_eeprom,
9227         .set_eeprom             = tg3_set_eeprom,
9228         .get_ringparam          = tg3_get_ringparam,
9229         .set_ringparam          = tg3_set_ringparam,
9230         .get_pauseparam         = tg3_get_pauseparam,
9231         .set_pauseparam         = tg3_set_pauseparam,
9232         .get_rx_csum            = tg3_get_rx_csum,
9233         .set_rx_csum            = tg3_set_rx_csum,
9234         .get_tx_csum            = ethtool_op_get_tx_csum,
9235         .set_tx_csum            = tg3_set_tx_csum,
9236         .get_sg                 = ethtool_op_get_sg,
9237         .set_sg                 = ethtool_op_set_sg,
9238         .get_tso                = ethtool_op_get_tso,
9239         .set_tso                = tg3_set_tso,
9240         .self_test_count        = tg3_get_test_count,
9241         .self_test              = tg3_self_test,
9242         .get_strings            = tg3_get_strings,
9243         .phys_id                = tg3_phys_id,
9244         .get_stats_count        = tg3_get_stats_count,
9245         .get_ethtool_stats      = tg3_get_ethtool_stats,
9246         .get_coalesce           = tg3_get_coalesce,
9247         .set_coalesce           = tg3_set_coalesce,
9248         .get_perm_addr          = ethtool_op_get_perm_addr,
9249 };
9250
9251 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9252 {
9253         u32 cursize, val, magic;
9254
9255         tp->nvram_size = EEPROM_CHIP_SIZE;
9256
9257         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9258                 return;
9259
9260         if ((magic != TG3_EEPROM_MAGIC) &&
9261             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9262             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9263                 return;
9264
9265         /*
9266          * Size the chip by reading offsets at increasing powers of two.
9267          * When we encounter our validation signature, we know the addressing
9268          * has wrapped around, and thus have our chip size.
9269          */
9270         cursize = 0x10;
9271
9272         while (cursize < tp->nvram_size) {
9273                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9274                         return;
9275
9276                 if (val == magic)
9277                         break;
9278
9279                 cursize <<= 1;
9280         }
9281
9282         tp->nvram_size = cursize;
9283 }
9284
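/* Determine the NVRAM size.  Images without the standard signature
 * (self-boot format) fall back to tg3_get_eeprom_size(), which probes at
 * power-of-two offsets until the magic signature wraps around.  For
 * standard images the size, in KB, is kept in the upper 16 bits of the
 * word at offset 0xf0; a value of zero there means the 512KB default is
 * assumed.
 */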
9285 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9286 {
9287         u32 val;
9288
9289         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9290                 return;
9291
9292         /* Selfboot format */
9293         if (val != TG3_EEPROM_MAGIC) {
9294                 tg3_get_eeprom_size(tp);
9295                 return;
9296         }
9297
9298         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9299                 if (val != 0) {
9300                         tp->nvram_size = (val >> 16) * 1024;
9301                         return;
9302                 }
9303         }
9304         tp->nvram_size = 0x80000;
9305 }
9306
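/* The tg3_get_*_nvram_info() routines below decode NVRAM_CFG1 into the
 * JEDEC vendor id, page size and the NVRAM/FLASH/BUFFERED flags.  Those
 * flags later pick the write strategy (buffered word writes vs. the
 * unbuffered page read-modify-erase-write path) and the Atmel page
 * address translation in tg3_nvram_phys_addr().
 */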
9307 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9308 {
9309         u32 nvcfg1;
9310
9311         nvcfg1 = tr32(NVRAM_CFG1);
9312         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9313                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9314         }
9315         else {
9316                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9317                 tw32(NVRAM_CFG1, nvcfg1);
9318         }
9319
9320         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9321             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9322                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9323                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9324                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9325                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9326                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9327                                 break;
9328                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9329                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9330                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9331                                 break;
9332                         case FLASH_VENDOR_ATMEL_EEPROM:
9333                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9334                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9335                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9336                                 break;
9337                         case FLASH_VENDOR_ST:
9338                                 tp->nvram_jedecnum = JEDEC_ST;
9339                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9340                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9341                                 break;
9342                         case FLASH_VENDOR_SAIFUN:
9343                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9344                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9345                                 break;
9346                         case FLASH_VENDOR_SST_SMALL:
9347                         case FLASH_VENDOR_SST_LARGE:
9348                                 tp->nvram_jedecnum = JEDEC_SST;
9349                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9350                                 break;
9351                 }
9352         }
9353         else {
9354                 tp->nvram_jedecnum = JEDEC_ATMEL;
9355                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9356                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9357         }
9358 }
9359
9360 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9361 {
9362         u32 nvcfg1;
9363
9364         nvcfg1 = tr32(NVRAM_CFG1);
9365
9366         /* NVRAM protection for TPM */
9367         if (nvcfg1 & (1 << 27))
9368                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9369
9370         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9371                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9372                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9373                         tp->nvram_jedecnum = JEDEC_ATMEL;
9374                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9375                         break;
9376                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9377                         tp->nvram_jedecnum = JEDEC_ATMEL;
9378                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9379                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9380                         break;
9381                 case FLASH_5752VENDOR_ST_M45PE10:
9382                 case FLASH_5752VENDOR_ST_M45PE20:
9383                 case FLASH_5752VENDOR_ST_M45PE40:
9384                         tp->nvram_jedecnum = JEDEC_ST;
9385                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9386                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9387                         break;
9388         }
9389
9390         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9391                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9392                         case FLASH_5752PAGE_SIZE_256:
9393                                 tp->nvram_pagesize = 256;
9394                                 break;
9395                         case FLASH_5752PAGE_SIZE_512:
9396                                 tp->nvram_pagesize = 512;
9397                                 break;
9398                         case FLASH_5752PAGE_SIZE_1K:
9399                                 tp->nvram_pagesize = 1024;
9400                                 break;
9401                         case FLASH_5752PAGE_SIZE_2K:
9402                                 tp->nvram_pagesize = 2048;
9403                                 break;
9404                         case FLASH_5752PAGE_SIZE_4K:
9405                                 tp->nvram_pagesize = 4096;
9406                                 break;
9407                         case FLASH_5752PAGE_SIZE_264:
9408                                 tp->nvram_pagesize = 264;
9409                                 break;
9410                 }
9411         }
9412         else {
9413                 /* For eeprom, set pagesize to maximum eeprom size */
9414                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9415
9416                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9417                 tw32(NVRAM_CFG1, nvcfg1);
9418         }
9419 }
9420
9421 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9422 {
9423         u32 nvcfg1, protect = 0;
9424
9425         nvcfg1 = tr32(NVRAM_CFG1);
9426
9427         /* NVRAM protection for TPM */
9428         if (nvcfg1 & (1 << 27)) {
9429                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9430                 protect = 1;
9431         }
9432
9433         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9434         switch (nvcfg1) {
9435                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9436                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9437                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9438                         tp->nvram_jedecnum = JEDEC_ATMEL;
9439                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9440                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9441                         tp->nvram_pagesize = 264;
9442                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1)
9443                                 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9444                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9445                                 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
9446                         else
9447                                 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
9448                         break;
9449                 case FLASH_5752VENDOR_ST_M45PE10:
9450                 case FLASH_5752VENDOR_ST_M45PE20:
9451                 case FLASH_5752VENDOR_ST_M45PE40:
9452                         tp->nvram_jedecnum = JEDEC_ST;
9453                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9454                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9455                         tp->nvram_pagesize = 256;
9456                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
9457                                 tp->nvram_size = (protect ? 0x10000 : 0x20000);
9458                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
9459                                 tp->nvram_size = (protect ? 0x10000 : 0x40000);
9460                         else
9461                                 tp->nvram_size = (protect ? 0x20000 : 0x80000);
9462                         break;
9463         }
9464 }
9465
9466 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9467 {
9468         u32 nvcfg1;
9469
9470         nvcfg1 = tr32(NVRAM_CFG1);
9471
9472         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9473                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9474                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9475                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9476                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9477                         tp->nvram_jedecnum = JEDEC_ATMEL;
9478                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9479                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9480
9481                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9482                         tw32(NVRAM_CFG1, nvcfg1);
9483                         break;
9484                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9485                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9486                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9487                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9488                         tp->nvram_jedecnum = JEDEC_ATMEL;
9489                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9490                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9491                         tp->nvram_pagesize = 264;
9492                         break;
9493                 case FLASH_5752VENDOR_ST_M45PE10:
9494                 case FLASH_5752VENDOR_ST_M45PE20:
9495                 case FLASH_5752VENDOR_ST_M45PE40:
9496                         tp->nvram_jedecnum = JEDEC_ST;
9497                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9498                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9499                         tp->nvram_pagesize = 256;
9500                         break;
9501         }
9502 }
9503
9504 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9505 {
9506         tp->nvram_jedecnum = JEDEC_ATMEL;
9507         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9508         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9509 }
9510
9511 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9512 static void __devinit tg3_nvram_init(struct tg3 *tp)
9513 {
9514         tw32_f(GRC_EEPROM_ADDR,
9515              (EEPROM_ADDR_FSM_RESET |
9516               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9517                EEPROM_ADDR_CLKPERD_SHIFT)));
9518
9519         msleep(1);
9520
9521         /* Enable serial EEPROM (seeprom) accesses. */
9522         tw32_f(GRC_LOCAL_CTRL,
9523              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9524         udelay(100);
9525
9526         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9527             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9528                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9529
9530                 if (tg3_nvram_lock(tp)) {
9531                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9532                                "tg3_nvram_init failed.\n", tp->dev->name);
9533                         return;
9534                 }
9535                 tg3_enable_nvram_access(tp);
9536
9537                 tp->nvram_size = 0;
9538
9539                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9540                         tg3_get_5752_nvram_info(tp);
9541                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9542                         tg3_get_5755_nvram_info(tp);
9543                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9544                         tg3_get_5787_nvram_info(tp);
9545                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9546                         tg3_get_5906_nvram_info(tp);
9547                 else
9548                         tg3_get_nvram_info(tp);
9549
9550                 if (tp->nvram_size == 0)
9551                         tg3_get_nvram_size(tp);
9552
9553                 tg3_disable_nvram_access(tp);
9554                 tg3_nvram_unlock(tp);
9555
9556         } else {
9557                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9558
9559                 tg3_get_eeprom_size(tp);
9560         }
9561 }
9562
9563 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9564                                         u32 offset, u32 *val)
9565 {
9566         u32 tmp;
9567         int i;
9568
9569         if (offset > EEPROM_ADDR_ADDR_MASK ||
9570             (offset % 4) != 0)
9571                 return -EINVAL;
9572
9573         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9574                                         EEPROM_ADDR_DEVID_MASK |
9575                                         EEPROM_ADDR_READ);
9576         tw32(GRC_EEPROM_ADDR,
9577              tmp |
9578              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9579              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9580               EEPROM_ADDR_ADDR_MASK) |
9581              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9582
9583         for (i = 0; i < 1000; i++) {
9584                 tmp = tr32(GRC_EEPROM_ADDR);
9585
9586                 if (tmp & EEPROM_ADDR_COMPLETE)
9587                         break;
9588                 msleep(1);
9589         }
9590         if (!(tmp & EEPROM_ADDR_COMPLETE))
9591                 return -EBUSY;
9592
9593         *val = tr32(GRC_EEPROM_DATA);
9594         return 0;
9595 }
9596
9597 #define NVRAM_CMD_TIMEOUT 10000
9598
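/* Issue one NVRAM controller command and poll for completion.  With
 * NVRAM_CMD_TIMEOUT iterations of a 10us delay this waits at most about
 * 100ms before giving up with -EBUSY.
 */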
9599 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9600 {
9601         int i;
9602
9603         tw32(NVRAM_CMD, nvram_cmd);
9604         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9605                 udelay(10);
9606                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9607                         udelay(10);
9608                         break;
9609                 }
9610         }
9611         if (i == NVRAM_CMD_TIMEOUT) {
9612                 return -EBUSY;
9613         }
9614         return 0;
9615 }
9616
9617 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9618 {
9619         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9620             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9621             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9622             (tp->nvram_jedecnum == JEDEC_ATMEL))
9623
9624                 addr = ((addr / tp->nvram_pagesize) <<
9625                         ATMEL_AT45DB0X1B_PAGE_POS) +
9626                        (addr % tp->nvram_pagesize);
9627
9628         return addr;
9629 }
9630
9631 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9632 {
9633         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9634             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9635             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9636             (tp->nvram_jedecnum == JEDEC_ATMEL))
9637
9638                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9639                         tp->nvram_pagesize) +
9640                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9641
9642         return addr;
9643 }
9644
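/* The two helpers above convert between linear byte offsets and the
 * page:offset form used by buffered Atmel AT45DB parts, whose pages are
 * 264 bytes rather than a power of two.  As a rough illustration,
 * assuming ATMEL_AT45DB0X1B_PAGE_POS is 9 (enough address bits for a
 * 264-byte page), linear offset 1000 maps to page 3, byte 208 within the
 * page, i.e. a physical address of (3 << 9) + 208 = 0x6d0; the logical
 * conversion is the exact inverse.
 */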
9645 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9646 {
9647         int ret;
9648
9649         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9650                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9651
9652         offset = tg3_nvram_phys_addr(tp, offset);
9653
9654         if (offset > NVRAM_ADDR_MSK)
9655                 return -EINVAL;
9656
9657         ret = tg3_nvram_lock(tp);
9658         if (ret)
9659                 return ret;
9660
9661         tg3_enable_nvram_access(tp);
9662
9663         tw32(NVRAM_ADDR, offset);
9664         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9665                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9666
9667         if (ret == 0)
9668                 *val = swab32(tr32(NVRAM_RDDATA));
9669
9670         tg3_disable_nvram_access(tp);
9671
9672         tg3_nvram_unlock(tp);
9673
9674         return ret;
9675 }
9676
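/* tg3_nvram_read_swab() simply byte-swaps the word returned by
 * tg3_nvram_read().  It is the variant used by callers that compare
 * against on-flash signatures such as TG3_EEPROM_MAGIC, e.g.
 * tg3_get_nvram_size() and tg3_read_partno().
 */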
9677 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9678 {
9679         int err;
9680         u32 tmp;
9681
9682         err = tg3_nvram_read(tp, offset, &tmp);
9683         *val = swab32(tmp);
9684         return err;
9685 }
9686
9687 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9688                                     u32 offset, u32 len, u8 *buf)
9689 {
9690         int i, j, rc = 0;
9691         u32 val;
9692
9693         for (i = 0; i < len; i += 4) {
9694                 u32 addr, data;
9695
9696                 addr = offset + i;
9697
9698                 memcpy(&data, buf + i, 4);
9699
9700                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9701
9702                 val = tr32(GRC_EEPROM_ADDR);
9703                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9704
9705                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9706                         EEPROM_ADDR_READ);
9707                 tw32(GRC_EEPROM_ADDR, val |
9708                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9709                         (addr & EEPROM_ADDR_ADDR_MASK) |
9710                         EEPROM_ADDR_START |
9711                         EEPROM_ADDR_WRITE);
9712
9713                 for (j = 0; j < 1000; j++) {
9714                         val = tr32(GRC_EEPROM_ADDR);
9715
9716                         if (val & EEPROM_ADDR_COMPLETE)
9717                                 break;
9718                         msleep(1);
9719                 }
9720                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9721                         rc = -EBUSY;
9722                         break;
9723                 }
9724         }
9725
9726         return rc;
9727 }
9728
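/* Unbuffered flash parts can only be programmed a full page at a time,
 * so the routine below reads the existing page into a scratch buffer,
 * merges in the caller's data, then issues write-enable commands, erases
 * the page, and rewrites it word by word with NVRAM_CMD_FIRST/LAST
 * framing.
 */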
9729 /* offset and length are dword aligned */
9730 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9731                 u8 *buf)
9732 {
9733         int ret = 0;
9734         u32 pagesize = tp->nvram_pagesize;
9735         u32 pagemask = pagesize - 1;
9736         u32 nvram_cmd;
9737         u8 *tmp;
9738
9739         tmp = kmalloc(pagesize, GFP_KERNEL);
9740         if (tmp == NULL)
9741                 return -ENOMEM;
9742
9743         while (len) {
9744                 int j;
9745                 u32 phy_addr, page_off, size;
9746
9747                 phy_addr = offset & ~pagemask;
9748
9749                 for (j = 0; j < pagesize; j += 4) {
9750                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9751                                                 (u32 *) (tmp + j))))
9752                                 break;
9753                 }
9754                 if (ret)
9755                         break;
9756
9757                 page_off = offset & pagemask;
9758                 size = pagesize;
9759                 if (len < size)
9760                         size = len;
9761
9762                 len -= size;
9763
9764                 memcpy(tmp + page_off, buf, size);
9765
9766                 offset = offset + (pagesize - page_off);
9767
9768                 tg3_enable_nvram_access(tp);
9769
9770                 /*
9771                  * Before we can erase the flash page, we need
9772                  * to issue a special "write enable" command.
9773                  */
9774                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9775
9776                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9777                         break;
9778
9779                 /* Erase the target page */
9780                 tw32(NVRAM_ADDR, phy_addr);
9781
9782                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9783                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9784
9785                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9786                         break;
9787
9788                 /* Issue another write enable to start the write. */
9789                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9790
9791                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9792                         break;
9793
9794                 for (j = 0; j < pagesize; j += 4) {
9795                         u32 data;
9796
9797                         data = *((u32 *) (tmp + j));
9798                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9799
9800                         tw32(NVRAM_ADDR, phy_addr + j);
9801
9802                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9803                                 NVRAM_CMD_WR;
9804
9805                         if (j == 0)
9806                                 nvram_cmd |= NVRAM_CMD_FIRST;
9807                         else if (j == (pagesize - 4))
9808                                 nvram_cmd |= NVRAM_CMD_LAST;
9809
9810                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9811                                 break;
9812                 }
9813                 if (ret)
9814                         break;
9815         }
9816
9817         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9818         tg3_nvram_exec_cmd(tp, nvram_cmd);
9819
9820         kfree(tmp);
9821
9822         return ret;
9823 }
9824
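/* Buffered parts (and plain EEPROMs) are written one dword at a time.
 * NVRAM_CMD_FIRST/LAST mark page boundaries, and on older ST parts
 * (pre-5752/5755/5787) an explicit write-enable command is issued before
 * the first word of each page.
 */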
9825 /* offset and length are dword aligned */
9826 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9827                 u8 *buf)
9828 {
9829         int i, ret = 0;
9830
9831         for (i = 0; i < len; i += 4, offset += 4) {
9832                 u32 data, page_off, phy_addr, nvram_cmd;
9833
9834                 memcpy(&data, buf + i, 4);
9835                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9836
9837                 page_off = offset % tp->nvram_pagesize;
9838
9839                 phy_addr = tg3_nvram_phys_addr(tp, offset);
9840
9841                 tw32(NVRAM_ADDR, phy_addr);
9842
9843                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9844
9845                 if ((page_off == 0) || (i == 0))
9846                         nvram_cmd |= NVRAM_CMD_FIRST;
9847                 if (page_off == (tp->nvram_pagesize - 4))
9848                         nvram_cmd |= NVRAM_CMD_LAST;
9849
9850                 if (i == (len - 4))
9851                         nvram_cmd |= NVRAM_CMD_LAST;
9852
9853                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9854                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9855                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9856                     (tp->nvram_jedecnum == JEDEC_ST) &&
9857                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9858
9859                         if ((ret = tg3_nvram_exec_cmd(tp,
9860                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9861                                 NVRAM_CMD_DONE)))
9862
9863                                 break;
9864                 }
9865                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9866                         /* We always do complete word writes to eeprom. */
9867                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9868                 }
9869
9870                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9871                         break;
9872         }
9873         return ret;
9874 }
9875
9876 /* offset and length are dword aligned */
9877 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9878 {
9879         int ret;
9880
9881         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9882                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9883                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9884                 udelay(40);
9885         }
9886
9887         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9888                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9889         }
9890         else {
9891                 u32 grc_mode;
9892
9893                 ret = tg3_nvram_lock(tp);
9894                 if (ret)
9895                         return ret;
9896
9897                 tg3_enable_nvram_access(tp);
9898                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9899                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9900                         tw32(NVRAM_WRITE1, 0x406);
9901
9902                 grc_mode = tr32(GRC_MODE);
9903                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9904
9905                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9906                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9907
9908                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9909                                 buf);
9910                 }
9911                 else {
9912                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9913                                 buf);
9914                 }
9915
9916                 grc_mode = tr32(GRC_MODE);
9917                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9918
9919                 tg3_disable_nvram_access(tp);
9920                 tg3_nvram_unlock(tp);
9921         }
9922
9923         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9924                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9925                 udelay(40);
9926         }
9927
9928         return ret;
9929 }
9930
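/* Some boards ship without a valid NVRAM signature, so the PHY type
 * cannot be read from the EEPROM configuration area.  The table below
 * maps PCI subsystem vendor/device IDs of known boards to their PHY IDs
 * and is consulted by lookup_by_subsys() as a last resort in
 * tg3_phy_probe().  An entry of 0 is treated as a SERDES (fiber) board
 * by tg3_phy_probe().
 */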
9931 struct subsys_tbl_ent {
9932         u16 subsys_vendor, subsys_devid;
9933         u32 phy_id;
9934 };
9935
9936 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9937         /* Broadcom boards. */
9938         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9939         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9940         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9941         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9942         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9943         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9944         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9945         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9946         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9947         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9948         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9949
9950         /* 3com boards. */
9951         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9952         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9953         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9954         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9955         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9956
9957         /* DELL boards. */
9958         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9959         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9960         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9961         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9962
9963         /* Compaq boards. */
9964         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9965         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9966         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9967         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9968         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9969
9970         /* IBM boards. */
9971         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9972 };
9973
9974 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9975 {
9976         int i;
9977
9978         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9979                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9980                      tp->pdev->subsystem_vendor) &&
9981                     (subsys_id_to_phy_id[i].subsys_devid ==
9982                      tp->pdev->subsystem_device))
9983                         return &subsys_id_to_phy_id[i];
9984         }
9985         return NULL;
9986 }
9987
9988 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9989 {
9990         u32 val;
9991         u16 pmcsr;
9992
9993         /* On some early chips the SRAM cannot be accessed in D3hot state,
9994          * so we need to make sure we're in D0.
9995          */
9996         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9997         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9998         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9999         msleep(1);
10000
10001         /* Make sure register accesses (indirect or otherwise)
10002          * will function correctly.
10003          */
10004         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10005                                tp->misc_host_ctrl);
10006
10007         /* The memory arbiter has to be enabled in order for SRAM accesses
10008          * to succeed.  Normally on powerup the tg3 chip firmware will make
10009          * sure it is enabled, but other entities such as system netboot
10010          * code might disable it.
10011          */
10012         val = tr32(MEMARB_MODE);
10013         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10014
10015         tp->phy_id = PHY_ID_INVALID;
10016         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10017
10018         /* Assume an onboard device and WOL capable by default.  */
10019         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10020
10021         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10022                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10023                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10024                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10025                 }
10026                 if (tr32(VCPU_CFGSHDW) & VCPU_CFGSHDW_ASPM_DBNC)
10027                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10028                 return;
10029         }
10030
10031         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10032         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10033                 u32 nic_cfg, led_cfg;
10034                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10035                 int eeprom_phy_serdes = 0;
10036
10037                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10038                 tp->nic_sram_data_cfg = nic_cfg;
10039
10040                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10041                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10042                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10043                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10044                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10045                     (ver > 0) && (ver < 0x100))
10046                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10047
10048                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10049                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10050                         eeprom_phy_serdes = 1;
10051
10052                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10053                 if (nic_phy_id != 0) {
10054                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10055                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10056
10057                         eeprom_phy_id  = (id1 >> 16) << 10;
10058                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10059                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10060                 } else
10061                         eeprom_phy_id = 0;
10062
10063                 tp->phy_id = eeprom_phy_id;
10064                 if (eeprom_phy_serdes) {
10065                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10066                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10067                         else
10068                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10069                 }
10070
10071                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10072                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10073                                     SHASTA_EXT_LED_MODE_MASK);
10074                 else
10075                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10076
10077                 switch (led_cfg) {
10078                 default:
10079                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10080                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10081                         break;
10082
10083                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10084                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10085                         break;
10086
10087                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10088                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10089
10090                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10091                          * read from some older 5700/5701 bootcode.
10092                          */
10093                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10094                             ASIC_REV_5700 ||
10095                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10096                             ASIC_REV_5701)
10097                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10098
10099                         break;
10100
10101                 case SHASTA_EXT_LED_SHARED:
10102                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10103                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10104                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10105                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10106                                                  LED_CTRL_MODE_PHY_2);
10107                         break;
10108
10109                 case SHASTA_EXT_LED_MAC:
10110                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10111                         break;
10112
10113                 case SHASTA_EXT_LED_COMBO:
10114                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10115                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10116                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10117                                                  LED_CTRL_MODE_PHY_2);
10118                         break;
10119
10120                 }
10121
10122                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10123                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10124                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10125                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10126
10127                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10128                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10129                         if ((tp->pdev->subsystem_vendor ==
10130                              PCI_VENDOR_ID_ARIMA) &&
10131                             (tp->pdev->subsystem_device == 0x205a ||
10132                              tp->pdev->subsystem_device == 0x2063))
10133                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10134                 } else {
10135                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10136                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10137                 }
10138
10139                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10140                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10141                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10142                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10143                 }
10144                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10145                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10146                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10147
10148                 if (cfg2 & (1 << 17))
10149                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10150
10151                 /* SerDes signal pre-emphasis in register 0x590 is set by
10152                  * the bootcode if bit 18 is set. */
10153                 if (cfg2 & (1 << 18))
10154                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10155
10156                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10157                         u32 cfg3;
10158
10159                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10160                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10161                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10162                 }
10163         }
10164 }
10165
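/* Probe the PHY.  The 32-bit phy_id used throughout the driver is packed
 * the same way here as in tg3_get_eeprom_hw_cfg(): the PHYSID1 word is
 * shifted up by 10, the top six bits of PHYSID2 are shifted up by 16,
 * and the low ten bits of PHYSID2 are kept as-is, so IDs read from the
 * MII registers and from the NVRAM configuration area are directly
 * comparable.
 */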
10166 static int __devinit tg3_phy_probe(struct tg3 *tp)
10167 {
10168         u32 hw_phy_id_1, hw_phy_id_2;
10169         u32 hw_phy_id, hw_phy_id_masked;
10170         int err;
10171
10172         /* Reading the PHY ID register can conflict with ASF
10173          * firmware access to the PHY hardware.
10174          */
10175         err = 0;
10176         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
10177                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
10178         } else {
10179                 /* Now read the physical PHY_ID from the chip and verify
10180                  * that it is sane.  If it doesn't look good, we fall back
10181                  * to the PHY_ID found in the eeprom area and, failing
10182                  * that, the hard-coded subsystem-ID table.
10183                  */
10184                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
10185                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
10186
10187                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
10188                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
10189                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
10190
10191                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
10192         }
10193
10194         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
10195                 tp->phy_id = hw_phy_id;
10196                 if (hw_phy_id_masked == PHY_ID_BCM8002)
10197                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10198                 else
10199                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
10200         } else {
10201                 if (tp->phy_id != PHY_ID_INVALID) {
10202                         /* Do nothing, phy ID already set up in
10203                          * tg3_get_eeprom_hw_cfg().
10204                          */
10205                 } else {
10206                         struct subsys_tbl_ent *p;
10207
10208                         /* No eeprom signature?  Try the hardcoded
10209                          * subsys device table.
10210                          */
10211                         p = lookup_by_subsys(tp);
10212                         if (!p)
10213                                 return -ENODEV;
10214
10215                         tp->phy_id = p->phy_id;
10216                         if (!tp->phy_id ||
10217                             tp->phy_id == PHY_ID_BCM8002)
10218                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10219                 }
10220         }
10221
10222         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
10223             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
10224                 u32 bmsr, adv_reg, tg3_ctrl, mask;
10225
10226                 tg3_readphy(tp, MII_BMSR, &bmsr);
10227                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
10228                     (bmsr & BMSR_LSTATUS))
10229                         goto skip_phy_reset;
10230
10231                 err = tg3_phy_reset(tp);
10232                 if (err)
10233                         return err;
10234
10235                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
10236                            ADVERTISE_100HALF | ADVERTISE_100FULL |
10237                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
10238                 tg3_ctrl = 0;
10239                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
10240                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
10241                                     MII_TG3_CTRL_ADV_1000_FULL);
10242                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10243                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
10244                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
10245                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
10246                 }
10247
10248                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10249                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10250                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
10251                 if (!tg3_copper_is_advertising_all(tp, mask)) {
10252                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10253
10254                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10255                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10256
10257                         tg3_writephy(tp, MII_BMCR,
10258                                      BMCR_ANENABLE | BMCR_ANRESTART);
10259                 }
10260                 tg3_phy_set_wirespeed(tp);
10261
10262                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10263                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10264                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10265         }
10266
10267 skip_phy_reset:
10268         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
10269                 err = tg3_init_5401phy_dsp(tp);
10270                 if (err)
10271                         return err;
10272         }
10273
10274         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
10275                 err = tg3_init_5401phy_dsp(tp);
10276         }
10277
10278         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10279                 tp->link_config.advertising =
10280                         (ADVERTISED_1000baseT_Half |
10281                          ADVERTISED_1000baseT_Full |
10282                          ADVERTISED_Autoneg |
10283                          ADVERTISED_FIBRE);
10284         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10285                 tp->link_config.advertising &=
10286                         ~(ADVERTISED_1000baseT_Half |
10287                           ADVERTISED_1000baseT_Full);
10288
10289         return err;
10290 }
10291
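/* Read the board part number from the PCI VPD area, either straight out
 * of NVRAM (offset 0x100 onward when the image carries TG3_EEPROM_MAGIC)
 * or through the PCI VPD capability registers.  The parser then walks
 * the VPD resource tags: 0x82 (identifier string) and 0x91 (read/write
 * data) are skipped, and the 0x90 (read-only data) block is searched for
 * the "PN" keyword, presumably per the standard PCI VPD layout.
 */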
10292 static void __devinit tg3_read_partno(struct tg3 *tp)
10293 {
10294         unsigned char vpd_data[256];
10295         unsigned int i;
10296         u32 magic;
10297
10298         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10299                 goto out_not_found;
10300
10301         if (magic == TG3_EEPROM_MAGIC) {
10302                 for (i = 0; i < 256; i += 4) {
10303                         u32 tmp;
10304
10305                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10306                                 goto out_not_found;
10307
10308                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10309                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10310                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10311                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10312                 }
10313         } else {
10314                 int vpd_cap;
10315
10316                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10317                 for (i = 0; i < 256; i += 4) {
10318                         u32 tmp, j = 0;
10319                         u16 tmp16;
10320
10321                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10322                                               i);
10323                         while (j++ < 100) {
10324                                 pci_read_config_word(tp->pdev, vpd_cap +
10325                                                      PCI_VPD_ADDR, &tmp16);
10326                                 if (tmp16 & 0x8000)
10327                                         break;
10328                                 msleep(1);
10329                         }
10330                         if (!(tmp16 & 0x8000))
10331                                 goto out_not_found;
10332
10333                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10334                                               &tmp);
10335                         tmp = cpu_to_le32(tmp);
10336                         memcpy(&vpd_data[i], &tmp, 4);
10337                 }
10338         }
10339
10340         /* Now parse and find the part number. */
10341         for (i = 0; i < 254; ) {
10342                 unsigned char val = vpd_data[i];
10343                 unsigned int block_end;
10344
10345                 if (val == 0x82 || val == 0x91) {
10346                         i = (i + 3 +
10347                              (vpd_data[i + 1] +
10348                               (vpd_data[i + 2] << 8)));
10349                         continue;
10350                 }
10351
10352                 if (val != 0x90)
10353                         goto out_not_found;
10354
10355                 block_end = (i + 3 +
10356                              (vpd_data[i + 1] +
10357                               (vpd_data[i + 2] << 8)));
10358                 i += 3;
10359
10360                 if (block_end > 256)
10361                         goto out_not_found;
10362
10363                 while (i < (block_end - 2)) {
10364                         if (vpd_data[i + 0] == 'P' &&
10365                             vpd_data[i + 1] == 'N') {
10366                                 int partno_len = vpd_data[i + 2];
10367
10368                                 i += 3;
10369                                 if (partno_len > 24 || (partno_len + i) > 256)
10370                                         goto out_not_found;
10371
10372                                 memcpy(tp->board_part_number,
10373                                        &vpd_data[i], partno_len);
10374
10375                                 /* Success. */
10376                                 return;
10377                         }
10378                         i += 3 + vpd_data[i + 2];
10379                 }
10380
10381                 /* Part number not found. */
10382                 goto out_not_found;
10383         }
10384
10385 out_not_found:
10386         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10387                 strcpy(tp->board_part_number, "BCM95906");
10388         else
10389                 strcpy(tp->board_part_number, "none");
10390 }
10391
10392 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10393 {
10394         u32 val, offset, start;
10395
10396         if (tg3_nvram_read_swab(tp, 0, &val))
10397                 return;
10398
10399         if (val != TG3_EEPROM_MAGIC)
10400                 return;
10401
10402         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10403             tg3_nvram_read_swab(tp, 0x4, &start))
10404                 return;
10405
10406         offset = tg3_nvram_logical_addr(tp, offset);
10407         if (tg3_nvram_read_swab(tp, offset, &val))
10408                 return;
10409
10410         if ((val & 0xfc000000) == 0x0c000000) {
10411                 u32 ver_offset, addr;
10412                 int i;
10413
10414                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10415                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10416                         return;
10417
10418                 if (val != 0)
10419                         return;
10420
10421                 addr = offset + ver_offset - start;
10422                 for (i = 0; i < 16; i += 4) {
10423                         if (tg3_nvram_read(tp, addr + i, &val))
10424                                 return;
10425
10426                         val = cpu_to_le32(val);
10427                         memcpy(tp->fw_ver + i, &val, 4);
10428                 }
10429         }
10430 }
10431
10432 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
10433
10434 static int __devinit tg3_get_invariants(struct tg3 *tp)
10435 {
10436         static struct pci_device_id write_reorder_chipsets[] = {
10437                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10438                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10439                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10440                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10441                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10442                              PCI_DEVICE_ID_VIA_8385_0) },
10443                 { },
10444         };
10445         u32 misc_ctrl_reg;
10446         u32 cacheline_sz_reg;
10447         u32 pci_state_reg, grc_misc_cfg;
10448         u32 val;
10449         u16 pci_cmd;
10450         int err, pcie_cap;
10451
10452         /* Force memory write invalidate off.  If we leave it on,
10453          * then on 5700_BX chips we have to enable a workaround.
10454          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10455          * to match the cacheline size.  The Broadcom driver has this
10456          * workaround but turns MWI off at all times, so it is never
10457          * used.  This seems to suggest that the workaround is insufficient.
10458          */
10459         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10460         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10461         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10462
10463         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10464          * has the register indirect write enable bit set before
10465          * we try to access any of the MMIO registers.  It is also
10466          * critical that the PCI-X hw workaround situation is decided
10467          * before that as well.
10468          */
10469         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10470                               &misc_ctrl_reg);
10471
10472         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10473                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10474
10475         /* Wrong chip ID in 5752 A0. This code can be removed later
10476          * as A0 is not in production.
10477          */
10478         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10479                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10480
10481         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10482          * we need to disable memory and use config. cycles
10483          * only to access all registers. The 5702/03 chips
10484          * can mistakenly decode the special cycles from the
10485          * ICH chipsets as memory write cycles, causing corruption
10486          * of register and memory space. Only certain ICH bridges
10487          * will drive special cycles with non-zero data during the
10488          * address phase which can fall within the 5703's address
10489          * range. This is not an ICH bug as the PCI spec allows
10490          * non-zero address during special cycles. However, only
10491          * these ICH bridges are known to drive non-zero addresses
10492          * during special cycles.
10493          *
10494          * Since special cycles do not cross PCI bridges, we only
10495          * enable this workaround if the 5703 is on the secondary
10496          * bus of these ICH bridges.
10497          */
10498         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10499             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10500                 static struct tg3_dev_id {
10501                         u32     vendor;
10502                         u32     device;
10503                         u32     rev;
10504                 } ich_chipsets[] = {
10505                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10506                           PCI_ANY_ID },
10507                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10508                           PCI_ANY_ID },
10509                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10510                           0xa },
10511                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10512                           PCI_ANY_ID },
10513                         { },
10514                 };
10515                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10516                 struct pci_dev *bridge = NULL;
10517
10518                 while (pci_id->vendor != 0) {
10519                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10520                                                 bridge);
10521                         if (!bridge) {
10522                                 pci_id++;
10523                                 continue;
10524                         }
10525                         if (pci_id->rev != PCI_ANY_ID) {
10526                                 u8 rev;
10527
10528                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10529                                                      &rev);
10530                                 if (rev > pci_id->rev)
10531                                         continue;
10532                         }
10533                         if (bridge->subordinate &&
10534                             (bridge->subordinate->number ==
10535                              tp->pdev->bus->number)) {
10536
10537                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10538                                 pci_dev_put(bridge);
10539                                 break;
10540                         }
10541                 }
10542         }
10543
10544         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10545          * DMA addresses > 40-bit. This bridge may have additional
10546          * 57xx devices behind it, for example in some 4-port NIC designs.
10547          * Any tg3 device found behind the bridge will also need the 40-bit
10548          * DMA workaround.
10549          */
10550         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10551             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10552                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10553                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10554                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10555         }
10556         else {
10557                 struct pci_dev *bridge = NULL;
10558
10559                 do {
10560                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10561                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10562                                                 bridge);
10563                         if (bridge && bridge->subordinate &&
10564                             (bridge->subordinate->number <=
10565                              tp->pdev->bus->number) &&
10566                             (bridge->subordinate->subordinate >=
10567                              tp->pdev->bus->number)) {
10568                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10569                                 pci_dev_put(bridge);
10570                                 break;
10571                         }
10572                 } while (bridge);
10573         }
10574
10575         /* Initialize misc host control in PCI block. */
10576         tp->misc_host_ctrl |= (misc_ctrl_reg &
10577                                MISC_HOST_CTRL_CHIPREV);
10578         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10579                                tp->misc_host_ctrl);
10580
10581         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10582                               &cacheline_sz_reg);
10583
10584         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10585         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10586         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10587         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10588
10589         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10590             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
10591                 tp->pdev_peer = tg3_find_peer(tp);
10592
10593         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10594             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10595             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10596             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10597             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
10598             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10599                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10600
10601         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10602             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10603                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10604
10605         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10606                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
10607                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
10608                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
10609                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
10610                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
10611                      tp->pdev_peer == tp->pdev))
10612                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
10613
10614                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10615                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10616                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10617                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10618                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10619                 } else {
10620                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
10621                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10622                                 ASIC_REV_5750 &&
10623                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10624                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
10625                 }
10626         }
10627
10628         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10629             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10630             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10631             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10632             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
10633             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10634                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10635
10636         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
10637         if (pcie_cap != 0) {
10638                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10639                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10640                         u16 lnkctl;
10641
10642                         pci_read_config_word(tp->pdev,
10643                                              pcie_cap + PCI_EXP_LNKCTL,
10644                                              &lnkctl);
10645                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
10646                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
10647                 }
10648         }
10649
10650         /* If we have an AMD 762 or VIA K8T800 chipset, write
10651          * reordering to the mailbox registers done by the host
10652          * controller can cause major trouble.  We read back from
10653          * every mailbox register write to force the writes to be
10654          * posted to the chip in order.
10655          */
10656         if (pci_dev_present(write_reorder_chipsets) &&
10657             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10658                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10659
10660         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10661             tp->pci_lat_timer < 64) {
10662                 tp->pci_lat_timer = 64;
10663
10664                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10665                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10666                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10667                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10668
10669                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10670                                        cacheline_sz_reg);
10671         }
10672
10673         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10674                               &pci_state_reg);
10675
10676         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10677                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10678
10679                 /* If this is a 5700 BX chipset and we are in PCI-X
10680                  * mode, enable the register write workaround.
10681                  *
10682                  * The workaround is to use indirect register accesses
10683                  * for all chip writes except those to the mailbox registers.
10684                  */
10685                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10686                         u32 pm_reg;
10687                         u16 pci_cmd;
10688
10689                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10690
10691                         /* The chip can have its power management PCI config
10692                          * space registers clobbered due to this bug.
10693                          * So explicitly force the chip into D0 here.
10694                          */
10695                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10696                                               &pm_reg);
10697                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10698                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10699                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10700                                                pm_reg);
10701
10702                         /* Also, force SERR#/PERR# in PCI command. */
10703                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10704                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10705                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10706                 }
10707         }
10708
10709         /* 5700 BX chips need to have their TX producer index mailboxes
10710          * written twice to work around a bug.
10711          */
10712         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10713                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10714
10715         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10716                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10717         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10718                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10719
10720         /* Chip-specific fixup from Broadcom driver */
10721         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10722             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10723                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10724                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10725         }
10726
10727         /* Default fast path register access methods */
10728         tp->read32 = tg3_read32;
10729         tp->write32 = tg3_write32;
10730         tp->read32_mbox = tg3_read32;
10731         tp->write32_mbox = tg3_write32;
10732         tp->write32_tx_mbox = tg3_write32;
10733         tp->write32_rx_mbox = tg3_write32;
10734
10735         /* Various workaround register access methods */
10736         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10737                 tp->write32 = tg3_write_indirect_reg32;
10738         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10739                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10740                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
10741                 /*
10742                  * Back-to-back register writes can cause problems on these
10743                  * chips; the workaround is to read back all register writes
10744                  * except those to the mailbox registers.
10745                  *
10746                  * See tg3_write_indirect_reg32().
10747                  */
10748                 tp->write32 = tg3_write_flush_reg32;
10749         }
10750
10751
10752         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10753             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10754                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10755                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10756                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10757         }
10758
10759         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10760                 tp->read32 = tg3_read_indirect_reg32;
10761                 tp->write32 = tg3_write_indirect_reg32;
10762                 tp->read32_mbox = tg3_read_indirect_mbox;
10763                 tp->write32_mbox = tg3_write_indirect_mbox;
10764                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10765                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10766
10767                 iounmap(tp->regs);
10768                 tp->regs = NULL;
10769
10770                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10771                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10772                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10773         }
10774         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10775                 tp->read32_mbox = tg3_read32_mbox_5906;
10776                 tp->write32_mbox = tg3_write32_mbox_5906;
10777                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
10778                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
10779         }
10780
10781         if (tp->write32 == tg3_write_indirect_reg32 ||
10782             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10783              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10784               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10785                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10786
10787         /* Get eeprom hw config before calling tg3_set_power_state().
10788          * In particular, the TG3_FLG2_IS_NIC flag must be
10789          * determined before calling tg3_set_power_state() so that
10790          * we know whether or not to switch out of Vaux power.
10791          * When the flag is set, it means that GPIO1 is used for eeprom
10792          * write protect and also implies that it is a LOM where GPIOs
10793          * are not used to switch power.
10794          */
10795         tg3_get_eeprom_hw_cfg(tp);
10796
10797         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10798          * GPIO1 driven high will bring 5700's external PHY out of reset.
10799          * It is also used as eeprom write protect on LOMs.
10800          */
10801         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10802         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10803             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10804                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10805                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10806         /* Unused GPIO3 must be driven as output on 5752 because there
10807          * are no pull-up resistors on unused GPIO pins.
10808          */
10809         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10810                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10811
10812         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10813                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10814
10815         /* Force the chip into D0. */
10816         err = tg3_set_power_state(tp, PCI_D0);
10817         if (err) {
10818                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10819                        pci_name(tp->pdev));
10820                 return err;
10821         }
10822
10823         /* 5700 B0 chips do not support checksumming correctly due
10824          * to hardware bugs.
10825          */
10826         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10827                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10828
10829         /* Derive initial jumbo mode from MTU assigned in
10830          * ether_setup() via the alloc_etherdev() call
10831          */
10832         if (tp->dev->mtu > ETH_DATA_LEN &&
10833             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10834                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10835
10836         /* Determine WakeOnLan speed to use. */
10837         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10838             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10839             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10840             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10841                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10842         } else {
10843                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10844         }
10845
10846         /* A few boards don't want the Ethernet@WireSpeed phy feature */
10847         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10848             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10849              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10850              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10851             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
10852             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10853                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10854
10855         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10856             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10857                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10858         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10859                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10860
10861         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10862                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10863                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10864                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
10865                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
10866                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10867                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
10868                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
10869                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10870                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10871         }
10872
10873         tp->coalesce_mode = 0;
10874         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10875             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10876                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10877
10878         /* Initialize MAC MI mode, polling disabled. */
10879         tw32_f(MAC_MI_MODE, tp->mi_mode);
10880         udelay(80);
10881
10882         /* Initialize data/descriptor byte/word swapping. */
10883         val = tr32(GRC_MODE);
10884         val &= GRC_MODE_HOST_STACKUP;
10885         tw32(GRC_MODE, val | tp->grc_mode);
10886
10887         tg3_switch_clocks(tp);
10888
10889         /* Clear this out for sanity. */
10890         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10891
10892         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10893                               &pci_state_reg);
10894         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10895             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10896                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10897
10898                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10899                     chiprevid == CHIPREV_ID_5701_B0 ||
10900                     chiprevid == CHIPREV_ID_5701_B2 ||
10901                     chiprevid == CHIPREV_ID_5701_B5) {
10902                         void __iomem *sram_base;
10903
10904                         /* Write some dummy words into the SRAM status block
10905                          * area and see if it reads back correctly.  If the return
10906                          * value is bad, force enable the PCIX workaround.
10907                          */
10908                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10909
10910                         writel(0x00000000, sram_base);
10911                         writel(0x00000000, sram_base + 4);
10912                         writel(0xffffffff, sram_base + 4);
10913                         if (readl(sram_base) != 0x00000000)
10914                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10915                 }
10916         }
10917
10918         udelay(50);
10919         tg3_nvram_init(tp);
10920
10921         grc_misc_cfg = tr32(GRC_MISC_CFG);
10922         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10923
10924         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10925             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10926              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10927                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10928
10929         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10930             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10931                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10932         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10933                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10934                                       HOSTCC_MODE_CLRTICK_TXBD);
10935
10936                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10937                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10938                                        tp->misc_host_ctrl);
10939         }
10940
10941         /* these are limited to 10/100 only */
10942         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10943              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10944             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10945              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10946              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10947               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10948               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10949             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10950              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10951               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
10952               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
10953             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10954                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10955
10956         err = tg3_phy_probe(tp);
10957         if (err) {
10958                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10959                        pci_name(tp->pdev), err);
10960                 /* ... but do not return immediately ... */
10961         }
10962
10963         tg3_read_partno(tp);
10964         tg3_read_fw_ver(tp);
10965
10966         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10967                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10968         } else {
10969                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10970                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10971                 else
10972                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10973         }
10974
10975         /* 5700 {AX,BX} chips have a broken status block link
10976          * change bit implementation, so we must use the
10977          * status register in those cases.
10978          */
10979         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10980                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10981         else
10982                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10983
10984         /* The led_ctrl is set during tg3_phy_probe; here we might
10985          * have to force the link status polling mechanism based
10986          * upon subsystem IDs.
10987          */
10988         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10989             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10990             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10991                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10992                                   TG3_FLAG_USE_LINKCHG_REG);
10993         }
10994
10995         /* For all SERDES we poll the MAC status register. */
10996         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10997                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10998         else
10999                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11000
11001         /* All chips before 5787 can get confused if TX buffers
11002          * straddle the 4GB address boundary in some cases.
11003          */
11004         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11005             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11006             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11007                 tp->dev->hard_start_xmit = tg3_start_xmit;
11008         else
11009                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11010
11011         tp->rx_offset = 2;
11012         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11013             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11014                 tp->rx_offset = 0;
11015
11016         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11017
11018         /* Increment the rx prod index on the rx std ring by at most
11019          * 8 for these chips to work around hw errata.
11020          */
11021         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11022             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11023             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11024                 tp->rx_std_max_post = 8;
11025
11026         /* By default, disable wake-on-lan.  User can change this
11027          * using ETHTOOL_SWOL.
11028          */
11029         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
11030
11031         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11032                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11033                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11034
11035         return err;
11036 }
11037
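/*
 * Editorial sketch, not part of the driver: tg3_get_invariants() above
 * splits the TG3PCI_CACHELINESZ dword into its four standard PCI config
 * bytes and later re-packs it when bumping the latency timer.  Both
 * directions, with hypothetical helper names:
 */
struct example_clsz {
	u8 cacheline_sz;	/* byte 0: cache line size */
	u8 lat_timer;		/* byte 1: latency timer   */
	u8 hdr_type;		/* byte 2: header type     */
	u8 bist;		/* byte 3: BIST            */
};

static void example_clsz_unpack(u32 reg, struct example_clsz *c)
{
	c->cacheline_sz = (reg >>  0) & 0xff;
	c->lat_timer    = (reg >>  8) & 0xff;
	c->hdr_type     = (reg >> 16) & 0xff;
	c->bist         = (reg >> 24) & 0xff;
}

static u32 example_clsz_pack(const struct example_clsz *c)
{
	return ((u32) c->cacheline_sz <<  0) |
	       ((u32) c->lat_timer    <<  8) |
	       ((u32) c->hdr_type     << 16) |
	       ((u32) c->bist         << 24);
}
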
11038 #ifdef CONFIG_SPARC
11039 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11040 {
11041         struct net_device *dev = tp->dev;
11042         struct pci_dev *pdev = tp->pdev;
11043         struct device_node *dp = pci_device_to_OF_node(pdev);
11044         const unsigned char *addr;
11045         int len;
11046
11047         addr = of_get_property(dp, "local-mac-address", &len);
11048         if (addr && len == 6) {
11049                 memcpy(dev->dev_addr, addr, 6);
11050                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11051                 return 0;
11052         }
11053         return -ENODEV;
11054 }
11055
11056 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11057 {
11058         struct net_device *dev = tp->dev;
11059
11060         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11061         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11062         return 0;
11063 }
11064 #endif
11065
11066 static int __devinit tg3_get_device_address(struct tg3 *tp)
11067 {
11068         struct net_device *dev = tp->dev;
11069         u32 hi, lo, mac_offset;
11070         int addr_ok = 0;
11071
11072 #ifdef CONFIG_SPARC
11073         if (!tg3_get_macaddr_sparc(tp))
11074                 return 0;
11075 #endif
11076
11077         mac_offset = 0x7c;
11078         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11079             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11080                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11081                         mac_offset = 0xcc;
11082                 if (tg3_nvram_lock(tp))
11083                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11084                 else
11085                         tg3_nvram_unlock(tp);
11086         }
11087         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11088                 mac_offset = 0x10;
11089
11090         /* First try to get it from MAC address mailbox. */
11091         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11092         if ((hi >> 16) == 0x484b) {
11093                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11094                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11095
11096                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11097                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11098                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11099                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11100                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11101
11102                 /* Some old bootcode may report a 0 MAC address in SRAM */
11103                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11104         }
11105         if (!addr_ok) {
11106                 /* Next, try NVRAM. */
11107                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11108                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11109                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11110                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11111                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11112                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11113                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11114                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11115                 }
11116                 /* Finally just fetch it out of the MAC control regs. */
11117                 else {
11118                         hi = tr32(MAC_ADDR_0_HIGH);
11119                         lo = tr32(MAC_ADDR_0_LOW);
11120
11121                         dev->dev_addr[5] = lo & 0xff;
11122                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11123                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11124                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11125                         dev->dev_addr[1] = hi & 0xff;
11126                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11127                 }
11128         }
11129
11130         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11131 #ifdef CONFIG_SPARC64
11132                 if (!tg3_get_default_macaddr_sparc(tp))
11133                         return 0;
11134 #endif
11135                 return -EINVAL;
11136         }
11137         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11138         return 0;
11139 }
11140
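/*
 * Editorial sketch, not part of the driver: the SRAM MAC-address mailbox
 * read in tg3_get_device_address() above carries the 0x484b ("HK")
 * signature in the upper 16 bits of the high word, MAC bytes 0-1 in its
 * lower 16 bits, and MAC bytes 2-5 in the low word, most significant
 * byte first.  Hypothetical helper:
 */
static int example_decode_mac_mbox(u32 hi, u32 lo, u8 *addr)
{
	if ((hi >> 16) != 0x484b)
		return -1;		/* signature missing or invalid */

	addr[0] = (hi >>  8) & 0xff;
	addr[1] = (hi >>  0) & 0xff;
	addr[2] = (lo >> 24) & 0xff;
	addr[3] = (lo >> 16) & 0xff;
	addr[4] = (lo >>  8) & 0xff;
	addr[5] = (lo >>  0) & 0xff;
	return 0;
}
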
11141 #define BOUNDARY_SINGLE_CACHELINE       1
11142 #define BOUNDARY_MULTI_CACHELINE        2
11143
11144 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11145 {
11146         int cacheline_size;
11147         u8 byte;
11148         int goal;
11149
11150         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11151         if (byte == 0)
11152                 cacheline_size = 1024;
11153         else
11154                 cacheline_size = (int) byte * 4;
11155
11156         /* On 5703 and later chips, the boundary bits have no
11157          * effect.
11158          */
11159         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11160             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11161             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11162                 goto out;
11163
11164 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11165         goal = BOUNDARY_MULTI_CACHELINE;
11166 #else
11167 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11168         goal = BOUNDARY_SINGLE_CACHELINE;
11169 #else
11170         goal = 0;
11171 #endif
11172 #endif
11173
11174         if (!goal)
11175                 goto out;
11176
11177         /* PCI controllers on most RISC systems tend to disconnect
11178          * when a device tries to burst across a cache-line boundary.
11179          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11180          *
11181          * Unfortunately, for PCI-E there are only limited
11182          * write-side controls for this, and thus for reads
11183          * we will still get the disconnects.  We'll also waste
11184          * these PCI cycles for both read and write for chips
11185          * other than 5700 and 5701 which do not implement the
11186          * boundary bits.
11187          */
11188         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11189             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11190                 switch (cacheline_size) {
11191                 case 16:
11192                 case 32:
11193                 case 64:
11194                 case 128:
11195                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11196                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11197                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11198                         } else {
11199                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11200                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11201                         }
11202                         break;
11203
11204                 case 256:
11205                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11206                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11207                         break;
11208
11209                 default:
11210                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11211                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11212                         break;
11213                 }
11214         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11215                 switch (cacheline_size) {
11216                 case 16:
11217                 case 32:
11218                 case 64:
11219                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11220                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11221                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11222                                 break;
11223                         }
11224                         /* fallthrough */
11225                 case 128:
11226                 default:
11227                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11228                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11229                         break;
11230                 }
11231         } else {
11232                 switch (cacheline_size) {
11233                 case 16:
11234                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11235                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11236                                         DMA_RWCTRL_WRITE_BNDRY_16);
11237                                 break;
11238                         }
11239                         /* fallthrough */
11240                 case 32:
11241                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11242                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11243                                         DMA_RWCTRL_WRITE_BNDRY_32);
11244                                 break;
11245                         }
11246                         /* fallthrough */
11247                 case 64:
11248                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11249                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11250                                         DMA_RWCTRL_WRITE_BNDRY_64);
11251                                 break;
11252                         }
11253                         /* fallthrough */
11254                 case 128:
11255                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11256                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11257                                         DMA_RWCTRL_WRITE_BNDRY_128);
11258                                 break;
11259                         }
11260                         /* fallthrough */
11261                 case 256:
11262                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11263                                 DMA_RWCTRL_WRITE_BNDRY_256);
11264                         break;
11265                 case 512:
11266                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11267                                 DMA_RWCTRL_WRITE_BNDRY_512);
11268                         break;
11269                 case 1024:
11270                 default:
11271                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11272                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11273                         break;
11274                 }
11275         }
11276
11277 out:
11278         return val;
11279 }
11280
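/*
 * Editorial sketch, not part of the driver: PCI_CACHE_LINE_SIZE counts
 * 32-bit dwords, so tg3_calc_dma_bndry() above multiplies the byte it
 * reads by 4; a value of 0 (register never programmed) is treated as
 * 1024 bytes so the largest DMA boundary is selected.  Hypothetical
 * helper:
 */
static int example_pci_cacheline_bytes(u8 cls_reg)
{
	return cls_reg ? (int) cls_reg * 4 : 1024;
}
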
11281 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
11282 {
11283         struct tg3_internal_buffer_desc test_desc;
11284         u32 sram_dma_descs;
11285         int i, ret;
11286
11287         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
11288
11289         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
11290         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
11291         tw32(RDMAC_STATUS, 0);
11292         tw32(WDMAC_STATUS, 0);
11293
11294         tw32(BUFMGR_MODE, 0);
11295         tw32(FTQ_RESET, 0);
11296
11297         test_desc.addr_hi = ((u64) buf_dma) >> 32;
11298         test_desc.addr_lo = buf_dma & 0xffffffff;
11299         test_desc.nic_mbuf = 0x00002100;
11300         test_desc.len = size;
11301
11302         /*
11303          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
11304          * the *second* time the tg3 driver was getting loaded after an
11305          * initial scan.
11306          *
11307          * Broadcom tells me:
11308          *   ...the DMA engine is connected to the GRC block and a DMA
11309          *   reset may affect the GRC block in some unpredictable way...
11310          *   The behavior of resets to individual blocks has not been tested.
11311          *
11312          * Broadcom noted the GRC reset will also reset all sub-components.
11313          */
11314         if (to_device) {
11315                 test_desc.cqid_sqid = (13 << 8) | 2;
11316
11317                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
11318                 udelay(40);
11319         } else {
11320                 test_desc.cqid_sqid = (16 << 8) | 7;
11321
11322                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
11323                 udelay(40);
11324         }
11325         test_desc.flags = 0x00000005;
11326
11327         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
11328                 u32 val;
11329
11330                 val = *(((u32 *)&test_desc) + i);
11331                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
11332                                        sram_dma_descs + (i * sizeof(u32)));
11333                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
11334         }
11335         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
11336
11337         if (to_device) {
11338                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
11339         } else {
11340                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
11341         }
11342
11343         ret = -ENODEV;
11344         for (i = 0; i < 40; i++) {
11345                 u32 val;
11346
11347                 if (to_device)
11348                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
11349                 else
11350                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
11351                 if ((val & 0xffff) == sram_dma_descs) {
11352                         ret = 0;
11353                         break;
11354                 }
11355
11356                 udelay(100);
11357         }
11358
11359         return ret;
11360 }
11361
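/*
 * Editorial sketch, not part of the driver: tg3_do_test_dma() above
 * copies the test descriptor into NIC SRAM one 32-bit word at a time
 * through the PCI config-space memory window -- point the window base at
 * the SRAM offset, write the data register, and finally park the window
 * back at 0.  Hypothetical helper with the same pattern:
 */
static void example_memwin_write(struct tg3 *tp, u32 sram_off,
				 const u32 *data, int words)
{
	int i;

	for (i = 0; i < words; i++) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_off + i * sizeof(u32));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA,
				       data[i]);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}
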
11362 #define TEST_BUFFER_SIZE        0x2000
11363
11364 static int __devinit tg3_test_dma(struct tg3 *tp)
11365 {
11366         dma_addr_t buf_dma;
11367         u32 *buf, saved_dma_rwctrl;
11368         int ret;
11369
11370         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
11371         if (!buf) {
11372                 ret = -ENOMEM;
11373                 goto out_nofree;
11374         }
11375
11376         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
11377                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
11378
11379         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
11380
11381         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11382                 /* DMA read watermark not used on PCIE */
11383                 tp->dma_rwctrl |= 0x00180000;
11384         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
11385                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
11386                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
11387                         tp->dma_rwctrl |= 0x003f0000;
11388                 else
11389                         tp->dma_rwctrl |= 0x003f000f;
11390         } else {
11391                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11392                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
11393                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
11394                         u32 read_water = 0x7;
11395
11396                         /* If the 5704 is behind the EPB bridge, we can
11397                          * do the less restrictive ONE_DMA workaround for
11398                          * better performance.
11399                          */
11400                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
11401                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11402                                 tp->dma_rwctrl |= 0x8000;
11403                         else if (ccval == 0x6 || ccval == 0x7)
11404                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
11405
11406                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
11407                                 read_water = 4;
11408                         /* Set bit 23 to enable PCIX hw bug fix */
11409                         tp->dma_rwctrl |=
11410                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
11411                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
11412                                 (1 << 23);
11413                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11414                         /* 5780 always in PCIX mode */
11415                         tp->dma_rwctrl |= 0x00144000;
11416                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11417                         /* 5714 always in PCIX mode */
11418                         tp->dma_rwctrl |= 0x00148000;
11419                 } else {
11420                         tp->dma_rwctrl |= 0x001b000f;
11421                 }
11422         }
11423
11424         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11425             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11426                 tp->dma_rwctrl &= 0xfffffff0;
11427
11428         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11429             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11430                 /* Remove this if it causes problems for some boards. */
11431                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11432
11433                 /* On 5700/5701 chips, we need to set this bit.
11434                  * Otherwise the chip will issue cacheline transactions
11435          * to streamable DMA memory without all of the byte
11436                  * enables turned on.  This is an error on several
11437                  * RISC PCI controllers, in particular sparc64.
11438                  *
11439                  * On 5703/5704 chips, this bit has been reassigned
11440                  * a different meaning.  In particular, it is used
11441                  * on those chips to enable a PCI-X workaround.
11442                  */
11443                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11444         }
11445
11446         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11447
11448 #if 0
11449         /* Unneeded, already done by tg3_get_invariants.  */
11450         tg3_switch_clocks(tp);
11451 #endif
11452
11453         ret = 0;
11454         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11455             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11456                 goto out;
11457
11458         /* It is best to perform the DMA test with the maximum write burst size
11459          * to expose the 5700/5701 write DMA bug.
11460          */
11461         saved_dma_rwctrl = tp->dma_rwctrl;
11462         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11463         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11464
11465         while (1) {
11466                 u32 *p = buf, i;
11467
11468                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11469                         p[i] = i;
11470
11471                 /* Send the buffer to the chip. */
11472                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11473                 if (ret) {
11474                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
11475                         break;
11476                 }
11477
11478 #if 0
11479                 /* validate data reached card RAM correctly. */
11480                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11481                         u32 val;
11482                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
11483                         if (le32_to_cpu(val) != p[i]) {
11484                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
11485                                 /* ret = -ENODEV here? */
11486                         }
11487                         p[i] = 0;
11488                 }
11489 #endif
11490                 /* Now read it back. */
11491                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11492                 if (ret) {
11493                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
11494
11495                         break;
11496                 }
11497
11498                 /* Verify it. */
11499                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11500                         if (p[i] == i)
11501                                 continue;
11502
11503                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11504                             DMA_RWCTRL_WRITE_BNDRY_16) {
11505                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11506                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11507                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11508                                 break;
11509                         } else {
11510                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
11511                                 ret = -ENODEV;
11512                                 goto out;
11513                         }
11514                 }
11515
11516                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11517                         /* Success. */
11518                         ret = 0;
11519                         break;
11520                 }
11521         }
11522         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11523             DMA_RWCTRL_WRITE_BNDRY_16) {
11524                 static struct pci_device_id dma_wait_state_chipsets[] = {
11525                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11526                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11527                         { },
11528                 };
11529
11530                 /* DMA test passed without adjusting the DMA boundary;
11531                  * now look for chipsets that are known to expose the
11532                  * DMA bug without failing the test.
11533                  */
11534                 if (pci_dev_present(dma_wait_state_chipsets)) {
11535                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11536                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11537                 }
11538                 else
11539                         /* Safe to use the calculated DMA boundary. */
11540                         tp->dma_rwctrl = saved_dma_rwctrl;
11541
11542                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11543         }
11544
11545 out:
11546         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11547 out_nofree:
11548         return ret;
11549 }
11550
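/*
 * Editorial sketch, not part of the driver: tg3_test_dma() above fills
 * the test buffer with its own word indices, pushes it to the chip
 * through the read-DMA engine, pulls it back through the write-DMA
 * engine and then checks every word.  The verification step on its own,
 * with a hypothetical helper name:
 */
static int example_verify_index_pattern(const u32 *buf, int words)
{
	int i;

	for (i = 0; i < words; i++) {
		if (buf[i] != (u32) i)
			return i;	/* index of first corrupted word */
	}
	return -1;			/* pattern intact */
}
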
11551 static void __devinit tg3_init_link_config(struct tg3 *tp)
11552 {
11553         tp->link_config.advertising =
11554                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11555                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11556                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11557                  ADVERTISED_Autoneg | ADVERTISED_MII);
11558         tp->link_config.speed = SPEED_INVALID;
11559         tp->link_config.duplex = DUPLEX_INVALID;
11560         tp->link_config.autoneg = AUTONEG_ENABLE;
11561         tp->link_config.active_speed = SPEED_INVALID;
11562         tp->link_config.active_duplex = DUPLEX_INVALID;
11563         tp->link_config.phy_is_low_power = 0;
11564         tp->link_config.orig_speed = SPEED_INVALID;
11565         tp->link_config.orig_duplex = DUPLEX_INVALID;
11566         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11567 }
11568
11569 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11570 {
11571         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11572                 tp->bufmgr_config.mbuf_read_dma_low_water =
11573                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11574                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11575                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11576                 tp->bufmgr_config.mbuf_high_water =
11577                         DEFAULT_MB_HIGH_WATER_5705;
11578                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11579                         tp->bufmgr_config.mbuf_mac_rx_low_water =
11580                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
11581                         tp->bufmgr_config.mbuf_high_water =
11582                                 DEFAULT_MB_HIGH_WATER_5906;
11583                 }
11584
11585                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11586                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11587                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11588                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11589                 tp->bufmgr_config.mbuf_high_water_jumbo =
11590                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11591         } else {
11592                 tp->bufmgr_config.mbuf_read_dma_low_water =
11593                         DEFAULT_MB_RDMA_LOW_WATER;
11594                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11595                         DEFAULT_MB_MACRX_LOW_WATER;
11596                 tp->bufmgr_config.mbuf_high_water =
11597                         DEFAULT_MB_HIGH_WATER;
11598
11599                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11600                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11601                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11602                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11603                 tp->bufmgr_config.mbuf_high_water_jumbo =
11604                         DEFAULT_MB_HIGH_WATER_JUMBO;
11605         }
11606
11607         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11608         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11609 }
11610
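/* Map the probed PHY ID to a human-readable name for the probe banner. */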
11611 static char * __devinit tg3_phy_string(struct tg3 *tp)
11612 {
11613         switch (tp->phy_id & PHY_ID_MASK) {
11614         case PHY_ID_BCM5400:    return "5400";
11615         case PHY_ID_BCM5401:    return "5401";
11616         case PHY_ID_BCM5411:    return "5411";
11617         case PHY_ID_BCM5701:    return "5701";
11618         case PHY_ID_BCM5703:    return "5703";
11619         case PHY_ID_BCM5704:    return "5704";
11620         case PHY_ID_BCM5705:    return "5705";
11621         case PHY_ID_BCM5750:    return "5750";
11622         case PHY_ID_BCM5752:    return "5752";
11623         case PHY_ID_BCM5714:    return "5714";
11624         case PHY_ID_BCM5780:    return "5780";
11625         case PHY_ID_BCM5755:    return "5755";
11626         case PHY_ID_BCM5787:    return "5787";
11627         case PHY_ID_BCM5756:    return "5722/5756";
11628         case PHY_ID_BCM5906:    return "5906";
11629         case PHY_ID_BCM8002:    return "8002/serdes";
11630         case 0:                 return "serdes";
11631         default:                return "unknown";
11632         }
11633 }
11634
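/* Build a short description of the host bus: PCI Express, PCI-X with the
 * clock decoded from TG3PCI_CLOCK_CTRL (or the 5704CIOBE board ID), or
 * plain PCI, followed by the bus width.
 */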
11635 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11636 {
11637         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11638                 strcpy(str, "PCI Express");
11639                 return str;
11640         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11641                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11642
11643                 strcpy(str, "PCIX:");
11644
11645                 if ((clock_ctrl == 7) ||
11646                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11647                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11648                         strcat(str, "133MHz");
11649                 else if (clock_ctrl == 0)
11650                         strcat(str, "33MHz");
11651                 else if (clock_ctrl == 2)
11652                         strcat(str, "50MHz");
11653                 else if (clock_ctrl == 4)
11654                         strcat(str, "66MHz");
11655                 else if (clock_ctrl == 6)
11656                         strcat(str, "100MHz");
11657         } else {
11658                 strcpy(str, "PCI:");
11659                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11660                         strcat(str, "66MHz");
11661                 else
11662                         strcat(str, "33MHz");
11663         }
11664         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11665                 strcat(str, ":32-bit");
11666         else
11667                 strcat(str, ":64-bit");
11668         return str;
11669 }
11670
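/* Locate the sibling PCI function of a dual-port device (e.g. the 5704) by
 * scanning the other functions in the same slot.  The returned pointer is
 * not reference-counted; see the comment below on why that is safe.
 */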
11671 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11672 {
11673         struct pci_dev *peer;
11674         unsigned int func, devnr = tp->pdev->devfn & ~7;
11675
11676         for (func = 0; func < 8; func++) {
11677                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11678                 if (peer && peer != tp->pdev)
11679                         break;
11680                 pci_dev_put(peer);
11681         }
11682         /* The 5704 can be configured in single-port mode; set peer to
11683          * tp->pdev in that case.
11684          */
11685         if (!peer) {
11686                 peer = tp->pdev;
11687                 return peer;
11688         }
11689
11690         /*
11691          * We don't need to keep the refcount elevated; there's no way
11692          * to remove one half of this device without removing the other.
11693          */
11694         pci_dev_put(peer);
11695
11696         return peer;
11697 }
11698
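/* Fill in the default ethtool interrupt-coalescing parameters, adjust them
 * when the host coalescing engine runs in CLRTICK mode, and zero the IRQ
 * and statistics-block timers on 5705 and newer chips.
 */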
11699 static void __devinit tg3_init_coal(struct tg3 *tp)
11700 {
11701         struct ethtool_coalesce *ec = &tp->coal;
11702
11703         memset(ec, 0, sizeof(*ec));
11704         ec->cmd = ETHTOOL_GCOALESCE;
11705         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11706         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11707         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11708         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11709         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11710         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11711         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11712         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11713         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11714
11715         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11716                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11717                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11718                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11719                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11720                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11721         }
11722
11723         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11724                 ec->rx_coalesce_usecs_irq = 0;
11725                 ec->tx_coalesce_usecs_irq = 0;
11726                 ec->stats_block_coalesce_usecs = 0;
11727         }
11728 }
11729
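/* PCI probe entry point.  Enables the device, maps BAR 0, allocates and
 * fills in the net_device, reads the chip invariants, picks suitable DMA
 * masks, runs the DMA engine test and finally registers the interface,
 * printing a summary banner.
 */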
11730 static int __devinit tg3_init_one(struct pci_dev *pdev,
11731                                   const struct pci_device_id *ent)
11732 {
11733         static int tg3_version_printed;
11734         unsigned long tg3reg_base, tg3reg_len;
11735         struct net_device *dev;
11736         struct tg3 *tp;
11737         int i, err, pm_cap;
11738         char str[40];
11739         u64 dma_mask, persist_dma_mask;
11740
11741         if (tg3_version_printed++ == 0)
11742                 printk(KERN_INFO "%s", version);
11743
11744         err = pci_enable_device(pdev);
11745         if (err) {
11746                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11747                        "aborting.\n");
11748                 return err;
11749         }
11750
11751         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11752                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11753                        "base address, aborting.\n");
11754                 err = -ENODEV;
11755                 goto err_out_disable_pdev;
11756         }
11757
11758         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11759         if (err) {
11760                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11761                        "aborting.\n");
11762                 goto err_out_disable_pdev;
11763         }
11764
11765         pci_set_master(pdev);
11766
11767         /* Find power-management capability. */
11768         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11769         if (pm_cap == 0) {
11770                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11771                        "aborting.\n");
11772                 err = -EIO;
11773                 goto err_out_free_res;
11774         }
11775
11776         tg3reg_base = pci_resource_start(pdev, 0);
11777         tg3reg_len = pci_resource_len(pdev, 0);
11778
11779         dev = alloc_etherdev(sizeof(*tp));
11780         if (!dev) {
11781                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11782                 err = -ENOMEM;
11783                 goto err_out_free_res;
11784         }
11785
11786         SET_MODULE_OWNER(dev);
11787         SET_NETDEV_DEV(dev, &pdev->dev);
11788
11789 #if TG3_VLAN_TAG_USED
11790         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11791         dev->vlan_rx_register = tg3_vlan_rx_register;
11792 #endif
11793
11794         tp = netdev_priv(dev);
11795         tp->pdev = pdev;
11796         tp->dev = dev;
11797         tp->pm_cap = pm_cap;
11798         tp->mac_mode = TG3_DEF_MAC_MODE;
11799         tp->rx_mode = TG3_DEF_RX_MODE;
11800         tp->tx_mode = TG3_DEF_TX_MODE;
11801         tp->mi_mode = MAC_MI_MODE_BASE;
11802         if (tg3_debug > 0)
11803                 tp->msg_enable = tg3_debug;
11804         else
11805                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11806
11807         /* The word/byte swap controls here govern register access byte
11808          * swapping.  DMA data byte swapping is controlled by the GRC_MODE
11809          * setting below.
11810          */
11811         tp->misc_host_ctrl =
11812                 MISC_HOST_CTRL_MASK_PCI_INT |
11813                 MISC_HOST_CTRL_WORD_SWAP |
11814                 MISC_HOST_CTRL_INDIR_ACCESS |
11815                 MISC_HOST_CTRL_PCISTATE_RW;
11816
11817         /* The NONFRM (non-frame) byte/word swap controls take effect
11818          * on descriptor entries, anything which isn't packet data.
11819          *
11820          * The StrongARM chips on the board (one for tx, one for rx)
11821          * are running in big-endian mode.
11822          */
11823         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11824                         GRC_MODE_WSWAP_NONFRM_DATA);
11825 #ifdef __BIG_ENDIAN
11826         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11827 #endif
11828         spin_lock_init(&tp->lock);
11829         spin_lock_init(&tp->indirect_lock);
11830         INIT_WORK(&tp->reset_task, tg3_reset_task);
11831
11832         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11833         if (!tp->regs) {
11834                 printk(KERN_ERR PFX "Cannot map device registers, "
11835                        "aborting.\n");
11836                 err = -ENOMEM;
11837                 goto err_out_free_dev;
11838         }
11839
11840         tg3_init_link_config(tp);
11841
11842         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11843         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11844         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11845
11846         dev->open = tg3_open;
11847         dev->stop = tg3_close;
11848         dev->get_stats = tg3_get_stats;
11849         dev->set_multicast_list = tg3_set_rx_mode;
11850         dev->set_mac_address = tg3_set_mac_addr;
11851         dev->do_ioctl = tg3_ioctl;
11852         dev->tx_timeout = tg3_tx_timeout;
11853         dev->poll = tg3_poll;
11854         dev->ethtool_ops = &tg3_ethtool_ops;
11855         dev->weight = 64;
11856         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11857         dev->change_mtu = tg3_change_mtu;
11858         dev->irq = pdev->irq;
11859 #ifdef CONFIG_NET_POLL_CONTROLLER
11860         dev->poll_controller = tg3_poll_controller;
11861 #endif
11862
11863         err = tg3_get_invariants(tp);
11864         if (err) {
11865                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11866                        "aborting.\n");
11867                 goto err_out_iounmap;
11868         }
11869
11870         /* The EPB bridge inside 5714, 5715, and 5780 and any
11871          * device behind the EPB cannot support DMA addresses > 40-bit.
11872          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11873          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11874          * do DMA address check in tg3_start_xmit().
11875          */
11876         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11877                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11878         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11879                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11880 #ifdef CONFIG_HIGHMEM
11881                 dma_mask = DMA_64BIT_MASK;
11882 #endif
11883         } else
11884                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11885
11886         /* Configure DMA attributes. */
11887         if (dma_mask > DMA_32BIT_MASK) {
11888                 err = pci_set_dma_mask(pdev, dma_mask);
11889                 if (!err) {
11890                         dev->features |= NETIF_F_HIGHDMA;
11891                         err = pci_set_consistent_dma_mask(pdev,
11892                                                           persist_dma_mask);
11893                         if (err < 0) {
11894                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11895                                        "DMA for consistent allocations\n");
11896                                 goto err_out_iounmap;
11897                         }
11898                 }
11899         }
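        /* If the wider mask could not be set (or was never attempted), fall
         * back to 32-bit DMA or give up entirely.
         */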
11900         if (err || dma_mask == DMA_32BIT_MASK) {
11901                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11902                 if (err) {
11903                         printk(KERN_ERR PFX "No usable DMA configuration, "
11904                                "aborting.\n");
11905                         goto err_out_iounmap;
11906                 }
11907         }
11908
11909         tg3_init_bufmgr_config(tp);
11910
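        /* Decide TSO capability: chips with hardware TSO always advertise it;
         * 5700, 5701, 5705 A0 and 5906 (or any chip running ASF firmware) get
         * no TSO; everything else uses firmware TSO and carries the TSO bug
         * workaround flag.
         */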
11911         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11912                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11913         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11915             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11916             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11917             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11918             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11919                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11920         } else {
11921                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
11922         }
11923
11924         /* TSO is on by default on chips that support hardware TSO.
11925          * Firmware TSO on older chips gives lower performance, so it
11926          * is off by default, but can be enabled using ethtool.
11927          */
11928         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11929                 dev->features |= NETIF_F_TSO;
11930                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
11931                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
11932                         dev->features |= NETIF_F_TSO6;
11933         }
11934
11935
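        /* 5705 A1 without TSO on a slower PCI bus is limited to 64 pending
         * receive descriptors (presumably a chip erratum), so cap rx_pending
         * accordingly.
         */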
11936         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11937             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11938             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11939                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11940                 tp->rx_pending = 63;
11941         }
11942
11943         err = tg3_get_device_address(tp);
11944         if (err) {
11945                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11946                        "aborting.\n");
11947                 goto err_out_iounmap;
11948         }
11949
11950         /*
11951          * Reset the chip in case the UNDI or EFI driver did not shut it
11952          * down cleanly.  Otherwise the DMA self test will enable the WDMAC
11953          * and we'll see (spurious) pending DMA on the PCI bus at that point.
11954          */
11955         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11956             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11957                 pci_save_state(tp->pdev);
11958                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11959                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11960         }
11961
11962         err = tg3_test_dma(tp);
11963         if (err) {
11964                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11965                 goto err_out_iounmap;
11966         }
11967
11968         /* Tigon3 can offload checksums for IPv4 only (the 5755 and 5787
11969          * can also handle IPv6), and some chips have buggy checksumming.
11970          */
11971         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11972                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
11973                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11974                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11975                         dev->features |= NETIF_F_IPV6_CSUM;
11976
11977                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11978         } else
11979                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11980
11981         /* flow control autonegotiation is default behavior */
11982         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11983
11984         tg3_init_coal(tp);
11985
11986         /* Now that we have fully set up the chip, save away a snapshot
11987          * of the PCI config space.  We need to restore this after
11988          * GRC_MISC_CFG core clock resets and some resume events.
11989          */
11990         pci_save_state(tp->pdev);
11991
11992         pci_set_drvdata(pdev, dev);
11993
11994         err = register_netdev(dev);
11995         if (err) {
11996                 printk(KERN_ERR PFX "Cannot register net device, "
11997                        "aborting.\n");
11998                 goto err_out_iounmap;
11999         }
12000
12001         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
12002                dev->name,
12003                tp->board_part_number,
12004                tp->pci_chip_rev_id,
12005                tg3_phy_string(tp),
12006                tg3_bus_string(tp, str),
12007                ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12008                 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12009                  "10/100/1000Base-T")));
12010
12011         for (i = 0; i < 6; i++)
12012                 printk("%2.2x%c", dev->dev_addr[i],
12013                        i == 5 ? '\n' : ':');
12014
12015         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
12016                "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
12017                dev->name,
12018                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12019                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12020                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12021                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
12022                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12023                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
12024         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12025                dev->name, tp->dma_rwctrl,
12026                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12027                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
12028
12029         return 0;
12030
12031 err_out_iounmap:
12032         if (tp->regs) {
12033                 iounmap(tp->regs);
12034                 tp->regs = NULL;
12035         }
12036
12037 err_out_free_dev:
12038         free_netdev(dev);
12039
12040 err_out_free_res:
12041         pci_release_regions(pdev);
12042
12043 err_out_disable_pdev:
12044         pci_disable_device(pdev);
12045         pci_set_drvdata(pdev, NULL);
12046         return err;
12047 }
12048
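/* PCI remove callback: flush any pending reset work, unregister the netdev
 * and release the resources acquired in tg3_init_one() in reverse order.
 */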
12049 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12050 {
12051         struct net_device *dev = pci_get_drvdata(pdev);
12052
12053         if (dev) {
12054                 struct tg3 *tp = netdev_priv(dev);
12055
12056                 flush_scheduled_work();
12057                 unregister_netdev(dev);
12058                 if (tp->regs) {
12059                         iounmap(tp->regs);
12060                         tp->regs = NULL;
12061                 }
12062                 free_netdev(dev);
12063                 pci_release_regions(pdev);
12064                 pci_disable_device(pdev);
12065                 pci_set_drvdata(pdev, NULL);
12066         }
12067 }
12068
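/* Suspend: quiesce the interface (stop NAPI and the driver's periodic timer,
 * disable interrupts), halt the chip, save PCI state and drop into the
 * requested power state.  If the power transition fails, the hardware is
 * restarted so the device keeps working.
 */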
12069 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
12070 {
12071         struct net_device *dev = pci_get_drvdata(pdev);
12072         struct tg3 *tp = netdev_priv(dev);
12073         int err;
12074
12075         if (!netif_running(dev))
12076                 return 0;
12077
12078         flush_scheduled_work();
12079         tg3_netif_stop(tp);
12080
12081         del_timer_sync(&tp->timer);
12082
12083         tg3_full_lock(tp, 1);
12084         tg3_disable_ints(tp);
12085         tg3_full_unlock(tp);
12086
12087         netif_device_detach(dev);
12088
12089         tg3_full_lock(tp, 0);
12090         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12091         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
12092         tg3_full_unlock(tp);
12093
12094         /* Save MSI address and data for resume.  */
12095         pci_save_state(pdev);
12096
12097         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
12098         if (err) {
12099                 tg3_full_lock(tp, 0);
12100
12101                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
12102                 if (tg3_restart_hw(tp, 1))
12103                         goto out;
12104
12105                 tp->timer.expires = jiffies + tp->timer_offset;
12106                 add_timer(&tp->timer);
12107
12108                 netif_device_attach(dev);
12109                 tg3_netif_start(tp);
12110
12111 out:
12112                 tg3_full_unlock(tp);
12113         }
12114
12115         return err;
12116 }
12117
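/* Resume: restore PCI config space, bring the chip back to D0, reattach the
 * interface, reinitialize the hardware and restart the driver's periodic
 * timer.
 */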
12118 static int tg3_resume(struct pci_dev *pdev)
12119 {
12120         struct net_device *dev = pci_get_drvdata(pdev);
12121         struct tg3 *tp = netdev_priv(dev);
12122         int err;
12123
12124         if (!netif_running(dev))
12125                 return 0;
12126
12127         pci_restore_state(tp->pdev);
12128
12129         err = tg3_set_power_state(tp, PCI_D0);
12130         if (err)
12131                 return err;
12132
12133         netif_device_attach(dev);
12134
12135         tg3_full_lock(tp, 0);
12136
12137         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
12138         err = tg3_restart_hw(tp, 1);
12139         if (err)
12140                 goto out;
12141
12142         tp->timer.expires = jiffies + tp->timer_offset;
12143         add_timer(&tp->timer);
12144
12145         tg3_netif_start(tp);
12146
12147 out:
12148         tg3_full_unlock(tp);
12149
12150         return err;
12151 }
12152
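/* Driver glue: probe/remove plus the suspend/resume hooks wired into the
 * PCI core.
 */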
12153 static struct pci_driver tg3_driver = {
12154         .name           = DRV_MODULE_NAME,
12155         .id_table       = tg3_pci_tbl,
12156         .probe          = tg3_init_one,
12157         .remove         = __devexit_p(tg3_remove_one),
12158         .suspend        = tg3_suspend,
12159         .resume         = tg3_resume
12160 };
12161
12162 static int __init tg3_init(void)
12163 {
12164         return pci_register_driver(&tg3_driver);
12165 }
12166
12167 static void __exit tg3_cleanup(void)
12168 {
12169         pci_unregister_driver(&tg3_driver);
12170 }
12171
12172 module_init(tg3_init);
12173 module_exit(tg3_cleanup);