]> err.no Git - linux-2.6/blob - drivers/net/tg3.c
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
[linux-2.6] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
/* Driver identity strings used in log messages and module metadata. */
#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.91"
#define DRV_MODULE_RELDATE      "April 18, 2008"

/* Reset-time defaults for the MAC/RX/TX mode registers (0 == hardware
 * default), and the default netif message-level bitmap.
 */
#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, derived from the entry counts
 * above.  NEXT_TX relies on TG3_TX_RING_SIZE being a power of two.
 */
#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* RX buffer sizes: max frame plus the per-device DMA offset and slack.
 * NOTE: both macros implicitly reference a local 'tp' at the use site.
 */
#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

/* Number of entries in ethtool_test_keys[] below. */
#define TG3_NUM_TEST            6
134
/* One-line banner printed at probe time. */
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: bitmap of NETIF_MSG_* categories to log. */
static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147 static struct pci_device_id tg3_pci_tbl[] = {
148         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
206         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
207         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
208         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
209         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
210         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
211         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
212         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
213         {}
214 };
215
216 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
/* ethtool -S statistic names.  Entry order must match the field order
 * of struct tg3_ethtool_stats (TG3_NUM_STATS entries total).
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};
298
/* ethtool self-test names (TG3_NUM_TEST entries); "(online)" tests run
 * without disrupting traffic, "(offline)" tests require link down.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
309
310 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
311 {
312         writel(val, tp->regs + off);
313 }
314
315 static u32 tg3_read32(struct tg3 *tp, u32 off)
316 {
317         return (readl(tp->regs + off));
318 }
319
320 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
321 {
322         writel(val, tp->aperegs + off);
323 }
324
325 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326 {
327         return (readl(tp->aperegs + off));
328 }
329
330 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
331 {
332         unsigned long flags;
333
334         spin_lock_irqsave(&tp->indirect_lock, flags);
335         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
336         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
337         spin_unlock_irqrestore(&tp->indirect_lock, flags);
338 }
339
340 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
341 {
342         writel(val, tp->regs + off);
343         readl(tp->regs + off);
344 }
345
346 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
347 {
348         unsigned long flags;
349         u32 val;
350
351         spin_lock_irqsave(&tp->indirect_lock, flags);
352         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
353         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
354         spin_unlock_irqrestore(&tp->indirect_lock, flags);
355         return val;
356 }
357
/* Mailbox write through PCI config space.  Two mailboxes have dedicated
 * config-space aliases and bypass the windowed path entirely; all others
 * go through the REG_BASE_ADDR/REG_DATA window at offset + 0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        /* RX return ring consumer index has a direct config-space alias. */
        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        /* So does the standard RX ring producer index. */
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
387
388 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
389 {
390         unsigned long flags;
391         u32 val;
392
393         spin_lock_irqsave(&tp->indirect_lock, flags);
394         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
395         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
396         spin_unlock_irqrestore(&tp->indirect_lock, flags);
397         return val;
398 }
399
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        /* On chips with the PCIX target or ICH workarounds, tp->write32 is
         * a non-posted (config-space) method, so no flush read is needed.
         */
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
424
425 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
426 {
427         tp->write32_mbox(tp, off, val);
428         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
429             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
430                 tp->read32_mbox(tp, off);
431 }
432
/* TX mailbox write.  On chips flagged TXD_MBOX_HWBUG the value is written
 * twice (HW bug workaround); on chips flagged MBOX_WRITE_REORDER a
 * readback flushes the posted write so the NIC sees it in order.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}
442
443 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444 {
445         return (readl(tp->regs + off + GRCMBOX_BASE));
446 }
447
448 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
449 {
450         writel(val, tp->regs + off + GRCMBOX_BASE);
451 }
452
/* Register/mailbox access shorthands.  NOTE: all of these expand to an
 * access through a local variable named 'tp', which must be in scope at
 * every use site.
 */
#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
463
/* Write one word of NIC on-board SRAM through the memory window,
 * choosing config-space or MMIO access per chip flags.  The window base
 * is always restored to zero afterward.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        /* 5906: the stats block region of SRAM must not be touched. */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
488
/* Read one word of NIC on-board SRAM into *val; mirror image of
 * tg3_write_mem().  Forbidden 5906 stats-block reads return 0.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        /* 5906: the stats block region of SRAM must not be touched. */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
515
516 static void tg3_ape_lock_init(struct tg3 *tp)
517 {
518         int i;
519
520         /* Make sure the driver hasn't any stale locks. */
521         for (i = 0; i < 8; i++)
522                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523                                 APE_LOCK_GRANT_DRIVER);
524 }
525
526 static int tg3_ape_lock(struct tg3 *tp, int locknum)
527 {
528         int i, off;
529         int ret = 0;
530         u32 status;
531
532         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
533                 return 0;
534
535         switch (locknum) {
536                 case TG3_APE_LOCK_MEM:
537                         break;
538                 default:
539                         return -EINVAL;
540         }
541
542         off = 4 * locknum;
543
544         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
545
546         /* Wait for up to 1 millisecond to acquire lock. */
547         for (i = 0; i < 100; i++) {
548                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
549                 if (status == APE_LOCK_GRANT_DRIVER)
550                         break;
551                 udelay(10);
552         }
553
554         if (status != APE_LOCK_GRANT_DRIVER) {
555                 /* Revoke the lock request. */
556                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
557                                 APE_LOCK_GRANT_DRIVER);
558
559                 ret = -EBUSY;
560         }
561
562         return ret;
563 }
564
565 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566 {
567         int off;
568
569         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570                 return;
571
572         switch (locknum) {
573                 case TG3_APE_LOCK_MEM:
574                         break;
575                 default:
576                         return;
577         }
578
579         off = 4 * locknum;
580         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581 }
582
/* Mask the PCI interrupt line and write 1 to the interrupt mailbox to
 * stop the chip from raising further interrupts.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
589
/* Force an interrupt if status-block work is already pending; otherwise
 * kick the coalescing engine so the status block gets refreshed "now".
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
599
/* Re-enable chip interrupts: clear irq_sync (with a write barrier so the
 * clear is visible before the unmask), unmask the PCI interrupt line,
 * acknowledge up to the last seen tag, then force/kick via tg3_cond_int().
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        /* 1-shot MSI chips need the mailbox written a second time —
         * presumably to rearm the interrupt; TODO confirm against spec.
         */
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}
614
615 static inline unsigned int tg3_has_work(struct tg3 *tp)
616 {
617         struct tg3_hw_status *sblk = tp->hw_status;
618         unsigned int work_exists = 0;
619
620         /* check for phy events */
621         if (!(tp->tg3_flags &
622               (TG3_FLAG_USE_LINKCHG_REG |
623                TG3_FLAG_POLL_SERDES))) {
624                 if (sblk->status & SD_STATUS_LINK_CHG)
625                         work_exists = 1;
626         }
627         /* check for RX/TX work to do */
628         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630                 work_exists = 1;
631
632         return work_exists;
633 }
634
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        /* Acknowledge work up to the last seen status tag. */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        /* Order the mailbox write against subsequent MMIO. */
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
655
/* Quiesce the network interface: stop NAPI polling and the TX queue.
 * trans_start is refreshed first so the watchdog does not fire while
 * the queue is deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}
662
/* Resume the network interface after tg3_netif_stop(): wake the TX
 * queue, restart NAPI, and re-enable chip interrupts.  SD_STATUS_UPDATED
 * is set so tg3_cond_int() forces an interrupt to process anything that
 * arrived while we were stopped.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
674
/* Step the core clock down via TG3PCI_CLOCK_CTRL, going through the
 * intermediate ALTCLK states the hardware requires (each write uses a
 * 40 us settle delay).  No-op on CPMU-equipped and 5780-class chips,
 * which manage clocks themselves.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        /* Keep only the CLKRUN controls and the low divider bits. */
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Older chips: drop 44MHZ_CORE in two steps via ALTCLK. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
706
/* Max poll iterations (10 us each) while waiting for an MII transaction. */
#define PHY_BUSY_LOOPS  5000
708
/* Read PHY register 'reg' over the MII management interface into *val.
 *
 * Hardware auto-polling is paused for the duration of the transaction
 * and restored afterwards.  Returns 0 on success, -EBUSY if the MI
 * interface stays busy for the full PHY_BUSY_LOOPS polling budget
 * (*val is left 0 in that case).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        /* Temporarily disable MI auto-polling so our frame is not raced. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        /* Build the MI frame: PHY address, register, READ command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        /* Poll until the MI interface clears its busy bit. */
        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        /* Re-read after a short settle to get stable data. */
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        /* Restore auto-polling if it was enabled on entry. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
757
/* Write 'val' to PHY register 'reg' over the MII management interface.
 *
 * Mirrors tg3_readphy(): auto-polling is paused around the transaction.
 * Returns 0 on success, -EBUSY on MI-interface timeout.  On 5906, writes
 * to MII_TG3_CTRL and MII_TG3_AUX_CTRL are silently skipped (treated as
 * success) — those registers are not writable on that chip.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        /* Temporarily disable MI auto-polling so our frame is not raced. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Build the MI frame: PHY address, register, data, WRITE command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        /* Poll until the MI interface clears its busy bit. */
        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        /* Restore auto-polling if it was enabled on entry. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
806
/* Write a PHY DSP register: latch the DSP address first, then push the
 * value through the read/write port.  Order is mandatory.
 */
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
812
813 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
814 {
815         u32 phy;
816
817         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
818             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
819                 return;
820
821         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
822                 u32 ephy;
823
824                 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
825                         tg3_writephy(tp, MII_TG3_EPHY_TEST,
826                                      ephy | MII_TG3_EPHY_SHADOW_EN);
827                         if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
828                                 if (enable)
829                                         phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
830                                 else
831                                         phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
832                                 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
833                         }
834                         tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
835                 }
836         } else {
837                 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
838                       MII_TG3_AUXCTL_SHDWSEL_MISC;
839                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
840                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
841                         if (enable)
842                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
843                         else
844                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
845                         phy |= MII_TG3_AUXCTL_MISC_WREN;
846                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
847                 }
848         }
849 }
850
851 static void tg3_phy_set_wirespeed(struct tg3 *tp)
852 {
853         u32 val;
854
855         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
856                 return;
857
858         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
859             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
860                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
861                              (val | (1 << 15) | (1 << 4)));
862 }
863
864 static int tg3_bmcr_reset(struct tg3 *tp)
865 {
866         u32 phy_control;
867         int limit, err;
868
869         /* OK, reset it, and poll the BMCR_RESET bit until it
870          * clears or we time out.
871          */
872         phy_control = BMCR_RESET;
873         err = tg3_writephy(tp, MII_BMCR, phy_control);
874         if (err != 0)
875                 return -EBUSY;
876
877         limit = 5000;
878         while (limit--) {
879                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
880                 if (err != 0)
881                         return -EBUSY;
882
883                 if ((phy_control & BMCR_RESET) == 0) {
884                         udelay(40);
885                         break;
886                 }
887                 udelay(10);
888         }
889         if (limit <= 0)
890                 return -EBUSY;
891
892         return 0;
893 }
894
895 static void tg3_phy_apply_otp(struct tg3 *tp)
896 {
897         u32 otp, phy;
898
899         if (!tp->phy_otp)
900                 return;
901
902         otp = tp->phy_otp;
903
904         /* Enable SM_DSP clock and tx 6dB coding. */
905         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
906               MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
907               MII_TG3_AUXCTL_ACTL_TX_6DB;
908         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
909
910         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
911         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
912         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
913
914         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
915               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
916         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
917
918         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
919         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
920         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
921
922         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
923         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
924
925         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
926         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
927
928         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
929               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
930         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
931
932         /* Turn off SM_DSP clock. */
933         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
934               MII_TG3_AUXCTL_ACTL_TX_6DB;
935         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
936 }
937
938 static int tg3_wait_macro_done(struct tg3 *tp)
939 {
940         int limit = 100;
941
942         while (limit--) {
943                 u32 tmp32;
944
945                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
946                         if ((tmp32 & 0x1000) == 0)
947                                 break;
948                 }
949         }
950         if (limit <= 0)
951                 return -EBUSY;
952
953         return 0;
954 }
955
/* Write a known test pattern into each of the PHY's four DSP channels
 * and read it back to verify the DSP data path after a reset.
 * On failure of the macro engine, *resetp is set to 1 so the caller
 * resets the PHY and retries; on a data mismatch *resetp is left
 * untouched.  Returns 0 when every channel verifies, -EBUSY otherwise.
 * NOTE(review): register 0x16 and the 0x0002/0x0202/0x0082/0x0802
 * command words are undocumented here — presumably DSP macro control;
 * confirm against Broadcom documentation before changing them.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Address this channel's DSP block and load the pattern. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-address the same channel block for read-back. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* The pattern reads back as low/high word pairs; mask to
		 * the bits the hardware stores and compare both halves.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: poke DSP address 0x000b and
				 * fail without requesting a PHY reset. */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1021
1022 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1023 {
1024         int chan;
1025
1026         for (chan = 0; chan < 4; chan++) {
1027                 int i;
1028
1029                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1030                              (chan * 0x2000) | 0x0200);
1031                 tg3_writephy(tp, 0x16, 0x0002);
1032                 for (i = 0; i < 6; i++)
1033                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1034                 tg3_writephy(tp, 0x16, 0x0202);
1035                 if (tg3_wait_macro_done(tp))
1036                         return -EBUSY;
1037         }
1038
1039         return 0;
1040 }
1041
/* PHY reset/recovery sequence for the 5703, 5704 and 5705: reset the
 * PHY, force a 1000-full master link, then write and verify a DSP test
 * pattern, retrying with a fresh PHY reset (up to 10 times) whenever
 * the pattern check requests one.  Returns 0 on success, negative
 * errno otherwise.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		/* NOTE(review): if this read fails on every retry,
		 * phy9_orig is used uninitialized at the restore write
		 * below — confirm the loop cannot exhaust without one
		 * successful read here.
		 */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Clear the test pattern back out of all four DSP channels. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access and reset the DSP address latch. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the master/slave configuration saved above. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt (undo the 0x3000 set). */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1117
1118 static void tg3_link_report(struct tg3 *);
1119
/* Fully reset the tigon3 PHY and reapply every chip-specific PHY
 * workaround afterwards.  Drops the carrier (with a link report) if
 * the interface was running.  Returns 0 on success, negative errno on
 * MDIO failure.
 * NOTE(review): the previous comment mentioned a FORCE argument that
 * this function does not take.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	/* The 5906 EPHY must be brought out of IDDQ (power-down) mode
	 * before it will respond on the MI bus. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is read twice; the second read returns current status. */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 use their own DSP-verified reset sequence. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On 5784 (non-AX), temporarily clear the 10MB-RX-only CPMU
	 * mode around the reset, restoring it afterwards. */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		u32 val;

		/* Undo the 12.5 MHz MAC clock forced for low-power. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

	tg3_phy_apply_otp(tp);

out:
	/* Per-erratum DSP fixups; the magic values are Broadcom-supplied
	 * and intentionally left as written. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1265
/* Configure the GRC local-control GPIOs that gate auxiliary power.
 * If this port — or its 5704/5714 sibling port — needs power for WOL
 * or ASF, drive the GPIO sequence that enables it; otherwise pulse the
 * GPIOs to release it.  LOM (non-NIC) designs are left alone.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	/* Dual-port chips share the aux-power GPIOs; find the peer. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* If the peer port is already initialized it owns
			 * the GPIO setup; don't fight over it. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Three-step sequence: enable outputs, then raise
			 * GPIO0, then drop GPIO2 where it exists; each
			 * write settles for 100 us. */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Pulse GPIO1 low then high to release aux power. */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1361
1362 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1363 {
1364         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1365                 return 1;
1366         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1367                 if (speed != SPEED_10)
1368                         return 1;
1369         } else if (speed == SPEED_10)
1370                 return 1;
1371
1372         return 0;
1373 }
1374
1375 static int tg3_setup_phy(struct tg3 *, int);
1376
1377 #define RESET_KIND_SHUTDOWN     0
1378 #define RESET_KIND_INIT         1
1379 #define RESET_KIND_SUSPEND      2
1380
1381 static void tg3_write_sig_post_reset(struct tg3 *, int);
1382 static int tg3_halt_cpu(struct tg3 *, u32);
1383 static int tg3_nvram_lock(struct tg3 *);
1384 static void tg3_nvram_unlock(struct tg3 *);
1385
1386 static void tg3_power_down_phy(struct tg3 *tp)
1387 {
1388         u32 val;
1389
1390         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1391                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1392                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1393                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1394
1395                         sg_dig_ctrl |=
1396                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1397                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
1398                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1399                 }
1400                 return;
1401         }
1402
1403         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1404                 tg3_bmcr_reset(tp);
1405                 val = tr32(GRC_MISC_CFG);
1406                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1407                 udelay(40);
1408                 return;
1409         } else {
1410                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1411                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1412                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1413         }
1414
1415         /* The PHY should not be powered down on some chips because
1416          * of bugs.
1417          */
1418         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1419             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1420             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1421              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1422                 return;
1423
1424         if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1425                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1426                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1427                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
1428                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1429         }
1430
1431         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1432 }
1433
1434 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1435 {
1436         u32 misc_host_ctrl;
1437         u16 power_control, power_caps;
1438         int pm = tp->pm_cap;
1439
1440         /* Make sure register accesses (indirect or otherwise)
1441          * will function correctly.
1442          */
1443         pci_write_config_dword(tp->pdev,
1444                                TG3PCI_MISC_HOST_CTRL,
1445                                tp->misc_host_ctrl);
1446
1447         pci_read_config_word(tp->pdev,
1448                              pm + PCI_PM_CTRL,
1449                              &power_control);
1450         power_control |= PCI_PM_CTRL_PME_STATUS;
1451         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1452         switch (state) {
1453         case PCI_D0:
1454                 power_control |= 0;
1455                 pci_write_config_word(tp->pdev,
1456                                       pm + PCI_PM_CTRL,
1457                                       power_control);
1458                 udelay(100);    /* Delay after power state change */
1459
1460                 /* Switch out of Vaux if it is a NIC */
1461                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1462                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1463
1464                 return 0;
1465
1466         case PCI_D1:
1467                 power_control |= 1;
1468                 break;
1469
1470         case PCI_D2:
1471                 power_control |= 2;
1472                 break;
1473
1474         case PCI_D3hot:
1475                 power_control |= 3;
1476                 break;
1477
1478         default:
1479                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1480                        "requested.\n",
1481                        tp->dev->name, state);
1482                 return -EINVAL;
1483         };
1484
1485         power_control |= PCI_PM_CTRL_PME_ENABLE;
1486
1487         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1488         tw32(TG3PCI_MISC_HOST_CTRL,
1489              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1490
1491         if (tp->link_config.phy_is_low_power == 0) {
1492                 tp->link_config.phy_is_low_power = 1;
1493                 tp->link_config.orig_speed = tp->link_config.speed;
1494                 tp->link_config.orig_duplex = tp->link_config.duplex;
1495                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1496         }
1497
1498         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1499                 tp->link_config.speed = SPEED_10;
1500                 tp->link_config.duplex = DUPLEX_HALF;
1501                 tp->link_config.autoneg = AUTONEG_ENABLE;
1502                 tg3_setup_phy(tp, 0);
1503         }
1504
1505         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1506                 u32 val;
1507
1508                 val = tr32(GRC_VCPU_EXT_CTRL);
1509                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1510         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1511                 int i;
1512                 u32 val;
1513
1514                 for (i = 0; i < 200; i++) {
1515                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1516                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1517                                 break;
1518                         msleep(1);
1519                 }
1520         }
1521         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1522                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1523                                                      WOL_DRV_STATE_SHUTDOWN |
1524                                                      WOL_DRV_WOL |
1525                                                      WOL_SET_MAGIC_PKT);
1526
1527         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1528
1529         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1530                 u32 mac_mode;
1531
1532                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1533                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1534                         udelay(40);
1535
1536                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1537                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1538                         else
1539                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1540
1541                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1542                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1543                             ASIC_REV_5700) {
1544                                 u32 speed = (tp->tg3_flags &
1545                                              TG3_FLAG_WOL_SPEED_100MB) ?
1546                                              SPEED_100 : SPEED_10;
1547                                 if (tg3_5700_link_polarity(tp, speed))
1548                                         mac_mode |= MAC_MODE_LINK_POLARITY;
1549                                 else
1550                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
1551                         }
1552                 } else {
1553                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1554                 }
1555
1556                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1557                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1558
1559                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1560                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1561                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1562
1563                 tw32_f(MAC_MODE, mac_mode);
1564                 udelay(100);
1565
1566                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1567                 udelay(10);
1568         }
1569
1570         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1571             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1572              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1573                 u32 base_val;
1574
1575                 base_val = tp->pci_clock_ctrl;
1576                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1577                              CLOCK_CTRL_TXCLK_DISABLE);
1578
1579                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1580                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1581         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1582                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1583                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1584                 /* do nothing */
1585         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1586                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1587                 u32 newbits1, newbits2;
1588
1589                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1590                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1591                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1592                                     CLOCK_CTRL_TXCLK_DISABLE |
1593                                     CLOCK_CTRL_ALTCLK);
1594                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1595                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1596                         newbits1 = CLOCK_CTRL_625_CORE;
1597                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1598                 } else {
1599                         newbits1 = CLOCK_CTRL_ALTCLK;
1600                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1601                 }
1602
1603                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1604                             40);
1605
1606                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1607                             40);
1608
1609                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1610                         u32 newbits3;
1611
1612                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1613                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1614                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1615                                             CLOCK_CTRL_TXCLK_DISABLE |
1616                                             CLOCK_CTRL_44MHZ_CORE);
1617                         } else {
1618                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1619                         }
1620
1621                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1622                                     tp->pci_clock_ctrl | newbits3, 40);
1623                 }
1624         }
1625
1626         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1627             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1628             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1629                 tg3_power_down_phy(tp);
1630
1631         tg3_frob_aux_power(tp);
1632
1633         /* Workaround for unstable PLL clock */
1634         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1635             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1636                 u32 val = tr32(0x7d00);
1637
1638                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1639                 tw32(0x7d00, val);
1640                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1641                         int err;
1642
1643                         err = tg3_nvram_lock(tp);
1644                         tg3_halt_cpu(tp, RX_CPU_BASE);
1645                         if (!err)
1646                                 tg3_nvram_unlock(tp);
1647                 }
1648         }
1649
1650         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1651
1652         /* Finally, set the new power state. */
1653         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1654         udelay(100);    /* Delay after power state change */
1655
1656         return 0;
1657 }
1658
1659 static void tg3_link_report(struct tg3 *tp)
1660 {
1661         if (!netif_carrier_ok(tp->dev)) {
1662                 if (netif_msg_link(tp))
1663                         printk(KERN_INFO PFX "%s: Link is down.\n",
1664                                tp->dev->name);
1665         } else if (netif_msg_link(tp)) {
1666                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1667                        tp->dev->name,
1668                        (tp->link_config.active_speed == SPEED_1000 ?
1669                         1000 :
1670                         (tp->link_config.active_speed == SPEED_100 ?
1671                          100 : 10)),
1672                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1673                         "full" : "half"));
1674
1675                 printk(KERN_INFO PFX
1676                        "%s: Flow control is %s for TX and %s for RX.\n",
1677                        tp->dev->name,
1678                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1679                        "on" : "off",
1680                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1681                        "on" : "off");
1682         }
1683 }
1684
1685 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1686 {
1687         u16 miireg;
1688
1689         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1690                 miireg = ADVERTISE_PAUSE_CAP;
1691         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1692                 miireg = ADVERTISE_PAUSE_ASYM;
1693         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1694                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1695         else
1696                 miireg = 0;
1697
1698         return miireg;
1699 }
1700
1701 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1702 {
1703         u16 miireg;
1704
1705         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1706                 miireg = ADVERTISE_1000XPAUSE;
1707         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1708                 miireg = ADVERTISE_1000XPSE_ASYM;
1709         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1710                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1711         else
1712                 miireg = 0;
1713
1714         return miireg;
1715 }
1716
1717 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1718 {
1719         u8 cap = 0;
1720
1721         if (lcladv & ADVERTISE_PAUSE_CAP) {
1722                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1723                         if (rmtadv & LPA_PAUSE_CAP)
1724                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1725                         else if (rmtadv & LPA_PAUSE_ASYM)
1726                                 cap = TG3_FLOW_CTRL_RX;
1727                 } else {
1728                         if (rmtadv & LPA_PAUSE_CAP)
1729                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1730                 }
1731         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1732                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1733                         cap = TG3_FLOW_CTRL_TX;
1734         }
1735
1736         return cap;
1737 }
1738
1739 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1740 {
1741         u8 cap = 0;
1742
1743         if (lcladv & ADVERTISE_1000XPAUSE) {
1744                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1745                         if (rmtadv & LPA_1000XPAUSE)
1746                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1747                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1748                                 cap = TG3_FLOW_CTRL_RX;
1749                 } else {
1750                         if (rmtadv & LPA_1000XPAUSE)
1751                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1752                 }
1753         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1754                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1755                         cap = TG3_FLOW_CTRL_TX;
1756         }
1757
1758         return cap;
1759 }
1760
1761 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1762 {
1763         u8 new_tg3_flags = 0;
1764         u32 old_rx_mode = tp->rx_mode;
1765         u32 old_tx_mode = tp->tx_mode;
1766
1767         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1768             (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1769                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1770                         new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv,
1771                                                                    remote_adv);
1772                 else
1773                         new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv,
1774                                                                    remote_adv);
1775         } else {
1776                 new_tg3_flags = tp->link_config.flowctrl;
1777         }
1778
1779         tp->link_config.active_flowctrl = new_tg3_flags;
1780
1781         if (new_tg3_flags & TG3_FLOW_CTRL_RX)
1782                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1783         else
1784                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1785
1786         if (old_rx_mode != tp->rx_mode) {
1787                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1788         }
1789
1790         if (new_tg3_flags & TG3_FLOW_CTRL_TX)
1791                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1792         else
1793                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1794
1795         if (old_tx_mode != tp->tx_mode) {
1796                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1797         }
1798 }
1799
1800 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1801 {
1802         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1803         case MII_TG3_AUX_STAT_10HALF:
1804                 *speed = SPEED_10;
1805                 *duplex = DUPLEX_HALF;
1806                 break;
1807
1808         case MII_TG3_AUX_STAT_10FULL:
1809                 *speed = SPEED_10;
1810                 *duplex = DUPLEX_FULL;
1811                 break;
1812
1813         case MII_TG3_AUX_STAT_100HALF:
1814                 *speed = SPEED_100;
1815                 *duplex = DUPLEX_HALF;
1816                 break;
1817
1818         case MII_TG3_AUX_STAT_100FULL:
1819                 *speed = SPEED_100;
1820                 *duplex = DUPLEX_FULL;
1821                 break;
1822
1823         case MII_TG3_AUX_STAT_1000HALF:
1824                 *speed = SPEED_1000;
1825                 *duplex = DUPLEX_HALF;
1826                 break;
1827
1828         case MII_TG3_AUX_STAT_1000FULL:
1829                 *speed = SPEED_1000;
1830                 *duplex = DUPLEX_FULL;
1831                 break;
1832
1833         default:
1834                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1835                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1836                                  SPEED_10;
1837                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1838                                   DUPLEX_HALF;
1839                         break;
1840                 }
1841                 *speed = SPEED_INVALID;
1842                 *duplex = DUPLEX_INVALID;
1843                 break;
1844         };
1845 }
1846
/* Program the copper PHY advertisement/control registers from
 * tp->link_config and kick off the link.  Three cases:
 *   1. phy_is_low_power: advertise only 10Mb (plus 100Mb when WOL
 *      at 100Mb is flagged) and clear gigabit advertisement;
 *   2. speed == SPEED_INVALID: autonegotiate everything permitted
 *      by link_config.advertising;
 *   3. otherwise: advertise exactly the one requested speed/duplex.
 * Finally, either force BMCR to the fixed mode (autoneg disabled)
 * or restart autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No fixed speed requested: build the 10/100
		 * advertisement word from link_config.advertising.
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		/* Gigabit abilities go in the 1000BASE-T control
		 * register, written separately.
		 */
		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 additionally advertise as master
			 * (rev-specific; presumably a silicon
			 * workaround -- see the CHIPREV checks).
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* 10/100 mode: clear gigabit advertisement. */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* BMCR must change: put the PHY in loopback to
			 * drop the link, poll (up to ~15ms) for link
			 * status to clear, then write the new mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR read twice -- the first read
				 * presumably clears the latched value
				 * (standard MII latched-low behavior).
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1984
1985 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1986 {
1987         int err;
1988
1989         /* Turn off tap power management. */
1990         /* Set Extended packet length bit */
1991         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1992
1993         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1994         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1995
1996         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1997         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1998
1999         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2000         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2001
2002         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2003         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2004
2005         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2006         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2007
2008         udelay(40);
2009
2010         return err;
2011 }
2012
2013 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2014 {
2015         u32 adv_reg, all_mask = 0;
2016
2017         if (mask & ADVERTISED_10baseT_Half)
2018                 all_mask |= ADVERTISE_10HALF;
2019         if (mask & ADVERTISED_10baseT_Full)
2020                 all_mask |= ADVERTISE_10FULL;
2021         if (mask & ADVERTISED_100baseT_Half)
2022                 all_mask |= ADVERTISE_100HALF;
2023         if (mask & ADVERTISED_100baseT_Full)
2024                 all_mask |= ADVERTISE_100FULL;
2025
2026         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2027                 return 0;
2028
2029         if ((adv_reg & all_mask) != all_mask)
2030                 return 0;
2031         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2032                 u32 tg3_ctrl;
2033
2034                 all_mask = 0;
2035                 if (mask & ADVERTISED_1000baseT_Half)
2036                         all_mask |= ADVERTISE_1000HALF;
2037                 if (mask & ADVERTISED_1000baseT_Full)
2038                         all_mask |= ADVERTISE_1000FULL;
2039
2040                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2041                         return 0;
2042
2043                 if ((tg3_ctrl & all_mask) != all_mask)
2044                         return 0;
2045         }
2046         return 1;
2047 }
2048
2049 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2050 {
2051         u32 curadv, reqadv;
2052
2053         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2054                 return 1;
2055
2056         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2057         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2058
2059         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2060                 if (curadv != reqadv)
2061                         return 0;
2062
2063                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2064                         tg3_readphy(tp, MII_LPA, rmtadv);
2065         } else {
2066                 /* Reprogram the advertisement register, even if it
2067                  * does not affect the current link.  If the link
2068                  * gets renegotiated in the future, we can save an
2069                  * additional renegotiation cycle by advertising
2070                  * it correctly in the first place.
2071                  */
2072                 if (curadv != reqadv) {
2073                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2074                                      ADVERTISE_PAUSE_ASYM);
2075                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2076                 }
2077         }
2078
2079         return 1;
2080 }
2081
/* Bring up (or re-verify) the link on a copper PHY and program the
 * MAC to match the negotiated or forced mode.  @force_reset nonzero
 * requests an unconditional PHY reset first.  Returns 0, except that
 * a PHY access failure in the BCM5401 DSP workaround path returns
 * that error.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Quiesce MAC link events and clear stale status bits before
	 * touching the PHY.
	 */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR is read twice; presumably the first read clears
		 * the latched link-down bit (MII latched-low behavior).
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		/* 5401: if the link is down, (re)load the DSP
		 * workaround sequence and wait up to ~10ms for link.
		 */
		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit with no link: reset the
			 * PHY and redo the DSP load once more.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of AUX_CTRL (shadow 0x4007) is set;
		 * if it was not, set it and renegotiate from scratch.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll up to 100 times for link (double BMSR read as above). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait (up to ~20ms) for a nonzero aux status, then
		 * decode it into speed/duplex.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a stable BMCR value (not 0, not the
		 * all-ones-ish 0x7fff transient).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Accept the autonegotiated link only if
			 * everything we want is advertised and the
			 * pause bits are consistent with the request.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
			}
		} else {
			/* Forced mode: accept only an exact match of
			 * speed, duplex and flow control.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Link was not usable as-is: (re)program the PHY and
		 * check whether the link comes right back.
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program MAC port mode/duplex to match the PHY outcome. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / high-speed PCI: notify the
	 * firmware via its mailbox after the status settles.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Report carrier transitions to the network stack and log. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2338
/* Software state for the fiber autonegotiation state machine
 * implemented in tg3_fiber_aneg_smachine(); the state names follow
 * the IEEE 802.3 clause 37 arbitration process.
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* of the arbitration FSM */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* control/status bits, set by the FSM */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters (in smachine invocations); their difference is
	 * compared against ANEG_STATE_SETTLE_TIME for state timeouts.
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last rx config word compared */
	int ability_match_count;	/* consecutive identical words seen */

	char ability_match, idle_match, ack_match;

	/* Config words as written to MAC_TX_AUTO_NEG / read from
	 * MAC_RX_AUTO_NEG; ANEG_CFG_* are their bit meanings.
	 */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must be held before the FSM advances past it. */
#define ANEG_STATE_SETTLE_TIME  10000
2402
/* One step of the software 1000BASE-X autonegotiation arbitration
 * state machine (IEEE 802.3 clause 37 style).  Called repeatedly by
 * fiber_autoneg(); each call samples the received config word from
 * the MAC and advances ap->state at most one transition.
 *
 * Returns ANEG_OK to keep polling, ANEG_TIMER_ENAB when a settle
 * timer is (re)armed, ANEG_DONE on completion, or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: reset all arbitration bookkeeping. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the incoming config word and update the
	 * ability/ack/idle match detectors.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			/* Word changed: restart the match count. */
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				/* Same word seen twice in a row:
				 * ability match achieved.
				 */
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config words being received: partner is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit an all-zeroes config word and arm the
		 * settle timer.
		 */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold the restart state for the settle time. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Begin advertising our abilities: full duplex plus
		 * the configured pause capability bits.
		 */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner by setting ACK in our word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				/* Partner's word changed underneath us:
				 * restart arbitration.
				 */
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Decode the partner's ability word into MR_LP_ADV_*
		 * flags; reject words with reserved bits set.
		 */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					/* Next-page exchange is not
					 * implemented (see the WAIT
					 * states below), so fail.
					 */
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idles. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2656
2657 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
2658 {
2659         int res = 0;
2660         struct tg3_fiber_aneginfo aninfo;
2661         int status = ANEG_FAILED;
2662         unsigned int tick;
2663         u32 tmp;
2664
2665         tw32_f(MAC_TX_AUTO_NEG, 0);
2666
2667         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2668         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2669         udelay(40);
2670
2671         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2672         udelay(40);
2673
2674         memset(&aninfo, 0, sizeof(aninfo));
2675         aninfo.flags |= MR_AN_ENABLE;
2676         aninfo.state = ANEG_STATE_UNKNOWN;
2677         aninfo.cur_time = 0;
2678         tick = 0;
2679         while (++tick < 195000) {
2680                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2681                 if (status == ANEG_DONE || status == ANEG_FAILED)
2682                         break;
2683
2684                 udelay(1);
2685         }
2686
2687         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2688         tw32_f(MAC_MODE, tp->mac_mode);
2689         udelay(40);
2690
2691         *txflags = aninfo.txconfig;
2692         *rxflags = aninfo.flags;
2693
2694         if (status == ANEG_DONE &&
2695             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2696                              MR_LP_ADV_FULL_DUPLEX)))
2697                 res = 1;
2698
2699         return res;
2700 }
2701
/* One-time hardware bring-up for the BCM8002 SerDes PHY: PLL setup,
 * software reset, and a POR pulse through vendor-specific PHY
 * registers.  NOTE(review): the register numbers (0x10-0x18) and
 * values are Broadcom magic and not otherwise documented; do not
 * reorder these writes.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	/* (i.e. skip the sequence when init is already complete and
	 * the PCS has not synced.)
	 */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2751
/* Fiber link setup using the on-chip SG_DIG hardware autoneg engine.
 * Returns nonzero if the link is up.
 *
 * NOTE(review): the MAC_SERDES_CFG values (0xc010000, 0x4010000,
 * 0xc011000) and the 0x00f06fff preservation mask are Broadcom
 * magic; meanings beyond the inline comments are undocumented.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* The SERDES_CFG workaround applies to all revisions except
	 * 5704 A0/A1.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: disable the hardware autoneg engine if
		 * it was active, then declare link up on PCS sync.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* Engine not configured as expected.  If we are mid
		 * parallel-detection with PCS sync and no incoming
		 * config words, keep the link up and count down
		 * instead of restarting autoneg.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg finished: derive flow control from
			 * the local and partner pause bits.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: disable the engine
				 * and try parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* Neither PCS sync nor signal detect: re-arm the
		 * autoneg timeout counter.
		 */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2893
2894 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2895 {
2896         int current_link_up = 0;
2897
2898         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2899                 goto out;
2900
2901         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2902                 u32 txflags, rxflags;
2903                 int i;
2904
2905                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
2906                         u32 local_adv = 0, remote_adv = 0;
2907
2908                         if (txflags & ANEG_CFG_PS1)
2909                                 local_adv |= ADVERTISE_1000XPAUSE;
2910                         if (txflags & ANEG_CFG_PS2)
2911                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
2912
2913                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
2914                                 remote_adv |= LPA_1000XPAUSE;
2915                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
2916                                 remote_adv |= LPA_1000XPAUSE_ASYM;
2917
2918                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2919
2920                         current_link_up = 1;
2921                 }
2922                 for (i = 0; i < 30; i++) {
2923                         udelay(20);
2924                         tw32_f(MAC_STATUS,
2925                                (MAC_STATUS_SYNC_CHANGED |
2926                                 MAC_STATUS_CFG_CHANGED));
2927                         udelay(40);
2928                         if ((tr32(MAC_STATUS) &
2929                              (MAC_STATUS_SYNC_CHANGED |
2930                               MAC_STATUS_CFG_CHANGED)) == 0)
2931                                 break;
2932                 }
2933
2934                 mac_status = tr32(MAC_STATUS);
2935                 if (current_link_up == 0 &&
2936                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2937                     !(mac_status & MAC_STATUS_RCVD_CFG))
2938                         current_link_up = 1;
2939         } else {
2940                 tg3_setup_flow_control(tp, 0, 0);
2941
2942                 /* Forcing 1000FD link up. */
2943                 current_link_up = 1;
2944
2945                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2946                 udelay(40);
2947
2948                 tw32_f(MAC_MODE, tp->mac_mode);
2949                 udelay(40);
2950         }
2951
2952 out:
2953         return current_link_up;
2954 }
2955
/* Top-level link setup for TBI (fiber) ports.  Dispatches to the
 * hardware SG_DIG engine or the by-hand path, then reconciles
 * netif carrier state, LED control, and active speed/duplex with
 * the result.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the old settings so we can report a change even
	 * when the carrier state itself does not flip.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: no hardware autoneg, carrier already up, init
	 * complete, and the MAC reports a clean synced/signal state
	 * with no pending config - just ack the change bits.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Mark the status block updated while clearing any pending
	 * link-change indication.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack latched status-change bits until they stop reasserting
	 * (or we give up after 100 tries).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Briefly pulse SEND_CONFIGS to provoke the
			 * link partner.
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* Fiber links always come up as 1000FD here. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged; still report if flow control,
		 * speed, or duplex differ from before.
		 */
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
3063
/* Link setup for fiber PHYs driven through the standard MII register
 * interface (e.g. 5714S SerDes).  Programs autonegotiation or forced
 * mode, reads back the resulting link state, and updates MAC mode,
 * flow control, and carrier state accordingly.
 * Returns 0 on success, or the OR of any tg3_readphy() error codes.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Clear any latched link/config-change indications. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched; read twice to get the current
	 * state (mirrors the double read done after a forced-mode
	 * write further below).
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* On 5714, trust the MAC TX status over the PHY's
		 * BMSR link bit.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
	     tp->link_config.flowctrl == tp->link_config.active_flowctrl) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000Base-X advertisement word from the
		 * configured flow control and advertised speeds.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or AN was disabled):
			 * restart autoneg and let the serdes timer poll
			 * for completion; return without link evaluation.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: autoneg off, duplex from configuration. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Strip the speed bits from the
				 * advertisement and briefly restart AN so
				 * the link partner observes the drop.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched link bit: read BMSR twice. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		/* Fiber link is always 1000 Mb/s; duplex comes from BMCR
		 * or, when autoneg ran, from the resolved advertisements.
		 */
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			}
			else
				/* No common ability: treat link as down. */
				current_link_up = 0;
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	/* Propagate a link state change to the networking core. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3234
/* Fiber autoneg fallback ("parallel detection"): once the serdes
 * autoneg timeout counter expires without a link, probe the PHY for
 * signal presence; if signal is detected but no config code words are
 * being received, force the link up at 1000/full with autoneg off.
 * Conversely, if a parallel-detected link starts receiving config
 * code words again, re-enable autonegotiation.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Double read — mirrors the latched-register
			 * handling used for BMSR elsewhere.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3292
/* Top-level link setup: dispatch to the PHY-type-specific routine,
 * then reprogram MAC parameters that depend on the resulting link
 * state — GRC timer prescaler on 5784 A0/A1, TX slot time for
 * half-duplex gigabit, statistics coalescing on pre-5705 chips, and
 * the PCIe power-management threshold workaround.
 * Returns the error code from the PHY setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
		u32 val, scale;

		/* Match the GRC timer prescaler to the current MAC
		 * core clock reported by the CPMU.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* Half-duplex gigabit uses an extended slot time (0xff vs 32). */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Coalesce statistics only while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		/* ASPM workaround: use the saved L1 threshold while
		 * there is no link; set the full threshold mask once
		 * the link is up.
		 */
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3355
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active (flag set or the
	 * indirect mailbox writer in use), this path should be
	 * impossible — something else is wrong.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery; the chip reset itself happens
	 * later, from the workqueue.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3376
3377 static inline u32 tg3_tx_avail(struct tg3 *tp)
3378 {
3379         smp_mb();
3380         return (tp->tx_pending -
3381                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3382 }
3383
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Reap every descriptor the hardware has completed: unmap the
	 * head fragment and all page fragments, then free the skb.
	 */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot without an skb means the hardware and
		 * driver views of the ring disagree; trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* Fragment slots must carry no skb and must not
			 * run past the hardware consumer index.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check the wake condition under the TX lock to close the
	 * race with a concurrent tg3_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3451
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Select the descriptor ring, buffer-tracking array, and
	 * buffer size matching the opaque ring key.
	 */
	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	};

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	/* NOTE(review): the pci_map_single() result is not checked
	 * with pci_dma_mapping_error() — confirm mapping failures are
	 * acceptable/impossible for this device and kernel version.
	 */
	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	/* Only the address words are written; the rest of the
	 * descriptor is invariant (see header comment).
	 */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
3523
3524 /* We only need to move over in the address because the other
3525  * members of the RX descriptor are invariant.  See notes above
3526  * tg3_alloc_rx_skb for full details.
3527  */
3528 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3529                            int src_idx, u32 dest_idx_unmasked)
3530 {
3531         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3532         struct ring_info *src_map, *dest_map;
3533         int dest_idx;
3534
3535         switch (opaque_key) {
3536         case RXD_OPAQUE_RING_STD:
3537                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3538                 dest_desc = &tp->rx_std[dest_idx];
3539                 dest_map = &tp->rx_std_buffers[dest_idx];
3540                 src_desc = &tp->rx_std[src_idx];
3541                 src_map = &tp->rx_std_buffers[src_idx];
3542                 break;
3543
3544         case RXD_OPAQUE_RING_JUMBO:
3545                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3546                 dest_desc = &tp->rx_jumbo[dest_idx];
3547                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3548                 src_desc = &tp->rx_jumbo[src_idx];
3549                 src_map = &tp->rx_jumbo_buffers[src_idx];
3550                 break;
3551
3552         default:
3553                 return;
3554         };
3555
3556         dest_map->skb = src_map->skb;
3557         pci_unmap_addr_set(dest_map, mapping,
3558                            pci_unmap_addr(src_map, mapping));
3559         dest_desc->addr_hi = src_desc->addr_hi;
3560         dest_desc->addr_lo = src_desc->addr_lo;
3561
3562         src_map->skb = NULL;
3563 }
3564
#if TG3_VLAN_TAG_USED
/* Deliver a VLAN-tagged skb via the hardware-acceleration path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3571
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies the posting ring
		 * (standard or jumbo) and the buffer's index in it.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		/* Drop (recycle) buffers with receive errors, except the
		 * odd-nibble MII error which is tolerated.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: pass the ring buffer up the stack
			 * and post a freshly allocated replacement.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy into a new skb and recycle
			 * the original ring buffer in place.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when checksumming is
		 * enabled and the TCP/UDP checksum field is all-ones.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Post the standard-ring producer index to the chip in
		 * batches of rx_std_max_post.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Ordering barrier for the mailbox MMIO writes above. */
	mmiowb();

	return received;
}
3751
/* One round of NAPI servicing: handle link-change events (unless the
 * link-change register or serdes polling is in use), reap completed
 * TX descriptors, then process RX within the remaining budget.
 * Returns the updated work_done count; only RX packets count against
 * the budget.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit, then re-run link
			 * setup under the device lock.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* Bail out early so tg3_poll() can schedule the reset. */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
3785
/* NAPI poll callback: repeatedly run tg3_poll_work() until the budget
 * is exhausted or the device reports no more work, in which case
 * polling ends and chip interrupts are re-enabled.  If a TX recovery
 * becomes pending, polling stops and the reset task is scheduled.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			/* All caught up: leave polling mode and let the
			 * chip raise interrupts again.
			 */
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
3826
/* Mark interrupt processing as shut off and wait for any in-flight IRQ
 * handler to finish.  Called with tp->lock held via tg3_full_lock(tp, 1);
 * the handlers check tp->irq_sync (see tg3_irq_sync()) before scheduling
 * NAPI work.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();	/* publish irq_sync before waiting out the handler */

	synchronize_irq(tp->pdev->irq);
}
3836
/* Nonzero while tg3_irq_quiesce() has interrupt processing shut off;
 * the IRQ handlers test this before scheduling NAPI work.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3841
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);	/* also wait out a running IRQ handler */
}
3853
/* Counterpart of tg3_full_lock().  Note: does not clear tp->irq_sync;
 * paths that quiesced IRQs reset it separately (see tg3_restart_hw()).
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3858
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the NAPI poll will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling while tg3_irq_quiesce() has us shut off. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
3875
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Don't schedule NAPI while tg3_irq_quiesce() is waiting us out. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3900
/* INTx interrupt handler.  The line may be shared with other devices,
 * so the handler must first determine whether the interrupt is ours.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3949
/* INTx handler for chips using tagged status blocks.  If the status
 * tag matches the last one we acknowledged, the status block has not
 * changed and the interrupt is probably not ours.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3997
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* Claim the interrupt (and disable further ones) only when the
	 * status block was updated or the chip reports INTA asserted.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
4012
4013 static int tg3_init_hw(struct tg3 *, int);
4014 static int tg3_halt(struct tg3 *, int, int);
4015
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  On failure the device is torn down:
 * the lock is dropped around del_timer_sync()/dev_close() (which may
 * sleep) and re-acquired before returning — hence the sparse
 * annotations below.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;	/* let the IRQ handler run again */
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
4039
4040 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook (CONFIG_NET_POLL_CONTROLLER): invoke the INTx ISR
 * directly so netconsole and friends can make progress without a
 * hardware interrupt.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
4047 #endif
4048
/* Process-context worker that halts and re-initializes the chip.
 * Scheduled from tg3_tx_timeout() and from tg3_poll() when TX
 * recovery is pending.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* Nothing to do if the interface was downed in the meantime. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* Stop the data path outside the lock, then re-lock with
	 * irq_sync=1 so the IRQ handler is quiesced as well.
	 */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* Recovering from a TX hang: switch to the alternate
		 * (flush-on-write) mailbox methods.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
4089
/* Print a brief dump of MAC and DMA status registers to aid debugging
 * of transmit timeouts.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
4097
/* net_device watchdog callback: the stack detected a stalled TX queue.
 * Optionally log some chip state, then defer a full reset to process
 * context via the reset task.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
4110
4111 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4112 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4113 {
4114         u32 base = (u32) mapping & 0xffffffff;
4115
4116         return ((base > 0xffffdcc0) &&
4117                 (base + len + 8 < base));
4118 }
4119
4120 /* Test for DMA addresses > 40-bit */
4121 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4122                                           int len)
4123 {
4124 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4125         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4126                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4127         return 0;
4128 #else
4129         return 0;
4130 #endif
4131 }
4132
4133 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4134
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize @skb into a freshly allocated skb, map it, and replace the
 * descriptors from *start up to (but excluding) last_plus_one with a
 * single descriptor for the copy.  The original skb's mappings are
 * unmapped and the skb is always consumed.  Returns 0 on success, -1
 * if allocation fails or the copy itself crosses a 4G boundary (the
 * packet is then silently dropped); *start is advanced only on success.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701: align the copied data on a 4-byte boundary. */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Entry 0 holds the linear head, the rest page fragments. */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* First entry now owns the linearized copy. */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
4202
4203 static void tg3_set_txd(struct tg3 *tp, int entry,
4204                         dma_addr_t mapping, int len, u32 flags,
4205                         u32 mss_and_is_end)
4206 {
4207         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4208         int is_end = (mss_and_is_end & 0x1);
4209         u32 mss = (mss_and_is_end >> 1);
4210         u32 vlan_tag = 0;
4211
4212         if (is_end)
4213                 flags |= TXD_FLAG_END;
4214         if (flags & TXD_FLAG_VLAN) {
4215                 vlan_tag = flags >> 16;
4216                 flags &= 0xffff;
4217         }
4218         vlan_tag |= (mss << TXD_MSS_SHIFT);
4219
4220         txd->addr_hi = ((u64) mapping >> 32);
4221         txd->addr_lo = ((u64) mapping & 0xffffffff);
4222         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4223         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4224 }
4225
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO: the headers are modified below, so make sure we
		 * hold a private copy if they are cloned.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Clear the IP checksum and set tot_len to the
			 * per-segment size; header length is folded into
			 * the mss field for the chip.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* NOTE(review): the re-check of tg3_tx_avail() after
		 * netif_stop_queue() races with the TX completion path;
		 * later tg3 versions insert an smp_mb() between the two —
		 * confirm whether that barrier is needed here.
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4344
4345 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4346
4347 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4348  * TSO header is greater than 80 bytes.
4349  */
4350 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4351 {
4352         struct sk_buff *segs, *nskb;
4353
4354         /* Estimate the number of fragments in the worst case */
4355         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4356                 netif_stop_queue(tp->dev);
4357                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4358                         return NETDEV_TX_BUSY;
4359
4360                 netif_wake_queue(tp->dev);
4361         }
4362
4363         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4364         if (unlikely(IS_ERR(segs)))
4365                 goto tg3_tso_bug_end;
4366
4367         do {
4368                 nskb = segs;
4369                 segs = segs->next;
4370                 nskb->next = NULL;
4371                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4372         } while (segs);
4373
4374 tg3_tso_bug_end:
4375         dev_kfree_skb(skb);
4376
4377         return NETDEV_TX_OK;
4378 }
4379
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* TSO: the headers are modified below, so make sure we
		 * hold a private copy if they are cloned.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		hdr_len = ip_tcp_len + tcp_opt_len;
		/* Headers over 80 bytes trip a TSO chip bug on affected
		 * parts; fall back to software GSO (see tg3_tso_bug()).
		 */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO: seed the TCP checksum with the
			 * pseudo-header sum.
			 */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				/* Encode IP/TCP option word counts into the
				 * mss field for the chip.
				 */
				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				/* Older parts take the option word counts in
				 * base_flags instead.
				 */
				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	/* Track whether any mapping trips a DMA erratum; if so the whole
	 * packet goes through the linearizing workaround below.
	 */
	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
		would_hit_hwbug = 1;
	else if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this packet. */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* NOTE(review): the re-check of tg3_tx_avail() after
		 * netif_stop_queue() races with the TX completion path;
		 * later tg3 versions insert an smp_mb() between the two —
		 * confirm whether that barrier is needed here.
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4555
4556 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4557                                int new_mtu)
4558 {
4559         dev->mtu = new_mtu;
4560
4561         if (new_mtu > ETH_DATA_LEN) {
4562                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4563                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4564                         ethtool_op_set_tso(dev, 0);
4565                 }
4566                 else
4567                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4568         } else {
4569                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4570                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4571                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4572         }
4573 }
4574
/* net_device change_mtu handler.  When the interface is running, the
 * chip must be halted and re-initialized so the new MTU (and possibly
 * the jumbo ring setting, see tg3_set_mtu()) takes effect.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_netif_stop(tp);

	/* irq_sync=1: quiesce the IRQ handler before reprogramming. */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return err;
}
4608
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard-sized RX buffers. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo RX buffers. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* TX ring: each skb occupies one slot for its linear head plus
	 * one per page fragment; only the head slot holds the skb pointer.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4680
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM if not even one standard RX buffer
 * (or, with jumbo enabled, one jumbo buffer) could be allocated.
 * A partial allocation shrinks rx_pending / rx_jumbo_pending
 * instead of failing.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips use larger standard-ring buffers instead of
	 * a jumbo ring when the MTU exceeds the ethernet default.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			/* Run with however many buffers we did get. */
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4770
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases everything tg3_alloc_consistent() set up.  The ring_info
 * bookkeeping is a single kzalloc (rx_jumbo_buffers and tx_buffers
 * point into it, so only rx_std_buffers is freed).  Each pointer is
 * NULLed afterwards, making the function safe to call repeatedly.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
			tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4810
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the host-side ring bookkeeping plus the DMA-coherent
 * descriptor rings, status block and statistics block.  Returns 0 on
 * success, -ENOMEM on failure; on failure everything allocated so
 * far is released via tg3_free_consistent().
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* One zeroed allocation holds all three ring_info arrays; the
	 * jumbo and tx arrays are carved out of its tail just below.
	 */
	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	/* pci_alloc_consistent does not zero the memory. */
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
4872
4873 #define MAX_WAIT_CNT 1000
4874
4875 /* To stop a block, clear the enable bit and poll till it
4876  * clears.  tp->lock is held.
4877  */
4878 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4879 {
4880         unsigned int i;
4881         u32 val;
4882
4883         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4884                 switch (ofs) {
4885                 case RCVLSC_MODE:
4886                 case DMAC_MODE:
4887                 case MBFREE_MODE:
4888                 case BUFMGR_MODE:
4889                 case MEMARB_MODE:
4890                         /* We can't enable/disable these bits of the
4891                          * 5705/5750, just say success.
4892                          */
4893                         return 0;
4894
4895                 default:
4896                         break;
4897                 };
4898         }
4899
4900         val = tr32(ofs);
4901         val &= ~enable_bit;
4902         tw32_f(ofs, val);
4903
4904         for (i = 0; i < MAX_WAIT_CNT; i++) {
4905                 udelay(100);
4906                 val = tr32(ofs);
4907                 if ((val & enable_bit) == 0)
4908                         break;
4909         }
4910
4911         if (i == MAX_WAIT_CNT && !silent) {
4912                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4913                        "ofs=%lx enable_bit=%x\n",
4914                        ofs, enable_bit);
4915                 return -ENODEV;
4916         }
4917
4918         return 0;
4919 }
4920
/* tp->lock is held.
 *
 * Quiesce the chip: disable interrupts, stop the RX blocks, then the
 * TX and DMA blocks, then the MAC TX engine, and finally the host
 * coalescing, buffer manager and memory arbiter, OR-ing together any
 * per-block timeout errors.  Returns 0 if everything stopped cleanly.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop RX first so no new packets land while blocks go down. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* Poll up to 100ms for the MAC TX engine to stop. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		/* NOTE(review): printed even when silent != 0, unlike
		 * tg3_stop_block() — presumably deliberate; confirm.
		 */
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse a reset through all the FTQ state machines. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear the host status and statistics blocks if present. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4983
/* tp->lock is held.
 *
 * Acquire the NVRAM software arbitration semaphore; recursive via
 * nvram_lock_cnt, so only the first holder touches the hardware.
 * Returns 0 on success (also when the chip has no NVRAM), -ENODEV
 * if the grant never arrives (~160ms: 8000 * 20us).
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				/* Withdraw the request before failing. */
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
5006
5007 /* tp->lock is held. */
5008 static void tg3_nvram_unlock(struct tg3 *tp)
5009 {
5010         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5011                 if (tp->nvram_lock_cnt > 0)
5012                         tp->nvram_lock_cnt--;
5013                 if (tp->nvram_lock_cnt == 0)
5014                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5015         }
5016 }
5017
5018 /* tp->lock is held. */
5019 static void tg3_enable_nvram_access(struct tg3 *tp)
5020 {
5021         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5022             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5023                 u32 nvaccess = tr32(NVRAM_ACCESS);
5024
5025                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5026         }
5027 }
5028
5029 /* tp->lock is held. */
5030 static void tg3_disable_nvram_access(struct tg3 *tp)
5031 {
5032         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5033             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5034                 u32 nvaccess = tr32(NVRAM_ACCESS);
5035
5036                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5037         }
5038 }
5039
/* Post an event to the APE management firmware.
 *
 * Bails out silently if the APE shared-memory signature or firmware
 * status do not look ready.  Waits up to ~1ms (10 * 100us) for a
 * previously posted event to be consumed, writes the new event under
 * the APE memory lock, then rings the APE doorbell.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (apedata != APE_FW_STATUS_READY)
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Previous event consumed: post ours while still
		 * holding the lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the doorbell only if our event actually got posted. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
5075
5076 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5077 {
5078         u32 event;
5079         u32 apedata;
5080
5081         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5082                 return;
5083
5084         switch (kind) {
5085                 case RESET_KIND_INIT:
5086                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5087                                         APE_HOST_SEG_SIG_MAGIC);
5088                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5089                                         APE_HOST_SEG_LEN_MAGIC);
5090                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5091                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5092                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5093                                         APE_HOST_DRIVER_ID_MAGIC);
5094                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5095                                         APE_HOST_BEHAV_NO_PHYLOCK);
5096
5097                         event = APE_EVENT_STATUS_STATE_START;
5098                         break;
5099                 case RESET_KIND_SHUTDOWN:
5100                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5101                         break;
5102                 case RESET_KIND_SUSPEND:
5103                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5104                         break;
5105                 default:
5106                         return;
5107         }
5108
5109         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5110
5111         tg3_ape_send_event(tp, event);
5112 }
5113
5114 /* tp->lock is held. */
5115 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5116 {
5117         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5118                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5119
5120         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5121                 switch (kind) {
5122                 case RESET_KIND_INIT:
5123                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5124                                       DRV_STATE_START);
5125                         break;
5126
5127                 case RESET_KIND_SHUTDOWN:
5128                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5129                                       DRV_STATE_UNLOAD);
5130                         break;
5131
5132                 case RESET_KIND_SUSPEND:
5133                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5134                                       DRV_STATE_SUSPEND);
5135                         break;
5136
5137                 default:
5138                         break;
5139                 };
5140         }
5141
5142         if (kind == RESET_KIND_INIT ||
5143             kind == RESET_KIND_SUSPEND)
5144                 tg3_ape_driver_state_change(tp, kind);
5145 }
5146
5147 /* tp->lock is held. */
5148 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5149 {
5150         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5151                 switch (kind) {
5152                 case RESET_KIND_INIT:
5153                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5154                                       DRV_STATE_START_DONE);
5155                         break;
5156
5157                 case RESET_KIND_SHUTDOWN:
5158                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5159                                       DRV_STATE_UNLOAD_DONE);
5160                         break;
5161
5162                 default:
5163                         break;
5164                 };
5165         }
5166
5167         if (kind == RESET_KIND_SHUTDOWN)
5168                 tg3_ape_driver_state_change(tp, kind);
5169 }
5170
5171 /* tp->lock is held. */
5172 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5173 {
5174         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5175                 switch (kind) {
5176                 case RESET_KIND_INIT:
5177                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5178                                       DRV_STATE_START);
5179                         break;
5180
5181                 case RESET_KIND_SHUTDOWN:
5182                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5183                                       DRV_STATE_UNLOAD);
5184                         break;
5185
5186                 case RESET_KIND_SUSPEND:
5187                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5188                                       DRV_STATE_SUSPEND);
5189                         break;
5190
5191                 default:
5192                         break;
5193                 };
5194         }
5195 }
5196
/* Wait for the chip's boot firmware to finish initializing after a
 * reset.  Returns 0 on success or when no firmware is fitted,
 * -ENODEV if the 5906 VCPU never signals init-done.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	/* The 5906 runs its firmware on an on-chip VCPU and reports
	 * readiness through a status register instead of the mailbox.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete: it writes the
	 * one's complement of the magic value into the mailbox when
	 * done (up to ~1s: 100000 * 10us).
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
5235
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* The GRC core-clock reset clears the PCI memory-enable bit;
	 * the saved word is written back in tg3_restore_pci_state().
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5241
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Write back the command word saved by tg3_save_pci_state();
	 * the core-clock reset clears the memory-enable bit.
	 */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* PCIe parts only need the read-request size restored; legacy
	 * PCI parts also lose cacheline size and latency timer.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
		pcie_set_readrq(tp->pdev, 4096);
	else {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5303
5304 static void tg3_stop_fw(struct tg3 *);
5305
5306 /* tp->lock is held. */
5307 static int tg3_chip_reset(struct tg3 *tp)
5308 {
5309         u32 val;
5310         void (*write_op)(struct tg3 *, u32, u32);
5311         int err;
5312
5313         tg3_nvram_lock(tp);
5314
5315         /* No matching tg3_nvram_unlock() after this because
5316          * chip reset below will undo the nvram lock.
5317          */
5318         tp->nvram_lock_cnt = 0;
5319
5320         /* GRC_MISC_CFG core clock reset will clear the memory
5321          * enable bit in PCI register 4 and the MSI enable bit
5322          * on some chips, so we save relevant registers here.
5323          */
5324         tg3_save_pci_state(tp);
5325
5326         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5327             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5328             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5329             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5330             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
5331                 tw32(GRC_FASTBOOT_PC, 0);
5332
5333         /*
5334          * We must avoid the readl() that normally takes place.
5335          * It locks machines, causes machine checks, and other
5336          * fun things.  So, temporarily disable the 5701
5337          * hardware workaround, while we do the reset.
5338          */
5339         write_op = tp->write32;
5340         if (write_op == tg3_write_flush_reg32)
5341                 tp->write32 = tg3_write32;
5342
5343         /* Prevent the irq handler from reading or writing PCI registers
5344          * during chip reset when the memory enable bit in the PCI command
5345          * register may be cleared.  The chip does not generate interrupt
5346          * at this time, but the irq handler may still be called due to irq
5347          * sharing or irqpoll.
5348          */
5349         tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5350         if (tp->hw_status) {
5351                 tp->hw_status->status = 0;
5352                 tp->hw_status->status_tag = 0;
5353         }
5354         tp->last_tag = 0;
5355         smp_mb();
5356         synchronize_irq(tp->pdev->irq);
5357
5358         /* do the reset */
5359         val = GRC_MISC_CFG_CORECLK_RESET;
5360
5361         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5362                 if (tr32(0x7e2c) == 0x60) {
5363                         tw32(0x7e2c, 0x20);
5364                 }
5365                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5366                         tw32(GRC_MISC_CFG, (1 << 29));
5367                         val |= (1 << 29);
5368                 }
5369         }
5370
5371         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5372                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5373                 tw32(GRC_VCPU_EXT_CTRL,
5374                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5375         }
5376
5377         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5378                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5379         tw32(GRC_MISC_CFG, val);
5380
5381         /* restore 5701 hardware bug workaround write method */
5382         tp->write32 = write_op;
5383
5384         /* Unfortunately, we have to delay before the PCI read back.
5385          * Some 575X chips even will not respond to a PCI cfg access
5386          * when the reset command is given to the chip.
5387          *
5388          * How do these hardware designers expect things to work
5389          * properly if the PCI write is posted for a long period
5390          * of time?  It is always necessary to have some method by
5391          * which a register read back can occur to push the write
5392          * out which does the reset.
5393          *
5394          * For most tg3 variants the trick below was working.
5395          * Ho hum...
5396          */
5397         udelay(120);
5398
5399         /* Flush PCI posted writes.  The normal MMIO registers
5400          * are inaccessible at this time so this is the only
5401          * way to make this reliably (actually, this is no longer
5402          * the case, see above).  I tried to use indirect
5403          * register read/write but this upset some 5701 variants.
5404          */
5405         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5406
5407         udelay(120);
5408
5409         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5410                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5411                         int i;
5412                         u32 cfg_val;
5413
5414                         /* Wait for link training to complete.  */
5415                         for (i = 0; i < 5000; i++)
5416                                 udelay(100);
5417
5418                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5419                         pci_write_config_dword(tp->pdev, 0xc4,
5420                                                cfg_val | (1 << 15));
5421                 }
5422                 /* Set PCIE max payload size and clear error status.  */
5423                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5424         }
5425
5426         tg3_restore_pci_state(tp);
5427
5428         tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5429
5430         val = 0;
5431         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5432                 val = tr32(MEMARB_MODE);
5433         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5434
5435         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5436                 tg3_stop_fw(tp);
5437                 tw32(0x5000, 0x400);
5438         }
5439
5440         tw32(GRC_MODE, tp->grc_mode);
5441
5442         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5443                 val = tr32(0xc4);
5444
5445                 tw32(0xc4, val | (1 << 15));
5446         }
5447
5448         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5449             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5450                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5451                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5452                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5453                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5454         }
5455
5456         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5457                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5458                 tw32_f(MAC_MODE, tp->mac_mode);
5459         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5460                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5461                 tw32_f(MAC_MODE, tp->mac_mode);
5462         } else
5463                 tw32_f(MAC_MODE, 0);
5464         udelay(40);
5465
5466         err = tg3_poll_fw(tp);
5467         if (err)
5468                 return err;
5469
5470         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5471             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5472                 val = tr32(0x7c00);
5473
5474                 tw32(0x7c00, val | (1 << 25));
5475         }
5476
5477         /* Reprobe ASF enable state.  */
5478         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5479         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5480         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5481         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5482                 u32 nic_cfg;
5483
5484                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5485                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5486                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5487                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5488                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5489                 }
5490         }
5491
5492         return 0;
5493 }
5494
/* tp->lock is held. */
/* Ask the ASF management firmware to pause: write FWCMD_NICDRV_PAUSE_FW
 * into the firmware command mailbox, raise RX CPU event bit 14
 * (presumably the "driver event" strobe -- the firmware appears to clear
 * it once serviced), then poll up to ~100us for that acknowledgement.
 * Does nothing when ASF is disabled or when the APE is enabled
 * (TG3_FLG3_ENABLE_APE).
 */
static void tg3_stop_fw(struct tg3 *tp)
{
        if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
           !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
                u32 val;
                int i;

                tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
                val = tr32(GRC_RX_CPU_EVENT);
                val |= (1 << 14);
                tw32(GRC_RX_CPU_EVENT, val);

                /* Wait for RX cpu to ACK the event (bit 14 cleared). */
                for (i = 0; i < 100; i++) {
                        if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
                                break;
                        udelay(1);
                }
        }
}
5516
/* Fully stop the chip: pause the ASF firmware, write the pre-reset
 * signature for 'kind', abort in-progress hardware activity (quietly if
 * 'silent'), reset the chip, then write the post-reset signatures.
 *
 * tp->lock is held.  Returns 0 on success or the error code from
 * tg3_chip_reset().
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
        int reset_err;

        tg3_stop_fw(tp);

        tg3_write_sig_pre_reset(tp, kind);

        tg3_abort_hw(tp, silent);
        reset_err = tg3_chip_reset(tp);

        /* The post-reset signatures are written even when the reset
         * itself reported an error.
         */
        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);

        return reset_err;
}
5537
/* Layout of the 5701_A0 workaround firmware image (tg3FwText and
 * tg3FwRodata below): segment load addresses and byte lengths.
 */
#define TG3_FW_RELEASE_MAJOR    0x0
#define TG3_FW_RELASE_MINOR     0x0     /* NOTE(review): "RELASE" typo predates this review; keep the name in case out-of-view code references it */
#define TG3_FW_RELEASE_FIX      0x0
#define TG3_FW_START_ADDR       0x08000000
#define TG3_FW_TEXT_ADDR        0x08000000
#define TG3_FW_TEXT_LEN         0x9c0
#define TG3_FW_RODATA_ADDR      0x080009c0
#define TG3_FW_RODATA_LEN       0x60
#define TG3_FW_DATA_ADDR        0x08000a40
#define TG3_FW_DATA_LEN         0x20
#define TG3_FW_SBSS_ADDR        0x08000a60
#define TG3_FW_SBSS_LEN         0xc
#define TG3_FW_BSS_ADDR         0x08000a70
#define TG3_FW_BSS_LEN          0x10
5552
/* Text segment of the 5701_A0 workaround firmware as raw 32-bit words
 * (derived from Broadcom's proprietary source -- see the copyright
 * notice at the top of this file).  Loaded into the RX CPU by
 * tg3_load_5701_a0_firmware_fix().  Do not edit by hand.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
        0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
        0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
        0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
        0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
        0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
        0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
        0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
        0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
        0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
        0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
        0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
        0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
        0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
        0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
        0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
        0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
        0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
        0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
        0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
        0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
        0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
        0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
        0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
        0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
        0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
        0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
        0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
        0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
        0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
        0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
        0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
        0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
        0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
        0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
        0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
        0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
        0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
        0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
        0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
        0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
        0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
        0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
        0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
        0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
        0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
        0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
        0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
        0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
        0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
        0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
        0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
        0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
        0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
        0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
        0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
        0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
        0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
        0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
        0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
        0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
        0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
        0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
        0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
        0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
        0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
        0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
        0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
        0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
        0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
        0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
        0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
        0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
        0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5646
/* Read-only data segment of the 5701_A0 fix firmware.  The words are
 * packed ASCII message strings (e.g. 0x35373031 is the bytes "5701").
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
        0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
        0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
        0x00000000
};
5654
/* The firmware's data segment is all zeros; the loader synthesizes it
 * (NULL data pointer in struct fw_info) instead of storing it here.
 */
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif

/* Scratch memory windows used by tg3_load_firmware_cpu() as the copy
 * destination for each CPU's firmware image (base offset and size).
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
5666
/* tp->lock is held. */
/* Halt the on-chip CPU whose register block starts at 'offset'
 * (RX_CPU_BASE or TX_CPU_BASE).  Returns 0 on success, -ENODEV if the
 * CPU never reports CPU_MODE_HALT after 10000 attempts.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
        int i;

        /* 5705-and-later parts have no TX CPU (see the guard in
         * tg3_load_firmware_cpu()), so halting it is a driver bug.
         */
        BUG_ON(offset == TX_CPU_BASE &&
            (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val = tr32(GRC_VCPU_EXT_CTRL);

                /* 5906 halts its CPU through GRC_VCPU_EXT_CTRL instead of
                 * the per-CPU STATE/MODE registers; no polling is done.
                 */
                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
                return 0;
        }
        if (offset == RX_CPU_BASE) {
                for (i = 0; i < 10000; i++) {
                        tw32(offset + CPU_STATE, 0xffffffff);
                        tw32(offset + CPU_MODE,  CPU_MODE_HALT);
                        if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
                                break;
                }

                /* RX CPU only: issue one final halt request (tw32_f --
                 * presumably a flushed write) and let it settle for 10us.
                 */
                tw32(offset + CPU_STATE, 0xffffffff);
                tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
                udelay(10);
        } else {
                for (i = 0; i < 10000; i++) {
                        tw32(offset + CPU_STATE, 0xffffffff);
                        tw32(offset + CPU_MODE,  CPU_MODE_HALT);
                        if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
                                break;
                }
        }

        /* i == 10000 means neither loop ever saw CPU_MODE_HALT. */
        if (i >= 10000) {
                printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
                       "and %s CPU\n",
                       tp->dev->name,
                       (offset == RX_CPU_BASE ? "RX" : "TX"));
                return -ENODEV;
        }

        /* Clear firmware's nvram arbitration. */
        if (tp->tg3_flags & TG3_FLAG_NVRAM)
                tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
        return 0;
}
5714
/* Describes a firmware image for tg3_load_firmware_cpu(): three sections
 * (text, rodata, data), each with a base address, a length in bytes, and
 * a pointer to the section words.  A NULL data pointer means the section
 * is all zeros and is synthesized at load time.
 */
struct fw_info {
        unsigned int text_base;         /* load address of the text section */
        unsigned int text_len;          /* text length in bytes */
        const u32 *text_data;           /* text words, or NULL for all-zero */
        unsigned int rodata_base;
        unsigned int rodata_len;
        const u32 *rodata_data;
        unsigned int data_base;
        unsigned int data_len;
        const u32 *data_data;
};
5726
5727 /* tp->lock is held. */
5728 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5729                                  int cpu_scratch_size, struct fw_info *info)
5730 {
5731         int err, lock_err, i;
5732         void (*write_op)(struct tg3 *, u32, u32);
5733
5734         if (cpu_base == TX_CPU_BASE &&
5735             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5736                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5737                        "TX cpu firmware on %s which is 5705.\n",
5738                        tp->dev->name);
5739                 return -EINVAL;
5740         }
5741
5742         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5743                 write_op = tg3_write_mem;
5744         else
5745                 write_op = tg3_write_indirect_reg32;
5746
5747         /* It is possible that bootcode is still loading at this point.
5748          * Get the nvram lock first before halting the cpu.
5749          */
5750         lock_err = tg3_nvram_lock(tp);
5751         err = tg3_halt_cpu(tp, cpu_base);
5752         if (!lock_err)
5753                 tg3_nvram_unlock(tp);
5754         if (err)
5755                 goto out;
5756
5757         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5758                 write_op(tp, cpu_scratch_base + i, 0);
5759         tw32(cpu_base + CPU_STATE, 0xffffffff);
5760         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5761         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5762                 write_op(tp, (cpu_scratch_base +
5763                               (info->text_base & 0xffff) +
5764                               (i * sizeof(u32))),
5765                          (info->text_data ?
5766                           info->text_data[i] : 0));
5767         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5768                 write_op(tp, (cpu_scratch_base +
5769                               (info->rodata_base & 0xffff) +
5770                               (i * sizeof(u32))),
5771                          (info->rodata_data ?
5772                           info->rodata_data[i] : 0));
5773         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5774                 write_op(tp, (cpu_scratch_base +
5775                               (info->data_base & 0xffff) +
5776                               (i * sizeof(u32))),
5777                          (info->data_data ?
5778                           info->data_data[i] : 0));
5779
5780         err = 0;
5781
5782 out:
5783         return err;
5784 }
5785
5786 /* tp->lock is held. */
5787 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5788 {
5789         struct fw_info info;
5790         int err, i;
5791
5792         info.text_base = TG3_FW_TEXT_ADDR;
5793         info.text_len = TG3_FW_TEXT_LEN;
5794         info.text_data = &tg3FwText[0];
5795         info.rodata_base = TG3_FW_RODATA_ADDR;
5796         info.rodata_len = TG3_FW_RODATA_LEN;
5797         info.rodata_data = &tg3FwRodata[0];
5798         info.data_base = TG3_FW_DATA_ADDR;
5799         info.data_len = TG3_FW_DATA_LEN;
5800         info.data_data = NULL;
5801
5802         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5803                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5804                                     &info);
5805         if (err)
5806                 return err;
5807
5808         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5809                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5810                                     &info);
5811         if (err)
5812                 return err;
5813
5814         /* Now startup only the RX cpu. */
5815         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5816         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5817
5818         for (i = 0; i < 5; i++) {
5819                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5820                         break;
5821                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5822                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5823                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5824                 udelay(1000);
5825         }
5826         if (i >= 5) {
5827                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5828                        "to set RX CPU PC, is %08x should be %08x\n",
5829                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5830                        TG3_FW_TEXT_ADDR);
5831                 return -ENODEV;
5832         }
5833         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5834         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5835
5836         return 0;
5837 }
5838
5839
/* Layout of the TSO firmware image (text words in tg3TsoFwText below):
 * segment load addresses and byte lengths.
 */
#define TG3_TSO_FW_RELEASE_MAJOR        0x1
#define TG3_TSO_FW_RELASE_MINOR         0x6     /* NOTE(review): same historical "RELASE" typo as TG3_FW_RELASE_MINOR above */
#define TG3_TSO_FW_RELEASE_FIX          0x0
#define TG3_TSO_FW_START_ADDR           0x08000000
#define TG3_TSO_FW_TEXT_ADDR            0x08000000
#define TG3_TSO_FW_TEXT_LEN             0x1aa0
#define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
#define TG3_TSO_FW_RODATA_LEN           0x60
#define TG3_TSO_FW_DATA_ADDR            0x08001b20
#define TG3_TSO_FW_DATA_LEN             0x30
#define TG3_TSO_FW_SBSS_ADDR            0x08001b50
#define TG3_TSO_FW_SBSS_LEN             0x2c
#define TG3_TSO_FW_BSS_ADDR             0x08001b80
#define TG3_TSO_FW_BSS_LEN              0x894
5854
5855 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5856         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5857         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5858         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5859         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5860         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5861         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5862         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5863         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5864         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5865         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5866         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5867         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5868         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5869         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5870         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5871         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5872         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5873         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5874         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5875         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5876         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5877         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5878         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5879         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5880         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5881         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5882         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5883         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5884         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5885         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5886         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5887         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5888         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5889         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5890         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5891         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5892         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5893         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5894         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5895         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5896         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5897         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5898         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5899         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5900         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5901         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5902         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5903         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5904         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5905         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5906         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5907         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5908         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5909         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5910         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5911         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5912         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5913         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5914         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5915         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5916         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5917         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5918         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5919         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5920         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5921         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5922         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5923         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5924         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5925         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5926         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5927         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5928         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5929         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5930         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5931         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5932         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5933         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5934         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5935         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5936         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5937         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5938         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5939         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5940         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5941         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5942         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5943         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5944         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5945         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5946         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5947         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5948         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5949         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5950         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5951         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5952         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5953         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5954         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5955         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5956         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5957         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5958         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5959         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5960         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5961         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5962         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5963         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5964         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5965         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5966         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5967         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5968         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5969         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5970         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5971         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5972         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5973         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5974         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5975         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5976         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5977         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5978         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5979         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5980         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5981         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5982         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5983         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5984         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5985         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5986         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5987         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5988         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5989         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5990         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5991         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5992         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5993         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5994         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5995         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5996         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5997         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5998         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5999         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6000         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6001         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6002         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6003         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6004         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6005         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6006         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6007         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6008         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6009         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6010         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6011         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6012         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6013         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6014         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6015         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6016         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6017         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6018         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6019         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6020         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6021         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6022         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6023         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6024         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6025         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6026         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6027         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6028         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6029         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6030         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6031         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6032         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6033         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6034         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6035         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6036         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6037         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6038         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6039         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6040         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6041         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6042         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6043         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6044         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6045         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6046         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6047         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6048         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6049         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6050         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6051         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6052         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6053         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6054         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6055         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6056         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6057         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6058         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6059         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6060         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6061         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6062         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6063         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6064         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6065         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6066         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6067         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6068         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6069         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6070         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6071         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6072         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6073         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6074         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6075         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6076         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6077         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6078         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6079         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6080         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6081         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6082         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6083         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6084         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6085         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6086         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6087         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6088         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6089         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6090         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6091         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6092         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6093         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6094         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6095         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6096         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6097         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6098         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6099         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6100         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6101         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6102         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6103         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6104         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6105         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6106         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6107         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6108         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6109         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6110         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6111         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6112         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6113         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6114         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6115         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6116         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6117         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6118         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6119         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6120         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6121         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6122         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6123         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6124         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6125         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6126         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6127         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6128         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6129         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6130         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6131         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6132         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6133         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6134         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6135         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6136         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6137         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6138         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6139         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6140 };
6141
/* Read-only data section of the generic TSO firmware image.  The words
 * are NUL-padded ASCII tags used by the firmware itself ("MainCpuB",
 * "MainCpuA", "stkoffldIn", "SwEvent0", "fatalErr", ...).  Loaded
 * verbatim at TG3_TSO_FW_RODATA_ADDR by tg3_load_tso_firmware(); do not
 * hand-edit. */
static const u32 tg3TsoFwRodata[] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
        0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
        0x00000000,
};
6149
/* Initialized data section of the generic TSO firmware image.  Contains
 * the ASCII version tag "stkoffld_v1.6.0" followed by zero padding.
 * Loaded verbatim at TG3_TSO_FW_DATA_ADDR by tg3_load_tso_firmware();
 * do not hand-edit. */
static const u32 tg3TsoFwData[] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000,
};
6155
/* 5705 needs a special version of the TSO firmware.  */
/*
 * Layout constants for the 5705-specific TSO firmware image: load
 * addresses and byte lengths of each section.  The text/rodata/data
 * sections are downloaded by tg3_load_tso_firmware(); the sbss/bss
 * lengths are only added into the required CPU scratch size there.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
/* NOTE(review): "RELASE" is a long-standing typo in this macro name;
 * kept as-is since renaming could break references elsewhere in the
 * file. */
#define TG3_TSO5_FW_RELASE_MINOR        0x2
#define TG3_TSO5_FW_RELEASE_FIX         0x0
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
6171
/* Text (instruction) section of the 5705-specific TSO firmware: a raw
 * MIPS instruction stream (note e.g. 0x03e00008 = "jr ra",
 * 0x27bdffe0 = stack-frame adjust) produced by the Broadcom firmware
 * build — see the copyright notice at the top of this file.  Loaded
 * verbatim at TG3_TSO5_FW_TEXT_ADDR by tg3_load_tso_firmware().
 * Opaque data: do not hand-edit any word. */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
        0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
        0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
        0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
        0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
        0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
        0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
        0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
        0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
        0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
        0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
        0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
        0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
        0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
        0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
        0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
        0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
        0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
        0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
        0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
        0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
        0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
        0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
        0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
        0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
        0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
        0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
        0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
        0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
        0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
        0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
        0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
        0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
        0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
        0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
        0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
        0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
        0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
        0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
        0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
        0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
        0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
        0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
        0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
        0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
        0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
        0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
        0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
        0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
        0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
        0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
        0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
        0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
        0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
        0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
        0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
        0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
        0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
        0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
        0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
        0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
        0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
        0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
        0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
        0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
        0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
        0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
        0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
        0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
        0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
        0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
        0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
        0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
        0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
        0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
        0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
        0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
        0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
        0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
        0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
        0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
        0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
        0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
        0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
        0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
        0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
        0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
        0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
        0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
        0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
        0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
        0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
        0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
        0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
        0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
        0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
        0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
        0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
        0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
        0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
        0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
        0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
        0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
        0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
        0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
        0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
        0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
        0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
        0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
        0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
        0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
        0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
        0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
        0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
        0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
        0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
        0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
        0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
        0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
        0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
        0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
        0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
        0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
        0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
        0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
        0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
        0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
        0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
        0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
        0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
        0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
        0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
        0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
        0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
        0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
        0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
        0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
        0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
        0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
        0x00000000, 0x00000000, 0x00000000,
};
6330
/* Read-only data section of the 5705-specific TSO firmware: NUL-padded
 * ASCII tags ("MainCpuB", "MainCpuA", "stkoffld", "fatalErr") used by
 * the firmware itself.  Loaded verbatim at TG3_TSO5_FW_RODATA_ADDR by
 * tg3_load_tso_firmware(); do not hand-edit. */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
        0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x00000000,
};
6337
/* Initialized data section of the 5705-specific TSO firmware.  Contains
 * the ASCII version tag "stkoffld_v1.2.0" followed by zero padding.
 * Loaded verbatim at TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware();
 * do not hand-edit. */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000,
};
6342
/* tp->lock is held. */
/*
 * tg3_load_tso_firmware - download and start the TSO firmware.
 * @tp: device private state (caller must hold tp->lock)
 *
 * Chips with TSO implemented in hardware need no firmware; return 0
 * immediately.  On a 5705, the 5705-specific image runs on the RX CPU
 * and the MBUF-pool SRAM serves as scratch space; all other chips get
 * the generic image loaded into the TX CPU scratch area.
 *
 * Returns 0 on success, a negative error from
 * tg3_load_firmware_cpu(), or -ENODEV if the CPU's program counter
 * cannot be made to stick at the firmware entry point.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
        struct fw_info info;
        unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
        int err, i;

        /* Hardware-TSO chips need no firmware download. */
        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                return 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                /* 5705: special image on the RX CPU; the whole image
                 * (text + rodata + data + sbss + bss) must fit in the
                 * MBUF pool used as scratch space.
                 */
                info.text_base = TG3_TSO5_FW_TEXT_ADDR;
                info.text_len = TG3_TSO5_FW_TEXT_LEN;
                info.text_data = &tg3Tso5FwText[0];
                info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
                info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
                info.rodata_data = &tg3Tso5FwRodata[0];
                info.data_base = TG3_TSO5_FW_DATA_ADDR;
                info.data_len = TG3_TSO5_FW_DATA_LEN;
                info.data_data = &tg3Tso5FwData[0];
                cpu_base = RX_CPU_BASE;
                cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
                cpu_scratch_size = (info.text_len +
                                    info.rodata_len +
                                    info.data_len +
                                    TG3_TSO5_FW_SBSS_LEN +
                                    TG3_TSO5_FW_BSS_LEN);
        } else {
                /* All other chips: generic image on the TX CPU. */
                info.text_base = TG3_TSO_FW_TEXT_ADDR;
                info.text_len = TG3_TSO_FW_TEXT_LEN;
                info.text_data = &tg3TsoFwText[0];
                info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
                info.rodata_len = TG3_TSO_FW_RODATA_LEN;
                info.rodata_data = &tg3TsoFwRodata[0];
                info.data_base = TG3_TSO_FW_DATA_ADDR;
                info.data_len = TG3_TSO_FW_DATA_LEN;
                info.data_data = &tg3TsoFwData[0];
                cpu_base = TX_CPU_BASE;
                cpu_scratch_base = TX_CPU_SCRATCH_BASE;
                cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
        }

        err = tg3_load_firmware_cpu(tp, cpu_base,
                                    cpu_scratch_base, cpu_scratch_size,
                                    &info);
        if (err)
                return err;

        /* Now startup the cpu. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_PC,    info.text_base);

        /* Verify the PC write took by reading it back; retry up to 5
         * times, halting the CPU and rewriting the PC on each attempt.
         */
        for (i = 0; i < 5; i++) {
                if (tr32(cpu_base + CPU_PC) == info.text_base)
                        break;
                tw32(cpu_base + CPU_STATE, 0xffffffff);
                tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(cpu_base + CPU_PC,    info.text_base);
                udelay(1000);
        }
        if (i >= 5) {
                printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
                       "to set CPU PC, is %08x should be %08x\n",
                       tp->dev->name, tr32(cpu_base + CPU_PC),
                       info.text_base);
                return -ENODEV;
        }
        /* Clear CPU_MODE (including the HALT bit set above) so the
         * firmware starts executing. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_MODE,  0x00000000);
        return 0;
}
6414
6415
6416 /* tp->lock is held. */
6417 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6418 {
6419         u32 addr_high, addr_low;
6420         int i;
6421
6422         addr_high = ((tp->dev->dev_addr[0] << 8) |
6423                      tp->dev->dev_addr[1]);
6424         addr_low = ((tp->dev->dev_addr[2] << 24) |
6425                     (tp->dev->dev_addr[3] << 16) |
6426                     (tp->dev->dev_addr[4] <<  8) |
6427                     (tp->dev->dev_addr[5] <<  0));
6428         for (i = 0; i < 4; i++) {
6429                 if (i == 1 && skip_mac_1)
6430                         continue;
6431                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6432                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6433         }
6434
6435         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6436             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6437                 for (i = 0; i < 12; i++) {
6438                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6439                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6440                 }
6441         }
6442
6443         addr_high = (tp->dev->dev_addr[0] +
6444                      tp->dev->dev_addr[1] +
6445                      tp->dev->dev_addr[2] +
6446                      tp->dev->dev_addr[3] +
6447                      tp->dev->dev_addr[4] +
6448                      tp->dev->dev_addr[5]) &
6449                 TX_BACKOFF_SEED_MASK;
6450         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6451 }
6452
6453 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6454 {
6455         struct tg3 *tp = netdev_priv(dev);
6456         struct sockaddr *addr = p;
6457         int err = 0, skip_mac_1 = 0;
6458
6459         if (!is_valid_ether_addr(addr->sa_data))
6460                 return -EINVAL;
6461
6462         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6463
6464         if (!netif_running(dev))
6465                 return 0;
6466
6467         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6468                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6469
6470                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6471                 addr0_low = tr32(MAC_ADDR_0_LOW);
6472                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6473                 addr1_low = tr32(MAC_ADDR_1_LOW);
6474
6475                 /* Skip MAC addr 1 if ASF is using it. */
6476                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6477                     !(addr1_high == 0 && addr1_low == 0))
6478                         skip_mac_1 = 1;
6479         }
6480         spin_lock_bh(&tp->lock);
6481         __tg3_set_mac_addr(tp, skip_mac_1);
6482         spin_unlock_bh(&tp->lock);
6483
6484         return err;
6485 }
6486
6487 /* tp->lock is held. */
6488 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6489                            dma_addr_t mapping, u32 maxlen_flags,
6490                            u32 nic_addr)
6491 {
6492         tg3_write_mem(tp,
6493                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6494                       ((u64) mapping >> 32));
6495         tg3_write_mem(tp,
6496                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6497                       ((u64) mapping & 0xffffffff));
6498         tg3_write_mem(tp,
6499                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6500                        maxlen_flags);
6501
6502         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6503                 tg3_write_mem(tp,
6504                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6505                               nic_addr);
6506 }
6507
6508 static void __tg3_set_rx_mode(struct net_device *);
6509 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6510 {
6511         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6512         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6513         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6514         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6515         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6516                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6517                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6518         }
6519         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6520         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6521         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6522                 u32 val = ec->stats_block_coalesce_usecs;
6523
6524                 if (!netif_carrier_ok(tp->dev))
6525                         val = 0;
6526
6527                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6528         }
6529 }
6530
6531 /* tp->lock is held. */
6532 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6533 {
6534         u32 val, rdmac_mode;
6535         int i, err, limit;
6536
6537         tg3_disable_ints(tp);
6538
6539         tg3_stop_fw(tp);
6540
6541         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6542
6543         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6544                 tg3_abort_hw(tp, 1);
6545         }
6546
6547         if (reset_phy)
6548                 tg3_phy_reset(tp);
6549
6550         err = tg3_chip_reset(tp);
6551         if (err)
6552                 return err;
6553
6554         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6555
6556         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6557             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
6558                 val = tr32(TG3_CPMU_CTRL);
6559                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6560                 tw32(TG3_CPMU_CTRL, val);
6561
6562                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6563                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6564                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6565                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6566
6567                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6568                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6569                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6570                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6571
6572                 val = tr32(TG3_CPMU_HST_ACC);
6573                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6574                 val |= CPMU_HST_ACC_MACCLK_6_25;
6575                 tw32(TG3_CPMU_HST_ACC, val);
6576         }
6577
6578         /* This works around an issue with Athlon chipsets on
6579          * B3 tigon3 silicon.  This bit has no effect on any
6580          * other revision.  But do not set this on PCI Express
6581          * chips and don't even touch the clocks if the CPMU is present.
6582          */
6583         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6584                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6585                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6586                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6587         }
6588
6589         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6590             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6591                 val = tr32(TG3PCI_PCISTATE);
6592                 val |= PCISTATE_RETRY_SAME_DMA;
6593                 tw32(TG3PCI_PCISTATE, val);
6594         }
6595
6596         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6597                 /* Allow reads and writes to the
6598                  * APE register and memory space.
6599                  */
6600                 val = tr32(TG3PCI_PCISTATE);
6601                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6602                        PCISTATE_ALLOW_APE_SHMEM_WR;
6603                 tw32(TG3PCI_PCISTATE, val);
6604         }
6605
6606         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6607                 /* Enable some hw fixes.  */
6608                 val = tr32(TG3PCI_MSI_DATA);
6609                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6610                 tw32(TG3PCI_MSI_DATA, val);
6611         }
6612
6613         /* Descriptor ring init may make accesses to the
6614          * NIC SRAM area to setup the TX descriptors, so we
6615          * can only do this after the hardware has been
6616          * successfully reset.
6617          */
6618         err = tg3_init_rings(tp);
6619         if (err)
6620                 return err;
6621
6622         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6623             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6624                 /* This value is determined during the probe time DMA
6625                  * engine test, tg3_test_dma.
6626                  */
6627                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6628         }
6629
6630         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6631                           GRC_MODE_4X_NIC_SEND_RINGS |
6632                           GRC_MODE_NO_TX_PHDR_CSUM |
6633                           GRC_MODE_NO_RX_PHDR_CSUM);
6634         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6635
6636         /* Pseudo-header checksum is done by hardware logic and not
6637          * the offload processers, so make the chip do the pseudo-
6638          * header checksums on receive.  For transmit it is more
6639          * convenient to do the pseudo-header checksum in software
6640          * as Linux does that on transmit for us in all cases.
6641          */
6642         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6643
6644         tw32(GRC_MODE,
6645              tp->grc_mode |
6646              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6647
6648         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6649         val = tr32(GRC_MISC_CFG);
6650         val &= ~0xff;
6651         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6652         tw32(GRC_MISC_CFG, val);
6653
6654         /* Initialize MBUF/DESC pool. */
6655         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6656                 /* Do nothing.  */
6657         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6658                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6659                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6660                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6661                 else
6662                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6663                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6664                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6665         }
6666         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6667                 int fw_len;
6668
6669                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6670                           TG3_TSO5_FW_RODATA_LEN +
6671                           TG3_TSO5_FW_DATA_LEN +
6672                           TG3_TSO5_FW_SBSS_LEN +
6673                           TG3_TSO5_FW_BSS_LEN);
6674                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6675                 tw32(BUFMGR_MB_POOL_ADDR,
6676                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6677                 tw32(BUFMGR_MB_POOL_SIZE,
6678                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6679         }
6680
6681         if (tp->dev->mtu <= ETH_DATA_LEN) {
6682                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6683                      tp->bufmgr_config.mbuf_read_dma_low_water);
6684                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6685                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6686                 tw32(BUFMGR_MB_HIGH_WATER,
6687                      tp->bufmgr_config.mbuf_high_water);
6688         } else {
6689                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6690                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6691                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6692                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6693                 tw32(BUFMGR_MB_HIGH_WATER,
6694                      tp->bufmgr_config.mbuf_high_water_jumbo);
6695         }
6696         tw32(BUFMGR_DMA_LOW_WATER,
6697              tp->bufmgr_config.dma_low_water);
6698         tw32(BUFMGR_DMA_HIGH_WATER,
6699              tp->bufmgr_config.dma_high_water);
6700
6701         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6702         for (i = 0; i < 2000; i++) {
6703                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6704                         break;
6705                 udelay(10);
6706         }
6707         if (i >= 2000) {
6708                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6709                        tp->dev->name);
6710                 return -ENODEV;
6711         }
6712
6713         /* Setup replenish threshold. */
6714         val = tp->rx_pending / 8;
6715         if (val == 0)
6716                 val = 1;
6717         else if (val > tp->rx_std_max_post)
6718                 val = tp->rx_std_max_post;
6719         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6720                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6721                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6722
6723                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6724                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6725         }
6726
6727         tw32(RCVBDI_STD_THRESH, val);
6728
6729         /* Initialize TG3_BDINFO's at:
6730          *  RCVDBDI_STD_BD:     standard eth size rx ring
6731          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6732          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6733          *
6734          * like so:
6735          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6736          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6737          *                              ring attribute flags
6738          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6739          *
6740          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6741          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6742          *
6743          * The size of each ring is fixed in the firmware, but the location is
6744          * configurable.
6745          */
6746         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6747              ((u64) tp->rx_std_mapping >> 32));
6748         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6749              ((u64) tp->rx_std_mapping & 0xffffffff));
6750         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6751              NIC_SRAM_RX_BUFFER_DESC);
6752
6753         /* Don't even try to program the JUMBO/MINI buffer descriptor
6754          * configs on 5705.
6755          */
6756         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6757                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6758                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6759         } else {
6760                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6761                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6762
6763                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6764                      BDINFO_FLAGS_DISABLED);
6765
6766                 /* Setup replenish threshold. */
6767                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6768
6769                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6770                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6771                              ((u64) tp->rx_jumbo_mapping >> 32));
6772                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6773                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6774                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6775                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6776                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6777                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6778                 } else {
6779                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6780                              BDINFO_FLAGS_DISABLED);
6781                 }
6782
6783         }
6784
6785         /* There is only one send ring on 5705/5750, no need to explicitly
6786          * disable the others.
6787          */
6788         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6789                 /* Clear out send RCB ring in SRAM. */
6790                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6791                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6792                                       BDINFO_FLAGS_DISABLED);
6793         }
6794
6795         tp->tx_prod = 0;
6796         tp->tx_cons = 0;
6797         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6798         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6799
6800         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6801                        tp->tx_desc_mapping,
6802                        (TG3_TX_RING_SIZE <<
6803                         BDINFO_FLAGS_MAXLEN_SHIFT),
6804                        NIC_SRAM_TX_BUFFER_DESC);
6805
6806         /* There is only one receive return ring on 5705/5750, no need
6807          * to explicitly disable the others.
6808          */
6809         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6810                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6811                      i += TG3_BDINFO_SIZE) {
6812                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6813                                       BDINFO_FLAGS_DISABLED);
6814                 }
6815         }
6816
6817         tp->rx_rcb_ptr = 0;
6818         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6819
6820         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6821                        tp->rx_rcb_mapping,
6822                        (TG3_RX_RCB_RING_SIZE(tp) <<
6823                         BDINFO_FLAGS_MAXLEN_SHIFT),
6824                        0);
6825
6826         tp->rx_std_ptr = tp->rx_pending;
6827         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6828                      tp->rx_std_ptr);
6829
6830         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6831                                                 tp->rx_jumbo_pending : 0;
6832         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6833                      tp->rx_jumbo_ptr);
6834
6835         /* Initialize MAC address and backoff seed. */
6836         __tg3_set_mac_addr(tp, 0);
6837
6838         /* MTU + ethernet header + FCS + optional VLAN tag */
6839         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6840
6841         /* The slot time is changed by tg3_setup_phy if we
6842          * run at gigabit with half duplex.
6843          */
6844         tw32(MAC_TX_LENGTHS,
6845              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6846              (6 << TX_LENGTHS_IPG_SHIFT) |
6847              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6848
6849         /* Receive rules. */
6850         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6851         tw32(RCVLPC_CONFIG, 0x0181);
6852
6853         /* Calculate RDMAC_MODE setting early, we need it to determine
6854          * the RCVLPC_STATE_ENABLE mask.
6855          */
6856         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6857                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6858                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6859                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6860                       RDMAC_MODE_LNGREAD_ENAB);
6861
6862         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6863                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6864                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6865                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6866
6867         /* If statement applies to 5705 and 5750 PCI devices only */
6868         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6869              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6870             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6871                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6872                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6873                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6874                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6875                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6876                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6877                 }
6878         }
6879
6880         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6881                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6882
6883         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6884                 rdmac_mode |= (1 << 27);
6885
6886         /* Receive/send statistics. */
6887         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6888                 val = tr32(RCVLPC_STATS_ENABLE);
6889                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6890                 tw32(RCVLPC_STATS_ENABLE, val);
6891         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6892                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6893                 val = tr32(RCVLPC_STATS_ENABLE);
6894                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6895                 tw32(RCVLPC_STATS_ENABLE, val);
6896         } else {
6897                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6898         }
6899         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6900         tw32(SNDDATAI_STATSENAB, 0xffffff);
6901         tw32(SNDDATAI_STATSCTRL,
6902              (SNDDATAI_SCTRL_ENABLE |
6903               SNDDATAI_SCTRL_FASTUPD));
6904
6905         /* Setup host coalescing engine. */
6906         tw32(HOSTCC_MODE, 0);
6907         for (i = 0; i < 2000; i++) {
6908                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6909                         break;
6910                 udelay(10);
6911         }
6912
6913         __tg3_set_coalesce(tp, &tp->coal);
6914
6915         /* set status block DMA address */
6916         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6917              ((u64) tp->status_mapping >> 32));
6918         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6919              ((u64) tp->status_mapping & 0xffffffff));
6920
6921         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6922                 /* Status/statistics block address.  See tg3_timer,
6923                  * the tg3_periodic_fetch_stats call there, and
6924                  * tg3_get_stats to see how this works for 5705/5750 chips.
6925                  */
6926                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6927                      ((u64) tp->stats_mapping >> 32));
6928                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6929                      ((u64) tp->stats_mapping & 0xffffffff));
6930                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6931                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6932         }
6933
6934         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6935
6936         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6937         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6938         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6939                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6940
6941         /* Clear statistics/status block in chip, and status block in ram. */
6942         for (i = NIC_SRAM_STATS_BLK;
6943              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6944              i += sizeof(u32)) {
6945                 tg3_write_mem(tp, i, 0);
6946                 udelay(40);
6947         }
6948         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6949
6950         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6951                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6952                 /* reset to prevent losing 1st rx packet intermittently */
6953                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6954                 udelay(10);
6955         }
6956
6957         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6958                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6959         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6960             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6961             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6962                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6963         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6964         udelay(40);
6965
6966         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6967          * If TG3_FLG2_IS_NIC is zero, we should read the
6968          * register to preserve the GPIO settings for LOMs. The GPIOs,
6969          * whether used as inputs or outputs, are set by boot code after
6970          * reset.
6971          */
6972         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6973                 u32 gpio_mask;
6974
6975                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6976                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6977                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6978
6979                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6980                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6981                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6982
6983                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6984                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6985
6986                 tp->grc_local_ctrl &= ~gpio_mask;
6987                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6988
6989                 /* GPIO1 must be driven high for eeprom write protect */
6990                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6991                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6992                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6993         }
6994         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6995         udelay(100);
6996
6997         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6998         tp->last_tag = 0;
6999
7000         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7001                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7002                 udelay(40);
7003         }
7004
7005         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7006                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7007                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7008                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7009                WDMAC_MODE_LNGREAD_ENAB);
7010
7011         /* If statement applies to 5705 and 5750 PCI devices only */
7012         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7013              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7014             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7015                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7016                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7017                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7018                         /* nothing */
7019                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7020                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7021                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7022                         val |= WDMAC_MODE_RX_ACCEL;
7023                 }
7024         }
7025
7026         /* Enable host coalescing bug fix */
7027         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7028             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7029             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7030             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
7031                 val |= (1 << 29);
7032
7033         tw32_f(WDMAC_MODE, val);
7034         udelay(40);
7035
7036         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7037                 u16 pcix_cmd;
7038
7039                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7040                                      &pcix_cmd);
7041                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7042                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7043                         pcix_cmd |= PCI_X_CMD_READ_2K;
7044                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7045                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7046                         pcix_cmd |= PCI_X_CMD_READ_2K;
7047                 }
7048                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7049                                       pcix_cmd);
7050         }
7051
7052         tw32_f(RDMAC_MODE, rdmac_mode);
7053         udelay(40);
7054
7055         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7056         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7057                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7058
7059         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7060                 tw32(SNDDATAC_MODE,
7061                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7062         else
7063                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7064
7065         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7066         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7067         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7068         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7069         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7070                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7071         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7072         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7073
7074         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7075                 err = tg3_load_5701_a0_firmware_fix(tp);
7076                 if (err)
7077                         return err;
7078         }
7079
7080         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7081                 err = tg3_load_tso_firmware(tp);
7082                 if (err)
7083                         return err;
7084         }
7085
7086         tp->tx_mode = TX_MODE_ENABLE;
7087         tw32_f(MAC_TX_MODE, tp->tx_mode);
7088         udelay(100);
7089
7090         tp->rx_mode = RX_MODE_ENABLE;
7091         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7092             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7093                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7094
7095         tw32_f(MAC_RX_MODE, tp->rx_mode);
7096         udelay(10);
7097
7098         if (tp->link_config.phy_is_low_power) {
7099                 tp->link_config.phy_is_low_power = 0;
7100                 tp->link_config.speed = tp->link_config.orig_speed;
7101                 tp->link_config.duplex = tp->link_config.orig_duplex;
7102                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7103         }
7104
7105         tp->mi_mode = MAC_MI_MODE_BASE;
7106         tw32_f(MAC_MI_MODE, tp->mi_mode);
7107         udelay(80);
7108
7109         tw32(MAC_LED_CTRL, tp->led_ctrl);
7110
7111         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7112         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7113                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7114                 udelay(10);
7115         }
7116         tw32_f(MAC_RX_MODE, tp->rx_mode);
7117         udelay(10);
7118
7119         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7120                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7121                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7122                         /* Set drive transmission level to 1.2V  */
7123                         /* only if the signal pre-emphasis bit is not set  */
7124                         val = tr32(MAC_SERDES_CFG);
7125                         val &= 0xfffff000;
7126                         val |= 0x880;
7127                         tw32(MAC_SERDES_CFG, val);
7128                 }
7129                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7130                         tw32(MAC_SERDES_CFG, 0x616000);
7131         }
7132
7133         /* Prevent chip from dropping frames when flow control
7134          * is enabled.
7135          */
7136         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7137
7138         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7139             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7140                 /* Use hardware link auto-negotiation */
7141                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7142         }
7143
7144         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7145             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7146                 u32 tmp;
7147
7148                 tmp = tr32(SERDES_RX_CTRL);
7149                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7150                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7151                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7152                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7153         }
7154
7155         err = tg3_setup_phy(tp, 0);
7156         if (err)
7157                 return err;
7158
7159         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7160             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7161                 u32 tmp;
7162
7163                 /* Clear CRC stats. */
7164                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7165                         tg3_writephy(tp, MII_TG3_TEST1,
7166                                      tmp | MII_TG3_TEST1_CRC_EN);
7167                         tg3_readphy(tp, 0x14, &tmp);
7168                 }
7169         }
7170
7171         __tg3_set_rx_mode(tp->dev);
7172
7173         /* Initialize receive rules. */
7174         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7175         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7176         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7177         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7178
7179         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7180             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7181                 limit = 8;
7182         else
7183                 limit = 16;
7184         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7185                 limit -= 4;
7186         switch (limit) {
7187         case 16:
7188                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7189         case 15:
7190                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7191         case 14:
7192                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7193         case 13:
7194                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7195         case 12:
7196                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7197         case 11:
7198                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7199         case 10:
7200                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7201         case 9:
7202                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7203         case 8:
7204                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7205         case 7:
7206                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7207         case 6:
7208                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7209         case 5:
7210                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7211         case 4:
7212                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7213         case 3:
7214                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7215         case 2:
7216         case 1:
7217
7218         default:
7219                 break;
7220         };
7221
7222         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7223                 /* Write our heartbeat update interval to APE. */
7224                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7225                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7226
7227         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7228
7229         return 0;
7230 }
7231
7232 /* Called at device open time to get the chip ready for
7233  * packet processing.  Invoked with tp->lock held.
7234  */
7235 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7236 {
7237         int err;
7238
7239         /* Force the chip into D0. */
7240         err = tg3_set_power_state(tp, PCI_D0);
7241         if (err)
7242                 goto out;
7243
7244         tg3_switch_clocks(tp);
7245
7246         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7247
7248         err = tg3_reset_hw(tp, reset_phy);
7249
7250 out:
7251         return err;
7252 }
7253
/* Accumulate the 32-bit hardware statistics register REG into the
 * 64-bit {high,low} software counter PSTAT.  If the 32-bit addition
 * into ->low wraps (detected by the sum being smaller than the value
 * just added), carry one into ->high.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7260
/* Fold the chip's 32-bit MAC and receive-list-placement statistics
 * registers into the 64-bit counters of the host statistics block.
 * Called once per second from tg3_timer() (which holds tp->lock) on
 * 5705-plus chips.  Does nothing while the interface reports no
 * carrier.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement statistics. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7301
/* Periodic driver timer.  Runs every tp->timer_offset jiffies and is
 * responsible for: (a) the non-tagged-status interrupt race workaround,
 * (b) once-per-second link polling and statistics collection, and
 * (c) the ASF firmware heartbeat every two seconds.  __opaque is the
 * struct tg3 pointer stashed in timer.data by tg3_open().
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* An interrupt-sync (tg3_full_lock with irq_sync) is in
	 * progress; skip this tick entirely and just re-arm.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block was updated but the interrupt may
			 * have been lost; force one via GRC local control.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Kick host coalescing to refresh the status block
			 * immediately.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* Write DMA engine dropping its enable bit indicates the
		 * chip has hung; schedule a full reset.  The timer is
		 * re-armed by the reset task (TG3_FLG2_RESTART_TIMER),
		 * not here.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			/* Poll MAC_STATUS for a link change instead of
			 * relying on link-change interrupts.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link up but the state changed, or link down with
			 * PCS sync / signal detect present: renegotiate.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port mode to reset the
					 * SERDES link state machine.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			/* Ring the firmware's doorbell (bit 14) so it
			 * notices the command mailbox.
			 */
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7421
7422 static int tg3_request_irq(struct tg3 *tp)
7423 {
7424         irq_handler_t fn;
7425         unsigned long flags;
7426         struct net_device *dev = tp->dev;
7427
7428         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7429                 fn = tg3_msi;
7430                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7431                         fn = tg3_msi_1shot;
7432                 flags = IRQF_SAMPLE_RANDOM;
7433         } else {
7434                 fn = tg3_interrupt;
7435                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7436                         fn = tg3_interrupt_tagged;
7437                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7438         }
7439         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7440 }
7441
7442 static int tg3_test_interrupt(struct tg3 *tp)
7443 {
7444         struct net_device *dev = tp->dev;
7445         int err, i, intr_ok = 0;
7446
7447         if (!netif_running(dev))
7448                 return -ENODEV;
7449
7450         tg3_disable_ints(tp);
7451
7452         free_irq(tp->pdev->irq, dev);
7453
7454         err = request_irq(tp->pdev->irq, tg3_test_isr,
7455                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7456         if (err)
7457                 return err;
7458
7459         tp->hw_status->status &= ~SD_STATUS_UPDATED;
7460         tg3_enable_ints(tp);
7461
7462         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7463                HOSTCC_MODE_NOW);
7464
7465         for (i = 0; i < 5; i++) {
7466                 u32 int_mbox, misc_host_ctrl;
7467
7468                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7469                                         TG3_64BIT_REG_LOW);
7470                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7471
7472                 if ((int_mbox != 0) ||
7473                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7474                         intr_ok = 1;
7475                         break;
7476                 }
7477
7478                 msleep(10);
7479         }
7480
7481         tg3_disable_ints(tp);
7482
7483         free_irq(tp->pdev->irq, dev);
7484
7485         err = tg3_request_irq(tp);
7486
7487         if (err)
7488                 return err;
7489
7490         if (intr_ok)
7491                 return 0;
7492
7493         return -EIO;
7494 }
7495
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test when not running in MSI mode. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR included). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	/* Tear down the MSI vector and fall back to a legacy IRQ. */
	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* Re-init failed: release the IRQ we just acquired so the
	 * caller sees a fully torn-down state.
	 */
	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
7556
/* net_device open handler: power the chip to D0, allocate the DMA
 * rings, set up the IRQ (MSI when supported, with a self-test and
 * INTx fallback), initialize the hardware, and start the periodic
 * timer.  Returns 0 on success or a negative errno with everything
 * torn back down.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		/* IRQ setup failed: undo MSI enable and ring allocation. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status needs the timer only once per second;
		 * the non-tagged race workaround needs 10 Hz polling.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		/* timer_multiplier ticks per once-per-second work,
		 * asf_multiplier ticks per 2-second ASF heartbeat.
		 */
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		/* Hardware init failed: full teardown of napi, IRQ,
		 * MSI and DMA memory.
		 */
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* Confirm MSI actually delivers interrupts;
		 * tg3_test_msi() falls back to INTx itself on -EIO.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				/* Enable one-shot MSI mode in the PCIE core. */
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7692
7693 #if 0
7694 /*static*/ void tg3_dump_state(struct tg3 *tp)
7695 {
7696         u32 val32, val32_2, val32_3, val32_4, val32_5;
7697         u16 val16;
7698         int i;
7699
7700         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7701         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7702         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7703                val16, val32);
7704
7705         /* MAC block */
7706         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7707                tr32(MAC_MODE), tr32(MAC_STATUS));
7708         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7709                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7710         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7711                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7712         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7713                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7714
7715         /* Send data initiator control block */
7716         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7717                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7718         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7719                tr32(SNDDATAI_STATSCTRL));
7720
7721         /* Send data completion control block */
7722         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7723
7724         /* Send BD ring selector block */
7725         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7726                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7727
7728         /* Send BD initiator control block */
7729         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7730                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7731
7732         /* Send BD completion control block */
7733         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7734
7735         /* Receive list placement control block */
7736         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7737                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7738         printk("       RCVLPC_STATSCTRL[%08x]\n",
7739                tr32(RCVLPC_STATSCTRL));
7740
7741         /* Receive data and receive BD initiator control block */
7742         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7743                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7744
7745         /* Receive data completion control block */
7746         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7747                tr32(RCVDCC_MODE));
7748
7749         /* Receive BD initiator control block */
7750         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7751                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7752
7753         /* Receive BD completion control block */
7754         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7755                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7756
7757         /* Receive list selector control block */
7758         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7759                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7760
7761         /* Mbuf cluster free block */
7762         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7763                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7764
7765         /* Host coalescing control block */
7766         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7767                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7768         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7769                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7770                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7771         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7772                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7773                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7774         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7775                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7776         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7777                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7778
7779         /* Memory arbiter control block */
7780         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7781                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7782
7783         /* Buffer manager control block */
7784         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7785                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7786         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7787                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7788         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7789                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7790                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7791                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7792
7793         /* Read DMA control block */
7794         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7795                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7796
7797         /* Write DMA control block */
7798         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7799                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7800
7801         /* DMA completion block */
7802         printk("DEBUG: DMAC_MODE[%08x]\n",
7803                tr32(DMAC_MODE));
7804
7805         /* GRC block */
7806         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7807                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7808         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7809                tr32(GRC_LOCAL_CTRL));
7810
7811         /* TG3_BDINFOs */
7812         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7813                tr32(RCVDBDI_JUMBO_BD + 0x0),
7814                tr32(RCVDBDI_JUMBO_BD + 0x4),
7815                tr32(RCVDBDI_JUMBO_BD + 0x8),
7816                tr32(RCVDBDI_JUMBO_BD + 0xc));
7817         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7818                tr32(RCVDBDI_STD_BD + 0x0),
7819                tr32(RCVDBDI_STD_BD + 0x4),
7820                tr32(RCVDBDI_STD_BD + 0x8),
7821                tr32(RCVDBDI_STD_BD + 0xc));
7822         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7823                tr32(RCVDBDI_MINI_BD + 0x0),
7824                tr32(RCVDBDI_MINI_BD + 0x4),
7825                tr32(RCVDBDI_MINI_BD + 0x8),
7826                tr32(RCVDBDI_MINI_BD + 0xc));
7827
7828         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7829         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7830         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7831         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7832         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7833                val32, val32_2, val32_3, val32_4);
7834
7835         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7836         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7837         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7838         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7839         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7840                val32, val32_2, val32_3, val32_4);
7841
7842         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7843         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7844         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7845         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7846         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7847         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7848                val32, val32_2, val32_3, val32_4, val32_5);
7849
7850         /* SW status block */
7851         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7852                tp->hw_status->status,
7853                tp->hw_status->status_tag,
7854                tp->hw_status->rx_jumbo_consumer,
7855                tp->hw_status->rx_consumer,
7856                tp->hw_status->rx_mini_consumer,
7857                tp->hw_status->idx[0].rx_producer,
7858                tp->hw_status->idx[0].tx_consumer);
7859
7860         /* SW statistics block */
7861         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7862                ((u32 *)tp->hw_stats)[0],
7863                ((u32 *)tp->hw_stats)[1],
7864                ((u32 *)tp->hw_stats)[2],
7865                ((u32 *)tp->hw_stats)[3]);
7866
7867         /* Mailboxes */
7868         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7869                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7870                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7871                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7872                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7873
7874         /* NIC side send descriptors. */
7875         for (i = 0; i < 6; i++) {
7876                 unsigned long txd;
7877
7878                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7879                         + (i * sizeof(struct tg3_tx_buffer_desc));
7880                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7881                        i,
7882                        readl(txd + 0x0), readl(txd + 0x4),
7883                        readl(txd + 0x8), readl(txd + 0xc));
7884         }
7885
7886         /* NIC side RX descriptors. */
7887         for (i = 0; i < 6; i++) {
7888                 unsigned long rxd;
7889
7890                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7891                         + (i * sizeof(struct tg3_rx_buffer_desc));
7892                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7893                        i,
7894                        readl(rxd + 0x0), readl(rxd + 0x4),
7895                        readl(rxd + 0x8), readl(rxd + 0xc));
7896                 rxd += (4 * sizeof(u32));
7897                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7898                        i,
7899                        readl(rxd + 0x0), readl(rxd + 0x4),
7900                        readl(rxd + 0x8), readl(rxd + 0xc));
7901         }
7902
7903         for (i = 0; i < 6; i++) {
7904                 unsigned long rxd;
7905
7906                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7907                         + (i * sizeof(struct tg3_rx_buffer_desc));
7908                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7909                        i,
7910                        readl(rxd + 0x0), readl(rxd + 0x4),
7911                        readl(rxd + 0x8), readl(rxd + 0xc));
7912                 rxd += (4 * sizeof(u32));
7913                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7914                        i,
7915                        readl(rxd + 0x0), readl(rxd + 0x4),
7916                        readl(rxd + 0x8), readl(rxd + 0xc));
7917         }
7918 }
7919 #endif
7920
7921 static struct net_device_stats *tg3_get_stats(struct net_device *);
7922 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7923
/* net_device ->stop() handler: quiesce NAPI and the reset task, stop
 * the TX queue and timer, halt the chip, release rings/IRQ/MSI,
 * snapshot the counters, free DMA memory and power the device down.
 * Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	/* Make sure no queued reset_task is still running; it would
	 * re-initialize the hardware underneath the teardown below. */
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Preserve accumulated statistics across close/open cycles:
	 * tg3_get_stats()/tg3_get_estats() fold the live hardware
	 * counters into the *_prev snapshots read back after re-open. */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7967
7968 static inline unsigned long get_stat64(tg3_stat64_t *val)
7969 {
7970         unsigned long ret;
7971
7972 #if (BITS_PER_LONG == 32)
7973         ret = val->low;
7974 #else
7975         ret = ((u64)val->high << 32) | ((u64)val->low);
7976 #endif
7977         return ret;
7978 }
7979
/* Return the accumulated RX CRC error count.
 *
 * On non-SERDES 5700/5701 chips the count is maintained in software
 * (tp->phy_crc_errors) from a PHY counter: the TEST1 CRC-counter
 * enable bit is re-armed and the counter register read under the
 * driver lock.  All other chips use the hardware statistics block's
 * rx_fcs_errors counter directly.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			/* NOTE(review): 0x14 is presumably the PHY's CRC
			 * error counter register (read-to-clear) — confirm
			 * against the Broadcom PHY datasheet. */
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
8005
/* Accumulate one ethtool statistic: pre-close snapshot plus the live
 * hardware counter.  Expects locals 'estats', 'old_estats' and
 * 'hw_stats' to be in scope (see tg3_get_estats()). */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
8009
/* Fill tp->estats with the sum of the pre-close snapshot
 * (tp->estats_prev) and the live hardware statistics block, one
 * member at a time via ESTAT_ADD().  If the statistics block is not
 * mapped (device never opened), return the snapshot unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
8097
/* net_device ->get_stats() handler: derive struct net_device_stats
 * from the hardware statistics block, added to the counters that were
 * snapshotted before the last close (tp->net_stats_prev).  If the
 * statistics block is not mapped, return the snapshot unchanged.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Packet totals are the sum of the per-cast-type counters. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors come from the PHY on some chips, see
	 * calc_crc_errors(). */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
8157
/* Bit-serial CRC-32 (reflected, polynomial 0xedb88320, initial value
 * 0xffffffff, final complement) — the standard Ethernet CRC.  Used to
 * hash multicast addresses into the MAC hash filter registers.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];

		for (bit = 0; bit < 8; bit++) {
			if (crc & 1)
				crc = (crc >> 1) ^ 0xedb88320;
			else
				crc >>= 1;
		}
	}

	return ~crc;
}
8182
8183 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8184 {
8185         /* accept or reject all multicast frames */
8186         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8187         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8188         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8189         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8190 }
8191
/* Program MAC_RX_MODE and the multicast hash filter from dev->flags
 * and the device's multicast list.  Callers hold the device lock
 * (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s): hash each address
		 * into one bit of the 128-bit filter.  The low 7 bits
		 * of the inverted CRC select the bit; bits 6:5 pick
		 * the register, bits 4:0 the bit within it. */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the hardware RX mode register when it actually
	 * changes; the write is posted with a flush and settle delay. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8255
/* net_device ->set_multicast_list() entry point: apply the RX filter
 * under the full device lock.  A no-op while the device is down. */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8267
8268 #define TG3_REGDUMP_LEN         (32 * 1024)
8269
/* ethtool ->get_regs_len(): the register dump image has a fixed size. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8274
/* ethtool ->get_regs(): dump selected register ranges into a
 * TG3_REGDUMP_LEN-byte buffer.  Each register's value is placed at
 * its own register offset within the buffer (hence 'orig_p' + base
 * re-seeding below); unread gaps stay zero from the memset.  Nothing
 * is read while the PHY is in low-power state.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register into the buffer at the current cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Read 'len' bytes of registers starting at 'base', placing them at
 * offset 'base' in the output buffer. */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Read a single register, placed at its own offset. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers only exist on devices with NVRAM attached. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8347
/* ethtool ->get_eeprom_len(): size of the attached NVRAM, probed at
 * device init. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
8354
8355 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8356 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8357 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8358
/* ethtool ->get_eeprom(): copy an arbitrary (offset, len) byte range
 * out of NVRAM.  The hardware only reads aligned 32-bit words, so the
 * range is handled in three phases: a leading partial word, the
 * aligned middle, and a trailing partial word.  eeprom->len is
 * updated to the number of bytes actually delivered, even on error.
 * Returns 0 on success, -EAGAIN if the PHY is powered down, or the
 * NVRAM read error code.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			/* Report the bytes successfully read so far. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8418
8419 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8420
/* ethtool ->set_eeprom(): write an arbitrary (offset, len) byte range
 * into NVRAM.  Writes must be whole aligned 32-bit words, so unaligned
 * edges are handled read-modify-write: the first and/or last existing
 * words are fetched and merged with the caller's data in a temporary
 * buffer before the block write.  Returns 0 on success or a negative
 * errno (-EAGAIN while powered down, -EINVAL on bad magic, -ENOMEM,
 * or an NVRAM read/write error).
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge the preserved edge word(s) with the caller's
		 * payload in a scratch buffer. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8478
/* ethtool ->get_settings(): report supported link modes (TP vs fiber,
 * gigabit capability), the configured advertisement mask, and — when
 * the interface is up — the currently negotiated speed and duplex.
 * Always returns 0.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		/* Copper: all 10/100 modes over twisted pair. */
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
8513
8514 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8515 {
8516         struct tg3 *tp = netdev_priv(dev);
8517
8518         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8519                 /* These are the only valid advertisement bits allowed.  */
8520                 if (cmd->autoneg == AUTONEG_ENABLE &&
8521                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8522                                           ADVERTISED_1000baseT_Full |
8523                                           ADVERTISED_Autoneg |
8524                                           ADVERTISED_FIBRE)))
8525                         return -EINVAL;
8526                 /* Fiber can only do SPEED_1000.  */
8527                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8528                          (cmd->speed != SPEED_1000))
8529                         return -EINVAL;
8530         /* Copper cannot force SPEED_1000.  */
8531         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8532                    (cmd->speed == SPEED_1000))
8533                 return -EINVAL;
8534         else if ((cmd->speed == SPEED_1000) &&
8535                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8536                 return -EINVAL;
8537
8538         tg3_full_lock(tp, 0);
8539
8540         tp->link_config.autoneg = cmd->autoneg;
8541         if (cmd->autoneg == AUTONEG_ENABLE) {
8542                 tp->link_config.advertising = (cmd->advertising |
8543                                               ADVERTISED_Autoneg);
8544                 tp->link_config.speed = SPEED_INVALID;
8545                 tp->link_config.duplex = DUPLEX_INVALID;
8546         } else {
8547                 tp->link_config.advertising = 0;
8548                 tp->link_config.speed = cmd->speed;
8549                 tp->link_config.duplex = cmd->duplex;
8550         }
8551
8552         tp->link_config.orig_speed = tp->link_config.speed;
8553         tp->link_config.orig_duplex = tp->link_config.duplex;
8554         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8555
8556         if (netif_running(dev))
8557                 tg3_setup_phy(tp, 1);
8558
8559         tg3_full_unlock(tp);
8560
8561         return 0;
8562 }
8563
8564 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8565 {
8566         struct tg3 *tp = netdev_priv(dev);
8567
8568         strcpy(info->driver, DRV_MODULE_NAME);
8569         strcpy(info->version, DRV_MODULE_VERSION);
8570         strcpy(info->fw_version, tp->fw_ver);
8571         strcpy(info->bus_info, pci_name(tp->pdev));
8572 }
8573
8574 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8575 {
8576         struct tg3 *tp = netdev_priv(dev);
8577
8578         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8579                 wol->supported = WAKE_MAGIC;
8580         else
8581                 wol->supported = 0;
8582         wol->wolopts = 0;
8583         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8584                 wol->wolopts = WAKE_MAGIC;
8585         memset(&wol->sopass, 0, sizeof(wol->sopass));
8586 }
8587
/* ethtool ->set_wol(): enable/disable magic-packet wake-up.  Rejects
 * any other wake mode, and rejects WAKE_MAGIC on devices without WOL
 * capability.  The flag update is done under the driver lock.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
	else
		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
	spin_unlock_bh(&tp->lock);

	return 0;
}
8607
/* ethtool ->get_msglevel(): return the driver's message-enable mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
8613
/* ethtool ->set_msglevel(): set the driver's message-enable mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
8619
/* ethtool ->set_tso(): enable or disable TCP segmentation offload.
 * Chips without TSO capability reject enabling it.  On HW_TSO_2 chips
 * other than the 5906, IPv6 TSO is toggled together with IPv4, and
 * the 5761 additionally supports TSO with ECN.  The generic ethtool
 * helper handles the NETIF_F_TSO flag itself.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
8640
/* ethtool ->nway_reset(): restart autonegotiation.  Only valid while
 * the interface is up (-EAGAIN otherwise) and only on copper PHYs
 * (-EINVAL for SERDES).  Succeeds only if autoneg is currently
 * enabled in BMCR or the link was brought up by parallel detection.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice back-to-back; the first read
	 * appears to be a deliberate dummy access — confirm against the
	 * PHY errata before removing. */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Restart negotiation, force-enabling autoneg in case the
		 * link came from parallel detect. */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
8667
8668 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8669 {
8670         struct tg3 *tp = netdev_priv(dev);
8671
8672         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8673         ering->rx_mini_max_pending = 0;
8674         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8675                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8676         else
8677                 ering->rx_jumbo_max_pending = 0;
8678
8679         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8680
8681         ering->rx_pending = tp->rx_pending;
8682         ering->rx_mini_pending = 0;
8683         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8684                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8685         else
8686                 ering->rx_jumbo_pending = 0;
8687
8688         ering->tx_pending = tp->tx_pending;
8689 }
8690
/* ethtool ->set_ringparam(): validate and apply new ring sizes.  The
 * TX ring must leave room for a maximally-fragmented skb — three
 * times that on TSO_BUG chips, where large TSO packets are segmented
 * by the driver.  If the interface is up, the chip is halted and
 * re-initialized with the new sizes.  Returns 0 or a negative errno.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		/* Quiesce the device and synchronize the IRQ before
		 * taking the full lock. */
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips can only post up to 64 standard RX descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8730
8731 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8732 {
8733         struct tg3 *tp = netdev_priv(dev);
8734
8735         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8736
8737         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8738                 epause->rx_pause = 1;
8739         else
8740                 epause->rx_pause = 0;
8741
8742         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8743                 epause->tx_pause = 1;
8744         else
8745                 epause->tx_pause = 0;
8746 }
8747
8748 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8749 {
8750         struct tg3 *tp = netdev_priv(dev);
8751         int irq_sync = 0, err = 0;
8752
8753         if (netif_running(dev)) {
8754                 tg3_netif_stop(tp);
8755                 irq_sync = 1;
8756         }
8757
8758         tg3_full_lock(tp, irq_sync);
8759
8760         if (epause->autoneg)
8761                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8762         else
8763                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8764         if (epause->rx_pause)
8765                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
8766         else
8767                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
8768         if (epause->tx_pause)
8769                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
8770         else
8771                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
8772
8773         if (netif_running(dev)) {
8774                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8775                 err = tg3_restart_hw(tp, 1);
8776                 if (!err)
8777                         tg3_netif_start(tp);
8778         }
8779
8780         tg3_full_unlock(tp);
8781
8782         return err;
8783 }
8784
8785 static u32 tg3_get_rx_csum(struct net_device *dev)
8786 {
8787         struct tg3 *tp = netdev_priv(dev);
8788         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8789 }
8790
8791 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8792 {
8793         struct tg3 *tp = netdev_priv(dev);
8794
8795         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8796                 if (data != 0)
8797                         return -EINVAL;
8798                 return 0;
8799         }
8800
8801         spin_lock_bh(&tp->lock);
8802         if (data)
8803                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8804         else
8805                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8806         spin_unlock_bh(&tp->lock);
8807
8808         return 0;
8809 }
8810
8811 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8812 {
8813         struct tg3 *tp = netdev_priv(dev);
8814
8815         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8816                 if (data != 0)
8817                         return -EINVAL;
8818                 return 0;
8819         }
8820
8821         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8822             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8823             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8824             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8825                 ethtool_op_set_tx_ipv6_csum(dev, data);
8826         else
8827                 ethtool_op_set_tx_csum(dev, data);
8828
8829         return 0;
8830 }
8831
8832 static int tg3_get_sset_count (struct net_device *dev, int sset)
8833 {
8834         switch (sset) {
8835         case ETH_SS_TEST:
8836                 return TG3_NUM_TEST;
8837         case ETH_SS_STATS:
8838                 return TG3_NUM_STATS;
8839         default:
8840                 return -EOPNOTSUPP;
8841         }
8842 }
8843
8844 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8845 {
8846         switch (stringset) {
8847         case ETH_SS_STATS:
8848                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8849                 break;
8850         case ETH_SS_TEST:
8851                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8852                 break;
8853         default:
8854                 WARN_ON(1);     /* we need a WARN() */
8855                 break;
8856         }
8857 }
8858
8859 static int tg3_phys_id(struct net_device *dev, u32 data)
8860 {
8861         struct tg3 *tp = netdev_priv(dev);
8862         int i;
8863
8864         if (!netif_running(tp->dev))
8865                 return -EAGAIN;
8866
8867         if (data == 0)
8868                 data = UINT_MAX / 2;
8869
8870         for (i = 0; i < (data * 2); i++) {
8871                 if ((i % 2) == 0)
8872                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8873                                            LED_CTRL_1000MBPS_ON |
8874                                            LED_CTRL_100MBPS_ON |
8875                                            LED_CTRL_10MBPS_ON |
8876                                            LED_CTRL_TRAFFIC_OVERRIDE |
8877                                            LED_CTRL_TRAFFIC_BLINK |
8878                                            LED_CTRL_TRAFFIC_LED);
8879
8880                 else
8881                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8882                                            LED_CTRL_TRAFFIC_OVERRIDE);
8883
8884                 if (msleep_interruptible(500))
8885                         break;
8886         }
8887         tw32(MAC_LED_CTRL, tp->led_ctrl);
8888         return 0;
8889 }
8890
/* ethtool -S handler: copy the driver statistics into the caller's
 * u64 array.  The copy length is sizeof(tp->estats), so tg3_get_estats()
 * presumably returns a buffer at least that large (looks like it
 * refreshes and returns &tp->estats — confirm against its definition).
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8897
/* Image sizes (bytes) validated by tg3_test_nvram(), per NVRAM format. */
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_HW_SIZE 0x20
/* Number of payload bytes (parity bits stripped) in a selfboot HW image. */
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8904
/* NVRAM self-test.  Detects the image format from the magic word at
 * offset 0, reads the format-specific number of bytes, and verifies the
 * image integrity: a simple byte checksum for selfboot firmware images,
 * per-byte parity for selfboot hardware images, or CRC-protected
 * bootstrap and manufacturing blocks for the legacy EEPROM format.
 * Returns 0 on success, 0 for unknown selfboot revisions (nothing to
 * check), or a negative errno on read failure / corruption.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__le32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how many bytes to read based on the image format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				/* Unknown revision: nothing we can verify. */
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the whole image; bail out on the first read error. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = swab32(le32_to_cpu(buf[0]));
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* Valid images sum (mod 256) to zero over the covered bytes. */
		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  The
		 * parity bits are packed into the bytes at offsets 0,
		 * 8, 16 and 17; every other byte is payload.
		 */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* 7 parity bits per header byte. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* 6 parity bits here ... */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				/* ... and 8 more in the next byte. */
				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte passes only when its bit count plus its
		 * stored parity bit is odd (odd parity per byte).
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Legacy EEPROM format from here on. */

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
9038
9039 #define TG3_SERDES_TIMEOUT_SEC  2
9040 #define TG3_COPPER_TIMEOUT_SEC  6
9041
9042 static int tg3_test_link(struct tg3 *tp)
9043 {
9044         int i, max;
9045
9046         if (!netif_running(tp->dev))
9047                 return -ENODEV;
9048
9049         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9050                 max = TG3_SERDES_TIMEOUT_SEC;
9051         else
9052                 max = TG3_COPPER_TIMEOUT_SEC;
9053
9054         for (i = 0; i < max; i++) {
9055                 if (netif_carrier_ok(tp->dev))
9056                         return 0;
9057
9058                 if (msleep_interruptible(1000))
9059                         break;
9060         }
9061
9062         return -EIO;
9063 }
9064
/* Only test the commonly used registers */
/* Register self-test: for each table entry, write all-zeros and then
 * all-ones through the read/write mask and verify that read-only bits
 * keep their value while read/write bits take the written value.  The
 * original register contents are restored afterwards.  Entries are
 * filtered by chip family flags.  Returns 0 on success, -EIO on the
 * first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
		/* Entry applicability flags: which chip families the
		 * register test row applies to.
		 */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Table terminator. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	/* Report which register failed and restore its original value. */
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	tw32(offset, save_val);
	return -EIO;
}
9285
9286 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9287 {
9288         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9289         int i;
9290         u32 j;
9291
9292         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9293                 for (j = 0; j < len; j += 4) {
9294                         u32 val;
9295
9296                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9297                         tg3_read_mem(tp, offset + j, &val);
9298                         if (val != test_pattern[i])
9299                                 return -EIO;
9300                 }
9301         }
9302         return 0;
9303 }
9304
/* Internal-memory self-test: run tg3_do_mem_test() over each region of
 * the memory map appropriate for this chip family.  Each table lists
 * { offset, length } pairs and is terminated by offset 0xffffffff.
 * Returns 0 on success or the first region's error code.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the memory map matching this ASIC generation. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			mem_tbl = mem_tbl_5755;
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			mem_tbl = mem_tbl_5906;
		else
			mem_tbl = mem_tbl_5705;
	} else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
		    mem_tbl[i].len)) != 0)
			break;
	}

	return err;
}
9362
/* Loopback modes accepted by tg3_run_loopback(). */
#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1

/* Loopback self-test: configure the MAC (or PHY) for internal loopback,
 * transmit one 1514-byte frame addressed to ourselves, and verify that
 * it arrives intact on the standard RX ring.  Returns 0 on success,
 * -EINVAL for an unknown mode, -ENOMEM if no skb, -EIO on any TX/RX
 * failure or payload mismatch.  Assumes the caller has quiesced normal
 * traffic and will reset the chip afterwards (tg3_free_rings cleans up
 * the received skb).
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* 5906: tweak a shadow register via the EPHY test
			 * register before entering loopback (bit 0x20 in
			 * shadow reg 0x1b is cleared; exact semantics are
			 * chip-internal).
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			/* 5700 link polarity depends on the PHY variant. */
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build a maximum-size (non-jumbo) test frame: our own MAC
	 * address as destination, a zeroed header remainder, and a
	 * counting-byte payload we can verify on receive.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	/* Force a coalescing pass so the status block indices are fresh. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	/* Post the frame and ring the TX doorbell. */
	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Both the TX completion and the RX arrival must have happened. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the RX descriptor: standard ring, no errors (odd
	 * nibble from the MII is tolerated), length matches.
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the counting-byte payload survived the round trip. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9533
9534 #define TG3_MAC_LOOPBACK_FAILED         1
9535 #define TG3_PHY_LOOPBACK_FAILED         2
9536 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
9537                                          TG3_PHY_LOOPBACK_FAILED)
9538
/* Run the MAC (and, for copper devices, PHY) loopback self-tests.
 *
 * Returns 0 on success, otherwise a bitmask of TG3_MAC_LOOPBACK_FAILED
 * and/or TG3_PHY_LOOPBACK_FAILED.  The interface must be up.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	/* Bring the hardware to a known state before looping packets. */
	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		int i;
		u32 status;

		/* 5784/5761: grab the CPMU hardware mutex before
		 * modifying TG3_CPMU_CTRL, which is shared with firmware.
		 */
		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
					  CPMU_CTRL_LINK_AWARE_MODE));
		else
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_LINK_AWARE_MODE);
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		/* Restore the saved CPMU power-management settings. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* PHY loopback only makes sense on copper (non-SerDes) devices. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
9599
/* ethtool self-test entry point.
 *
 * Fills data[0..5] with per-test results (non-zero = failed) and sets
 * ETH_TEST_FL_FAILED in etest->flags on any failure.  Offline tests
 * halt and later restart the chip, so traffic is disrupted while the
 * full lock is held.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Wake the chip for testing; put it back to sleep on exit below. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	/* data[0]: NVRAM checksum test */
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	/* data[1]: link test */
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		/* Quiesce the device before the destructive tests. */
		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its on-board CPUs; hold the NVRAM
		 * lock across the CPU halts so firmware cannot interfere.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		/* data[2]: register read/write test */
		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		/* data[3]: internal memory test */
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4]: loopback test (bitmask of MAC/PHY failures) */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* data[5]: interrupt test — needs the lock dropped. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Reset once more, then bring the device back up. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9672
9673 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9674 {
9675         struct mii_ioctl_data *data = if_mii(ifr);
9676         struct tg3 *tp = netdev_priv(dev);
9677         int err;
9678
9679         switch(cmd) {
9680         case SIOCGMIIPHY:
9681                 data->phy_id = PHY_ADDR;
9682
9683                 /* fallthru */
9684         case SIOCGMIIREG: {
9685                 u32 mii_regval;
9686
9687                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9688                         break;                  /* We have no PHY */
9689
9690                 if (tp->link_config.phy_is_low_power)
9691                         return -EAGAIN;
9692
9693                 spin_lock_bh(&tp->lock);
9694                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9695                 spin_unlock_bh(&tp->lock);
9696
9697                 data->val_out = mii_regval;
9698
9699                 return err;
9700         }
9701
9702         case SIOCSMIIREG:
9703                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9704                         break;                  /* We have no PHY */
9705
9706                 if (!capable(CAP_NET_ADMIN))
9707                         return -EPERM;
9708
9709                 if (tp->link_config.phy_is_low_power)
9710                         return -EAGAIN;
9711
9712                 spin_lock_bh(&tp->lock);
9713                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9714                 spin_unlock_bh(&tp->lock);
9715
9716                 return err;
9717
9718         default:
9719                 /* do nothing */
9720                 break;
9721         }
9722         return -EOPNOTSUPP;
9723 }
9724
9725 #if TG3_VLAN_TAG_USED
/* Attach/detach a VLAN group and refresh the chip's VLAN-tag
 * stripping mode.  Traffic is stopped around the update so the RX
 * path never sees an inconsistent vlgrp pointer.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	/* grp == NULL means VLAN acceleration is being disabled. */
	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
9745 #endif
9746
9747 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9748 {
9749         struct tg3 *tp = netdev_priv(dev);
9750
9751         memcpy(ec, &tp->coal, sizeof(*ec));
9752         return 0;
9753 }
9754
9755 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9756 {
9757         struct tg3 *tp = netdev_priv(dev);
9758         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9759         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9760
9761         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9762                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9763                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9764                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9765                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9766         }
9767
9768         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9769             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9770             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9771             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9772             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9773             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9774             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9775             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9776             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9777             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9778                 return -EINVAL;
9779
9780         /* No rx interrupts will be generated if both are zero */
9781         if ((ec->rx_coalesce_usecs == 0) &&
9782             (ec->rx_max_coalesced_frames == 0))
9783                 return -EINVAL;
9784
9785         /* No tx interrupts will be generated if both are zero */
9786         if ((ec->tx_coalesce_usecs == 0) &&
9787             (ec->tx_max_coalesced_frames == 0))
9788                 return -EINVAL;
9789
9790         /* Only copy relevant parameters, ignore all others. */
9791         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9792         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9793         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9794         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9795         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9796         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9797         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9798         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9799         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9800
9801         if (netif_running(dev)) {
9802                 tg3_full_lock(tp, 0);
9803                 __tg3_set_coalesce(tp, &tp->coal);
9804                 tg3_full_unlock(tp);
9805         }
9806         return 0;
9807 }
9808
/* ethtool operations exported by this driver (hooked up via
 * SET_ETHTOOL_OPS at probe time).
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
9841
9842 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9843 {
9844         u32 cursize, val, magic;
9845
9846         tp->nvram_size = EEPROM_CHIP_SIZE;
9847
9848         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9849                 return;
9850
9851         if ((magic != TG3_EEPROM_MAGIC) &&
9852             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9853             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9854                 return;
9855
9856         /*
9857          * Size the chip by reading offsets at increasing powers of two.
9858          * When we encounter our validation signature, we know the addressing
9859          * has wrapped around, and thus have our chip size.
9860          */
9861         cursize = 0x10;
9862
9863         while (cursize < tp->nvram_size) {
9864                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9865                         return;
9866
9867                 if (val == magic)
9868                         break;
9869
9870                 cursize <<= 1;
9871         }
9872
9873         tp->nvram_size = cursize;
9874 }
9875
9876 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9877 {
9878         u32 val;
9879
9880         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9881                 return;
9882
9883         /* Selfboot format */
9884         if (val != TG3_EEPROM_MAGIC) {
9885                 tg3_get_eeprom_size(tp);
9886                 return;
9887         }
9888
9889         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9890                 if (val != 0) {
9891                         tp->nvram_size = (val >> 16) * 1024;
9892                         return;
9893                 }
9894         }
9895         tp->nvram_size = 0x80000;
9896 }
9897
/* Decode NVRAM_CFG1 for 5750-class and 5780-class chips and record the
 * attached flash/EEPROM vendor, page size, and buffering mode.
 * Other chips get the default buffered-Atmel settings.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* No flash interface: force EEPROM-compatible access. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
				break;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
				break;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
				break;
		}
	}
	else {
		/* Non-5750/5780 chips: assume a buffered Atmel part. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
9950
/* Decode NVRAM_CFG1 on 5752 chips: record the flash/EEPROM vendor,
 * buffering flags, and page size, and honor the TPM protection bit.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash parts encode their page size in NVRAM_CFG1. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
10011
/* Decode NVRAM_CFG1 on 5755 chips: record vendor, page size, and total
 * size.  When the TPM protection bit is set, part of the device is
 * reserved and the usable size is reported smaller.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
		case FLASH_5755VENDOR_ATMEL_FLASH_5:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			/* Size depends on the exact part; smaller if a
			 * region is protected for the TPM.
			 */
			if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
			    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
				tp->nvram_size = (protect ? 0x3e200 : 0x80000);
			else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
				tp->nvram_size = (protect ? 0x1f200 : 0x40000);
			else
				tp->nvram_size = (protect ? 0x1f200 : 0x20000);
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
				tp->nvram_size = (protect ? 0x10000 : 0x20000);
			else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
				tp->nvram_size = (protect ? 0x10000 : 0x40000);
			else
				tp->nvram_size = (protect ? 0x20000 : 0x80000);
			break;
	}
}
10058
10059 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10060 {
10061         u32 nvcfg1;
10062
10063         nvcfg1 = tr32(NVRAM_CFG1);
10064
10065         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10066                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10067                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10068                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10069                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10070                         tp->nvram_jedecnum = JEDEC_ATMEL;
10071                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10072                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10073
10074                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10075                         tw32(NVRAM_CFG1, nvcfg1);
10076                         break;
10077                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10078                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10079                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10080                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10081                         tp->nvram_jedecnum = JEDEC_ATMEL;
10082                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10083                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10084                         tp->nvram_pagesize = 264;
10085                         break;
10086                 case FLASH_5752VENDOR_ST_M45PE10:
10087                 case FLASH_5752VENDOR_ST_M45PE20:
10088                 case FLASH_5752VENDOR_ST_M45PE40:
10089                         tp->nvram_jedecnum = JEDEC_ST;
10090                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10091                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10092                         tp->nvram_pagesize = 256;
10093                         break;
10094         }
10095 }
10096
/* Decode NVRAM_CFG1 on 5761 chips: record vendor, page size, and total
 * size.  With TPM protection active, the usable size is read from the
 * NVRAM_ADDR_LOCKOUT register instead of being derived from the part.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			/* Atmel parts here use direct addressing. */
			tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
			tp->nvram_pagesize = 256;
			break;
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}

	if (protect) {
		/* Usable size is whatever falls below the lockout. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		/* Size follows directly from the detected part. */
		switch (nvcfg1) {
			case FLASH_5761VENDOR_ATMEL_ADB161D:
			case FLASH_5761VENDOR_ATMEL_MDB161D:
			case FLASH_5761VENDOR_ST_A_M45PE16:
			case FLASH_5761VENDOR_ST_M_M45PE16:
				tp->nvram_size = 0x100000;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB081D:
			case FLASH_5761VENDOR_ATMEL_MDB081D:
			case FLASH_5761VENDOR_ST_A_M45PE80:
			case FLASH_5761VENDOR_ST_M_M45PE80:
				tp->nvram_size = 0x80000;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB041D:
			case FLASH_5761VENDOR_ATMEL_MDB041D:
			case FLASH_5761VENDOR_ST_A_M45PE40:
			case FLASH_5761VENDOR_ST_M_M45PE40:
				tp->nvram_size = 0x40000;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB021D:
			case FLASH_5761VENDOR_ATMEL_MDB021D:
			case FLASH_5761VENDOR_ST_A_M45PE20:
			case FLASH_5761VENDOR_ST_M_M45PE20:
				tp->nvram_size = 0x20000;
				break;
		}
	}
}
10171
10172 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10173 {
10174         tp->nvram_jedecnum = JEDEC_ATMEL;
10175         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10176         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10177 }
10178
10179 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10180 static void __devinit tg3_nvram_init(struct tg3 *tp)
10181 {
10182         tw32_f(GRC_EEPROM_ADDR,
10183              (EEPROM_ADDR_FSM_RESET |
10184               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10185                EEPROM_ADDR_CLKPERD_SHIFT)));
10186
10187         msleep(1);
10188
10189         /* Enable seeprom accesses. */
10190         tw32_f(GRC_LOCAL_CTRL,
10191              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10192         udelay(100);
10193
10194         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10195             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10196                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10197
10198                 if (tg3_nvram_lock(tp)) {
10199                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10200                                "tg3_nvram_init failed.\n", tp->dev->name);
10201                         return;
10202                 }
10203                 tg3_enable_nvram_access(tp);
10204
10205                 tp->nvram_size = 0;
10206
10207                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10208                         tg3_get_5752_nvram_info(tp);
10209                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10210                         tg3_get_5755_nvram_info(tp);
10211                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10212                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10213                         tg3_get_5787_nvram_info(tp);
10214                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10215                         tg3_get_5761_nvram_info(tp);
10216                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10217                         tg3_get_5906_nvram_info(tp);
10218                 else
10219                         tg3_get_nvram_info(tp);
10220
10221                 if (tp->nvram_size == 0)
10222                         tg3_get_nvram_size(tp);
10223
10224                 tg3_disable_nvram_access(tp);
10225                 tg3_nvram_unlock(tp);
10226
10227         } else {
10228                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10229
10230                 tg3_get_eeprom_size(tp);
10231         }
10232 }
10233
/* Read one 32-bit word from the serial EEPROM at byte offset @offset
 * into @*val using the GRC EEPROM state machine (the legacy interface
 * for chips without NVRAM support).
 *
 * Returns 0 on success, -EINVAL for an out-of-range or non-dword-aligned
 * offset, or -EBUSY if the state machine never signals completion.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
                                        u32 offset, u32 *val)
{
        u32 tmp;
        int i;

        /* The state machine can only address dword-aligned offsets
         * that fit in EEPROM_ADDR_ADDR_MASK.
         */
        if (offset > EEPROM_ADDR_ADDR_MASK ||
            (offset % 4) != 0)
                return -EINVAL;

        /* Preserve the unrelated bits of GRC_EEPROM_ADDR, then program
         * device id 0 and the target address, and kick off a read.
         */
        tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
                                        EEPROM_ADDR_DEVID_MASK |
                                        EEPROM_ADDR_READ);
        tw32(GRC_EEPROM_ADDR,
             tmp |
             (0 << EEPROM_ADDR_DEVID_SHIFT) |
             ((offset << EEPROM_ADDR_ADDR_SHIFT) &
              EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_READ | EEPROM_ADDR_START);

        /* Poll for completion, up to ~1 second. */
        for (i = 0; i < 1000; i++) {
                tmp = tr32(GRC_EEPROM_ADDR);

                if (tmp & EEPROM_ADDR_COMPLETE)
                        break;
                msleep(1);
        }
        if (!(tmp & EEPROM_ADDR_COMPLETE))
                return -EBUSY;

        *val = tr32(GRC_EEPROM_DATA);
        return 0;
}
10267
10268 #define NVRAM_CMD_TIMEOUT 10000
10269
10270 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10271 {
10272         int i;
10273
10274         tw32(NVRAM_CMD, nvram_cmd);
10275         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10276                 udelay(10);
10277                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10278                         udelay(10);
10279                         break;
10280                 }
10281         }
10282         if (i == NVRAM_CMD_TIMEOUT) {
10283                 return -EBUSY;
10284         }
10285         return 0;
10286 }
10287
10288 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10289 {
10290         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10291             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10292             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10293            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10294             (tp->nvram_jedecnum == JEDEC_ATMEL))
10295
10296                 addr = ((addr / tp->nvram_pagesize) <<
10297                         ATMEL_AT45DB0X1B_PAGE_POS) +
10298                        (addr % tp->nvram_pagesize);
10299
10300         return addr;
10301 }
10302
10303 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10304 {
10305         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10306             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10307             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10308            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10309             (tp->nvram_jedecnum == JEDEC_ATMEL))
10310
10311                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10312                         tp->nvram_pagesize) +
10313                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10314
10315         return addr;
10316 }
10317
10318 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10319 {
10320         int ret;
10321
10322         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10323                 return tg3_nvram_read_using_eeprom(tp, offset, val);
10324
10325         offset = tg3_nvram_phys_addr(tp, offset);
10326
10327         if (offset > NVRAM_ADDR_MSK)
10328                 return -EINVAL;
10329
10330         ret = tg3_nvram_lock(tp);
10331         if (ret)
10332                 return ret;
10333
10334         tg3_enable_nvram_access(tp);
10335
10336         tw32(NVRAM_ADDR, offset);
10337         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10338                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10339
10340         if (ret == 0)
10341                 *val = swab32(tr32(NVRAM_RDDATA));
10342
10343         tg3_disable_nvram_access(tp);
10344
10345         tg3_nvram_unlock(tp);
10346
10347         return ret;
10348 }
10349
10350 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10351 {
10352         u32 v;
10353         int res = tg3_nvram_read(tp, offset, &v);
10354         if (!res)
10355                 *val = cpu_to_le32(v);
10356         return res;
10357 }
10358
10359 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10360 {
10361         int err;
10362         u32 tmp;
10363
10364         err = tg3_nvram_read(tp, offset, &tmp);
10365         *val = swab32(tmp);
10366         return err;
10367 }
10368
/* Write @len bytes from @buf to the serial EEPROM starting at byte
 * offset @offset, one 32-bit word at a time, via the GRC EEPROM state
 * machine.  Caller guarantees offset and len are dword aligned; @buf
 * holds little-endian data.  Returns 0 on success or -EBUSY if a word
 * write never completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                    u32 offset, u32 len, u8 *buf)
{
        int i, j, rc = 0;
        u32 val;

        for (i = 0; i < len; i += 4) {
                u32 addr;
                __le32 data;

                addr = offset + i;

                /* buf may not be 4-byte aligned; copy the word out. */
                memcpy(&data, buf + i, 4);

                tw32(GRC_EEPROM_DATA, le32_to_cpu(data));

                val = tr32(GRC_EEPROM_ADDR);
                /* Presumably acknowledges a prior completion before
                 * starting the next transaction — TODO confirm against
                 * the GRC register spec.
                 */
                tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

                /* Program device id 0 and the target address, then
                 * start the write.
                 */
                val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                        EEPROM_ADDR_READ);
                tw32(GRC_EEPROM_ADDR, val |
                        (0 << EEPROM_ADDR_DEVID_SHIFT) |
                        (addr & EEPROM_ADDR_ADDR_MASK) |
                        EEPROM_ADDR_START |
                        EEPROM_ADDR_WRITE);

                /* Poll for completion of this word, up to ~1 second. */
                for (j = 0; j < 1000; j++) {
                        val = tr32(GRC_EEPROM_ADDR);

                        if (val & EEPROM_ADDR_COMPLETE)
                                break;
                        msleep(1);
                }
                if (!(val & EEPROM_ADDR_COMPLETE)) {
                        rc = -EBUSY;
                        break;
                }
        }

        return rc;
}
10411
/* offset and length are dword aligned.
 *
 * Write path for unbuffered flash parts, which must be erased a page
 * at a time: each affected page is read into a bounce buffer, patched
 * with the new data, erased, and rewritten word by word.  Returns 0 on
 * success or a negative errno.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int ret = 0;
        u32 pagesize = tp->nvram_pagesize;
        u32 pagemask = pagesize - 1;
        u32 nvram_cmd;
        u8 *tmp;

        /* Bounce buffer holding one full flash page. */
        tmp = kmalloc(pagesize, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;

        while (len) {
                int j;
                u32 phy_addr, page_off, size;

                /* Start of the page containing the current offset. */
                phy_addr = offset & ~pagemask;

                /* Read the whole page first so the words we are not
                 * changing survive the erase below.
                 */
                for (j = 0; j < pagesize; j += 4) {
                        if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
                                                (__le32 *) (tmp + j))))
                                break;
                }
                if (ret)
                        break;

                page_off = offset & pagemask;
                size = pagesize;
                if (len < size)
                        size = len;

                len -= size;

                /* Patch the new data into the page image. */
                memcpy(tmp + page_off, buf, size);

                offset = offset + (pagesize - page_off);

                tg3_enable_nvram_access(tp);

                /*
                 * Before we can erase the flash page, we need
                 * to issue a special "write enable" command.
                 */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Erase the target page */
                tw32(NVRAM_ADDR, phy_addr);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
                        NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Issue another write enable to start the write. */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Write the patched page back one dword at a time,
                 * flagging the first and last words of the page.
                 */
                for (j = 0; j < pagesize; j += 4) {
                        __be32 data;

                        data = *((__be32 *) (tmp + j));
                        /* swab32(le32_to_cpu(data)), actually */
                        tw32(NVRAM_WRDATA, be32_to_cpu(data));

                        tw32(NVRAM_ADDR, phy_addr + j);

                        nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
                                NVRAM_CMD_WR;

                        if (j == 0)
                                nvram_cmd |= NVRAM_CMD_FIRST;
                        else if (j == (pagesize - 4))
                                nvram_cmd |= NVRAM_CMD_LAST;

                        if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
                                break;
                }
                if (ret)
                        break;
        }

        /* Issue a write-disable command regardless of success. */
        nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
        tg3_nvram_exec_cmd(tp, nvram_cmd);

        kfree(tmp);

        return ret;
}
10508
/* offset and length are dword aligned.
 *
 * Write path for buffered flash and EEPROM parts: each 32-bit word is
 * written with its own NVRAM command, with FIRST/LAST flags marking
 * page boundaries.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int i, ret = 0;

        for (i = 0; i < len; i += 4, offset += 4) {
                u32 page_off, phy_addr, nvram_cmd;
                __be32 data;

                /* buf may not be 4-byte aligned; copy the word out. */
                memcpy(&data, buf + i, 4);
                tw32(NVRAM_WRDATA, be32_to_cpu(data));

                page_off = offset % tp->nvram_pagesize;

                phy_addr = tg3_nvram_phys_addr(tp, offset);

                tw32(NVRAM_ADDR, phy_addr);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

                /* Flag the first word of a page (or of the whole
                 * transfer) and the last word of a page or transfer.
                 */
                if ((page_off == 0) || (i == 0))
                        nvram_cmd |= NVRAM_CMD_FIRST;
                if (page_off == (tp->nvram_pagesize - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                if (i == (len - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                /* ST-JEDEC parts on ASICs older than the 5752 family
                 * need an explicit write-enable command before the
                 * first word of each page.
                 */
                if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
                    (tp->nvram_jedecnum == JEDEC_ST) &&
                    (nvram_cmd & NVRAM_CMD_FIRST)) {

                        if ((ret = tg3_nvram_exec_cmd(tp,
                                NVRAM_CMD_WREN | NVRAM_CMD_GO |
                                NVRAM_CMD_DONE)))

                                break;
                }
                if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
                        /* We always do complete word writes to eeprom. */
                        nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
                }

                if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
                        break;
        }
        return ret;
}
10562
10563 /* offset and length are dword aligned */
10564 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10565 {
10566         int ret;
10567
10568         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10569                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10570                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
10571                 udelay(40);
10572         }
10573
10574         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10575                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10576         }
10577         else {
10578                 u32 grc_mode;
10579
10580                 ret = tg3_nvram_lock(tp);
10581                 if (ret)
10582                         return ret;
10583
10584                 tg3_enable_nvram_access(tp);
10585                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10586                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10587                         tw32(NVRAM_WRITE1, 0x406);
10588
10589                 grc_mode = tr32(GRC_MODE);
10590                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10591
10592                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10593                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10594
10595                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
10596                                 buf);
10597                 }
10598                 else {
10599                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10600                                 buf);
10601                 }
10602
10603                 grc_mode = tr32(GRC_MODE);
10604                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10605
10606                 tg3_disable_nvram_access(tp);
10607                 tg3_nvram_unlock(tp);
10608         }
10609
10610         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10611                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10612                 udelay(40);
10613         }
10614
10615         return ret;
10616 }
10617
/* Maps a board's PCI subsystem vendor/device pair to the PHY chip known
 * to be on that board.  Used as a fallback when the PHY ID cannot be
 * read from the chip or the eeprom.  A phy_id of 0 is treated as a
 * serdes (fiber) board by the caller.
 */
struct subsys_tbl_ent {
        u16 subsys_vendor, subsys_devid;
        u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
        /* Broadcom boards. */
        { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
        { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
        { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
        { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
        { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
        { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
        { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
        { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
        { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
        { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
        { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

        /* 3com boards. */
        { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
        { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
        { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
        { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
        { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

        /* DELL boards. */
        { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
        { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
        { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
        { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

        /* Compaq boards. */
        { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
        { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
        { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
        { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
        { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

        /* IBM boards. */
        { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10660
10661 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10662 {
10663         int i;
10664
10665         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10666                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10667                      tp->pdev->subsystem_vendor) &&
10668                     (subsys_id_to_phy_id[i].subsys_devid ==
10669                      tp->pdev->subsystem_device))
10670                         return &subsys_id_to_phy_id[i];
10671         }
10672         return NULL;
10673 }
10674
10675 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10676 {
10677         u32 val;
10678         u16 pmcsr;
10679
10680         /* On some early chips the SRAM cannot be accessed in D3hot state,
10681          * so need make sure we're in D0.
10682          */
10683         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10684         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10685         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10686         msleep(1);
10687
10688         /* Make sure register accesses (indirect or otherwise)
10689          * will function correctly.
10690          */
10691         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10692                                tp->misc_host_ctrl);
10693
10694         /* The memory arbiter has to be enabled in order for SRAM accesses
10695          * to succeed.  Normally on powerup the tg3 chip firmware will make
10696          * sure it is enabled, but other entities such as system netboot
10697          * code might disable it.
10698          */
10699         val = tr32(MEMARB_MODE);
10700         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10701
10702         tp->phy_id = PHY_ID_INVALID;
10703         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10704
10705         /* Assume an onboard device and WOL capable by default.  */
10706         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10707
10708         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10709                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10710                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10711                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10712                 }
10713                 val = tr32(VCPU_CFGSHDW);
10714                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10715                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10716                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10717                     (val & VCPU_CFGSHDW_WOL_MAGPKT))
10718                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10719                 return;
10720         }
10721
10722         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10723         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10724                 u32 nic_cfg, led_cfg;
10725                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10726                 int eeprom_phy_serdes = 0;
10727
10728                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10729                 tp->nic_sram_data_cfg = nic_cfg;
10730
10731                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10732                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10733                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10734                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10735                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10736                     (ver > 0) && (ver < 0x100))
10737                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10738
10739                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10740                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10741                         eeprom_phy_serdes = 1;
10742
10743                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10744                 if (nic_phy_id != 0) {
10745                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10746                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10747
10748                         eeprom_phy_id  = (id1 >> 16) << 10;
10749                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10750                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10751                 } else
10752                         eeprom_phy_id = 0;
10753
10754                 tp->phy_id = eeprom_phy_id;
10755                 if (eeprom_phy_serdes) {
10756                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10757                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10758                         else
10759                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10760                 }
10761
10762                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10763                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10764                                     SHASTA_EXT_LED_MODE_MASK);
10765                 else
10766                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10767
10768                 switch (led_cfg) {
10769                 default:
10770                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10771                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10772                         break;
10773
10774                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10775                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10776                         break;
10777
10778                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10779                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10780
10781                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10782                          * read on some older 5700/5701 bootcode.
10783                          */
10784                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10785                             ASIC_REV_5700 ||
10786                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10787                             ASIC_REV_5701)
10788                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10789
10790                         break;
10791
10792                 case SHASTA_EXT_LED_SHARED:
10793                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10794                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10795                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10796                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10797                                                  LED_CTRL_MODE_PHY_2);
10798                         break;
10799
10800                 case SHASTA_EXT_LED_MAC:
10801                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10802                         break;
10803
10804                 case SHASTA_EXT_LED_COMBO:
10805                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10806                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10807                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10808                                                  LED_CTRL_MODE_PHY_2);
10809                         break;
10810
10811                 };
10812
10813                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10814                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10815                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10816                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10817
10818                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
10819                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10820
10821                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10822                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10823                         if ((tp->pdev->subsystem_vendor ==
10824                              PCI_VENDOR_ID_ARIMA) &&
10825                             (tp->pdev->subsystem_device == 0x205a ||
10826                              tp->pdev->subsystem_device == 0x2063))
10827                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10828                 } else {
10829                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10830                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10831                 }
10832
10833                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10834                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10835                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10836                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10837                 }
10838                 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10839                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
10840                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10841                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10842                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10843
10844                 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10845                     nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10846                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10847
10848                 if (cfg2 & (1 << 17))
10849                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10850
10851                 /* serdes signal pre-emphasis in register 0x590 set by */
10852                 /* bootcode if bit 18 is set */
10853                 if (cfg2 & (1 << 18))
10854                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10855
10856                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10857                         u32 cfg3;
10858
10859                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10860                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10861                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10862                 }
10863         }
10864 }
10865
10866 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
10867 {
10868         int i;
10869         u32 val;
10870
10871         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
10872         tw32(OTP_CTRL, cmd);
10873
10874         /* Wait for up to 1 ms for command to execute. */
10875         for (i = 0; i < 100; i++) {
10876                 val = tr32(OTP_STATUS);
10877                 if (val & OTP_STATUS_CMD_DONE)
10878                         break;
10879                 udelay(10);
10880         }
10881
10882         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
10883 }
10884
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 if any OTP command fails (0 doubles as "no config").
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
        u32 bhalf_otp, thalf_otp;

        /* Route OTP accesses through the GRC register window. */
        tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
                return 0;

        /* First read: the word holding the top half of the config. */
        tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
                return 0;

        thalf_otp = tr32(OTP_READ_DATA);

        /* Second read: the word holding the bottom half. */
        tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
                return 0;

        bhalf_otp = tr32(OTP_READ_DATA);

        /* Merge: low 16 bits of the top half become the high 16 bits
         * of the result; high 16 bits of the bottom half the low 16.
         */
        return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
10914
10915 static int __devinit tg3_phy_probe(struct tg3 *tp)
10916 {
10917         u32 hw_phy_id_1, hw_phy_id_2;
10918         u32 hw_phy_id, hw_phy_id_masked;
10919         int err;
10920
10921         /* Reading the PHY ID register can conflict with ASF
         * firmware access to the PHY hardware.
10923          */
10924         err = 0;
10925         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10926             (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
10927                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
10928         } else {
10929                 /* Now read the physical PHY_ID from the chip and verify
10930                  * that it is sane.  If it doesn't look good, we fall back
10931                  * to either the hard-coded table based PHY_ID and failing
10932                  * that the value found in the eeprom area.
10933                  */
10934                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
10935                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
10936
10937                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
10938                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
10939                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
10940
10941                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
10942         }
10943
10944         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
10945                 tp->phy_id = hw_phy_id;
10946                 if (hw_phy_id_masked == PHY_ID_BCM8002)
10947                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10948                 else
10949                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
10950         } else {
10951                 if (tp->phy_id != PHY_ID_INVALID) {
10952                         /* Do nothing, phy ID already set up in
10953                          * tg3_get_eeprom_hw_cfg().
10954                          */
10955                 } else {
10956                         struct subsys_tbl_ent *p;
10957
10958                         /* No eeprom signature?  Try the hardcoded
10959                          * subsys device table.
10960                          */
10961                         p = lookup_by_subsys(tp);
10962                         if (!p)
10963                                 return -ENODEV;
10964
10965                         tp->phy_id = p->phy_id;
10966                         if (!tp->phy_id ||
10967                             tp->phy_id == PHY_ID_BCM8002)
10968                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10969                 }
10970         }
10971
10972         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
10973             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
10974             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
10975                 u32 bmsr, adv_reg, tg3_ctrl, mask;
10976
10977                 tg3_readphy(tp, MII_BMSR, &bmsr);
10978                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
10979                     (bmsr & BMSR_LSTATUS))
10980                         goto skip_phy_reset;
10981
10982                 err = tg3_phy_reset(tp);
10983                 if (err)
10984                         return err;
10985
10986                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
10987                            ADVERTISE_100HALF | ADVERTISE_100FULL |
10988                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
10989                 tg3_ctrl = 0;
10990                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
10991                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
10992                                     MII_TG3_CTRL_ADV_1000_FULL);
10993                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10994                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
10995                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
10996                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
10997                 }
10998
10999                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11000                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11001                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11002                 if (!tg3_copper_is_advertising_all(tp, mask)) {
11003                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11004
11005                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11006                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11007
11008                         tg3_writephy(tp, MII_BMCR,
11009                                      BMCR_ANENABLE | BMCR_ANRESTART);
11010                 }
11011                 tg3_phy_set_wirespeed(tp);
11012
11013                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11014                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11015                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11016         }
11017
11018 skip_phy_reset:
11019         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11020                 err = tg3_init_5401phy_dsp(tp);
11021                 if (err)
11022                         return err;
11023         }
11024
11025         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11026                 err = tg3_init_5401phy_dsp(tp);
11027         }
11028
11029         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11030                 tp->link_config.advertising =
11031                         (ADVERTISED_1000baseT_Half |
11032                          ADVERTISED_1000baseT_Full |
11033                          ADVERTISED_Autoneg |
11034                          ADVERTISED_FIBRE);
11035         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11036                 tp->link_config.advertising &=
11037                         ~(ADVERTISED_1000baseT_Half |
11038                           ADVERTISED_1000baseT_Full);
11039
11040         return err;
11041 }
11042
/* Read the board part number string out of NVRAM/VPD into
 * tp->board_part_number.  If no part number can be located, fall
 * back to a hard-coded board name ("BCM95906" for 5906, else "none").
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* EEPROM carries the tg3 signature: the 256-byte VPD image
		 * lives at NVRAM offset 0x100; read it a dword at a time.
		 */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			/* Unpack into the buffer in little-endian byte order. */
			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		/* No tg3 EEPROM signature: fetch the VPD image through the
		 * PCI VPD capability instead.
		 * NOTE(review): pci_find_capability() can return 0 if the
		 * capability is absent; that case is not checked here —
		 * confirm all supported devices expose PCI_CAP_ID_VPD.
		 */
		int vpd_cap;

		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			__le32 v;
			u16 tmp16;

			/* Start a VPD read of the dword at address i ... */
			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			/* ... then poll up to 100 times (1 ms apart) for the
			 * completion flag (bit 15 of the VPD address word).
			 */
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			/* Timed out waiting for the VPD read to complete. */
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			v = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &v, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* Skip identifier-string (0x82) and read-write (0x91)
		 * resources; a 16-bit little-endian length follows the
		 * tag byte.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Only a VPD read-only resource (tag 0x90) can hold "PN". */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk keyword entries: 2-byte keyword, 1-byte length, data. */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				/* Bound against both the 24-byte destination
				 * and the end of the VPD image.
				 */
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
11143
11144 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11145 {
11146         u32 val;
11147
11148         if (tg3_nvram_read_swab(tp, offset, &val) ||
11149             (val & 0xfc000000) != 0x0c000000 ||
11150             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11151             val != 0)
11152                 return 0;
11153
11154         return 1;
11155 }
11156
/* Build the firmware version string in tp->fw_ver from NVRAM: the
 * bootcode version, plus — when ASF is enabled and the APE is not in
 * use — ", " followed by the ASF init firmware's version text.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	/* Only NVRAM images with the tg3 EEPROM signature are parsed. */
	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc: bootcode image offset; word 0x4: base subtracted
	 * below to translate ver_offset into an NVRAM address.
	 */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	/* Word 8 of a valid image holds the version-string offset. */
	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* Copy 16 bytes of bootcode version text into fw_ver. */
	offset = offset + ver_offset - start;
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	/* Append the ASF version only when ASF firmware is enabled
	 * and the APE is not managing the device.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Scan the NVRAM directory for the ASF init-code entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	/* No ASF entry found in the directory. */
	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 chips use a fixed base (0x08000000); later chips
	 * store it in the word preceding the directory entry.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	/* Follow the directory entry to the ASF image and validate it;
	 * word 8 again gives the version-string offset.
	 */
	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	/* Append ", " then up to 16 bytes of ASF version text.
	 * NOTE(review): the two separator bytes are stored without a
	 * bounds check — assumes the 16-byte bootcode string leaves
	 * room inside TG3_VER_SIZE; confirm against tg3.h.
	 */
	bcnt = strlen(tp->fw_ver);

	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Less than a full dword of space left: copy the
		 * remainder and stop.
		 */
		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	/* Guarantee NUL termination of the final string. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11240
11241 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11242
11243 static int __devinit tg3_get_invariants(struct tg3 *tp)
11244 {
11245         static struct pci_device_id write_reorder_chipsets[] = {
11246                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11247                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11248                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11249                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11250                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11251                              PCI_DEVICE_ID_VIA_8385_0) },
11252                 { },
11253         };
11254         u32 misc_ctrl_reg;
11255         u32 cacheline_sz_reg;
11256         u32 pci_state_reg, grc_misc_cfg;
11257         u32 val;
11258         u16 pci_cmd;
11259         int err, pcie_cap;
11260
11261         /* Force memory write invalidate off.  If we leave it on,
11262          * then on 5700_BX chips we have to enable a workaround.
11263          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11264          * to match the cacheline size.  The Broadcom driver have this
11265          * workaround but turns MWI off all the times so never uses
11266          * it.  This seems to suggest that the workaround is insufficient.
11267          */
11268         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11269         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11270         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11271
11272         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11273          * has the register indirect write enable bit set before
11274          * we try to access any of the MMIO registers.  It is also
11275          * critical that the PCI-X hw workaround situation is decided
11276          * before that as well.
11277          */
11278         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11279                               &misc_ctrl_reg);
11280
11281         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11282                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11283         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11284                 u32 prod_id_asic_rev;
11285
11286                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11287                                       &prod_id_asic_rev);
11288                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11289         }
11290
11291         /* Wrong chip ID in 5752 A0. This code can be removed later
11292          * as A0 is not in production.
11293          */
11294         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11295                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11296
11297         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11298          * we need to disable memory and use config. cycles
11299          * only to access all registers. The 5702/03 chips
11300          * can mistakenly decode the special cycles from the
11301          * ICH chipsets as memory write cycles, causing corruption
11302          * of register and memory space. Only certain ICH bridges
11303          * will drive special cycles with non-zero data during the
11304          * address phase which can fall within the 5703's address
11305          * range. This is not an ICH bug as the PCI spec allows
11306          * non-zero address during special cycles. However, only
11307          * these ICH bridges are known to drive non-zero addresses
11308          * during special cycles.
11309          *
11310          * Since special cycles do not cross PCI bridges, we only
11311          * enable this workaround if the 5703 is on the secondary
11312          * bus of these ICH bridges.
11313          */
11314         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11315             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11316                 static struct tg3_dev_id {
11317                         u32     vendor;
11318                         u32     device;
11319                         u32     rev;
11320                 } ich_chipsets[] = {
11321                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11322                           PCI_ANY_ID },
11323                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11324                           PCI_ANY_ID },
11325                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11326                           0xa },
11327                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11328                           PCI_ANY_ID },
11329                         { },
11330                 };
11331                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11332                 struct pci_dev *bridge = NULL;
11333
11334                 while (pci_id->vendor != 0) {
11335                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11336                                                 bridge);
11337                         if (!bridge) {
11338                                 pci_id++;
11339                                 continue;
11340                         }
11341                         if (pci_id->rev != PCI_ANY_ID) {
11342                                 if (bridge->revision > pci_id->rev)
11343                                         continue;
11344                         }
11345                         if (bridge->subordinate &&
11346                             (bridge->subordinate->number ==
11347                              tp->pdev->bus->number)) {
11348
11349                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11350                                 pci_dev_put(bridge);
11351                                 break;
11352                         }
11353                 }
11354         }
11355
11356         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11357                 static struct tg3_dev_id {
11358                         u32     vendor;
11359                         u32     device;
11360                 } bridge_chipsets[] = {
11361                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11362                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11363                         { },
11364                 };
11365                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11366                 struct pci_dev *bridge = NULL;
11367
11368                 while (pci_id->vendor != 0) {
11369                         bridge = pci_get_device(pci_id->vendor,
11370                                                 pci_id->device,
11371                                                 bridge);
11372                         if (!bridge) {
11373                                 pci_id++;
11374                                 continue;
11375                         }
11376                         if (bridge->subordinate &&
11377                             (bridge->subordinate->number <=
11378                              tp->pdev->bus->number) &&
11379                             (bridge->subordinate->subordinate >=
11380                              tp->pdev->bus->number)) {
11381                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11382                                 pci_dev_put(bridge);
11383                                 break;
11384                         }
11385                 }
11386         }
11387
11388         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11389          * DMA addresses > 40-bit. This bridge may have other additional
11390          * 57xx devices behind it in some 4-port NIC designs for example.
11391          * Any tg3 device found behind the bridge will also need the 40-bit
11392          * DMA workaround.
11393          */
11394         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11395             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11396                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11397                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11398                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11399         }
11400         else {
11401                 struct pci_dev *bridge = NULL;
11402
11403                 do {
11404                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11405                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
11406                                                 bridge);
11407                         if (bridge && bridge->subordinate &&
11408                             (bridge->subordinate->number <=
11409                              tp->pdev->bus->number) &&
11410                             (bridge->subordinate->subordinate >=
11411                              tp->pdev->bus->number)) {
11412                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11413                                 pci_dev_put(bridge);
11414                                 break;
11415                         }
11416                 } while (bridge);
11417         }
11418
11419         /* Initialize misc host control in PCI block. */
11420         tp->misc_host_ctrl |= (misc_ctrl_reg &
11421                                MISC_HOST_CTRL_CHIPREV);
11422         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11423                                tp->misc_host_ctrl);
11424
11425         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11426                               &cacheline_sz_reg);
11427
11428         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
11429         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
11430         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
11431         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
11432
11433         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11434             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11435                 tp->pdev_peer = tg3_find_peer(tp);
11436
11437         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11438             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11439             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11440             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11441             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11442             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11443             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11444             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11445                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11446
11447         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11448             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11449                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11450
11451         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11452                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11453                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11454                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11455                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11456                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11457                      tp->pdev_peer == tp->pdev))
11458                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11459
11460                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11461                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11462                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11463                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11464                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11465                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11466                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11467                 } else {
11468                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11469                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11470                                 ASIC_REV_5750 &&
11471                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11472                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11473                 }
11474         }
11475
11476         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11477             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11478             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11479             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11480             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11481             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11482             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11483             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11484                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11485
11486         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11487         if (pcie_cap != 0) {
11488                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11489
11490                 pcie_set_readrq(tp->pdev, 4096);
11491
11492                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11493                         u16 lnkctl;
11494
11495                         pci_read_config_word(tp->pdev,
11496                                              pcie_cap + PCI_EXP_LNKCTL,
11497                                              &lnkctl);
11498                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11499                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11500                 }
11501         }
11502
11503         /* If we have an AMD 762 or VIA K8T800 chipset, write
11504          * reordering to the mailbox registers done by the host
11505          * controller can cause major troubles.  We read back from
11506          * every mailbox register write to force the writes to be
11507          * posted to the chip in order.
11508          */
11509         if (pci_dev_present(write_reorder_chipsets) &&
11510             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11511                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11512
11513         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11514             tp->pci_lat_timer < 64) {
11515                 tp->pci_lat_timer = 64;
11516
11517                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
11518                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
11519                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
11520                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
11521
11522                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11523                                        cacheline_sz_reg);
11524         }
11525
11526         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11527             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11528                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11529                 if (!tp->pcix_cap) {
11530                         printk(KERN_ERR PFX "Cannot find PCI-X "
11531                                             "capability, aborting.\n");
11532                         return -EIO;
11533                 }
11534         }
11535
11536         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11537                               &pci_state_reg);
11538
11539         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11540                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11541
11542                 /* If this is a 5700 BX chipset, and we are in PCI-X
11543                  * mode, enable register write workaround.
11544                  *
11545                  * The workaround is to use indirect register accesses
11546                  * for all chip writes not to mailbox registers.
11547                  */
11548                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11549                         u32 pm_reg;
11550
11551                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11552
11553                         /* The chip can have it's power management PCI config
11554                          * space registers clobbered due to this bug.
11555                          * So explicitly force the chip into D0 here.
11556                          */
11557                         pci_read_config_dword(tp->pdev,
11558                                               tp->pm_cap + PCI_PM_CTRL,
11559                                               &pm_reg);
11560                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11561                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11562                         pci_write_config_dword(tp->pdev,
11563                                                tp->pm_cap + PCI_PM_CTRL,
11564                                                pm_reg);
11565
11566                         /* Also, force SERR#/PERR# in PCI command. */
11567                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11568                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11569                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11570                 }
11571         }
11572
11573         /* 5700 BX chips need to have their TX producer index mailboxes
11574          * written twice to workaround a bug.
11575          */
11576         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11577                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11578
11579         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11580                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11581         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11582                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11583
11584         /* Chip-specific fixup from Broadcom driver */
11585         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11586             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11587                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11588                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11589         }
11590
11591         /* Default fast path register access methods */
11592         tp->read32 = tg3_read32;
11593         tp->write32 = tg3_write32;
11594         tp->read32_mbox = tg3_read32;
11595         tp->write32_mbox = tg3_write32;
11596         tp->write32_tx_mbox = tg3_write32;
11597         tp->write32_rx_mbox = tg3_write32;
11598
11599         /* Various workaround register access methods */
11600         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11601                 tp->write32 = tg3_write_indirect_reg32;
11602         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11603                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11604                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11605                 /*
11606                  * Back to back register writes can cause problems on these
11607                  * chips, the workaround is to read back all reg writes
11608                  * except those to mailbox regs.
11609                  *
11610                  * See tg3_write_indirect_reg32().
11611                  */
11612                 tp->write32 = tg3_write_flush_reg32;
11613         }
11614
11615
11616         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11617             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11618                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11619                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11620                         tp->write32_rx_mbox = tg3_write_flush_reg32;
11621         }
11622
11623         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11624                 tp->read32 = tg3_read_indirect_reg32;
11625                 tp->write32 = tg3_write_indirect_reg32;
11626                 tp->read32_mbox = tg3_read_indirect_mbox;
11627                 tp->write32_mbox = tg3_write_indirect_mbox;
11628                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11629                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11630
11631                 iounmap(tp->regs);
11632                 tp->regs = NULL;
11633
11634                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11635                 pci_cmd &= ~PCI_COMMAND_MEMORY;
11636                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11637         }
11638         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11639                 tp->read32_mbox = tg3_read32_mbox_5906;
11640                 tp->write32_mbox = tg3_write32_mbox_5906;
11641                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11642                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11643         }
11644
11645         if (tp->write32 == tg3_write_indirect_reg32 ||
11646             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11647              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11648               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11649                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11650
11651         /* Get eeprom hw config before calling tg3_set_power_state().
11652          * In particular, the TG3_FLG2_IS_NIC flag must be
11653          * determined before calling tg3_set_power_state() so that
11654          * we know whether or not to switch out of Vaux power.
11655          * When the flag is set, it means that GPIO1 is used for eeprom
11656          * write protect and also implies that it is a LOM where GPIOs
11657          * are not used to switch power.
11658          */
11659         tg3_get_eeprom_hw_cfg(tp);
11660
11661         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11662                 /* Allow reads and writes to the
11663                  * APE register and memory space.
11664                  */
11665                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11666                                  PCISTATE_ALLOW_APE_SHMEM_WR;
11667                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11668                                        pci_state_reg);
11669         }
11670
11671         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11672             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11673                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11674
11675                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
11676                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
11677                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
11678                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
11679                         tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
11680         }
11681
11682         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11683          * GPIO1 driven high will bring 5700's external PHY out of reset.
11684          * It is also used as eeprom write protect on LOMs.
11685          */
11686         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11687         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11688             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11689                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11690                                        GRC_LCLCTRL_GPIO_OUTPUT1);
11691         /* Unused GPIO3 must be driven as output on 5752 because there
11692          * are no pull-up resistors on unused GPIO pins.
11693          */
11694         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11695                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11696
11697         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11698                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11699
11700         /* Force the chip into D0. */
11701         err = tg3_set_power_state(tp, PCI_D0);
11702         if (err) {
11703                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11704                        pci_name(tp->pdev));
11705                 return err;
11706         }
11707
11708         /* 5700 B0 chips do not support checksumming correctly due
11709          * to hardware bugs.
11710          */
11711         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11712                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11713
11714         /* Derive initial jumbo mode from MTU assigned in
11715          * ether_setup() via the alloc_etherdev() call
11716          */
11717         if (tp->dev->mtu > ETH_DATA_LEN &&
11718             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11719                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11720
11721         /* Determine WakeOnLan speed to use. */
11722         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11723             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11724             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11725             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11726                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11727         } else {
11728                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11729         }
11730
11731         /* A few boards don't want Ethernet@WireSpeed phy feature */
11732         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11733             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11734              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11735              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11736             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11737             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11738                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11739
11740         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11741             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11742                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11743         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11744                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11745
11746         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11747                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11748                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11749                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11750                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11751                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11752                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11753                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11754                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11755                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11756                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11757                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11758         }
11759
11760         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11761             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
11762                 tp->phy_otp = tg3_read_otp_phycfg(tp);
11763                 if (tp->phy_otp == 0)
11764                         tp->phy_otp = TG3_OTP_DEFAULT;
11765         }
11766
11767         tp->coalesce_mode = 0;
11768         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11769             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11770                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11771
11772         /* Initialize MAC MI mode, polling disabled. */
11773         tw32_f(MAC_MI_MODE, tp->mi_mode);
11774         udelay(80);
11775
11776         /* Initialize data/descriptor byte/word swapping. */
11777         val = tr32(GRC_MODE);
11778         val &= GRC_MODE_HOST_STACKUP;
11779         tw32(GRC_MODE, val | tp->grc_mode);
11780
11781         tg3_switch_clocks(tp);
11782
11783         /* Clear this out for sanity. */
11784         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11785
11786         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11787                               &pci_state_reg);
11788         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11789             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11790                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11791
11792                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11793                     chiprevid == CHIPREV_ID_5701_B0 ||
11794                     chiprevid == CHIPREV_ID_5701_B2 ||
11795                     chiprevid == CHIPREV_ID_5701_B5) {
11796                         void __iomem *sram_base;
11797
11798                         /* Write some dummy words into the SRAM status block
11799                          * area, see if it reads back correctly.  If the return
11800                          * value is bad, force enable the PCIX workaround.
11801                          */
11802                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11803
11804                         writel(0x00000000, sram_base);
11805                         writel(0x00000000, sram_base + 4);
11806                         writel(0xffffffff, sram_base + 4);
11807                         if (readl(sram_base) != 0x00000000)
11808                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11809                 }
11810         }
11811
11812         udelay(50);
11813         tg3_nvram_init(tp);
11814
11815         grc_misc_cfg = tr32(GRC_MISC_CFG);
11816         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11817
11818         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11819             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11820              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11821                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11822
11823         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11824             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11825                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11826         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11827                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11828                                       HOSTCC_MODE_CLRTICK_TXBD);
11829
11830                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11831                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11832                                        tp->misc_host_ctrl);
11833         }
11834
11835         /* these are limited to 10/100 only */
11836         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11837              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11838             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11839              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11840              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11841               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11842               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11843             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11844              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11845               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11846               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11847             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11848                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11849
11850         err = tg3_phy_probe(tp);
11851         if (err) {
11852                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11853                        pci_name(tp->pdev), err);
11854                 /* ... but do not return immediately ... */
11855         }
11856
11857         tg3_read_partno(tp);
11858         tg3_read_fw_ver(tp);
11859
11860         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11861                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11862         } else {
11863                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11864                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11865                 else
11866                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11867         }
11868
11869         /* 5700 {AX,BX} chips have a broken status block link
11870          * change bit implementation, so we must use the
11871          * status register in those cases.
11872          */
11873         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11874                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11875         else
11876                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11877
11878         /* The led_ctrl is set during tg3_phy_probe, here we might
11879          * have to force the link status polling mechanism based
11880          * upon subsystem IDs.
11881          */
11882         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11883             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11884             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11885                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11886                                   TG3_FLAG_USE_LINKCHG_REG);
11887         }
11888
11889         /* For all SERDES we poll the MAC status register. */
11890         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11891                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11892         else
11893                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11894
11895         /* All chips before 5787 can get confused if TX buffers
11896          * straddle the 4GB address boundary in some cases.
11897          */
11898         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11899             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11900             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11901             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11902             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11903                 tp->dev->hard_start_xmit = tg3_start_xmit;
11904         else
11905                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11906
11907         tp->rx_offset = 2;
11908         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11909             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11910                 tp->rx_offset = 0;
11911
11912         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11913
11914         /* Increment the rx prod index on the rx std ring by at most
11915          * 8 for these chips to workaround hw errata.
11916          */
11917         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11918             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11919             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11920                 tp->rx_std_max_post = 8;
11921
11922         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11923                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11924                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11925
11926         return err;
11927 }
11928
11929 #ifdef CONFIG_SPARC
11930 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11931 {
11932         struct net_device *dev = tp->dev;
11933         struct pci_dev *pdev = tp->pdev;
11934         struct device_node *dp = pci_device_to_OF_node(pdev);
11935         const unsigned char *addr;
11936         int len;
11937
11938         addr = of_get_property(dp, "local-mac-address", &len);
11939         if (addr && len == 6) {
11940                 memcpy(dev->dev_addr, addr, 6);
11941                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11942                 return 0;
11943         }
11944         return -ENODEV;
11945 }
11946
11947 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11948 {
11949         struct net_device *dev = tp->dev;
11950
11951         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11952         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11953         return 0;
11954 }
11955 #endif
11956
/* Determine the adapter's MAC address, trying sources in decreasing
 * order of trust:
 *   1. OpenFirmware "local-mac-address" property (sparc only),
 *   2. the firmware MAC-address mailbox in NIC SRAM,
 *   3. NVRAM at a chip-dependent offset,
 *   4. the live MAC_ADDR_0 hardware registers.
 * On sparc, falls back to the IDPROM address before giving up.
 * Returns 0 on success and copies the result into dev->dev_addr and
 * dev->perm_addr; returns -EINVAL if no valid address was found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM location of the MAC address; dual-MAC and 5906
	 * parts keep it elsewhere (selected below).
	 */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* On dual-MAC chips the second port's address lives at 0xcc. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* NOTE(review): if the NVRAM arbitration lock cannot be
		 * taken, the NVRAM state machine is reset instead of
		 * unlocked — presumably to recover a wedged interface;
		 * confirm against tg3_nvram_lock semantics.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK" — apparently the bootcode's signature
	 * marking the mailbox contents as valid (TODO confirm).
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM.  Note the byte ordering here differs
		 * from the SRAM-mailbox layout above.
		 */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
12031
12032 #define BOUNDARY_SINGLE_CACHELINE       1
12033 #define BOUNDARY_MULTI_CACHELINE        2
12034
12035 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12036 {
12037         int cacheline_size;
12038         u8 byte;
12039         int goal;
12040
12041         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12042         if (byte == 0)
12043                 cacheline_size = 1024;
12044         else
12045                 cacheline_size = (int) byte * 4;
12046
12047         /* On 5703 and later chips, the boundary bits have no
12048          * effect.
12049          */
12050         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12051             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12052             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12053                 goto out;
12054
12055 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12056         goal = BOUNDARY_MULTI_CACHELINE;
12057 #else
12058 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12059         goal = BOUNDARY_SINGLE_CACHELINE;
12060 #else
12061         goal = 0;
12062 #endif
12063 #endif
12064
12065         if (!goal)
12066                 goto out;
12067
12068         /* PCI controllers on most RISC systems tend to disconnect
12069          * when a device tries to burst across a cache-line boundary.
12070          * Therefore, letting tg3 do so just wastes PCI bandwidth.
12071          *
12072          * Unfortunately, for PCI-E there are only limited
12073          * write-side controls for this, and thus for reads
12074          * we will still get the disconnects.  We'll also waste
12075          * these PCI cycles for both read and write for chips
12076          * other than 5700 and 5701 which do not implement the
12077          * boundary bits.
12078          */
12079         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12080             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12081                 switch (cacheline_size) {
12082                 case 16:
12083                 case 32:
12084                 case 64:
12085                 case 128:
12086                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12087                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12088                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12089                         } else {
12090                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12091                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12092                         }
12093                         break;
12094
12095                 case 256:
12096                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12097                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12098                         break;
12099
12100                 default:
12101                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12102                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12103                         break;
12104                 };
12105         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12106                 switch (cacheline_size) {
12107                 case 16:
12108                 case 32:
12109                 case 64:
12110                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12111                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12112                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12113                                 break;
12114                         }
12115                         /* fallthrough */
12116                 case 128:
12117                 default:
12118                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12119                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12120                         break;
12121                 };
12122         } else {
12123                 switch (cacheline_size) {
12124                 case 16:
12125                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12126                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12127                                         DMA_RWCTRL_WRITE_BNDRY_16);
12128                                 break;
12129                         }
12130                         /* fallthrough */
12131                 case 32:
12132                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12133                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12134                                         DMA_RWCTRL_WRITE_BNDRY_32);
12135                                 break;
12136                         }
12137                         /* fallthrough */
12138                 case 64:
12139                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12140                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12141                                         DMA_RWCTRL_WRITE_BNDRY_64);
12142                                 break;
12143                         }
12144                         /* fallthrough */
12145                 case 128:
12146                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12147                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12148                                         DMA_RWCTRL_WRITE_BNDRY_128);
12149                                 break;
12150                         }
12151                         /* fallthrough */
12152                 case 256:
12153                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
12154                                 DMA_RWCTRL_WRITE_BNDRY_256);
12155                         break;
12156                 case 512:
12157                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
12158                                 DMA_RWCTRL_WRITE_BNDRY_512);
12159                         break;
12160                 case 1024:
12161                 default:
12162                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12163                                 DMA_RWCTRL_WRITE_BNDRY_1024);
12164                         break;
12165                 };
12166         }
12167
12168 out:
12169         return val;
12170 }
12171
/* Drive one host<->NIC DMA transfer of @size bytes through the chip's
 * internal descriptor machinery, using the host buffer at @buf/@buf_dma.
 * @to_device selects direction: non-zero = host-to-NIC (read DMA engine),
 * zero = NIC-to-host (write DMA engine).  Returns 0 when the completion
 * FIFO acknowledges the descriptor within the polling window, -ENODEV
 * otherwise.  Used only during probe-time DMA sanity testing.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the completion FIFOs and both DMA engines before
	 * starting the test.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build an internal buffer descriptor pointing at the host buffer.
	 * NOTE(review): 0x00002100 is presumably the NIC-side SRAM mbuf
	 * address used for the transfer — confirm against chip docs.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* Host-to-NIC: enable the read DMA engine.  The
		 * cqid_sqid values select the completion/send queues
		 * for each direction (magic per chip spec).
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* NIC-to-host: enable the write DMA engine. */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time,
	 * going through the PCI memory window in config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the DMA by enqueueing the descriptor's SRAM address
	 * on the appropriate high-priority DMA FIFO.
	 */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll up to ~4ms (40 x 100us) for the completion FIFO to echo
	 * back our descriptor address, signalling DMA completion.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
12252
12253 #define TEST_BUFFER_SIZE        0x2000
12254
12255 static int __devinit tg3_test_dma(struct tg3 *tp)
12256 {
12257         dma_addr_t buf_dma;
12258         u32 *buf, saved_dma_rwctrl;
12259         int ret;
12260
12261         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12262         if (!buf) {
12263                 ret = -ENOMEM;
12264                 goto out_nofree;
12265         }
12266
12267         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12268                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12269
12270         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12271
12272         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12273                 /* DMA read watermark not used on PCIE */
12274                 tp->dma_rwctrl |= 0x00180000;
12275         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12276                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12277                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12278                         tp->dma_rwctrl |= 0x003f0000;
12279                 else
12280                         tp->dma_rwctrl |= 0x003f000f;
12281         } else {
12282                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12283                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12284                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12285                         u32 read_water = 0x7;
12286
12287                         /* If the 5704 is behind the EPB bridge, we can
12288                          * do the less restrictive ONE_DMA workaround for
12289                          * better performance.
12290                          */
12291                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12292                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12293                                 tp->dma_rwctrl |= 0x8000;
12294                         else if (ccval == 0x6 || ccval == 0x7)
12295                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12296
12297                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12298                                 read_water = 4;
12299                         /* Set bit 23 to enable PCIX hw bug fix */
12300                         tp->dma_rwctrl |=
12301                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12302                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12303                                 (1 << 23);
12304                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12305                         /* 5780 always in PCIX mode */
12306                         tp->dma_rwctrl |= 0x00144000;
12307                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12308                         /* 5714 always in PCIX mode */
12309                         tp->dma_rwctrl |= 0x00148000;
12310                 } else {
12311                         tp->dma_rwctrl |= 0x001b000f;
12312                 }
12313         }
12314
12315         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12316             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12317                 tp->dma_rwctrl &= 0xfffffff0;
12318
12319         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12320             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12321                 /* Remove this if it causes problems for some boards. */
12322                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12323
12324                 /* On 5700/5701 chips, we need to set this bit.
12325                  * Otherwise the chip will issue cacheline transactions
12326                  * to streamable DMA memory with not all the byte
12327                  * enables turned on.  This is an error on several
12328                  * RISC PCI controllers, in particular sparc64.
12329                  *
12330                  * On 5703/5704 chips, this bit has been reassigned
12331                  * a different meaning.  In particular, it is used
12332                  * on those chips to enable a PCI-X workaround.
12333                  */
12334                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12335         }
12336
12337         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12338
12339 #if 0
12340         /* Unneeded, already done by tg3_get_invariants.  */
12341         tg3_switch_clocks(tp);
12342 #endif
12343
12344         ret = 0;
12345         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12346             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12347                 goto out;
12348
12349         /* It is best to perform DMA test with maximum write burst size
12350          * to expose the 5700/5701 write DMA bug.
12351          */
12352         saved_dma_rwctrl = tp->dma_rwctrl;
12353         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12354         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12355
12356         while (1) {
12357                 u32 *p = buf, i;
12358
12359                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12360                         p[i] = i;
12361
12362                 /* Send the buffer to the chip. */
12363                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12364                 if (ret) {
12365                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12366                         break;
12367                 }
12368
12369 #if 0
12370                 /* validate data reached card RAM correctly. */
12371                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12372                         u32 val;
12373                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
12374                         if (le32_to_cpu(val) != p[i]) {
12375                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
12376                                 /* ret = -ENODEV here? */
12377                         }
12378                         p[i] = 0;
12379                 }
12380 #endif
12381                 /* Now read it back. */
12382                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12383                 if (ret) {
12384                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
12385
12386                         break;
12387                 }
12388
12389                 /* Verify it. */
12390                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12391                         if (p[i] == i)
12392                                 continue;
12393
12394                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12395                             DMA_RWCTRL_WRITE_BNDRY_16) {
12396                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12397                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12398                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12399                                 break;
12400                         } else {
12401                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
12402                                 ret = -ENODEV;
12403                                 goto out;
12404                         }
12405                 }
12406
12407                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12408                         /* Success. */
12409                         ret = 0;
12410                         break;
12411                 }
12412         }
12413         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12414             DMA_RWCTRL_WRITE_BNDRY_16) {
12415                 static struct pci_device_id dma_wait_state_chipsets[] = {
12416                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12417                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12418                         { },
12419                 };
12420
12421                 /* DMA test passed without adjusting DMA boundary,
12422                  * now look for chipsets that are known to expose the
12423                  * DMA bug without failing the test.
12424                  */
12425                 if (pci_dev_present(dma_wait_state_chipsets)) {
12426                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12427                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12428                 }
12429                 else
12430                         /* Safe to use the calculated DMA boundary. */
12431                         tp->dma_rwctrl = saved_dma_rwctrl;
12432
12433                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12434         }
12435
12436 out:
12437         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12438 out_nofree:
12439         return ret;
12440 }
12441
12442 static void __devinit tg3_init_link_config(struct tg3 *tp)
12443 {
12444         tp->link_config.advertising =
12445                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12446                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12447                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12448                  ADVERTISED_Autoneg | ADVERTISED_MII);
12449         tp->link_config.speed = SPEED_INVALID;
12450         tp->link_config.duplex = DUPLEX_INVALID;
12451         tp->link_config.autoneg = AUTONEG_ENABLE;
12452         tp->link_config.active_speed = SPEED_INVALID;
12453         tp->link_config.active_duplex = DUPLEX_INVALID;
12454         tp->link_config.phy_is_low_power = 0;
12455         tp->link_config.orig_speed = SPEED_INVALID;
12456         tp->link_config.orig_duplex = DUPLEX_INVALID;
12457         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12458 }
12459
12460 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12461 {
12462         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12463                 tp->bufmgr_config.mbuf_read_dma_low_water =
12464                         DEFAULT_MB_RDMA_LOW_WATER_5705;
12465                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12466                         DEFAULT_MB_MACRX_LOW_WATER_5705;
12467                 tp->bufmgr_config.mbuf_high_water =
12468                         DEFAULT_MB_HIGH_WATER_5705;
12469                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12470                         tp->bufmgr_config.mbuf_mac_rx_low_water =
12471                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
12472                         tp->bufmgr_config.mbuf_high_water =
12473                                 DEFAULT_MB_HIGH_WATER_5906;
12474                 }
12475
12476                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12477                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12478                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12479                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12480                 tp->bufmgr_config.mbuf_high_water_jumbo =
12481                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12482         } else {
12483                 tp->bufmgr_config.mbuf_read_dma_low_water =
12484                         DEFAULT_MB_RDMA_LOW_WATER;
12485                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12486                         DEFAULT_MB_MACRX_LOW_WATER;
12487                 tp->bufmgr_config.mbuf_high_water =
12488                         DEFAULT_MB_HIGH_WATER;
12489
12490                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12491                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12492                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12493                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12494                 tp->bufmgr_config.mbuf_high_water_jumbo =
12495                         DEFAULT_MB_HIGH_WATER_JUMBO;
12496         }
12497
12498         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12499         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12500 }
12501
12502 static char * __devinit tg3_phy_string(struct tg3 *tp)
12503 {
12504         switch (tp->phy_id & PHY_ID_MASK) {
12505         case PHY_ID_BCM5400:    return "5400";
12506         case PHY_ID_BCM5401:    return "5401";
12507         case PHY_ID_BCM5411:    return "5411";
12508         case PHY_ID_BCM5701:    return "5701";
12509         case PHY_ID_BCM5703:    return "5703";
12510         case PHY_ID_BCM5704:    return "5704";
12511         case PHY_ID_BCM5705:    return "5705";
12512         case PHY_ID_BCM5750:    return "5750";
12513         case PHY_ID_BCM5752:    return "5752";
12514         case PHY_ID_BCM5714:    return "5714";
12515         case PHY_ID_BCM5780:    return "5780";
12516         case PHY_ID_BCM5755:    return "5755";
12517         case PHY_ID_BCM5787:    return "5787";
12518         case PHY_ID_BCM5784:    return "5784";
12519         case PHY_ID_BCM5756:    return "5722/5756";
12520         case PHY_ID_BCM5906:    return "5906";
12521         case PHY_ID_BCM5761:    return "5761";
12522         case PHY_ID_BCM8002:    return "8002/serdes";
12523         case 0:                 return "serdes";
12524         default:                return "unknown";
12525         };
12526 }
12527
12528 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12529 {
12530         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12531                 strcpy(str, "PCI Express");
12532                 return str;
12533         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12534                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12535
12536                 strcpy(str, "PCIX:");
12537
12538                 if ((clock_ctrl == 7) ||
12539                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12540                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12541                         strcat(str, "133MHz");
12542                 else if (clock_ctrl == 0)
12543                         strcat(str, "33MHz");
12544                 else if (clock_ctrl == 2)
12545                         strcat(str, "50MHz");
12546                 else if (clock_ctrl == 4)
12547                         strcat(str, "66MHz");
12548                 else if (clock_ctrl == 6)
12549                         strcat(str, "100MHz");
12550         } else {
12551                 strcpy(str, "PCI:");
12552                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12553                         strcat(str, "66MHz");
12554                 else
12555                         strcat(str, "33MHz");
12556         }
12557         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12558                 strcat(str, ":32-bit");
12559         else
12560                 strcat(str, ":64-bit");
12561         return str;
12562 }
12563
/* Locate the other PCI function of a dual-port device (e.g. 5704):
 * scan every function number in this device's slot and return the
 * first one that is not ourselves.  Falls back to tp->pdev when the
 * chip is configured in single-port mode.  The returned pointer is
 * deliberately NOT reference-counted (see comment below).
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	/* Mask off the function bits to get the slot's base devfn. */
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		/* pci_get_slot takes a reference on success; drop it
		 * whenever the function is absent or is our own device.
		 */
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 *
	 * NOTE(review): if the loop completes without a break, 'peer'
	 * holds the (already-put) result of the last pci_get_slot call,
	 * which is only NULL when function 7 is absent — presumably true
	 * on all affected boards, but worth confirming.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
12591
12592 static void __devinit tg3_init_coal(struct tg3 *tp)
12593 {
12594         struct ethtool_coalesce *ec = &tp->coal;
12595
12596         memset(ec, 0, sizeof(*ec));
12597         ec->cmd = ETHTOOL_GCOALESCE;
12598         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12599         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12600         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12601         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12602         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12603         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12604         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12605         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12606         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12607
12608         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12609                                  HOSTCC_MODE_CLRTICK_TXBD)) {
12610                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12611                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12612                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12613                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12614         }
12615
12616         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12617                 ec->rx_coalesce_usecs_irq = 0;
12618                 ec->tx_coalesce_usecs_irq = 0;
12619                 ec->stats_block_coalesce_usecs = 0;
12620         }
12621 }
12622
/* PCI probe entry point: bring up one Tigon3 device.
 *
 * Sequence: enable the PCI device, claim its resources, allocate and
 * initialize the net_device/tg3 state, map the register BAR, read the
 * chip invariants, choose DMA masks, configure TSO/checksum features,
 * obtain the MAC address, optionally map the APE BAR, run the DMA
 * engine test, and finally register the netdev.  All failure paths
 * unwind through the goto labels at the bottom in reverse order of
 * acquisition.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;	/* print banner only once */
	resource_size_t tg3reg_base;
	unsigned long tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];				/* bus-description scratch */
	u64 dma_mask, persist_dma_mask;
	DECLARE_MAC_BUF(mac);

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	/* Enable bus mastering so the chip can DMA. */
	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	/* Hardware VLAN tag insertion/extraction support. */
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

	/* Initialize the driver-private state with default modes. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	/* Map the register BAR (uncached). */
	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	/* Default ring sizes. */
	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the net_device operations (pre-net_device_ops era). */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Read chip revision, flags, and quirks from the hardware. */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wide mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Decide TSO capability: hardware TSO is always capable;
	 * several older chips (and ASF-enabled boards) cannot do
	 * firmware TSO; everything else can but carries the TSO bug
	 * workaround flag.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_TSO_ECN;
	}


	/* 5705 A1 on a slow bus without TSO: cap the RX ring at 64. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* Map the APE (management processor) registers from BAR 2. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
			printk(KERN_ERR PFX "Cannot find proper PCI device "
			       "base address for APE, aborting.\n");
			err = -ENODEV;
			goto err_out_iounmap;
		}

		tg3reg_base = pci_resource_start(pdev, 2);
		tg3reg_len = pci_resource_len(pdev, 2);

		tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	/* Success: announce the device configuration. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
	       "(%s) %s Ethernet %s\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
		 "10/100/1000Base-T")),
	       print_mac(mac, dev->dev_addr));

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

	/* Error unwind: release resources in reverse acquisition order. */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
12965
12966 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12967 {
12968         struct net_device *dev = pci_get_drvdata(pdev);
12969
12970         if (dev) {
12971                 struct tg3 *tp = netdev_priv(dev);
12972
12973                 flush_scheduled_work();
12974                 unregister_netdev(dev);
12975                 if (tp->aperegs) {
12976                         iounmap(tp->aperegs);
12977                         tp->aperegs = NULL;
12978                 }
12979                 if (tp->regs) {
12980                         iounmap(tp->regs);
12981                         tp->regs = NULL;
12982                 }
12983                 free_netdev(dev);
12984                 pci_release_regions(pdev);
12985                 pci_disable_device(pdev);
12986                 pci_set_drvdata(pdev, NULL);
12987         }
12988 }
12989
/* PCI suspend entry point.  Quiesce the interface, halt the chip, and
 * drop it into the power state chosen for 'state'.  If the power
 * transition fails, the hardware is restarted and the interface
 * reattached so the device stays usable.  Returns 0 or a negative
 * errno from the power-state change.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	/* Nothing more to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	/* Finish any queued reset_task before stopping the interface. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip and mark initialization as torn down. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: undo the shutdown so the
		 * device keeps working, then propagate the error.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
13041
/* PCI resume entry point.  Restore PCI config space, return the chip
 * to D0, and — if the interface was running at suspend time —
 * reattach it, restart the hardware, and rearm the driver timer.
 * Returns 0 or a negative errno.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Restore the config space saved by tg3_suspend(). */
	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	/* Bring the chip back to full power before touching it. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	/* Rearm the periodic driver timer stopped at suspend. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
13076
13077 static struct pci_driver tg3_driver = {
13078         .name           = DRV_MODULE_NAME,
13079         .id_table       = tg3_pci_tbl,
13080         .probe          = tg3_init_one,
13081         .remove         = __devexit_p(tg3_remove_one),
13082         .suspend        = tg3_suspend,
13083         .resume         = tg3_resume
13084 };
13085
13086 static int __init tg3_init(void)
13087 {
13088         return pci_register_driver(&tg3_driver);
13089 }
13090
13091 static void __exit tg3_cleanup(void)
13092 {
13093         pci_unregister_driver(&tg3_driver);
13094 }
13095
/* Register the module entry/exit points with the kernel loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);