[linux-2.6] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39
40 #include <net/checksum.h>
41
42 #include <asm/system.h>
43 #include <asm/io.h>
44 #include <asm/byteorder.h>
45 #include <asm/uaccess.h>
46
47 #ifdef CONFIG_SPARC64
48 #include <asm/idprom.h>
49 #include <asm/oplib.h>
50 #include <asm/pbm.h>
51 #endif
52
53 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
54 #define TG3_VLAN_TAG_USED 1
55 #else
56 #define TG3_VLAN_TAG_USED 0
57 #endif
58
59 #ifdef NETIF_F_TSO
60 #define TG3_TSO_SUPPORT 1
61 #else
62 #define TG3_TSO_SUPPORT 0
63 #endif
64
65 #include "tg3.h"
66
67 #define DRV_MODULE_NAME         "tg3"
68 #define PFX DRV_MODULE_NAME     ": "
69 #define DRV_MODULE_VERSION      "3.33"
70 #define DRV_MODULE_RELDATE      "July 5, 2005"
71
72 #define TG3_DEF_MAC_MODE        0
73 #define TG3_DEF_RX_MODE         0
74 #define TG3_DEF_TX_MODE         0
75 #define TG3_DEF_MSG_ENABLE        \
76         (NETIF_MSG_DRV          | \
77          NETIF_MSG_PROBE        | \
78          NETIF_MSG_LINK         | \
79          NETIF_MSG_TIMER        | \
80          NETIF_MSG_IFDOWN       | \
81          NETIF_MSG_IFUP         | \
82          NETIF_MSG_RX_ERR       | \
83          NETIF_MSG_TX_ERR)
84
85 /* length of time before we decide the hardware is borked,
86  * and dev->tx_timeout() should be called to fix the problem
87  */
88 #define TG3_TX_TIMEOUT                  (5 * HZ)
89
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU                     60
92 #define TG3_MAX_MTU(tp) \
93         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
94
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96  * You can't change the ring sizes, but you can change where you place
97  * them in the NIC onboard memory.
98  */
99 #define TG3_RX_RING_SIZE                512
100 #define TG3_DEF_RX_RING_PENDING         200
101 #define TG3_RX_JUMBO_RING_SIZE          256
102 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
103
104 /* Do not place this n-ring entries value into the tp struct itself;
105  * we really want to expose these constants to GCC so that modulo et
106  * al. operations are done with shifts and masks instead of with
107  * hw multiply/modulo instructions.  Another solution would be to
108  * replace things like '% foo' with '& (foo - 1)'.
109  */
110 #define TG3_RX_RCB_RING_SIZE(tp)        \
111         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
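/* For a power-of-two ring size the compiler can turn an index wrap such as
 *     idx = (idx + 1) % TG3_TX_RING_SIZE;
 * into a mask,
 *     idx = (idx + 1) & (TG3_TX_RING_SIZE - 1);
 * which is what the NEXT_TX() macro below spells out explicitly.
 */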
112
113 #define TG3_TX_RING_SIZE                512
114 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
115
116 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_RING_SIZE)
118 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_JUMBO_RING_SIZE)
120 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121                                    TG3_RX_RCB_RING_SIZE(tp))
122 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
123                                  TG3_TX_RING_SIZE)
124 #define TX_RING_GAP(TP) \
125         (TG3_TX_RING_SIZE - (TP)->tx_pending)
126 #define TX_BUFFS_AVAIL(TP)                                              \
127         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
128           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
129           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
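/* Worked example (illustrative values only): with the default tx_pending
 * of 511, TX_RING_GAP() is 512 - 511 = 1.  If tx_cons = 5 and tx_prod = 10
 * then tx_cons <= tx_prod, so
 *     TX_BUFFS_AVAIL() = 5 + 511 - 10 = 506.
 * Once the producer has wrapped, e.g. tx_cons = 500 and tx_prod = 3,
 *     TX_BUFFS_AVAIL() = 500 - 3 - 1 = 496,
 * i.e. the 511 usable slots minus the 15 descriptors still in flight.
 */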
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { 0, }
245 };
246
247 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
248
249 static struct {
250         const char string[ETH_GSTRING_LEN];
251 } ethtool_stats_keys[TG3_NUM_STATS] = {
252         { "rx_octets" },
253         { "rx_fragments" },
254         { "rx_ucast_packets" },
255         { "rx_mcast_packets" },
256         { "rx_bcast_packets" },
257         { "rx_fcs_errors" },
258         { "rx_align_errors" },
259         { "rx_xon_pause_rcvd" },
260         { "rx_xoff_pause_rcvd" },
261         { "rx_mac_ctrl_rcvd" },
262         { "rx_xoff_entered" },
263         { "rx_frame_too_long_errors" },
264         { "rx_jabbers" },
265         { "rx_undersize_packets" },
266         { "rx_in_length_errors" },
267         { "rx_out_length_errors" },
268         { "rx_64_or_less_octet_packets" },
269         { "rx_65_to_127_octet_packets" },
270         { "rx_128_to_255_octet_packets" },
271         { "rx_256_to_511_octet_packets" },
272         { "rx_512_to_1023_octet_packets" },
273         { "rx_1024_to_1522_octet_packets" },
274         { "rx_1523_to_2047_octet_packets" },
275         { "rx_2048_to_4095_octet_packets" },
276         { "rx_4096_to_8191_octet_packets" },
277         { "rx_8192_to_9022_octet_packets" },
278
279         { "tx_octets" },
280         { "tx_collisions" },
281
282         { "tx_xon_sent" },
283         { "tx_xoff_sent" },
284         { "tx_flow_control" },
285         { "tx_mac_errors" },
286         { "tx_single_collisions" },
287         { "tx_mult_collisions" },
288         { "tx_deferred" },
289         { "tx_excessive_collisions" },
290         { "tx_late_collisions" },
291         { "tx_collide_2times" },
292         { "tx_collide_3times" },
293         { "tx_collide_4times" },
294         { "tx_collide_5times" },
295         { "tx_collide_6times" },
296         { "tx_collide_7times" },
297         { "tx_collide_8times" },
298         { "tx_collide_9times" },
299         { "tx_collide_10times" },
300         { "tx_collide_11times" },
301         { "tx_collide_12times" },
302         { "tx_collide_13times" },
303         { "tx_collide_14times" },
304         { "tx_collide_15times" },
305         { "tx_ucast_packets" },
306         { "tx_mcast_packets" },
307         { "tx_bcast_packets" },
308         { "tx_carrier_sense_errors" },
309         { "tx_discards" },
310         { "tx_errors" },
311
312         { "dma_writeq_full" },
313         { "dma_write_prioq_full" },
314         { "rxbds_empty" },
315         { "rx_discards" },
316         { "rx_errors" },
317         { "rx_threshold_hit" },
318
319         { "dma_readq_full" },
320         { "dma_read_prioq_full" },
321         { "tx_comp_queue_full" },
322
323         { "ring_set_send_prod_index" },
324         { "ring_status_update" },
325         { "nic_irqs" },
326         { "nic_avoided_irqs" },
327         { "nic_tx_threshold_hit" }
328 };
329
330 static struct {
331         const char string[ETH_GSTRING_LEN];
332 } ethtool_test_keys[TG3_NUM_TEST] = {
333         { "nvram test     (online) " },
334         { "link test      (online) " },
335         { "register test  (offline)" },
336         { "memory test    (offline)" },
337         { "loopback test  (offline)" },
338         { "interrupt test (offline)" },
339 };
340
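/* Register write helper: when the PCI-X target hardware bug workaround is
 * in effect the write goes indirectly through the PCI config space register
 * window (REG_BASE_ADDR/REG_DATA); otherwise it is a plain MMIO write,
 * read back on chips with the 5701 register-write bug to flush it.
 */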
341 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
342 {
343         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
344                 spin_lock_bh(&tp->indirect_lock);
345                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
346                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
347                 spin_unlock_bh(&tp->indirect_lock);
348         } else {
349                 writel(val, tp->regs + off);
350                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
351                         readl(tp->regs + off);
352         }
353 }
354
355 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
356 {
357         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
358                 spin_lock_bh(&tp->indirect_lock);
359                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
360                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
361                 spin_unlock_bh(&tp->indirect_lock);
362         } else {
363                 void __iomem *dest = tp->regs + off;
364                 writel(val, dest);
365                 readl(dest);    /* always flush PCI write */
366         }
367 }
368
369 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
370 {
371         void __iomem *mbox = tp->regs + off;
372         writel(val, mbox);
373         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
374                 readl(mbox);
375 }
376
377 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
378 {
379         void __iomem *mbox = tp->regs + off;
380         writel(val, mbox);
381         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
382                 writel(val, mbox);
383         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
384                 readl(mbox);
385 }
386
387 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
388 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
389 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
390
391 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
392 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
393 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
394 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
395 #define tr32(reg)               readl(tp->regs + (reg))
396 #define tr16(reg)               readw(tp->regs + (reg))
397 #define tr8(reg)                readb(tp->regs + (reg))
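/* All chip register access below goes through the wrappers above, e.g.
 *
 *     tw32_f(MAC_MODE, mac_mode);     write, then flush the posted write
 *     val = tr32(MAC_MI_COM);         plain 32-bit register read
 */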
398
399 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
400 {
401         spin_lock_bh(&tp->indirect_lock);
402         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
403         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
404
405         /* Always leave this as zero. */
406         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
407         spin_unlock_bh(&tp->indirect_lock);
408 }
409
410 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
411 {
412         spin_lock_bh(&tp->indirect_lock);
413         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
414         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
415
416         /* Always leave this as zero. */
417         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
418         spin_unlock_bh(&tp->indirect_lock);
419 }
420
421 static void tg3_disable_ints(struct tg3 *tp)
422 {
423         tw32(TG3PCI_MISC_HOST_CTRL,
424              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
425         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
426         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
427 }
428
429 static inline void tg3_cond_int(struct tg3 *tp)
430 {
431         if (tp->hw_status->status & SD_STATUS_UPDATED)
432                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
433 }
434
435 static void tg3_enable_ints(struct tg3 *tp)
436 {
437         tp->irq_sync = 0;
438         wmb();
439
440         tw32(TG3PCI_MISC_HOST_CTRL,
441              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
442         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
443                      (tp->last_tag << 24));
444         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
445         tg3_cond_int(tp);
446 }
447
448 static inline unsigned int tg3_has_work(struct tg3 *tp)
449 {
450         struct tg3_hw_status *sblk = tp->hw_status;
451         unsigned int work_exists = 0;
452
453         /* check for phy events */
454         if (!(tp->tg3_flags &
455               (TG3_FLAG_USE_LINKCHG_REG |
456                TG3_FLAG_POLL_SERDES))) {
457                 if (sblk->status & SD_STATUS_LINK_CHG)
458                         work_exists = 1;
459         }
460         /* check for RX/TX work to do */
461         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
462             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
463                 work_exists = 1;
464
465         return work_exists;
466 }
467
468 /* tg3_restart_ints
469  *  Similar to tg3_enable_ints, but it accurately determines whether there
470  *  is new work pending and can return without flushing the PIO write
471  *  which re-enables interrupts.
472  */
473 static void tg3_restart_ints(struct tg3 *tp)
474 {
475         tw32(TG3PCI_MISC_HOST_CTRL,
476                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
477         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
478                      tp->last_tag << 24);
479         mmiowb();
480
481         /* When doing tagged status, this work check is unnecessary.
482          * The last_tag we write above tells the chip which piece of
483          * work we've completed.
484          */
485         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
486             tg3_has_work(tp))
487                 tw32(HOSTCC_MODE, tp->coalesce_mode |
488                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
489 }
490
491 static inline void tg3_netif_stop(struct tg3 *tp)
492 {
493         tp->dev->trans_start = jiffies; /* prevent tx timeout */
494         netif_poll_disable(tp->dev);
495         netif_tx_disable(tp->dev);
496 }
497
498 static inline void tg3_netif_start(struct tg3 *tp)
499 {
500         netif_wake_queue(tp->dev);
501         /* NOTE: unconditional netif_wake_queue is only appropriate
502          * so long as all callers are assured to have free tx slots
503          * (such as after tg3_init_hw)
504          */
505         netif_poll_enable(tp->dev);
506         tp->hw_status->status |= SD_STATUS_UPDATED;
507         tg3_enable_ints(tp);
508 }
509
510 static void tg3_switch_clocks(struct tg3 *tp)
511 {
512         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
513         u32 orig_clock_ctrl;
514
515         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
516                 return;
517
518         orig_clock_ctrl = clock_ctrl;
519         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
520                        CLOCK_CTRL_CLKRUN_OENABLE |
521                        0x1f);
522         tp->pci_clock_ctrl = clock_ctrl;
523
524         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
525                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
526                         tw32_f(TG3PCI_CLOCK_CTRL,
527                                clock_ctrl | CLOCK_CTRL_625_CORE);
528                         udelay(40);
529                 }
530         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
531                 tw32_f(TG3PCI_CLOCK_CTRL,
532                      clock_ctrl |
533                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
534                 udelay(40);
535                 tw32_f(TG3PCI_CLOCK_CTRL,
536                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
537                 udelay(40);
538         }
539         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
540         udelay(40);
541 }
542
543 #define PHY_BUSY_LOOPS  5000
544
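/* MII/MDIO access: tg3_readphy() and tg3_writephy() below pause MAC
 * auto-polling if it is enabled, issue the command through the MAC_MI_COM
 * register, and poll MI_COM_BUSY for up to PHY_BUSY_LOOPS iterations
 * (10us apart) before giving up with -EBUSY.
 */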
545 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
546 {
547         u32 frame_val;
548         unsigned int loops;
549         int ret;
550
551         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
552                 tw32_f(MAC_MI_MODE,
553                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
554                 udelay(80);
555         }
556
557         *val = 0x0;
558
559         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
560                       MI_COM_PHY_ADDR_MASK);
561         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
562                       MI_COM_REG_ADDR_MASK);
563         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
564         
565         tw32_f(MAC_MI_COM, frame_val);
566
567         loops = PHY_BUSY_LOOPS;
568         while (loops != 0) {
569                 udelay(10);
570                 frame_val = tr32(MAC_MI_COM);
571
572                 if ((frame_val & MI_COM_BUSY) == 0) {
573                         udelay(5);
574                         frame_val = tr32(MAC_MI_COM);
575                         break;
576                 }
577                 loops -= 1;
578         }
579
580         ret = -EBUSY;
581         if (loops != 0) {
582                 *val = frame_val & MI_COM_DATA_MASK;
583                 ret = 0;
584         }
585
586         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
587                 tw32_f(MAC_MI_MODE, tp->mi_mode);
588                 udelay(80);
589         }
590
591         return ret;
592 }
593
594 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
595 {
596         u32 frame_val;
597         unsigned int loops;
598         int ret;
599
600         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
601                 tw32_f(MAC_MI_MODE,
602                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
603                 udelay(80);
604         }
605
606         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
607                       MI_COM_PHY_ADDR_MASK);
608         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
609                       MI_COM_REG_ADDR_MASK);
610         frame_val |= (val & MI_COM_DATA_MASK);
611         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
612         
613         tw32_f(MAC_MI_COM, frame_val);
614
615         loops = PHY_BUSY_LOOPS;
616         while (loops != 0) {
617                 udelay(10);
618                 frame_val = tr32(MAC_MI_COM);
619                 if ((frame_val & MI_COM_BUSY) == 0) {
620                         udelay(5);
621                         frame_val = tr32(MAC_MI_COM);
622                         break;
623                 }
624                 loops -= 1;
625         }
626
627         ret = -EBUSY;
628         if (loops != 0)
629                 ret = 0;
630
631         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
632                 tw32_f(MAC_MI_MODE, tp->mi_mode);
633                 udelay(80);
634         }
635
636         return ret;
637 }
638
639 static void tg3_phy_set_wirespeed(struct tg3 *tp)
640 {
641         u32 val;
642
643         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
644                 return;
645
646         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
647             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
648                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
649                              (val | (1 << 15) | (1 << 4)));
650 }
651
652 static int tg3_bmcr_reset(struct tg3 *tp)
653 {
654         u32 phy_control;
655         int limit, err;
656
657         /* OK, reset it, and poll the BMCR_RESET bit until it
658          * clears or we time out.
659          */
660         phy_control = BMCR_RESET;
661         err = tg3_writephy(tp, MII_BMCR, phy_control);
662         if (err != 0)
663                 return -EBUSY;
664
665         limit = 5000;
666         while (limit--) {
667                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
668                 if (err != 0)
669                         return -EBUSY;
670
671                 if ((phy_control & BMCR_RESET) == 0) {
672                         udelay(40);
673                         break;
674                 }
675                 udelay(10);
676         }
677         if (limit <= 0)
678                 return -EBUSY;
679
680         return 0;
681 }
682
683 static int tg3_wait_macro_done(struct tg3 *tp)
684 {
685         int limit = 100;
686
687         while (limit--) {
688                 u32 tmp32;
689
690                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
691                         if ((tmp32 & 0x1000) == 0)
692                                 break;
693                 }
694         }
695         if (limit <= 0)
696                 return -EBUSY;
697
698         return 0;
699 }
700
701 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
702 {
703         static const u32 test_pat[4][6] = {
704         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
705         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
706         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
707         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
708         };
709         int chan;
710
711         for (chan = 0; chan < 4; chan++) {
712                 int i;
713
714                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
715                              (chan * 0x2000) | 0x0200);
716                 tg3_writephy(tp, 0x16, 0x0002);
717
718                 for (i = 0; i < 6; i++)
719                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
720                                      test_pat[chan][i]);
721
722                 tg3_writephy(tp, 0x16, 0x0202);
723                 if (tg3_wait_macro_done(tp)) {
724                         *resetp = 1;
725                         return -EBUSY;
726                 }
727
728                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
729                              (chan * 0x2000) | 0x0200);
730                 tg3_writephy(tp, 0x16, 0x0082);
731                 if (tg3_wait_macro_done(tp)) {
732                         *resetp = 1;
733                         return -EBUSY;
734                 }
735
736                 tg3_writephy(tp, 0x16, 0x0802);
737                 if (tg3_wait_macro_done(tp)) {
738                         *resetp = 1;
739                         return -EBUSY;
740                 }
741
742                 for (i = 0; i < 6; i += 2) {
743                         u32 low, high;
744
745                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
746                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
747                             tg3_wait_macro_done(tp)) {
748                                 *resetp = 1;
749                                 return -EBUSY;
750                         }
751                         low &= 0x7fff;
752                         high &= 0x000f;
753                         if (low != test_pat[chan][i] ||
754                             high != test_pat[chan][i+1]) {
755                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
756                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
757                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
758
759                                 return -EBUSY;
760                         }
761                 }
762         }
763
764         return 0;
765 }
766
767 static int tg3_phy_reset_chanpat(struct tg3 *tp)
768 {
769         int chan;
770
771         for (chan = 0; chan < 4; chan++) {
772                 int i;
773
774                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
775                              (chan * 0x2000) | 0x0200);
776                 tg3_writephy(tp, 0x16, 0x0002);
777                 for (i = 0; i < 6; i++)
778                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
779                 tg3_writephy(tp, 0x16, 0x0202);
780                 if (tg3_wait_macro_done(tp))
781                         return -EBUSY;
782         }
783
784         return 0;
785 }
786
787 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
788 {
789         u32 reg32, phy9_orig;
790         int retries, do_phy_reset, err;
791
792         retries = 10;
793         do_phy_reset = 1;
794         do {
795                 if (do_phy_reset) {
796                         err = tg3_bmcr_reset(tp);
797                         if (err)
798                                 return err;
799                         do_phy_reset = 0;
800                 }
801
802                 /* Disable transmitter and interrupt.  */
803                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
804                         continue;
805
806                 reg32 |= 0x3000;
807                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
808
809                 /* Set full-duplex, 1000 mbps.  */
810                 tg3_writephy(tp, MII_BMCR,
811                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
812
813                 /* Set to master mode.  */
814                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
815                         continue;
816
817                 tg3_writephy(tp, MII_TG3_CTRL,
818                              (MII_TG3_CTRL_AS_MASTER |
819                               MII_TG3_CTRL_ENABLE_AS_MASTER));
820
821                 /* Enable SM_DSP_CLOCK and 6dB.  */
822                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
823
824                 /* Block the PHY control access.  */
825                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
826                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
827
828                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
829                 if (!err)
830                         break;
831         } while (--retries);
832
833         err = tg3_phy_reset_chanpat(tp);
834         if (err)
835                 return err;
836
837         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
838         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
839
840         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
841         tg3_writephy(tp, 0x16, 0x0000);
842
843         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
844             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
845                 /* Set Extended packet length bit for jumbo frames */
846                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
847         }
848         else {
849                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
850         }
851
852         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
853
854         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
855                 reg32 &= ~0x3000;
856                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
857         } else if (!err)
858                 err = -EBUSY;
859
860         return err;
861 }
862
863 /* This will reset the tigon3 PHY unconditionally and then apply the
864  * chip-specific PHY workarounds.
865  */
866 static int tg3_phy_reset(struct tg3 *tp)
867 {
868         u32 phy_status;
869         int err;
870
871         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
872         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
873         if (err != 0)
874                 return -EBUSY;
875
876         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
877             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
878             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
879                 err = tg3_phy_reset_5703_4_5(tp);
880                 if (err)
881                         return err;
882                 goto out;
883         }
884
885         err = tg3_bmcr_reset(tp);
886         if (err)
887                 return err;
888
889 out:
890         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
891                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
892                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
893                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
894                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
895                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
896                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
897         }
898         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
899                 tg3_writephy(tp, 0x1c, 0x8d68);
900                 tg3_writephy(tp, 0x1c, 0x8d68);
901         }
902         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
903                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
904                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
905                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
906                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
907                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
908                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
909                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
910                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
911         }
912         /* Set Extended packet length bit (bit 14) on all chips that
913          * support jumbo frames. */
914         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
915                 /* Cannot do read-modify-write on 5401 */
916                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
917         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
918                 u32 phy_reg;
919
920                 /* Set bit 14 with read-modify-write to preserve other bits */
921                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
922                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
923                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
924         }
925
926         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
927          * jumbo frames transmission.
928          */
929         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
930                 u32 phy_reg;
931
932                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
933                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
934                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
935         }
936
937         tg3_phy_set_wirespeed(tp);
938         return 0;
939 }
940
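/* Drive the GPIO lines that control auxiliary power.  On 5704 the two
 * ports share these GPIOs, so the peer device's WOL and init state is
 * consulted before they are changed; boards with EEPROM write protect
 * are left alone.
 */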
941 static void tg3_frob_aux_power(struct tg3 *tp)
942 {
943         struct tg3 *tp_peer = tp;
944
945         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
946                 return;
947
948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
949                 tp_peer = pci_get_drvdata(tp->pdev_peer);
950                 if (!tp_peer)
951                         BUG();
952         }
953
954
955         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
956             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
957                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
958                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
959                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
960                              (GRC_LCLCTRL_GPIO_OE0 |
961                               GRC_LCLCTRL_GPIO_OE1 |
962                               GRC_LCLCTRL_GPIO_OE2 |
963                               GRC_LCLCTRL_GPIO_OUTPUT0 |
964                               GRC_LCLCTRL_GPIO_OUTPUT1));
965                         udelay(100);
966                 } else {
967                         u32 no_gpio2;
968                         u32 grc_local_ctrl;
969
970                         if (tp_peer != tp &&
971                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
972                                 return;
973
974                         /* On 5753 and variants, GPIO2 cannot be used. */
975                         no_gpio2 = tp->nic_sram_data_cfg &
976                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
977
978                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
979                                          GRC_LCLCTRL_GPIO_OE1 |
980                                          GRC_LCLCTRL_GPIO_OE2 |
981                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
982                                          GRC_LCLCTRL_GPIO_OUTPUT2;
983                         if (no_gpio2) {
984                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
985                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
986                         }
987                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
988                                                 grc_local_ctrl);
989                         udelay(100);
990
991                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
992
993                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
994                                                 grc_local_ctrl);
995                         udelay(100);
996
997                         if (!no_gpio2) {
998                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
999                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1000                                        grc_local_ctrl);
1001                                 udelay(100);
1002                         }
1003                 }
1004         } else {
1005                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1006                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1007                         if (tp_peer != tp &&
1008                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1009                                 return;
1010
1011                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1012                              (GRC_LCLCTRL_GPIO_OE1 |
1013                               GRC_LCLCTRL_GPIO_OUTPUT1));
1014                         udelay(100);
1015
1016                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1017                              (GRC_LCLCTRL_GPIO_OE1));
1018                         udelay(100);
1019
1020                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1021                              (GRC_LCLCTRL_GPIO_OE1 |
1022                               GRC_LCLCTRL_GPIO_OUTPUT1));
1023                         udelay(100);
1024                 }
1025         }
1026 }
1027
1028 static int tg3_setup_phy(struct tg3 *, int);
1029
1030 #define RESET_KIND_SHUTDOWN     0
1031 #define RESET_KIND_INIT         1
1032 #define RESET_KIND_SUSPEND      2
1033
1034 static void tg3_write_sig_post_reset(struct tg3 *, int);
1035 static int tg3_halt_cpu(struct tg3 *, u32);
1036
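/* Put the device into PCI power state D0-D3 ('state').  Entering a
 * low-power state saves the current link settings, renegotiates the copper
 * PHY down to 10Mb/half, optionally arms magic-packet Wake-on-LAN, slows
 * the core clocks, lets tg3_frob_aux_power() sort out the GPIO-controlled
 * aux power, and finally writes PCI_PM_CTRL.
 */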
1037 static int tg3_set_power_state(struct tg3 *tp, int state)
1038 {
1039         u32 misc_host_ctrl;
1040         u16 power_control, power_caps;
1041         int pm = tp->pm_cap;
1042
1043         /* Make sure register accesses (indirect or otherwise)
1044          * will function correctly.
1045          */
1046         pci_write_config_dword(tp->pdev,
1047                                TG3PCI_MISC_HOST_CTRL,
1048                                tp->misc_host_ctrl);
1049
1050         pci_read_config_word(tp->pdev,
1051                              pm + PCI_PM_CTRL,
1052                              &power_control);
1053         power_control |= PCI_PM_CTRL_PME_STATUS;
1054         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1055         switch (state) {
1056         case 0:
1057                 power_control |= 0;
1058                 pci_write_config_word(tp->pdev,
1059                                       pm + PCI_PM_CTRL,
1060                                       power_control);
1061                 udelay(100);    /* Delay after power state change */
1062
1063                 /* Switch out of Vaux if it is not a LOM */
1064                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1065                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1066                         udelay(100);
1067                 }
1068
1069                 return 0;
1070
1071         case 1:
1072                 power_control |= 1;
1073                 break;
1074
1075         case 2:
1076                 power_control |= 2;
1077                 break;
1078
1079         case 3:
1080                 power_control |= 3;
1081                 break;
1082
1083         default:
1084                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1085                        "requested.\n",
1086                        tp->dev->name, state);
1087                 return -EINVAL;
1088         };
1089
1090         power_control |= PCI_PM_CTRL_PME_ENABLE;
1091
1092         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1093         tw32(TG3PCI_MISC_HOST_CTRL,
1094              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1095
1096         if (tp->link_config.phy_is_low_power == 0) {
1097                 tp->link_config.phy_is_low_power = 1;
1098                 tp->link_config.orig_speed = tp->link_config.speed;
1099                 tp->link_config.orig_duplex = tp->link_config.duplex;
1100                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1101         }
1102
1103         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1104                 tp->link_config.speed = SPEED_10;
1105                 tp->link_config.duplex = DUPLEX_HALF;
1106                 tp->link_config.autoneg = AUTONEG_ENABLE;
1107                 tg3_setup_phy(tp, 0);
1108         }
1109
1110         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1111
1112         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1113                 u32 mac_mode;
1114
1115                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1116                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1117                         udelay(40);
1118
1119                         mac_mode = MAC_MODE_PORT_MODE_MII;
1120
1121                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1122                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1123                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1124                 } else {
1125                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1126                 }
1127
1128                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1129                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1130
1131                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1132                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1133                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1134
1135                 tw32_f(MAC_MODE, mac_mode);
1136                 udelay(100);
1137
1138                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1139                 udelay(10);
1140         }
1141
1142         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1143             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1144              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1145                 u32 base_val;
1146
1147                 base_val = tp->pci_clock_ctrl;
1148                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1149                              CLOCK_CTRL_TXCLK_DISABLE);
1150
1151                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1152                      CLOCK_CTRL_ALTCLK |
1153                      CLOCK_CTRL_PWRDOWN_PLL133);
1154                 udelay(40);
1155         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
1156                 /* do nothing */
1157         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1158                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1159                 u32 newbits1, newbits2;
1160
1161                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1162                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1163                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1164                                     CLOCK_CTRL_TXCLK_DISABLE |
1165                                     CLOCK_CTRL_ALTCLK);
1166                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1167                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1168                         newbits1 = CLOCK_CTRL_625_CORE;
1169                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1170                 } else {
1171                         newbits1 = CLOCK_CTRL_ALTCLK;
1172                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1173                 }
1174
1175                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1176                 udelay(40);
1177
1178                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1179                 udelay(40);
1180
1181                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1182                         u32 newbits3;
1183
1184                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1185                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1186                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1187                                             CLOCK_CTRL_TXCLK_DISABLE |
1188                                             CLOCK_CTRL_44MHZ_CORE);
1189                         } else {
1190                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1191                         }
1192
1193                         tw32_f(TG3PCI_CLOCK_CTRL,
1194                                          tp->pci_clock_ctrl | newbits3);
1195                         udelay(40);
1196                 }
1197         }
1198
1199         tg3_frob_aux_power(tp);
1200
1201         /* Workaround for unstable PLL clock */
1202         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1203             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1204                 u32 val = tr32(0x7d00);
1205
1206                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1207                 tw32(0x7d00, val);
1208                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1209                         tg3_halt_cpu(tp, RX_CPU_BASE);
1210         }
1211
1212         /* Finally, set the new power state. */
1213         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1214         udelay(100);    /* Delay after power state change */
1215
1216         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1217
1218         return 0;
1219 }
1220
1221 static void tg3_link_report(struct tg3 *tp)
1222 {
1223         if (!netif_carrier_ok(tp->dev)) {
1224                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1225         } else {
1226                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1227                        tp->dev->name,
1228                        (tp->link_config.active_speed == SPEED_1000 ?
1229                         1000 :
1230                         (tp->link_config.active_speed == SPEED_100 ?
1231                          100 : 10)),
1232                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1233                         "full" : "half"));
1234
1235                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1236                        "%s for RX.\n",
1237                        tp->dev->name,
1238                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1239                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1240         }
1241 }
1242
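/* Resolve 802.3x flow control from the local and link-partner pause
 * advertisement bits (symmetric vs. asymmetric) when pause autonegotiation
 * is enabled, then rewrite MAC_RX_MODE/MAC_TX_MODE only if the resulting
 * settings actually changed.
 */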
1243 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1244 {
1245         u32 new_tg3_flags = 0;
1246         u32 old_rx_mode = tp->rx_mode;
1247         u32 old_tx_mode = tp->tx_mode;
1248
1249         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1250                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1251                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1252                                 if (remote_adv & LPA_PAUSE_CAP)
1253                                         new_tg3_flags |=
1254                                                 (TG3_FLAG_RX_PAUSE |
1255                                                 TG3_FLAG_TX_PAUSE);
1256                                 else if (remote_adv & LPA_PAUSE_ASYM)
1257                                         new_tg3_flags |=
1258                                                 (TG3_FLAG_RX_PAUSE);
1259                         } else {
1260                                 if (remote_adv & LPA_PAUSE_CAP)
1261                                         new_tg3_flags |=
1262                                                 (TG3_FLAG_RX_PAUSE |
1263                                                 TG3_FLAG_TX_PAUSE);
1264                         }
1265                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1266                         if ((remote_adv & LPA_PAUSE_CAP) &&
1267                         (remote_adv & LPA_PAUSE_ASYM))
1268                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1269                 }
1270
1271                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1272                 tp->tg3_flags |= new_tg3_flags;
1273         } else {
1274                 new_tg3_flags = tp->tg3_flags;
1275         }
1276
1277         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1278                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1279         else
1280                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1281
1282         if (old_rx_mode != tp->rx_mode) {
1283                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1284         }
1285         
1286         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1287                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1288         else
1289                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1290
1291         if (old_tx_mode != tp->tx_mode) {
1292                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1293         }
1294 }
1295
1296 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1297 {
1298         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1299         case MII_TG3_AUX_STAT_10HALF:
1300                 *speed = SPEED_10;
1301                 *duplex = DUPLEX_HALF;
1302                 break;
1303
1304         case MII_TG3_AUX_STAT_10FULL:
1305                 *speed = SPEED_10;
1306                 *duplex = DUPLEX_FULL;
1307                 break;
1308
1309         case MII_TG3_AUX_STAT_100HALF:
1310                 *speed = SPEED_100;
1311                 *duplex = DUPLEX_HALF;
1312                 break;
1313
1314         case MII_TG3_AUX_STAT_100FULL:
1315                 *speed = SPEED_100;
1316                 *duplex = DUPLEX_FULL;
1317                 break;
1318
1319         case MII_TG3_AUX_STAT_1000HALF:
1320                 *speed = SPEED_1000;
1321                 *duplex = DUPLEX_HALF;
1322                 break;
1323
1324         case MII_TG3_AUX_STAT_1000FULL:
1325                 *speed = SPEED_1000;
1326                 *duplex = DUPLEX_FULL;
1327                 break;
1328
1329         default:
1330                 *speed = SPEED_INVALID;
1331                 *duplex = DUPLEX_INVALID;
1332                 break;
1333         };
1334 }
1335
1336 static void tg3_phy_copper_begin(struct tg3 *tp)
1337 {
1338         u32 new_adv;
1339         int i;
1340
1341         if (tp->link_config.phy_is_low_power) {
1342                 /* Entering low power mode.  Disable gigabit and
1343                  * 100baseT advertisements.
1344                  */
1345                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1346
1347                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1348                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1349                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1350                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1351
1352                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1353         } else if (tp->link_config.speed == SPEED_INVALID) {
1354                 tp->link_config.advertising =
1355                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1356                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1357                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1358                          ADVERTISED_Autoneg | ADVERTISED_MII);
1359
1360                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1361                         tp->link_config.advertising &=
1362                                 ~(ADVERTISED_1000baseT_Half |
1363                                   ADVERTISED_1000baseT_Full);
1364
1365                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1366                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1367                         new_adv |= ADVERTISE_10HALF;
1368                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1369                         new_adv |= ADVERTISE_10FULL;
1370                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1371                         new_adv |= ADVERTISE_100HALF;
1372                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1373                         new_adv |= ADVERTISE_100FULL;
1374                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1375
1376                 if (tp->link_config.advertising &
1377                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1378                         new_adv = 0;
1379                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1380                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1381                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1382                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1383                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1384                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1385                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1386                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1387                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1388                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1389                 } else {
1390                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1391                 }
1392         } else {
1393                 /* Asking for a specific link mode. */
1394                 if (tp->link_config.speed == SPEED_1000) {
1395                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1396                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1397
1398                         if (tp->link_config.duplex == DUPLEX_FULL)
1399                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1400                         else
1401                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1402                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1403                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1404                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1405                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1406                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1407                 } else {
1408                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1409
1410                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1411                         if (tp->link_config.speed == SPEED_100) {
1412                                 if (tp->link_config.duplex == DUPLEX_FULL)
1413                                         new_adv |= ADVERTISE_100FULL;
1414                                 else
1415                                         new_adv |= ADVERTISE_100HALF;
1416                         } else {
1417                                 if (tp->link_config.duplex == DUPLEX_FULL)
1418                                         new_adv |= ADVERTISE_10FULL;
1419                                 else
1420                                         new_adv |= ADVERTISE_10HALF;
1421                         }
1422                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1423                 }
1424         }
1425
1426         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1427             tp->link_config.speed != SPEED_INVALID) {
1428                 u32 bmcr, orig_bmcr;
1429
1430                 tp->link_config.active_speed = tp->link_config.speed;
1431                 tp->link_config.active_duplex = tp->link_config.duplex;
1432
1433                 bmcr = 0;
1434                 switch (tp->link_config.speed) {
1435                 default:
1436                 case SPEED_10:
1437                         break;
1438
1439                 case SPEED_100:
1440                         bmcr |= BMCR_SPEED100;
1441                         break;
1442
1443                 case SPEED_1000:
1444                         bmcr |= TG3_BMCR_SPEED1000;
1445                         break;
1446                 }
1447
1448                 if (tp->link_config.duplex == DUPLEX_FULL)
1449                         bmcr |= BMCR_FULLDPLX;
1450
1451                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1452                     (bmcr != orig_bmcr)) {
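                        /* Editor's note: the write below forces the PHY into
                         * loopback to take the link down; the loop that follows
                         * then polls BMSR (up to 1500 x 10us = 15ms) for the
                         * link-down indication before the new BMCR value is
                         * finally written.
                         */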
1453                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1454                         for (i = 0; i < 1500; i++) {
1455                                 u32 tmp;
1456
1457                                 udelay(10);
1458                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1459                                     tg3_readphy(tp, MII_BMSR, &tmp))
1460                                         continue;
1461                                 if (!(tmp & BMSR_LSTATUS)) {
1462                                         udelay(40);
1463                                         break;
1464                                 }
1465                         }
1466                         tg3_writephy(tp, MII_BMCR, bmcr);
1467                         udelay(40);
1468                 }
1469         } else {
1470                 tg3_writephy(tp, MII_BMCR,
1471                              BMCR_ANENABLE | BMCR_ANRESTART);
1472         }
1473 }
1474
1475 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1476 {
1477         int err;
1478
1479         /* Turn off tap power management. */
1480         /* Set Extended packet length bit */
1481         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1482
1483         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1484         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1485
1486         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1487         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1488
1489         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1490         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1491
1492         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1493         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1494
1495         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1496         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1497
1498         udelay(40);
1499
1500         return err;
1501 }
1502
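/* Editor's note: the writes in tg3_init_5401phy_dsp() above all follow the
 * same two-step DSP access pattern: the target DSP register is selected by
 * writing its address to MII_TG3_DSP_ADDRESS, and the value is then written
 * through MII_TG3_DSP_RW_PORT.  The helper below is a minimal sketch of that
 * pattern for illustration only; the name tg3_writedsp is hypothetical and
 * is not part of this driver.
 */
static inline int tg3_writedsp(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        /* Select the DSP register, then push the value through the port. */
        err  = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}
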
1503 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1504 {
1505         u32 adv_reg, all_mask;
1506
1507         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1508                 return 0;
1509
1510         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1511                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1512         if ((adv_reg & all_mask) != all_mask)
1513                 return 0;
1514         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1515                 u32 tg3_ctrl;
1516
1517                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1518                         return 0;
1519
1520                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1521                             MII_TG3_CTRL_ADV_1000_FULL);
1522                 if ((tg3_ctrl & all_mask) != all_mask)
1523                         return 0;
1524         }
1525         return 1;
1526 }
1527
1528 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1529 {
1530         int current_link_up;
1531         u32 bmsr, dummy;
1532         u16 current_speed;
1533         u8 current_duplex;
1534         int i, err;
1535
1536         tw32(MAC_EVENT, 0);
1537
1538         tw32_f(MAC_STATUS,
1539              (MAC_STATUS_SYNC_CHANGED |
1540               MAC_STATUS_CFG_CHANGED |
1541               MAC_STATUS_MI_COMPLETION |
1542               MAC_STATUS_LNKSTATE_CHANGED));
1543         udelay(40);
1544
1545         tp->mi_mode = MAC_MI_MODE_BASE;
1546         tw32_f(MAC_MI_MODE, tp->mi_mode);
1547         udelay(80);
1548
1549         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1550
1551         /* Some third-party PHYs need to be reset on link going
1552          * down.
1553          */
1554         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1555              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1556              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1557             netif_carrier_ok(tp->dev)) {
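                /* Editor's note: the link-status bit in BMSR is latched low,
                 * so it is read twice here (and elsewhere in this function):
                 * the first read clears any stale link-down indication and
                 * the second read reflects the current state.
                 */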
1558                 tg3_readphy(tp, MII_BMSR, &bmsr);
1559                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1560                     !(bmsr & BMSR_LSTATUS))
1561                         force_reset = 1;
1562         }
1563         if (force_reset)
1564                 tg3_phy_reset(tp);
1565
1566         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1567                 tg3_readphy(tp, MII_BMSR, &bmsr);
1568                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1569                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1570                         bmsr = 0;
1571
1572                 if (!(bmsr & BMSR_LSTATUS)) {
1573                         err = tg3_init_5401phy_dsp(tp);
1574                         if (err)
1575                                 return err;
1576
1577                         tg3_readphy(tp, MII_BMSR, &bmsr);
1578                         for (i = 0; i < 1000; i++) {
1579                                 udelay(10);
1580                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1581                                     (bmsr & BMSR_LSTATUS)) {
1582                                         udelay(40);
1583                                         break;
1584                                 }
1585                         }
1586
1587                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1588                             !(bmsr & BMSR_LSTATUS) &&
1589                             tp->link_config.active_speed == SPEED_1000) {
1590                                 err = tg3_phy_reset(tp);
1591                                 if (!err)
1592                                         err = tg3_init_5401phy_dsp(tp);
1593                                 if (err)
1594                                         return err;
1595                         }
1596                 }
1597         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1598                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1599                 /* 5701 {A0,B0} CRC bug workaround */
1600                 tg3_writephy(tp, 0x15, 0x0a75);
1601                 tg3_writephy(tp, 0x1c, 0x8c68);
1602                 tg3_writephy(tp, 0x1c, 0x8d68);
1603                 tg3_writephy(tp, 0x1c, 0x8c68);
1604         }
1605
1606         /* Clear pending interrupts... */
1607         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1608         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1609
1610         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1611                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1612         else
1613                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1614
1615         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1616             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1617                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1618                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1619                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1620                 else
1621                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1622         }
1623
1624         current_link_up = 0;
1625         current_speed = SPEED_INVALID;
1626         current_duplex = DUPLEX_INVALID;
1627
1628         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1629                 u32 val;
1630
1631                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1632                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1633                 if (!(val & (1 << 10))) {
1634                         val |= (1 << 10);
1635                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1636                         goto relink;
1637                 }
1638         }
1639
1640         bmsr = 0;
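        /* Editor's note: the loop below polls BMSR for link-up, waiting at
         * most 100 x 40us = 4ms.
         */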
1641         for (i = 0; i < 100; i++) {
1642                 tg3_readphy(tp, MII_BMSR, &bmsr);
1643                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1644                     (bmsr & BMSR_LSTATUS))
1645                         break;
1646                 udelay(40);
1647         }
1648
1649         if (bmsr & BMSR_LSTATUS) {
1650                 u32 aux_stat, bmcr;
1651
1652                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1653                 for (i = 0; i < 2000; i++) {
1654                         udelay(10);
1655                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1656                             aux_stat)
1657                                 break;
1658                 }
1659
1660                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1661                                              &current_speed,
1662                                              &current_duplex);
1663
1664                 bmcr = 0;
1665                 for (i = 0; i < 200; i++) {
1666                         tg3_readphy(tp, MII_BMCR, &bmcr);
1667                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1668                                 continue;
1669                         if (bmcr && bmcr != 0x7fff)
1670                                 break;
1671                         udelay(10);
1672                 }
1673
1674                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1675                         if (bmcr & BMCR_ANENABLE) {
1676                                 current_link_up = 1;
1677
1678                                 /* Force autoneg restart if we are exiting
1679                                  * low power mode.
1680                                  */
1681                                 if (!tg3_copper_is_advertising_all(tp))
1682                                         current_link_up = 0;
1683                         } else {
1684                                 current_link_up = 0;
1685                         }
1686                 } else {
1687                         if (!(bmcr & BMCR_ANENABLE) &&
1688                             tp->link_config.speed == current_speed &&
1689                             tp->link_config.duplex == current_duplex) {
1690                                 current_link_up = 1;
1691                         } else {
1692                                 current_link_up = 0;
1693                         }
1694                 }
1695
1696                 tp->link_config.active_speed = current_speed;
1697                 tp->link_config.active_duplex = current_duplex;
1698         }
1699
1700         if (current_link_up == 1 &&
1701             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1702             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1703                 u32 local_adv, remote_adv;
1704
1705                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1706                         local_adv = 0;
1707                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1708
1709                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1710                         remote_adv = 0;
1711
1712                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1713
1714                 /* If we are not advertising full pause capability,
1715                  * something is wrong.  Bring the link down and reconfigure.
1716                  */
1717                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1718                         current_link_up = 0;
1719                 } else {
1720                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1721                 }
1722         }
1723 relink:
1724         if (current_link_up == 0) {
1725                 u32 tmp;
1726
1727                 tg3_phy_copper_begin(tp);
1728
1729                 tg3_readphy(tp, MII_BMSR, &tmp);
1730                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1731                     (tmp & BMSR_LSTATUS))
1732                         current_link_up = 1;
1733         }
1734
1735         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1736         if (current_link_up == 1) {
1737                 if (tp->link_config.active_speed == SPEED_100 ||
1738                     tp->link_config.active_speed == SPEED_10)
1739                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1740                 else
1741                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1742         } else
1743                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1744
1745         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1746         if (tp->link_config.active_duplex == DUPLEX_HALF)
1747                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1748
1749         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1750         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1751                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1752                     (current_link_up == 1 &&
1753                      tp->link_config.active_speed == SPEED_10))
1754                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1755         } else {
1756                 if (current_link_up == 1)
1757                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1758         }
1759
1760         /* ??? Without this setting Netgear GA302T PHY does not
1761          * ??? send/receive packets...
1762          */
1763         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1764             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1765                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1766                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1767                 udelay(80);
1768         }
1769
1770         tw32_f(MAC_MODE, tp->mac_mode);
1771         udelay(40);
1772
1773         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1774                 /* Polled via timer. */
1775                 tw32_f(MAC_EVENT, 0);
1776         } else {
1777                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1778         }
1779         udelay(40);
1780
1781         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1782             current_link_up == 1 &&
1783             tp->link_config.active_speed == SPEED_1000 &&
1784             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1785              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1786                 udelay(120);
1787                 tw32_f(MAC_STATUS,
1788                      (MAC_STATUS_SYNC_CHANGED |
1789                       MAC_STATUS_CFG_CHANGED));
1790                 udelay(40);
1791                 tg3_write_mem(tp,
1792                               NIC_SRAM_FIRMWARE_MBOX,
1793                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1794         }
1795
1796         if (current_link_up != netif_carrier_ok(tp->dev)) {
1797                 if (current_link_up)
1798                         netif_carrier_on(tp->dev);
1799                 else
1800                         netif_carrier_off(tp->dev);
1801                 tg3_link_report(tp);
1802         }
1803
1804         return 0;
1805 }
1806
1807 struct tg3_fiber_aneginfo {
1808         int state;
1809 #define ANEG_STATE_UNKNOWN              0
1810 #define ANEG_STATE_AN_ENABLE            1
1811 #define ANEG_STATE_RESTART_INIT         2
1812 #define ANEG_STATE_RESTART              3
1813 #define ANEG_STATE_DISABLE_LINK_OK      4
1814 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1815 #define ANEG_STATE_ABILITY_DETECT       6
1816 #define ANEG_STATE_ACK_DETECT_INIT      7
1817 #define ANEG_STATE_ACK_DETECT           8
1818 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1819 #define ANEG_STATE_COMPLETE_ACK         10
1820 #define ANEG_STATE_IDLE_DETECT_INIT     11
1821 #define ANEG_STATE_IDLE_DETECT          12
1822 #define ANEG_STATE_LINK_OK              13
1823 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1824 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1825
1826         u32 flags;
1827 #define MR_AN_ENABLE            0x00000001
1828 #define MR_RESTART_AN           0x00000002
1829 #define MR_AN_COMPLETE          0x00000004
1830 #define MR_PAGE_RX              0x00000008
1831 #define MR_NP_LOADED            0x00000010
1832 #define MR_TOGGLE_TX            0x00000020
1833 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1834 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1835 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1836 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1837 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1838 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1839 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1840 #define MR_TOGGLE_RX            0x00002000
1841 #define MR_NP_RX                0x00004000
1842
1843 #define MR_LINK_OK              0x80000000
1844
1845         unsigned long link_time, cur_time;
1846
1847         u32 ability_match_cfg;
1848         int ability_match_count;
1849
1850         char ability_match, idle_match, ack_match;
1851
1852         u32 txconfig, rxconfig;
1853 #define ANEG_CFG_NP             0x00000080
1854 #define ANEG_CFG_ACK            0x00000040
1855 #define ANEG_CFG_RF2            0x00000020
1856 #define ANEG_CFG_RF1            0x00000010
1857 #define ANEG_CFG_PS2            0x00000001
1858 #define ANEG_CFG_PS1            0x00008000
1859 #define ANEG_CFG_HD             0x00004000
1860 #define ANEG_CFG_FD             0x00002000
1861 #define ANEG_CFG_INVAL          0x00001f06
1862
1863 };
1864 #define ANEG_OK         0
1865 #define ANEG_DONE       1
1866 #define ANEG_TIMER_ENAB 2
1867 #define ANEG_FAILED     -1
1868
1869 #define ANEG_STATE_SETTLE_TIME  10000
1870
1871 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1872                                    struct tg3_fiber_aneginfo *ap)
1873 {
1874         unsigned long delta;
1875         u32 rx_cfg_reg;
1876         int ret;
1877
1878         if (ap->state == ANEG_STATE_UNKNOWN) {
1879                 ap->rxconfig = 0;
1880                 ap->link_time = 0;
1881                 ap->cur_time = 0;
1882                 ap->ability_match_cfg = 0;
1883                 ap->ability_match_count = 0;
1884                 ap->ability_match = 0;
1885                 ap->idle_match = 0;
1886                 ap->ack_match = 0;
1887         }
1888         ap->cur_time++;
1889
1890         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1891                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1892
1893                 if (rx_cfg_reg != ap->ability_match_cfg) {
1894                         ap->ability_match_cfg = rx_cfg_reg;
1895                         ap->ability_match = 0;
1896                         ap->ability_match_count = 0;
1897                 } else {
1898                         if (++ap->ability_match_count > 1) {
1899                                 ap->ability_match = 1;
1900                                 ap->ability_match_cfg = rx_cfg_reg;
1901                         }
1902                 }
1903                 if (rx_cfg_reg & ANEG_CFG_ACK)
1904                         ap->ack_match = 1;
1905                 else
1906                         ap->ack_match = 0;
1907
1908                 ap->idle_match = 0;
1909         } else {
1910                 ap->idle_match = 1;
1911                 ap->ability_match_cfg = 0;
1912                 ap->ability_match_count = 0;
1913                 ap->ability_match = 0;
1914                 ap->ack_match = 0;
1915
1916                 rx_cfg_reg = 0;
1917         }
1918
1919         ap->rxconfig = rx_cfg_reg;
1920         ret = ANEG_OK;
1921
1922         switch(ap->state) {
1923         case ANEG_STATE_UNKNOWN:
1924                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1925                         ap->state = ANEG_STATE_AN_ENABLE;
1926
1927                 /* fallthru */
1928         case ANEG_STATE_AN_ENABLE:
1929                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1930                 if (ap->flags & MR_AN_ENABLE) {
1931                         ap->link_time = 0;
1932                         ap->cur_time = 0;
1933                         ap->ability_match_cfg = 0;
1934                         ap->ability_match_count = 0;
1935                         ap->ability_match = 0;
1936                         ap->idle_match = 0;
1937                         ap->ack_match = 0;
1938
1939                         ap->state = ANEG_STATE_RESTART_INIT;
1940                 } else {
1941                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1942                 }
1943                 break;
1944
1945         case ANEG_STATE_RESTART_INIT:
1946                 ap->link_time = ap->cur_time;
1947                 ap->flags &= ~(MR_NP_LOADED);
1948                 ap->txconfig = 0;
1949                 tw32(MAC_TX_AUTO_NEG, 0);
1950                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1951                 tw32_f(MAC_MODE, tp->mac_mode);
1952                 udelay(40);
1953
1954                 ret = ANEG_TIMER_ENAB;
1955                 ap->state = ANEG_STATE_RESTART;
1956
1957                 /* fallthru */
1958         case ANEG_STATE_RESTART:
1959                 delta = ap->cur_time - ap->link_time;
1960                 if (delta > ANEG_STATE_SETTLE_TIME) {
1961                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1962                 } else {
1963                         ret = ANEG_TIMER_ENAB;
1964                 }
1965                 break;
1966
1967         case ANEG_STATE_DISABLE_LINK_OK:
1968                 ret = ANEG_DONE;
1969                 break;
1970
1971         case ANEG_STATE_ABILITY_DETECT_INIT:
1972                 ap->flags &= ~(MR_TOGGLE_TX);
1973                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1974                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1975                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1976                 tw32_f(MAC_MODE, tp->mac_mode);
1977                 udelay(40);
1978
1979                 ap->state = ANEG_STATE_ABILITY_DETECT;
1980                 break;
1981
1982         case ANEG_STATE_ABILITY_DETECT:
1983                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1984                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1985                 }
1986                 break;
1987
1988         case ANEG_STATE_ACK_DETECT_INIT:
1989                 ap->txconfig |= ANEG_CFG_ACK;
1990                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1991                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1992                 tw32_f(MAC_MODE, tp->mac_mode);
1993                 udelay(40);
1994
1995                 ap->state = ANEG_STATE_ACK_DETECT;
1996
1997                 /* fallthru */
1998         case ANEG_STATE_ACK_DETECT:
1999                 if (ap->ack_match != 0) {
2000                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2001                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2002                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2003                         } else {
2004                                 ap->state = ANEG_STATE_AN_ENABLE;
2005                         }
2006                 } else if (ap->ability_match != 0 &&
2007                            ap->rxconfig == 0) {
2008                         ap->state = ANEG_STATE_AN_ENABLE;
2009                 }
2010                 break;
2011
2012         case ANEG_STATE_COMPLETE_ACK_INIT:
2013                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2014                         ret = ANEG_FAILED;
2015                         break;
2016                 }
2017                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2018                                MR_LP_ADV_HALF_DUPLEX |
2019                                MR_LP_ADV_SYM_PAUSE |
2020                                MR_LP_ADV_ASYM_PAUSE |
2021                                MR_LP_ADV_REMOTE_FAULT1 |
2022                                MR_LP_ADV_REMOTE_FAULT2 |
2023                                MR_LP_ADV_NEXT_PAGE |
2024                                MR_TOGGLE_RX |
2025                                MR_NP_RX);
2026                 if (ap->rxconfig & ANEG_CFG_FD)
2027                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2028                 if (ap->rxconfig & ANEG_CFG_HD)
2029                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2030                 if (ap->rxconfig & ANEG_CFG_PS1)
2031                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2032                 if (ap->rxconfig & ANEG_CFG_PS2)
2033                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2034                 if (ap->rxconfig & ANEG_CFG_RF1)
2035                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2036                 if (ap->rxconfig & ANEG_CFG_RF2)
2037                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2038                 if (ap->rxconfig & ANEG_CFG_NP)
2039                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2040
2041                 ap->link_time = ap->cur_time;
2042
2043                 ap->flags ^= (MR_TOGGLE_TX);
2044                 if (ap->rxconfig & 0x0008)
2045                         ap->flags |= MR_TOGGLE_RX;
2046                 if (ap->rxconfig & ANEG_CFG_NP)
2047                         ap->flags |= MR_NP_RX;
2048                 ap->flags |= MR_PAGE_RX;
2049
2050                 ap->state = ANEG_STATE_COMPLETE_ACK;
2051                 ret = ANEG_TIMER_ENAB;
2052                 break;
2053
2054         case ANEG_STATE_COMPLETE_ACK:
2055                 if (ap->ability_match != 0 &&
2056                     ap->rxconfig == 0) {
2057                         ap->state = ANEG_STATE_AN_ENABLE;
2058                         break;
2059                 }
2060                 delta = ap->cur_time - ap->link_time;
2061                 if (delta > ANEG_STATE_SETTLE_TIME) {
2062                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2063                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2064                         } else {
2065                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2066                                     !(ap->flags & MR_NP_RX)) {
2067                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2068                                 } else {
2069                                         ret = ANEG_FAILED;
2070                                 }
2071                         }
2072                 }
2073                 break;
2074
2075         case ANEG_STATE_IDLE_DETECT_INIT:
2076                 ap->link_time = ap->cur_time;
2077                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2078                 tw32_f(MAC_MODE, tp->mac_mode);
2079                 udelay(40);
2080
2081                 ap->state = ANEG_STATE_IDLE_DETECT;
2082                 ret = ANEG_TIMER_ENAB;
2083                 break;
2084
2085         case ANEG_STATE_IDLE_DETECT:
2086                 if (ap->ability_match != 0 &&
2087                     ap->rxconfig == 0) {
2088                         ap->state = ANEG_STATE_AN_ENABLE;
2089                         break;
2090                 }
2091                 delta = ap->cur_time - ap->link_time;
2092                 if (delta > ANEG_STATE_SETTLE_TIME) {
2093                         /* XXX another gem from the Broadcom driver :( */
2094                         ap->state = ANEG_STATE_LINK_OK;
2095                 }
2096                 break;
2097
2098         case ANEG_STATE_LINK_OK:
2099                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2100                 ret = ANEG_DONE;
2101                 break;
2102
2103         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2104                 /* ??? unimplemented */
2105                 break;
2106
2107         case ANEG_STATE_NEXT_PAGE_WAIT:
2108                 /* ??? unimplemented */
2109                 break;
2110
2111         default:
2112                 ret = ANEG_FAILED;
2113                 break;
2114         }
2115
2116         return ret;
2117 }
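
/* Editor's note on the state machine above: ANEG_OK and ANEG_TIMER_ENAB mean
 * the caller should keep ticking the machine, while ANEG_DONE and ANEG_FAILED
 * terminate it; fiber_autoneg() below simply breaks out of its polling loop
 * on either of the latter two return values.
 */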
2118
2119 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2120 {
2121         int res = 0;
2122         struct tg3_fiber_aneginfo aninfo;
2123         int status = ANEG_FAILED;
2124         unsigned int tick;
2125         u32 tmp;
2126
2127         tw32_f(MAC_TX_AUTO_NEG, 0);
2128
2129         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2130         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2131         udelay(40);
2132
2133         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2134         udelay(40);
2135
2136         memset(&aninfo, 0, sizeof(aninfo));
2137         aninfo.flags |= MR_AN_ENABLE;
2138         aninfo.state = ANEG_STATE_UNKNOWN;
2139         aninfo.cur_time = 0;
2140         tick = 0;
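        /* Editor's note: with udelay(1) per iteration, the 195000-tick bound
         * below gives autonegotiation roughly 195ms to complete, matching the
         * ~200ms window mentioned elsewhere in this file.
         */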
2141         while (++tick < 195000) {
2142                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2143                 if (status == ANEG_DONE || status == ANEG_FAILED)
2144                         break;
2145
2146                 udelay(1);
2147         }
2148
2149         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2150         tw32_f(MAC_MODE, tp->mac_mode);
2151         udelay(40);
2152
2153         *flags = aninfo.flags;
2154
2155         if (status == ANEG_DONE &&
2156             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2157                              MR_LP_ADV_FULL_DUPLEX)))
2158                 res = 1;
2159
2160         return res;
2161 }
2162
2163 static void tg3_init_bcm8002(struct tg3 *tp)
2164 {
2165         u32 mac_status = tr32(MAC_STATUS);
2166         int i;
2167
2168         /* Reset when initializing for the first time or when we have a link. */
2169         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2170             !(mac_status & MAC_STATUS_PCS_SYNCED))
2171                 return;
2172
2173         /* Set PLL lock range. */
2174         tg3_writephy(tp, 0x16, 0x8007);
2175
2176         /* SW reset */
2177         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2178
2179         /* Wait for reset to complete. */
2180         /* XXX schedule_timeout() ... */
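        /* Editor's note: 500 x udelay(10) below is roughly a 5ms busy-wait. */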
2181         for (i = 0; i < 500; i++)
2182                 udelay(10);
2183
2184         /* Config mode; select PMA/Ch 1 regs. */
2185         tg3_writephy(tp, 0x10, 0x8411);
2186
2187         /* Enable auto-lock and comdet, select txclk for tx. */
2188         tg3_writephy(tp, 0x11, 0x0a10);
2189
2190         tg3_writephy(tp, 0x18, 0x00a0);
2191         tg3_writephy(tp, 0x16, 0x41ff);
2192
2193         /* Assert and deassert POR. */
2194         tg3_writephy(tp, 0x13, 0x0400);
2195         udelay(40);
2196         tg3_writephy(tp, 0x13, 0x0000);
2197
2198         tg3_writephy(tp, 0x11, 0x0a50);
2199         udelay(40);
2200         tg3_writephy(tp, 0x11, 0x0a10);
2201
2202         /* Wait for signal to stabilize */
2203         /* XXX schedule_timeout() ... */
2204         for (i = 0; i < 15000; i++)
2205                 udelay(10);
2206
2207         /* Deselect the channel register so we can read the PHYID
2208          * later.
2209          */
2210         tg3_writephy(tp, 0x10, 0x8011);
2211 }
2212
2213 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2214 {
2215         u32 sg_dig_ctrl, sg_dig_status;
2216         u32 serdes_cfg, expected_sg_dig_ctrl;
2217         int workaround, port_a;
2218         int current_link_up;
2219
2220         serdes_cfg = 0;
2221         expected_sg_dig_ctrl = 0;
2222         workaround = 0;
2223         port_a = 1;
2224         current_link_up = 0;
2225
2226         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2227             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2228                 workaround = 1;
2229                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2230                         port_a = 0;
2231
2232                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2233                 /* preserve bits 20-23 for voltage regulator */
2234                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2235         }
2236
2237         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2238
2239         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2240                 if (sg_dig_ctrl & (1 << 31)) {
2241                         if (workaround) {
2242                                 u32 val = serdes_cfg;
2243
2244                                 if (port_a)
2245                                         val |= 0xc010000;
2246                                 else
2247                                         val |= 0x4010000;
2248                                 tw32_f(MAC_SERDES_CFG, val);
2249                         }
2250                         tw32_f(SG_DIG_CTRL, 0x01388400);
2251                 }
2252                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2253                         tg3_setup_flow_control(tp, 0, 0);
2254                         current_link_up = 1;
2255                 }
2256                 goto out;
2257         }
2258
2259         /* Want auto-negotiation.  */
2260         expected_sg_dig_ctrl = 0x81388400;
2261
2262         /* Pause capability */
2263         expected_sg_dig_ctrl |= (1 << 11);
2264
2265         /* Asymmetric pause */
2266         expected_sg_dig_ctrl |= (1 << 12);
2267
2268         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2269                 if (workaround)
2270                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2271                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2272                 udelay(5);
2273                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2274
2275                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2276         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2277                                  MAC_STATUS_SIGNAL_DET)) {
2278                 int i;
2279
2280                 /* Give time to negotiate (~200ms) */
2281                 for (i = 0; i < 40000; i++) {
2282                         sg_dig_status = tr32(SG_DIG_STATUS);
2283                         if (sg_dig_status & (0x3))
2284                                 break;
2285                         udelay(5);
2286                 }
2287                 mac_status = tr32(MAC_STATUS);
2288
2289                 if ((sg_dig_status & (1 << 1)) &&
2290                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2291                         u32 local_adv, remote_adv;
2292
2293                         local_adv = ADVERTISE_PAUSE_CAP;
2294                         remote_adv = 0;
2295                         if (sg_dig_status & (1 << 19))
2296                                 remote_adv |= LPA_PAUSE_CAP;
2297                         if (sg_dig_status & (1 << 20))
2298                                 remote_adv |= LPA_PAUSE_ASYM;
2299
2300                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2301                         current_link_up = 1;
2302                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2303                 } else if (!(sg_dig_status & (1 << 1))) {
2304                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2305                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2306                         else {
2307                                 if (workaround) {
2308                                         u32 val = serdes_cfg;
2309
2310                                         if (port_a)
2311                                                 val |= 0xc010000;
2312                                         else
2313                                                 val |= 0x4010000;
2314
2315                                         tw32_f(MAC_SERDES_CFG, val);
2316                                 }
2317
2318                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2319                                 udelay(40);
2320
2321                                 /* Link parallel detection - link is up
2322                                  * only if we have PCS_SYNC and not
2323                                  * receiving config code words.  */
2324                                 mac_status = tr32(MAC_STATUS);
2325                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2326                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2327                                         tg3_setup_flow_control(tp, 0, 0);
2328                                         current_link_up = 1;
2329                                 }
2330                         }
2331                 }
2332         }
2333
2334 out:
2335         return current_link_up;
2336 }
2337
2338 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2339 {
2340         int current_link_up = 0;
2341
2342         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2343                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2344                 goto out;
2345         }
2346
2347         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2348                 u32 flags;
2349                 int i;
2350   
2351                 if (fiber_autoneg(tp, &flags)) {
2352                         u32 local_adv, remote_adv;
2353
2354                         local_adv = ADVERTISE_PAUSE_CAP;
2355                         remote_adv = 0;
2356                         if (flags & MR_LP_ADV_SYM_PAUSE)
2357                                 remote_adv |= LPA_PAUSE_CAP;
2358                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2359                                 remote_adv |= LPA_PAUSE_ASYM;
2360
2361                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2362
2363                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2364                         current_link_up = 1;
2365                 }
2366                 for (i = 0; i < 30; i++) {
2367                         udelay(20);
2368                         tw32_f(MAC_STATUS,
2369                                (MAC_STATUS_SYNC_CHANGED |
2370                                 MAC_STATUS_CFG_CHANGED));
2371                         udelay(40);
2372                         if ((tr32(MAC_STATUS) &
2373                              (MAC_STATUS_SYNC_CHANGED |
2374                               MAC_STATUS_CFG_CHANGED)) == 0)
2375                                 break;
2376                 }
2377
2378                 mac_status = tr32(MAC_STATUS);
2379                 if (current_link_up == 0 &&
2380                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2381                     !(mac_status & MAC_STATUS_RCVD_CFG))
2382                         current_link_up = 1;
2383         } else {
2384                 /* Forcing 1000FD link up. */
2385                 current_link_up = 1;
2386                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2387
2388                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2389                 udelay(40);
2390         }
2391
2392 out:
2393         return current_link_up;
2394 }
2395
2396 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2397 {
2398         u32 orig_pause_cfg;
2399         u16 orig_active_speed;
2400         u8 orig_active_duplex;
2401         u32 mac_status;
2402         int current_link_up;
2403         int i;
2404
2405         orig_pause_cfg =
2406                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2407                                   TG3_FLAG_TX_PAUSE));
2408         orig_active_speed = tp->link_config.active_speed;
2409         orig_active_duplex = tp->link_config.active_duplex;
2410
2411         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2412             netif_carrier_ok(tp->dev) &&
2413             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2414                 mac_status = tr32(MAC_STATUS);
2415                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2416                                MAC_STATUS_SIGNAL_DET |
2417                                MAC_STATUS_CFG_CHANGED |
2418                                MAC_STATUS_RCVD_CFG);
2419                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2420                                    MAC_STATUS_SIGNAL_DET)) {
2421                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2422                                             MAC_STATUS_CFG_CHANGED));
2423                         return 0;
2424                 }
2425         }
2426
2427         tw32_f(MAC_TX_AUTO_NEG, 0);
2428
2429         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2430         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2431         tw32_f(MAC_MODE, tp->mac_mode);
2432         udelay(40);
2433
2434         if (tp->phy_id == PHY_ID_BCM8002)
2435                 tg3_init_bcm8002(tp);
2436
2437         /* Enable link change event even when serdes polling.  */
2438         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2439         udelay(40);
2440
2441         current_link_up = 0;
2442         mac_status = tr32(MAC_STATUS);
2443
2444         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2445                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2446         else
2447                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2448
2449         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2450         tw32_f(MAC_MODE, tp->mac_mode);
2451         udelay(40);
2452
2453         tp->hw_status->status =
2454                 (SD_STATUS_UPDATED |
2455                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2456
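        /* Editor's note: the changed-status bits written below appear to be
         * write-one-to-clear; the loop polls up to 100 x 5us = 500us for them
         * to read back clear.
         */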
2457         for (i = 0; i < 100; i++) {
2458                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2459                                     MAC_STATUS_CFG_CHANGED));
2460                 udelay(5);
2461                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2462                                          MAC_STATUS_CFG_CHANGED)) == 0)
2463                         break;
2464         }
2465
2466         mac_status = tr32(MAC_STATUS);
2467         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2468                 current_link_up = 0;
2469                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2470                         tw32_f(MAC_MODE, (tp->mac_mode |
2471                                           MAC_MODE_SEND_CONFIGS));
2472                         udelay(1);
2473                         tw32_f(MAC_MODE, tp->mac_mode);
2474                 }
2475         }
2476
2477         if (current_link_up == 1) {
2478                 tp->link_config.active_speed = SPEED_1000;
2479                 tp->link_config.active_duplex = DUPLEX_FULL;
2480                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2481                                     LED_CTRL_LNKLED_OVERRIDE |
2482                                     LED_CTRL_1000MBPS_ON));
2483         } else {
2484                 tp->link_config.active_speed = SPEED_INVALID;
2485                 tp->link_config.active_duplex = DUPLEX_INVALID;
2486                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2487                                     LED_CTRL_LNKLED_OVERRIDE |
2488                                     LED_CTRL_TRAFFIC_OVERRIDE));
2489         }
2490
2491         if (current_link_up != netif_carrier_ok(tp->dev)) {
2492                 if (current_link_up)
2493                         netif_carrier_on(tp->dev);
2494                 else
2495                         netif_carrier_off(tp->dev);
2496                 tg3_link_report(tp);
2497         } else {
2498                 u32 now_pause_cfg =
2499                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2500                                          TG3_FLAG_TX_PAUSE);
2501                 if (orig_pause_cfg != now_pause_cfg ||
2502                     orig_active_speed != tp->link_config.active_speed ||
2503                     orig_active_duplex != tp->link_config.active_duplex)
2504                         tg3_link_report(tp);
2505         }
2506
2507         return 0;
2508 }
2509
2510 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2511 {
2512         int err;
2513
2514         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2515                 err = tg3_setup_fiber_phy(tp, force_reset);
2516         } else {
2517                 err = tg3_setup_copper_phy(tp, force_reset);
2518         }
2519
2520         if (tp->link_config.active_speed == SPEED_1000 &&
2521             tp->link_config.active_duplex == DUPLEX_HALF)
2522                 tw32(MAC_TX_LENGTHS,
2523                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2524                       (6 << TX_LENGTHS_IPG_SHIFT) |
2525                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2526         else
2527                 tw32(MAC_TX_LENGTHS,
2528                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2529                       (6 << TX_LENGTHS_IPG_SHIFT) |
2530                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2531
2532         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2533                 if (netif_carrier_ok(tp->dev)) {
2534                         tw32(HOSTCC_STAT_COAL_TICKS,
2535                              tp->coal.stats_block_coalesce_usecs);
2536                 } else {
2537                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2538                 }
2539         }
2540
2541         return err;
2542 }
2543
2544 /* Tigon3 never reports partial packet sends.  So we do not
2545  * need special logic to handle SKBs that have not had all
2546  * of their frags sent yet, like SunGEM does.
2547  */
2548 static void tg3_tx(struct tg3 *tp)
2549 {
2550         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2551         u32 sw_idx = tp->tx_cons;
2552
2553         while (sw_idx != hw_idx) {
2554                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2555                 struct sk_buff *skb = ri->skb;
2556                 int i;
2557
2558                 if (unlikely(skb == NULL))
2559                         BUG();
2560
2561                 pci_unmap_single(tp->pdev,
2562                                  pci_unmap_addr(ri, mapping),
2563                                  skb_headlen(skb),
2564                                  PCI_DMA_TODEVICE);
2565
2566                 ri->skb = NULL;
2567
2568                 sw_idx = NEXT_TX(sw_idx);
2569
2570                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2571                         if (unlikely(sw_idx == hw_idx))
2572                                 BUG();
2573
2574                         ri = &tp->tx_buffers[sw_idx];
2575                         if (unlikely(ri->skb != NULL))
2576                                 BUG();
2577
2578                         pci_unmap_page(tp->pdev,
2579                                        pci_unmap_addr(ri, mapping),
2580                                        skb_shinfo(skb)->frags[i].size,
2581                                        PCI_DMA_TODEVICE);
2582
2583                         sw_idx = NEXT_TX(sw_idx);
2584                 }
2585
2586                 dev_kfree_skb(skb);
2587         }
2588
2589         tp->tx_cons = sw_idx;
2590
2591         if (netif_queue_stopped(tp->dev) &&
2592             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2593                 netif_wake_queue(tp->dev);
2594 }
2595
2596 /* Returns size of skb allocated or < 0 on error.
2597  *
2598  * We only need to fill in the address because the other members
2599  * of the RX descriptor are invariant, see tg3_init_rings.
2600  *
2601  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2602  * posting buffers we only dirty the first cache line of the RX
2603  * descriptor (containing the address).  Whereas for the RX status
2604  * buffers the cpu only reads the last cacheline of the RX descriptor
2605  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2606  */
2607 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2608                             int src_idx, u32 dest_idx_unmasked)
2609 {
2610         struct tg3_rx_buffer_desc *desc;
2611         struct ring_info *map, *src_map;
2612         struct sk_buff *skb;
2613         dma_addr_t mapping;
2614         int skb_size, dest_idx;
2615
2616         src_map = NULL;
2617         switch (opaque_key) {
2618         case RXD_OPAQUE_RING_STD:
2619                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2620                 desc = &tp->rx_std[dest_idx];
2621                 map = &tp->rx_std_buffers[dest_idx];
2622                 if (src_idx >= 0)
2623                         src_map = &tp->rx_std_buffers[src_idx];
2624                 skb_size = tp->rx_pkt_buf_sz;
2625                 break;
2626
2627         case RXD_OPAQUE_RING_JUMBO:
2628                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2629                 desc = &tp->rx_jumbo[dest_idx];
2630                 map = &tp->rx_jumbo_buffers[dest_idx];
2631                 if (src_idx >= 0)
2632                         src_map = &tp->rx_jumbo_buffers[src_idx];
2633                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2634                 break;
2635
2636         default:
2637                 return -EINVAL;
2638         }
2639
2640         /* Do not overwrite any of the map or rp information
2641          * until we are sure we can commit to a new buffer.
2642          *
2643          * Callers depend upon this behavior and assume that
2644          * we leave everything unchanged if we fail.
2645          */
2646         skb = dev_alloc_skb(skb_size);
2647         if (skb == NULL)
2648                 return -ENOMEM;
2649
2650         skb->dev = tp->dev;
2651         skb_reserve(skb, tp->rx_offset);
2652
2653         mapping = pci_map_single(tp->pdev, skb->data,
2654                                  skb_size - tp->rx_offset,
2655                                  PCI_DMA_FROMDEVICE);
2656
2657         map->skb = skb;
2658         pci_unmap_addr_set(map, mapping, mapping);
2659
2660         if (src_map != NULL)
2661                 src_map->skb = NULL;
2662
2663         desc->addr_hi = ((u64)mapping >> 32);
2664         desc->addr_lo = ((u64)mapping & 0xffffffff);
2665
2666         return skb_size;
2667 }
2668
2669 /* We only need to move the address over because the other
2670  * members of the RX descriptor are invariant.  See notes above
2671  * tg3_alloc_rx_skb for full details.
2672  */
2673 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2674                            int src_idx, u32 dest_idx_unmasked)
2675 {
2676         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2677         struct ring_info *src_map, *dest_map;
2678         int dest_idx;
2679
2680         switch (opaque_key) {
2681         case RXD_OPAQUE_RING_STD:
2682                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2683                 dest_desc = &tp->rx_std[dest_idx];
2684                 dest_map = &tp->rx_std_buffers[dest_idx];
2685                 src_desc = &tp->rx_std[src_idx];
2686                 src_map = &tp->rx_std_buffers[src_idx];
2687                 break;
2688
2689         case RXD_OPAQUE_RING_JUMBO:
2690                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2691                 dest_desc = &tp->rx_jumbo[dest_idx];
2692                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2693                 src_desc = &tp->rx_jumbo[src_idx];
2694                 src_map = &tp->rx_jumbo_buffers[src_idx];
2695                 break;
2696
2697         default:
2698                 return;
2699         }
2700
2701         dest_map->skb = src_map->skb;
2702         pci_unmap_addr_set(dest_map, mapping,
2703                            pci_unmap_addr(src_map, mapping));
2704         dest_desc->addr_hi = src_desc->addr_hi;
2705         dest_desc->addr_lo = src_desc->addr_lo;
2706
2707         src_map->skb = NULL;
2708 }
2709
2710 #if TG3_VLAN_TAG_USED
2711 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2712 {
2713         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2714 }
2715 #endif
2716
2717 /* The RX ring scheme is composed of multiple rings which post fresh
2718  * buffers to the chip, and one special ring the chip uses to report
2719  * status back to the host.
2720  *
2721  * The special ring reports the status of received packets to the
2722  * host.  The chip does not write into the original descriptor the
2723  * RX buffer was obtained from.  The chip simply takes the original
2724  * descriptor as provided by the host, updates the status and length
2725  * field, then writes this into the next status ring entry.
2726  *
2727  * Each ring the host uses to post buffers to the chip is described
2728  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2729  * it is first placed into on-chip RAM.  Once the packet's length is
2730  * known, the chip walks down the TG3_BDINFO entries to select the ring:
2731  * each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
2732  * whose MAXLEN covers the new packet's length is chosen.
2733  *
2734  * The "separate ring for rx status" scheme may sound queer, but it makes
2735  * sense from a cache coherency perspective.  If only the host writes
2736  * to the buffer post rings, and only the chip writes to the rx status
2737  * rings, then cache lines never move beyond shared-modified state.
2738  * If both the host and chip were to write into the same ring, cache line
2739  * eviction could occur since both entities want it in an exclusive state.
2740  */
2741 static int tg3_rx(struct tg3 *tp, int budget)
2742 {
2743         u32 work_mask;
2744         u32 sw_idx = tp->rx_rcb_ptr;
2745         u16 hw_idx;
2746         int received;
2747
2748         hw_idx = tp->hw_status->idx[0].rx_producer;
2749         /*
2750          * We need to order the read of hw_idx and the read of
2751          * the opaque cookie.
2752          */
2753         rmb();
2754         work_mask = 0;
2755         received = 0;
2756         while (sw_idx != hw_idx && budget > 0) {
2757                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2758                 unsigned int len;
2759                 struct sk_buff *skb;
2760                 dma_addr_t dma_addr;
2761                 u32 opaque_key, desc_idx, *post_ptr;
2762
2763                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2764                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2765                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2766                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2767                                                   mapping);
2768                         skb = tp->rx_std_buffers[desc_idx].skb;
2769                         post_ptr = &tp->rx_std_ptr;
2770                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2771                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2772                                                   mapping);
2773                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2774                         post_ptr = &tp->rx_jumbo_ptr;
2775                 }
2776                 else {
2777                         goto next_pkt_nopost;
2778                 }
2779
2780                 work_mask |= opaque_key;
2781
2782                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2783                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2784                 drop_it:
2785                         tg3_recycle_rx(tp, opaque_key,
2786                                        desc_idx, *post_ptr);
2787                 drop_it_no_recycle:
2788                         /* Other statistics are kept track of by the card. */
2789                         tp->net_stats.rx_dropped++;
2790                         goto next_pkt;
2791                 }
2792
2793                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2794
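                /* Copy-break: a large packet keeps the DMA buffer (a fresh
                 * replacement skb is allocated and posted), while a small one
                 * is copied into a new skb so the original buffer can simply
                 * be recycled back to the posting ring.
                 */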
2795                 /* rx_offset != 2 iff this is a 5701 card running
2796                  * in PCI-X mode [see tg3_get_invariants()].
2797                  */
2798                 if (len > RX_COPY_THRESHOLD &&
2799                     tp->rx_offset == 2) {
2800                         int skb_size;
2801
2802                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2803                                                     desc_idx, *post_ptr);
2804                         if (skb_size < 0)
2805                                 goto drop_it;
2806
2807                         pci_unmap_single(tp->pdev, dma_addr,
2808                                          skb_size - tp->rx_offset,
2809                                          PCI_DMA_FROMDEVICE);
2810
2811                         skb_put(skb, len);
2812                 } else {
2813                         struct sk_buff *copy_skb;
2814
2815                         tg3_recycle_rx(tp, opaque_key,
2816                                        desc_idx, *post_ptr);
2817
2818                         copy_skb = dev_alloc_skb(len + 2);
2819                         if (copy_skb == NULL)
2820                                 goto drop_it_no_recycle;
2821
2822                         copy_skb->dev = tp->dev;
2823                         skb_reserve(copy_skb, 2);
2824                         skb_put(copy_skb, len);
2825                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2826                         memcpy(copy_skb->data, skb->data, len);
2827                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2828
2829                         /* We'll reuse the original ring buffer. */
2830                         skb = copy_skb;
2831                 }
2832
2833                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2834                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2835                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2836                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2837                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2838                 else
2839                         skb->ip_summed = CHECKSUM_NONE;
2840
2841                 skb->protocol = eth_type_trans(skb, tp->dev);
2842 #if TG3_VLAN_TAG_USED
2843                 if (tp->vlgrp != NULL &&
2844                     desc->type_flags & RXD_FLAG_VLAN) {
2845                         tg3_vlan_rx(tp, skb,
2846                                     desc->err_vlan & RXD_VLAN_MASK);
2847                 } else
2848 #endif
2849                         netif_receive_skb(skb);
2850
2851                 tp->dev->last_rx = jiffies;
2852                 received++;
2853                 budget--;
2854
2855 next_pkt:
2856                 (*post_ptr)++;
2857 next_pkt_nopost:
2858                 sw_idx++;
2859                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
2860
2861                 /* Refresh hw_idx to see if there is new work */
2862                 if (sw_idx == hw_idx) {
2863                         hw_idx = tp->hw_status->idx[0].rx_producer;
2864                         rmb();
2865                 }
2866         }
2867
2868         /* ACK the status ring. */
2869         tp->rx_rcb_ptr = sw_idx;
2870         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
2871
2872         /* Refill RX ring(s). */
2873         if (work_mask & RXD_OPAQUE_RING_STD) {
2874                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2875                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2876                              sw_idx);
2877         }
2878         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2879                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2880                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2881                              sw_idx);
2882         }
2883         mmiowb();
2884
2885         return received;
2886 }
2887
2888 static int tg3_poll(struct net_device *netdev, int *budget)
2889 {
2890         struct tg3 *tp = netdev_priv(netdev);
2891         struct tg3_hw_status *sblk = tp->hw_status;
2892         int done;
2893
2894         /* handle link change and other phy events */
2895         if (!(tp->tg3_flags &
2896               (TG3_FLAG_USE_LINKCHG_REG |
2897                TG3_FLAG_POLL_SERDES))) {
2898                 if (sblk->status & SD_STATUS_LINK_CHG) {
2899                         sblk->status = SD_STATUS_UPDATED |
2900                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2901                         spin_lock(&tp->lock);
2902                         tg3_setup_phy(tp, 0);
2903                         spin_unlock(&tp->lock);
2904                 }
2905         }
2906
2907         /* run TX completion thread */
2908         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2909                 spin_lock(&tp->tx_lock);
2910                 tg3_tx(tp);
2911                 spin_unlock(&tp->tx_lock);
2912         }
2913
2914         /* run RX thread, within the bounds set by NAPI.
2915          * All RX "locking" is done by ensuring outside
2916          * code synchronizes with dev->poll()
2917          */
2918         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2919                 int orig_budget = *budget;
2920                 int work_done;
2921
2922                 if (orig_budget > netdev->quota)
2923                         orig_budget = netdev->quota;
2924
2925                 work_done = tg3_rx(tp, orig_budget);
2926
2927                 *budget -= work_done;
2928                 netdev->quota -= work_done;
2929         }
2930
2931         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
2932                 tp->last_tag = sblk->status_tag;
2933         rmb();
2934         sblk->status &= ~SD_STATUS_UPDATED;
2935
2936         /* if no more work, tell net stack and NIC we're done */
2937         done = !tg3_has_work(tp);
2938         if (done) {
2939                 spin_lock(&tp->lock);
2940                 netif_rx_complete(netdev);
2941                 tg3_restart_ints(tp);
2942                 spin_unlock(&tp->lock);
2943         }
2944
2945         return (done ? 0 : 1);
2946 }
2947
2948 static void tg3_irq_quiesce(struct tg3 *tp)
2949 {
2950         BUG_ON(tp->irq_sync);
2951
2952         tp->irq_sync = 1;
2953         smp_mb();
2954
2955         synchronize_irq(tp->pdev->irq);
2956 }
2957
2958 static inline int tg3_irq_sync(struct tg3 *tp)
2959 {
2960         return tp->irq_sync;
2961 }
2962
2963 /* Fully shut down all tg3 driver activity elsewhere in the system.
2964  * If irq_sync is non-zero, the IRQ handler is synchronized with as
2965  * well.  Most of the time this is not necessary, except when shutting
2966  * down the device.
2967  */
2968 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
2969 {
2970         if (irq_sync)
2971                 tg3_irq_quiesce(tp);
2972         spin_lock_bh(&tp->lock);
2973         spin_lock(&tp->tx_lock);
2974 }
2975
2976 static inline void tg3_full_unlock(struct tg3 *tp)
2977 {
2978         spin_unlock(&tp->tx_lock);
2979         spin_unlock_bh(&tp->lock);
2980 }
2981
2982 /* MSI ISR - No need to check for interrupt sharing and no need to
2983  * flush status block and interrupt mailbox. PCI ordering rules
2984  * guarantee that MSI will arrive after the status block.
2985  */
2986 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
2987 {
2988         struct net_device *dev = dev_id;
2989         struct tg3 *tp = netdev_priv(dev);
2990         struct tg3_hw_status *sblk = tp->hw_status;
2991
2992         /*
2993          * Writing any value to intr-mbox-0 clears PCI INTA# and
2994          * chip-internal interrupt pending events.
2995          * Writing a non-zero value to intr-mbox-0 additionally tells the
2996          * NIC to stop sending us irqs, engaging "in-intr-handler"
2997          * event coalescing.
2998          */
2999         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3000         tp->last_tag = sblk->status_tag;
3001         rmb();
3002         if (tg3_irq_sync(tp))
3003                 goto out;
3004         sblk->status &= ~SD_STATUS_UPDATED;
3005         if (likely(tg3_has_work(tp)))
3006                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3007         else {
3008                 /* No work, re-enable interrupts.  */
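                /* In tagged-status mode the tag is written back in bits 31:24
                 * of the mailbox; this appears to both re-enable interrupts
                 * and acknowledge how far the driver has processed, so a
                 * stale status tag does not immediately re-raise the irq.
                 */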
3009                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3010                              tp->last_tag << 24);
3011         }
3012 out:
3013         return IRQ_RETVAL(1);
3014 }
3015
3016 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3017 {
3018         struct net_device *dev = dev_id;
3019         struct tg3 *tp = netdev_priv(dev);
3020         struct tg3_hw_status *sblk = tp->hw_status;
3021         unsigned int handled = 1;
3022
3023         /* In INTx mode, it is possible for the interrupt to arrive at
3024          * the CPU before the status block that was posted prior to the
3025          * interrupt.  Reading the PCI State register will confirm whether
3026          * the interrupt is ours and will flush the status block.
3027          */
3028         if ((sblk->status & SD_STATUS_UPDATED) ||
3029             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3030                 /*
3031                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3032                  * chip-internal interrupt pending events.
3033                  * Writing a non-zero value to intr-mbox-0 additionally tells the
3034                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3035                  * event coalescing.
3036                  */
3037                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3038                              0x00000001);
3039                 if (tg3_irq_sync(tp))
3040                         goto out;
3041                 sblk->status &= ~SD_STATUS_UPDATED;
3042                 if (likely(tg3_has_work(tp)))
3043                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3044                 else {
3045                         /* No work, shared interrupt perhaps?  re-enable
3046                          * interrupts, and flush that PCI write
3047                          */
3048                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3049                                 0x00000000);
3050                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3051                 }
3052         } else {        /* shared interrupt */
3053                 handled = 0;
3054         }
3055 out:
3056         return IRQ_RETVAL(handled);
3057 }
3058
3059 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3060 {
3061         struct net_device *dev = dev_id;
3062         struct tg3 *tp = netdev_priv(dev);
3063         struct tg3_hw_status *sblk = tp->hw_status;
3064         unsigned int handled = 1;
3065
3066         /* In INTx mode, it is possible for the interrupt to arrive at
3067          * the CPU before the status block that was posted prior to the
3068          * interrupt.  Reading the PCI State register will confirm whether
3069          * the interrupt is ours and will flush the status block.
3070          */
3071         if ((sblk->status & SD_STATUS_UPDATED) ||
3072             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3073                 /*
3074                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3075                  * chip-internal interrupt pending events.
3076                  * Writing a non-zero value to intr-mbox-0 additionally tells the
3077                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3078                  * event coalescing.
3079                  */
3080                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3081                              0x00000001);
3082                 tp->last_tag = sblk->status_tag;
3083                 rmb();
3084                 if (tg3_irq_sync(tp))
3085                         goto out;
3086                 sblk->status &= ~SD_STATUS_UPDATED;
3087                 if (likely(tg3_has_work(tp)))
3088                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3089                 else {
3090                         /* No work, shared interrupt perhaps?  Re-enable
3091                          * interrupts, and flush that PCI write
3092                          */
3093                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3094                                      tp->last_tag << 24);
3095                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3096                 }
3097         } else {        /* shared interrupt */
3098                 handled = 0;
3099         }
3100 out:
3101         return IRQ_RETVAL(handled);
3102 }
3103
3104 /* ISR for interrupt test */
3105 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3106                 struct pt_regs *regs)
3107 {
3108         struct net_device *dev = dev_id;
3109         struct tg3 *tp = netdev_priv(dev);
3110         struct tg3_hw_status *sblk = tp->hw_status;
3111
3112         if (sblk->status & SD_STATUS_UPDATED) {
3113                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3114                              0x00000001);
3115                 return IRQ_RETVAL(1);
3116         }
3117         return IRQ_RETVAL(0);
3118 }
3119
3120 static int tg3_init_hw(struct tg3 *);
3121 static int tg3_halt(struct tg3 *, int, int);
3122
3123 #ifdef CONFIG_NET_POLL_CONTROLLER
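/* Invoked by the netpoll machinery (e.g. netconsole) when normal interrupt
 * delivery cannot be relied upon; it simply runs the INTx handler directly.
 */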
3124 static void tg3_poll_controller(struct net_device *dev)
3125 {
3126         struct tg3 *tp = netdev_priv(dev);
3127
3128         tg3_interrupt(tp->pdev->irq, dev, NULL);
3129 }
3130 #endif
3131
3132 static void tg3_reset_task(void *_data)
3133 {
3134         struct tg3 *tp = _data;
3135         unsigned int restart_timer;
3136
3137         tg3_netif_stop(tp);
3138
3139         tg3_full_lock(tp, 1);
3140
3141         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3142         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3143
3144         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3145         tg3_init_hw(tp);
3146
3147         tg3_netif_start(tp);
3148
3149         tg3_full_unlock(tp);
3150
3151         if (restart_timer)
3152                 mod_timer(&tp->timer, jiffies + 1);
3153 }
3154
3155 static void tg3_tx_timeout(struct net_device *dev)
3156 {
3157         struct tg3 *tp = netdev_priv(dev);
3158
3159         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3160                dev->name);
3161
3162         schedule_work(&tp->reset_task);
3163 }
3164
3165 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3166
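/* Work around a DMA hardware bug: when a transmit buffer's mapping would
 * wrap a 4GB boundary (see tg3_4g_overflow_test() below), the whole skb is
 * copied into a freshly allocated linear skb, that copy is mapped as a
 * single descriptor at *start, and the stale sw ring entries are unmapped.
 */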
3167 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3168                                        u32 guilty_entry, int guilty_len,
3169                                        u32 last_plus_one, u32 *start, u32 mss)
3170 {
3171         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3172         dma_addr_t new_addr;
3173         u32 entry = *start;
3174         int i;
3175
3176         if (!new_skb) {
3177                 dev_kfree_skb(skb);
3178                 return -1;
3179         }
3180
3181         /* New SKB is guaranteed to be linear. */
3182         entry = *start;
3183         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3184                                   PCI_DMA_TODEVICE);
3185         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3186                     (skb->ip_summed == CHECKSUM_HW) ?
3187                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3188         *start = NEXT_TX(entry);
3189
3190         /* Now clean up the sw ring entries. */
3191         i = 0;
3192         while (entry != last_plus_one) {
3193                 int len;
3194
3195                 if (i == 0)
3196                         len = skb_headlen(skb);
3197                 else
3198                         len = skb_shinfo(skb)->frags[i-1].size;
3199                 pci_unmap_single(tp->pdev,
3200                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3201                                  len, PCI_DMA_TODEVICE);
3202                 if (i == 0) {
3203                         tp->tx_buffers[entry].skb = new_skb;
3204                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3205                 } else {
3206                         tp->tx_buffers[entry].skb = NULL;
3207                 }
3208                 entry = NEXT_TX(entry);
3209                 i++;
3210         }
3211
3212         dev_kfree_skb(skb);
3213
3214         return 0;
3215 }
3216
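/* Fill in one transmit descriptor.  Bit 0 of mss_and_is_end marks the last
 * descriptor of a packet (TXD_FLAG_END) and the remaining bits carry the
 * TSO MSS; callers pass e.g. (i == last) | (mss << 1).  A VLAN tag, when
 * present, rides in the upper 16 bits of flags.
 */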
3217 static void tg3_set_txd(struct tg3 *tp, int entry,
3218                         dma_addr_t mapping, int len, u32 flags,
3219                         u32 mss_and_is_end)
3220 {
3221         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3222         int is_end = (mss_and_is_end & 0x1);
3223         u32 mss = (mss_and_is_end >> 1);
3224         u32 vlan_tag = 0;
3225
3226         if (is_end)
3227                 flags |= TXD_FLAG_END;
3228         if (flags & TXD_FLAG_VLAN) {
3229                 vlan_tag = flags >> 16;
3230                 flags &= 0xffff;
3231         }
3232         vlan_tag |= (mss << TXD_MSS_SHIFT);
3233
3234         txd->addr_hi = ((u64) mapping >> 32);
3235         txd->addr_lo = ((u64) mapping & 0xffffffff);
3236         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3237         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3238 }
3239
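/* True when a mapping of 'len' bytes starting at 'mapping' lies close enough
 * to a 4GB boundary that base + len + 8 wraps in the low 32 address bits.
 * For example, base 0xffffff00 with len 0x200: 0xffffff00 + 0x200 + 8 wraps
 * to 0x108, which is less than the base, so the test fires and the hwbug
 * workaround above is used.
 */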
3240 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3241 {
3242         u32 base = (u32) mapping & 0xffffffff;
3243
3244         return ((base > 0xffffdcc0) &&
3245                 (base + len + 8 < base));
3246 }
3247
3248 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3249 {
3250         struct tg3 *tp = netdev_priv(dev);
3251         dma_addr_t mapping;
3252         unsigned int i;
3253         u32 len, entry, base_flags, mss;
3254         int would_hit_hwbug;
3255
3256         len = skb_headlen(skb);
3257
3258         /* No BH disabling for tx_lock here.  We are running in BH disabled
3259          * context and TX reclaim runs via tp->poll inside of a software
3260          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3261          * no IRQ context deadlocks to worry about either.  Rejoice!
3262          */
3263         if (!spin_trylock(&tp->tx_lock))
3264                 return NETDEV_TX_LOCKED; 
3265
3266         /* This is a hard error, log it. */
3267         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3268                 netif_stop_queue(dev);
3269                 spin_unlock(&tp->tx_lock);
3270                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3271                        dev->name);
3272                 return NETDEV_TX_BUSY;
3273         }
3274
3275         entry = tp->tx_prod;
3276         base_flags = 0;
3277         if (skb->ip_summed == CHECKSUM_HW)
3278                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3279 #if TG3_TSO_SUPPORT != 0
3280         mss = 0;
3281         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3282             (mss = skb_shinfo(skb)->tso_size) != 0) {
3283                 int tcp_opt_len, ip_tcp_len;
3284
3285                 if (skb_header_cloned(skb) &&
3286                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3287                         dev_kfree_skb(skb);
3288                         goto out_unlock;
3289                 }
3290
3291                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3292                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3293
3294                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3295                                TXD_FLAG_CPU_POST_DMA);
3296
3297                 skb->nh.iph->check = 0;
3298                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3299                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3300                         skb->h.th->check = 0;
3301                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3302                 }
3303                 else {
3304                         skb->h.th->check =
3305                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3306                                                    skb->nh.iph->daddr,
3307                                                    0, IPPROTO_TCP, 0);
3308                 }
3309
3310                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3311                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3312                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3313                                 int tsflags;
3314
3315                                 tsflags = ((skb->nh.iph->ihl - 5) +
3316                                            (tcp_opt_len >> 2));
3317                                 mss |= (tsflags << 11);
3318                         }
3319                 } else {
3320                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3321                                 int tsflags;
3322
3323                                 tsflags = ((skb->nh.iph->ihl - 5) +
3324                                            (tcp_opt_len >> 2));
3325                                 base_flags |= tsflags << 12;
3326                         }
3327                 }
3328         }
3329 #else
3330         mss = 0;
3331 #endif
3332 #if TG3_VLAN_TAG_USED
3333         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3334                 base_flags |= (TXD_FLAG_VLAN |
3335                                (vlan_tx_tag_get(skb) << 16));
3336 #endif
3337
3338         /* Queue skb data, a.k.a. the main skb fragment. */
3339         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3340
3341         tp->tx_buffers[entry].skb = skb;
3342         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3343
3344         would_hit_hwbug = 0;
3345
3346         if (tg3_4g_overflow_test(mapping, len))
3347                 would_hit_hwbug = entry + 1;
3348
3349         tg3_set_txd(tp, entry, mapping, len, base_flags,
3350                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3351
3352         entry = NEXT_TX(entry);
3353
3354         /* Now loop through additional data fragments, and queue them. */
3355         if (skb_shinfo(skb)->nr_frags > 0) {
3356                 unsigned int i, last;
3357
3358                 last = skb_shinfo(skb)->nr_frags - 1;
3359                 for (i = 0; i <= last; i++) {
3360                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3361
3362                         len = frag->size;
3363                         mapping = pci_map_page(tp->pdev,
3364                                                frag->page,
3365                                                frag->page_offset,
3366                                                len, PCI_DMA_TODEVICE);
3367
3368                         tp->tx_buffers[entry].skb = NULL;
3369                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3370
3371                         if (tg3_4g_overflow_test(mapping, len)) {
3372                                 /* Only one should match. */
3373                                 if (would_hit_hwbug)
3374                                         BUG();
3375                                 would_hit_hwbug = entry + 1;
3376                         }
3377
3378                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3379                                 tg3_set_txd(tp, entry, mapping, len,
3380                                             base_flags, (i == last)|(mss << 1));
3381                         else
3382                                 tg3_set_txd(tp, entry, mapping, len,
3383                                             base_flags, (i == last));
3384
3385                         entry = NEXT_TX(entry);
3386                 }
3387         }
3388
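        /* One of the mappings set up above would trip the 4GB-boundary bug.
         * Rewind to the first descriptor of this packet, walk forward to
         * find the guilty entry and its length, and hand the whole skb to
         * the workaround, which replaces it with a single linear copy.
         */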
3389         if (would_hit_hwbug) {
3390                 u32 last_plus_one = entry;
3391                 u32 start;
3392                 unsigned int len = 0;
3393
3394                 would_hit_hwbug -= 1;
3395                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3396                 entry &= (TG3_TX_RING_SIZE - 1);
3397                 start = entry;
3398                 i = 0;
3399                 while (entry != last_plus_one) {
3400                         if (i == 0)
3401                                 len = skb_headlen(skb);
3402                         else
3403                                 len = skb_shinfo(skb)->frags[i-1].size;
3404
3405                         if (entry == would_hit_hwbug)
3406                                 break;
3407
3408                         i++;
3409                         entry = NEXT_TX(entry);
3410
3411                 }
3412
3413                 /* If the workaround fails due to memory/mapping
3414                  * failure, silently drop this packet.
3415                  */
3416                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3417                                                 entry, len,
3418                                                 last_plus_one,
3419                                                 &start, mss))
3420                         goto out_unlock;
3421
3422                 entry = start;
3423         }
3424
3425         /* Packets are ready, update Tx producer idx, both locally and on the card. */
3426         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3427
3428         tp->tx_prod = entry;
3429         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3430                 netif_stop_queue(dev);
3431
3432 out_unlock:
3433         mmiowb();
3434         spin_unlock(&tp->tx_lock);
3435
3436         dev->trans_start = jiffies;
3437
3438         return NETDEV_TX_OK;
3439 }
3440
3441 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3442                                int new_mtu)
3443 {
3444         dev->mtu = new_mtu;
3445
3446         if (new_mtu > ETH_DATA_LEN)
3447                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3448         else
3449                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3450 }
3451
3452 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3453 {
3454         struct tg3 *tp = netdev_priv(dev);
3455
3456         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3457                 return -EINVAL;
3458
3459         if (!netif_running(dev)) {
3460                 /* We'll just catch it later when the
3461                  * device is up'd.
3462                  */
3463                 tg3_set_mtu(dev, tp, new_mtu);
3464                 return 0;
3465         }
3466
3467         tg3_netif_stop(tp);
3468
3469         tg3_full_lock(tp, 1);
3470
3471         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3472
3473         tg3_set_mtu(dev, tp, new_mtu);
3474
3475         tg3_init_hw(tp);
3476
3477         tg3_netif_start(tp);
3478
3479         tg3_full_unlock(tp);
3480
3481         return 0;
3482 }
3483
3484 /* Free up pending packets in all rx/tx rings.
3485  *
3486  * The chip has been shut down and the driver detached from
3487  * the network stack, so no interrupts or new tx packets will
3488  * end up in the driver.  tp->{tx,}lock is not held and we are not
3489  * in an interrupt context and thus may sleep.
3490  */
3491 static void tg3_free_rings(struct tg3 *tp)
3492 {
3493         struct ring_info *rxp;
3494         int i;
3495
3496         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3497                 rxp = &tp->rx_std_buffers[i];
3498
3499                 if (rxp->skb == NULL)
3500                         continue;
3501                 pci_unmap_single(tp->pdev,
3502                                  pci_unmap_addr(rxp, mapping),
3503                                  tp->rx_pkt_buf_sz - tp->rx_offset,
3504                                  PCI_DMA_FROMDEVICE);
3505                 dev_kfree_skb_any(rxp->skb);
3506                 rxp->skb = NULL;
3507         }
3508
3509         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3510                 rxp = &tp->rx_jumbo_buffers[i];
3511
3512                 if (rxp->skb == NULL)
3513                         continue;
3514                 pci_unmap_single(tp->pdev,
3515                                  pci_unmap_addr(rxp, mapping),
3516                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3517                                  PCI_DMA_FROMDEVICE);
3518                 dev_kfree_skb_any(rxp->skb);
3519                 rxp->skb = NULL;
3520         }
3521
3522         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3523                 struct tx_ring_info *txp;
3524                 struct sk_buff *skb;
3525                 int j;
3526
3527                 txp = &tp->tx_buffers[i];
3528                 skb = txp->skb;
3529
3530                 if (skb == NULL) {
3531                         i++;
3532                         continue;
3533                 }
3534
3535                 pci_unmap_single(tp->pdev,
3536                                  pci_unmap_addr(txp, mapping),
3537                                  skb_headlen(skb),
3538                                  PCI_DMA_TODEVICE);
3539                 txp->skb = NULL;
3540
3541                 i++;
3542
3543                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3544                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3545                         pci_unmap_page(tp->pdev,
3546                                        pci_unmap_addr(txp, mapping),
3547                                        skb_shinfo(skb)->frags[j].size,
3548                                        PCI_DMA_TODEVICE);
3549                         i++;
3550                 }
3551
3552                 dev_kfree_skb_any(skb);
3553         }
3554 }
3555
3556 /* Initialize tx/rx rings for packet processing.
3557  *
3558  * The chip has been shut down and the driver detached from
3559  * the networking, so no interrupts or new tx packets will
3560  * end up in the driver.  tp->{tx,}lock are held and thus
3561  * we may not sleep.
3562  */
3563 static void tg3_init_rings(struct tg3 *tp)
3564 {
3565         u32 i;
3566
3567         /* Free up all the SKBs. */
3568         tg3_free_rings(tp);
3569
3570         /* Zero out all descriptors. */
3571         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3572         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3573         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3574         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3575
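        /* Variable standard-ring buffer size: on the 5780 with an MTU above
         * ETH_DATA_LEN, the standard ring is stocked with jumbo-sized
         * buffers (RX_JUMBO_PKT_BUF_SZ) instead of the usual RX_PKT_BUF_SZ.
         */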
3576         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3577         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) &&
3578             (tp->dev->mtu > ETH_DATA_LEN))
3579                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3580
3581         /* Initialize invariants of the rings; we only set this
3582          * stuff once.  This works because the card does not
3583          * write into the rx buffer posting rings.
3584          */
3585         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3586                 struct tg3_rx_buffer_desc *rxd;
3587
3588                 rxd = &tp->rx_std[i];
3589                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3590                         << RXD_LEN_SHIFT;
3591                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3592                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3593                                (i << RXD_OPAQUE_INDEX_SHIFT));
3594         }
3595
3596         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3597                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3598                         struct tg3_rx_buffer_desc *rxd;
3599
3600                         rxd = &tp->rx_jumbo[i];
3601                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3602                                 << RXD_LEN_SHIFT;
3603                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3604                                 RXD_FLAG_JUMBO;
3605                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3606                                (i << RXD_OPAQUE_INDEX_SHIFT));
3607                 }
3608         }
3609
3610         /* Now allocate fresh SKBs for each rx ring. */
3611         for (i = 0; i < tp->rx_pending; i++) {
3612                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3613                                      -1, i) < 0)
3614                         break;
3615         }
3616
3617         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3618                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3619                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3620                                              -1, i) < 0)
3621                                 break;
3622                 }
3623         }
3624 }
3625
3626 /*
3627  * Must not be invoked with interrupt sources disabled and
3628  * the hardware shut down.
3629  */
3630 static void tg3_free_consistent(struct tg3 *tp)
3631 {
3632         if (tp->rx_std_buffers) {
3633                 kfree(tp->rx_std_buffers);
3634                 tp->rx_std_buffers = NULL;
3635         }
3636         if (tp->rx_std) {
3637                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3638                                     tp->rx_std, tp->rx_std_mapping);
3639                 tp->rx_std = NULL;
3640         }
3641         if (tp->rx_jumbo) {
3642                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3643                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3644                 tp->rx_jumbo = NULL;
3645         }
3646         if (tp->rx_rcb) {
3647                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3648                                     tp->rx_rcb, tp->rx_rcb_mapping);
3649                 tp->rx_rcb = NULL;
3650         }
3651         if (tp->tx_ring) {
3652                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3653                         tp->tx_ring, tp->tx_desc_mapping);
3654                 tp->tx_ring = NULL;
3655         }
3656         if (tp->hw_status) {
3657                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3658                                     tp->hw_status, tp->status_mapping);
3659                 tp->hw_status = NULL;
3660         }
3661         if (tp->hw_stats) {
3662                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3663                                     tp->hw_stats, tp->stats_mapping);
3664                 tp->hw_stats = NULL;
3665         }
3666 }
3667
3668 /*
3669  * Must not be invoked with interrupt sources disabled and
3670  * the hardware shut down.  Can sleep.
3671  */
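/* Note that a single kmalloc() below provides the software state for all
 * three rings: the std ring_info array, followed by the jumbo ring_info
 * array, followed by the tx_ring_info array.  The descriptor rings, status
 * block and statistics block come from pci_alloc_consistent().
 */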
3672 static int tg3_alloc_consistent(struct tg3 *tp)
3673 {
3674         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3675                                       (TG3_RX_RING_SIZE +
3676                                        TG3_RX_JUMBO_RING_SIZE)) +
3677                                      (sizeof(struct tx_ring_info) *
3678                                       TG3_TX_RING_SIZE),
3679                                      GFP_KERNEL);
3680         if (!tp->rx_std_buffers)
3681                 return -ENOMEM;
3682
3683         memset(tp->rx_std_buffers, 0,
3684                (sizeof(struct ring_info) *
3685                 (TG3_RX_RING_SIZE +
3686                  TG3_RX_JUMBO_RING_SIZE)) +
3687                (sizeof(struct tx_ring_info) *
3688                 TG3_TX_RING_SIZE));
3689
3690         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3691         tp->tx_buffers = (struct tx_ring_info *)
3692                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3693
3694         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3695                                           &tp->rx_std_mapping);
3696         if (!tp->rx_std)
3697                 goto err_out;
3698
3699         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3700                                             &tp->rx_jumbo_mapping);
3701
3702         if (!tp->rx_jumbo)
3703                 goto err_out;
3704
3705         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3706                                           &tp->rx_rcb_mapping);
3707         if (!tp->rx_rcb)
3708                 goto err_out;
3709
3710         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3711                                            &tp->tx_desc_mapping);
3712         if (!tp->tx_ring)
3713                 goto err_out;
3714
3715         tp->hw_status = pci_alloc_consistent(tp->pdev,
3716                                              TG3_HW_STATUS_SIZE,
3717                                              &tp->status_mapping);
3718         if (!tp->hw_status)
3719                 goto err_out;
3720
3721         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3722                                             sizeof(struct tg3_hw_stats),
3723                                             &tp->stats_mapping);
3724         if (!tp->hw_stats)
3725                 goto err_out;
3726
3727         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3728         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3729
3730         return 0;
3731
3732 err_out:
3733         tg3_free_consistent(tp);
3734         return -ENOMEM;
3735 }
3736
3737 #define MAX_WAIT_CNT 1000
3738
3739 /* To stop a block, clear the enable bit and poll till it
3740  * clears.  tp->lock is held.
3741  */
3742 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
3743 {
3744         unsigned int i;
3745         u32 val;
3746
3747         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3748                 switch (ofs) {
3749                 case RCVLSC_MODE:
3750                 case DMAC_MODE:
3751                 case MBFREE_MODE:
3752                 case BUFMGR_MODE:
3753                 case MEMARB_MODE:
3754                         /* We can't enable/disable these bits on the
3755                          * 5705/5750, so just report success.
3756                          */
3757                         return 0;
3758
3759                 default:
3760                         break;
3761                 };
3762         }
3763
3764         val = tr32(ofs);
3765         val &= ~enable_bit;
3766         tw32_f(ofs, val);
3767
3768         for (i = 0; i < MAX_WAIT_CNT; i++) {
3769                 udelay(100);
3770                 val = tr32(ofs);
3771                 if ((val & enable_bit) == 0)
3772                         break;
3773         }
3774
3775         if (i == MAX_WAIT_CNT && !silent) {
3776                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3777                        "ofs=%lx enable_bit=%x\n",
3778                        ofs, enable_bit);
3779                 return -ENODEV;
3780         }
3781
3782         return 0;
3783 }
3784
3785 /* tp->lock is held. */
3786 static int tg3_abort_hw(struct tg3 *tp, int silent)
3787 {
3788         int i, err;
3789
3790         tg3_disable_ints(tp);
3791
3792         tp->rx_mode &= ~RX_MODE_ENABLE;
3793         tw32_f(MAC_RX_MODE, tp->rx_mode);
3794         udelay(10);
3795
3796         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
3797         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
3798         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
3799         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
3800         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
3801         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
3802
3803         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
3804         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
3805         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
3806         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
3807         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
3808         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
3809         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
3810
3811         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3812         tw32_f(MAC_MODE, tp->mac_mode);
3813         udelay(40);
3814
3815         tp->tx_mode &= ~TX_MODE_ENABLE;
3816         tw32_f(MAC_TX_MODE, tp->tx_mode);
3817
3818         for (i = 0; i < MAX_WAIT_CNT; i++) {
3819                 udelay(100);
3820                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3821                         break;
3822         }
3823         if (i >= MAX_WAIT_CNT) {
3824                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3825                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3826                        tp->dev->name, tr32(MAC_TX_MODE));
3827                 err |= -ENODEV;
3828         }
3829
3830         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
3831         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
3832         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
3833
3834         tw32(FTQ_RESET, 0xffffffff);
3835         tw32(FTQ_RESET, 0x00000000);
3836
3837         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
3838         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
3839
3840         if (tp->hw_status)
3841                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3842         if (tp->hw_stats)
3843                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3844
3845         return err;
3846 }
3847
3848 /* tp->lock is held. */
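/* Acquire the NVRAM software arbitration semaphore: request grant slot 1
 * and poll (up to 8000 * 20us) for SWARB_GNT1, giving up with -ENODEV if
 * the grant never arrives.
 */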
3849 static int tg3_nvram_lock(struct tg3 *tp)
3850 {
3851         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3852                 int i;
3853
3854                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3855                 for (i = 0; i < 8000; i++) {
3856                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3857                                 break;
3858                         udelay(20);
3859                 }
3860                 if (i == 8000)
3861                         return -ENODEV;
3862         }
3863         return 0;
3864 }
3865
3866 /* tp->lock is held. */
3867 static void tg3_nvram_unlock(struct tg3 *tp)
3868 {
3869         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3870                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3871 }
3872
3873 /* tp->lock is held. */
3874 static void tg3_enable_nvram_access(struct tg3 *tp)
3875 {
3876         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3877             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3878                 u32 nvaccess = tr32(NVRAM_ACCESS);
3879
3880                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3881         }
3882 }
3883
3884 /* tp->lock is held. */
3885 static void tg3_disable_nvram_access(struct tg3 *tp)
3886 {
3887         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3888             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3889                 u32 nvaccess = tr32(NVRAM_ACCESS);
3890
3891                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3892         }
3893 }
3894
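/* The three signature helpers below post driver-state words into mailboxes
 * in NIC SRAM; these appear to let on-chip (ASF) management firmware track
 * whether the driver is starting, unloading or suspending across a reset.
 */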
3895 /* tp->lock is held. */
3896 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3897 {
3898         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3899                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3900                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3901
3902         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3903                 switch (kind) {
3904                 case RESET_KIND_INIT:
3905                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3906                                       DRV_STATE_START);
3907                         break;
3908
3909                 case RESET_KIND_SHUTDOWN:
3910                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3911                                       DRV_STATE_UNLOAD);
3912                         break;
3913
3914                 case RESET_KIND_SUSPEND:
3915                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3916                                       DRV_STATE_SUSPEND);
3917                         break;
3918
3919                 default:
3920                         break;
3921                 };
3922         }
3923 }
3924
3925 /* tp->lock is held. */
3926 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3927 {
3928         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3929                 switch (kind) {
3930                 case RESET_KIND_INIT:
3931                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3932                                       DRV_STATE_START_DONE);
3933                         break;
3934
3935                 case RESET_KIND_SHUTDOWN:
3936                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3937                                       DRV_STATE_UNLOAD_DONE);
3938                         break;
3939
3940                 default:
3941                         break;
3942                 };
3943         }
3944 }
3945
3946 /* tp->lock is held. */
3947 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3948 {
3949         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3950                 switch (kind) {
3951                 case RESET_KIND_INIT:
3952                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3953                                       DRV_STATE_START);
3954                         break;
3955
3956                 case RESET_KIND_SHUTDOWN:
3957                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3958                                       DRV_STATE_UNLOAD);
3959                         break;
3960
3961                 case RESET_KIND_SUSPEND:
3962                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3963                                       DRV_STATE_SUSPEND);
3964                         break;
3965
3966                 default:
3967                         break;
3968                 };
3969         }
3970 }
3971
3972 static void tg3_stop_fw(struct tg3 *);
3973
3974 /* tp->lock is held. */
3975 static int tg3_chip_reset(struct tg3 *tp)
3976 {
3977         u32 val;
3978         u32 flags_save;
3979         int i;
3980
3981         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3982                 tg3_nvram_lock(tp);
3983
3984         /*
3985          * We must avoid the readl() that normally takes place.
3986          * It locks up machines, causes machine checks, and other
3987          * fun things.  So temporarily disable the 5701
3988          * hardware workaround while we do the reset.
3989          */
3990         flags_save = tp->tg3_flags;
3991         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3992
3993         /* do the reset */
3994         val = GRC_MISC_CFG_CORECLK_RESET;
3995
3996         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3997                 if (tr32(0x7e2c) == 0x60) {
3998                         tw32(0x7e2c, 0x20);
3999                 }
4000                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4001                         tw32(GRC_MISC_CFG, (1 << 29));
4002                         val |= (1 << 29);
4003                 }
4004         }
4005
4006         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4007                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4008         tw32(GRC_MISC_CFG, val);
4009
4010         /* restore 5701 hardware bug workaround flag */
4011         tp->tg3_flags = flags_save;
4012
4013         /* Unfortunately, we have to delay before the PCI read back.
4014          * Some 575X chips will not even respond to a PCI cfg access
4015          * when the reset command is given to the chip.
4016          *
4017          * How do these hardware designers expect things to work
4018          * properly if the PCI write is posted for a long period
4019          * of time?  It is always necessary to have some method by
4020          * which a register read back can occur to push out the
4021          * write that does the reset.
4022          *
4023          * For most tg3 variants the trick below works.
4024          * Ho hum...
4025          */
4026         udelay(120);
4027
4028         /* Flush PCI posted writes.  The normal MMIO registers
4029          * are inaccessible at this time, so this is the only
4030          * way to do this reliably (actually, this is no longer
4031          * the case, see above).  I tried to use indirect
4032          * register read/write but this upset some 5701 variants.
4033          */
4034         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4035
4036         udelay(120);
4037
4038         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4039                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4040                         int i;
4041                         u32 cfg_val;
4042
4043                         /* Wait for link training to complete.  */
4044                         for (i = 0; i < 5000; i++)
4045                                 udelay(100);
4046
4047                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4048                         pci_write_config_dword(tp->pdev, 0xc4,
4049                                                cfg_val | (1 << 15));
4050                 }
4051                 /* Set PCIE max payload size and clear error status.  */
4052                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4053         }
4054
4055         /* Re-enable indirect register accesses. */
4056         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4057                                tp->misc_host_ctrl);
4058
4059         /* Set MAX PCI retry to zero. */
4060         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4061         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4062             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4063                 val |= PCISTATE_RETRY_SAME_DMA;
4064         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4065
4066         pci_restore_state(tp->pdev);
4067
4068         /* Make sure PCI-X relaxed ordering bit is clear. */
4069         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4070         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4071         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4072
4073         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
4074                 u32 val;
4075
4076                 /* Chip reset on 5780 will reset MSI enable bit,
4077                  * so we need to restore it.
4078                  */
4079                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4080                         u16 ctrl;
4081
4082                         pci_read_config_word(tp->pdev,
4083                                              tp->msi_cap + PCI_MSI_FLAGS,
4084                                              &ctrl);
4085                         pci_write_config_word(tp->pdev,
4086                                               tp->msi_cap + PCI_MSI_FLAGS,
4087                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4088                         val = tr32(MSGINT_MODE);
4089                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4090                 }
4091
4092                 val = tr32(MEMARB_MODE);
4093                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4094
4095         } else
4096                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4097
4098         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4099                 tg3_stop_fw(tp);
4100                 tw32(0x5000, 0x400);
4101         }
4102
4103         tw32(GRC_MODE, tp->grc_mode);
4104
4105         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4106                 u32 val = tr32(0xc4);
4107
4108                 tw32(0xc4, val | (1 << 15));
4109         }
4110
4111         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4112             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4113                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4114                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4115                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4116                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4117         }
4118
4119         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4120                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4121                 tw32_f(MAC_MODE, tp->mac_mode);
4122         } else
4123                 tw32_f(MAC_MODE, 0);
4124         udelay(40);
4125
4126         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4127                 /* Wait for firmware initialization to complete. */
4128                 for (i = 0; i < 100000; i++) {
4129                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4130                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4131                                 break;
4132                         udelay(10);
4133                 }
4134                 if (i >= 100000) {
4135                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4136                                "firmware will not restart magic=%08x\n",
4137                                tp->dev->name, val);
4138                         return -ENODEV;
4139                 }
4140         }
4141
4142         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4143             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4144                 u32 val = tr32(0x7c00);
4145
4146                 tw32(0x7c00, val | (1 << 25));
4147         }
4148
4149         /* Reprobe ASF enable state.  */
4150         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4151         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4152         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4153         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4154                 u32 nic_cfg;
4155
4156                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4157                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4158                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4159                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4160                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4161                 }
4162         }
4163
4164         return 0;
4165 }
4166
4167 /* tp->lock is held. */
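/* Ask the on-chip firmware to pause: post FWCMD_NICDRV_PAUSE_FW in the
 * firmware command mailbox, ring the RX CPU event bit, then wait up to
 * 100us for the RX CPU to acknowledge by clearing that bit.
 */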
4168 static void tg3_stop_fw(struct tg3 *tp)
4169 {
4170         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4171                 u32 val;
4172                 int i;
4173
4174                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4175                 val = tr32(GRC_RX_CPU_EVENT);
4176                 val |= (1 << 14);
4177                 tw32(GRC_RX_CPU_EVENT, val);
4178
4179                 /* Wait for RX cpu to ACK the event.  */
4180                 for (i = 0; i < 100; i++) {
4181                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4182                                 break;
4183                         udelay(1);
4184                 }
4185         }
4186 }
4187
4188 /* tp->lock is held. */
4189 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4190 {
4191         int err;
4192
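             /* Orderly shutdown: pause the ASF firmware, post the pre-reset
              * signature, stop the hardware blocks, reset the chip, then post
              * the legacy and post-reset signatures.
              */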
4193         tg3_stop_fw(tp);
4194
4195         tg3_write_sig_pre_reset(tp, kind);
4196
4197         tg3_abort_hw(tp, silent);
4198         err = tg3_chip_reset(tp);
4199
4200         tg3_write_sig_legacy(tp, kind);
4201         tg3_write_sig_post_reset(tp, kind);
4202
4203         if (err)
4204                 return err;
4205
4206         return 0;
4207 }
4208
4209 #define TG3_FW_RELEASE_MAJOR    0x0
4210 #define TG3_FW_RELEASE_MINOR    0x0
4211 #define TG3_FW_RELEASE_FIX      0x0
4212 #define TG3_FW_START_ADDR       0x08000000
4213 #define TG3_FW_TEXT_ADDR        0x08000000
4214 #define TG3_FW_TEXT_LEN         0x9c0
4215 #define TG3_FW_RODATA_ADDR      0x080009c0
4216 #define TG3_FW_RODATA_LEN       0x60
4217 #define TG3_FW_DATA_ADDR        0x08000a40
4218 #define TG3_FW_DATA_LEN         0x20
4219 #define TG3_FW_SBSS_ADDR        0x08000a60
4220 #define TG3_FW_SBSS_LEN         0xc
4221 #define TG3_FW_BSS_ADDR         0x08000a70
4222 #define TG3_FW_BSS_LEN          0x10
4223
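     /* Text (instruction) section of the firmware image loaded by
      * tg3_load_5701_a0_firmware_fix(); the read-only data follows in
      * tg3FwRodata[] and the data section is all zeros.
      */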
4224 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4225         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4226         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4227         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4228         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4229         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4230         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4231         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4232         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4233         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4234         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4235         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4236         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4237         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4238         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4239         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4240         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4241         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4242         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4243         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4244         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4245         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4246         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4247         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4248         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4249         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4250         0, 0, 0, 0, 0, 0,
4251         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4252         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4253         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4254         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4255         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4256         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4257         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4258         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4259         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4260         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4261         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4262         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4263         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4264         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4265         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4266         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4267         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4268         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4269         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4270         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4271         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4272         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4273         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4274         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4275         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4276         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4277         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4278         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4279         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4280         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4281         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4282         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4283         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4284         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4285         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4286         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4287         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4288         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4289         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4290         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4291         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4292         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4293         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4294         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4295         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4296         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4297         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4298         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4299         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4300         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4301         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4302         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4303         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4304         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4305         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4306         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4307         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4308         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4309         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4310         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4311         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4312         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4313         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4314         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4315         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4316 };
4317
4318 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4319         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4320         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4321         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4322         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4323         0x00000000
4324 };
4325
4326 #if 0 /* All zeros, don't eat up space with it. */
4327 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4328         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4329         0x00000000, 0x00000000, 0x00000000, 0x00000000
4330 };
4331 #endif
4332
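     /* On-chip SRAM windows used as scratch space when downloading firmware
      * to the RX and TX processors (see tg3_load_firmware_cpu()).
      */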
4333 #define RX_CPU_SCRATCH_BASE     0x30000
4334 #define RX_CPU_SCRATCH_SIZE     0x04000
4335 #define TX_CPU_SCRATCH_BASE     0x34000
4336 #define TX_CPU_SCRATCH_SIZE     0x04000
4337
4338 /* tp->lock is held. */
4339 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4340 {
4341         int i;
4342
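             /* 5705 and later chips have no usable TX CPU for firmware (their
              * TSO firmware is loaded onto the RX CPU instead), so asking to
              * halt it is a driver bug.
              */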
4343         if (offset == TX_CPU_BASE &&
4344             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4345                 BUG();
4346
4347         if (offset == RX_CPU_BASE) {
4348                 for (i = 0; i < 10000; i++) {
4349                         tw32(offset + CPU_STATE, 0xffffffff);
4350                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4351                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4352                                 break;
4353                 }
4354
4355                 tw32(offset + CPU_STATE, 0xffffffff);
4356                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4357                 udelay(10);
4358         } else {
4359                 for (i = 0; i < 10000; i++) {
4360                         tw32(offset + CPU_STATE, 0xffffffff);
4361                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4362                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4363                                 break;
4364                 }
4365         }
4366
4367         if (i >= 10000) {
4368                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
4369                        "%s CPU\n",
4370                        tp->dev->name,
4371                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4372                 return -ENODEV;
4373         }
4374         return 0;
4375 }
4376
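     /* Describes one downloadable firmware image: base address, length and
      * contents of each section.  A NULL *_data pointer means the section is
      * all zeros and is written out as such by tg3_load_firmware_cpu().
      */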
4377 struct fw_info {
4378         unsigned int text_base;
4379         unsigned int text_len;
4380         u32 *text_data;
4381         unsigned int rodata_base;
4382         unsigned int rodata_len;
4383         u32 *rodata_data;
4384         unsigned int data_base;
4385         unsigned int data_len;
4386         u32 *data_data;
4387 };
4388
4389 /* tp->lock is held. */
4390 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4391                                  int cpu_scratch_size, struct fw_info *info)
4392 {
4393         int err, i;
4394         u32 orig_tg3_flags = tp->tg3_flags;
4395         void (*write_op)(struct tg3 *, u32, u32);
4396
4397         if (cpu_base == TX_CPU_BASE &&
4398             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4399                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4400                        "TX cpu firmware on %s which is 5705 or later.\n",
4401                        tp->dev->name);
4402                 return -EINVAL;
4403         }
4404
4405         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4406                 write_op = tg3_write_mem;
4407         else
4408                 write_op = tg3_write_indirect_reg32;
4409
4410         /* Force use of PCI config space for indirect register
4411          * write calls.
4412          */
4413         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4414
4415         /* It is possible that bootcode is still loading at this point.
4416          * Get the nvram lock first before halting the cpu.
4417          */
4418         tg3_nvram_lock(tp);
4419         err = tg3_halt_cpu(tp, cpu_base);
4420         tg3_nvram_unlock(tp);
4421         if (err)
4422                 goto out;
4423
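             /* Zero the whole scratch area, keep the CPU halted, then copy the
              * text, rodata and data sections one 32-bit word at a time.
              */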
4424         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4425                 write_op(tp, cpu_scratch_base + i, 0);
4426         tw32(cpu_base + CPU_STATE, 0xffffffff);
4427         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
4428         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4429                 write_op(tp, (cpu_scratch_base +
4430                               (info->text_base & 0xffff) +
4431                               (i * sizeof(u32))),
4432                          (info->text_data ?
4433                           info->text_data[i] : 0));
4434         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4435                 write_op(tp, (cpu_scratch_base +
4436                               (info->rodata_base & 0xffff) +
4437                               (i * sizeof(u32))),
4438                          (info->rodata_data ?
4439                           info->rodata_data[i] : 0));
4440         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4441                 write_op(tp, (cpu_scratch_base +
4442                               (info->data_base & 0xffff) +
4443                               (i * sizeof(u32))),
4444                          (info->data_data ?
4445                           info->data_data[i] : 0));
4446
4447         err = 0;
4448
4449 out:
4450         tp->tg3_flags = orig_tg3_flags;
4451         return err;
4452 }
4453
4454 /* tp->lock is held. */
4455 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4456 {
4457         struct fw_info info;
4458         int err, i;
4459
4460         info.text_base = TG3_FW_TEXT_ADDR;
4461         info.text_len = TG3_FW_TEXT_LEN;
4462         info.text_data = &tg3FwText[0];
4463         info.rodata_base = TG3_FW_RODATA_ADDR;
4464         info.rodata_len = TG3_FW_RODATA_LEN;
4465         info.rodata_data = &tg3FwRodata[0];
4466         info.data_base = TG3_FW_DATA_ADDR;
4467         info.data_len = TG3_FW_DATA_LEN;
4468         info.data_data = NULL;
4469
4470         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4471                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4472                                     &info);
4473         if (err)
4474                 return err;
4475
4476         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4477                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4478                                     &info);
4479         if (err)
4480                 return err;
4481
4482         /* Now start up only the RX cpu. */
4483         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4484         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4485
4486         for (i = 0; i < 5; i++) {
4487                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4488                         break;
4489                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4490                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4491                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4492                 udelay(1000);
4493         }
4494         if (i >= 5) {
4495                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4496                        "to set RX CPU PC, is %08x, should be %08x\n",
4497                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4498                        TG3_FW_TEXT_ADDR);
4499                 return -ENODEV;
4500         }
4501         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4502         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4503
4504         return 0;
4505 }
4506
4507 #if TG3_TSO_SUPPORT != 0
4508
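     /* TSO firmware for chips without hardware TSO support;
      * tg3_load_tso_firmware() below skips the download entirely when
      * TG3_FLG2_HW_TSO is set.
      */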
4509 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4510 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4511 #define TG3_TSO_FW_RELEASE_FIX          0x0
4512 #define TG3_TSO_FW_START_ADDR           0x08000000
4513 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4514 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4515 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4516 #define TG3_TSO_FW_RODATA_LEN           0x60
4517 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4518 #define TG3_TSO_FW_DATA_LEN             0x30
4519 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4520 #define TG3_TSO_FW_SBSS_LEN             0x2c
4521 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4522 #define TG3_TSO_FW_BSS_LEN              0x894
4523
4524 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4525         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4526         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4527         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4528         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4529         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4530         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4531         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4532         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4533         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4534         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4535         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4536         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4537         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4538         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4539         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4540         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4541         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4542         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4543         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4544         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4545         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4546         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4547         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4548         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4549         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4550         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4551         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4552         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4553         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4554         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4555         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4556         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4557         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4558         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4559         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4560         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4561         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4562         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4563         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4564         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4565         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4566         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4567         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4568         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4569         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4570         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4571         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4572         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4573         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4574         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4575         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4576         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4577         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4578         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4579         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4580         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4581         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4582         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4583         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4584         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4585         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4586         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4587         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4588         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4589         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4590         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4591         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4592         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4593         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4594         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4595         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4596         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4597         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4598         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4599         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4600         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4601         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4602         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4603         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4604         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4605         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4606         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4607         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4608         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4609         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4610         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4611         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4612         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4613         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4614         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4615         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4616         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4617         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4618         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4619         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4620         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4621         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4622         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4623         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4624         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4625         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4626         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4627         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4628         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4629         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4630         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4631         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4632         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4633         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4634         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4635         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4636         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4637         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4638         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4639         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4640         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4641         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4642         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4643         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4644         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4645         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4646         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4647         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4648         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4649         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4650         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4651         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4652         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4653         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4654         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4655         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4656         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4657         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4658         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4659         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4660         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4661         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4662         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4663         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4664         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4665         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4666         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4667         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4668         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4669         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4670         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4671         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4672         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4673         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4674         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4675         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4676         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4677         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4678         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4679         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4680         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4681         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4682         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4683         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4684         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4685         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4686         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4687         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4688         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4689         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4690         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4691         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4692         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4693         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4694         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4695         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4696         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4697         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4698         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4699         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4700         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4701         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4702         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4703         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4704         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4705         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4706         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4707         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4708         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4709         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4710         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4711         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4712         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4713         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4714         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4715         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4716         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4717         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4718         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4719         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4720         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4721         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4722         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4723         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4724         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4725         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4726         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4727         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4728         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4729         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4730         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4731         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4732         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4733         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4734         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4735         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4736         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4737         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4738         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4739         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4740         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4741         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4742         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4743         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4744         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4745         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4746         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4747         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4748         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4749         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4750         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4751         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4752         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4753         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4754         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4755         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4756         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4757         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4758         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4759         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4760         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4761         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4762         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4763         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4764         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4765         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4766         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4767         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4768         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4769         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4770         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4771         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4772         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4773         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4774         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4775         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4776         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4777         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4778         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4779         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4780         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4781         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4782         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4783         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4784         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4785         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4786         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4787         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4788         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4789         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4790         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4791         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4792         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4793         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4794         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4795         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4796         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4797         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4798         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4799         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4800         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4801         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4802         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4803         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4804         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4805         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4806         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4807         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4808         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4809 };
4810
4811 static u32 tg3TsoFwRodata[] = {
4812         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4813         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4814         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4815         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4816         0x00000000,
4817 };
4818
4819 static u32 tg3TsoFwData[] = {
4820         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4821         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4822         0x00000000,
4823 };
4824
4825 /* 5705 needs a special version of the TSO firmware.  */
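     /* The 5705 variant runs on the RX CPU and is staged in part of the SRAM
      * MBUF pool (NIC_SRAM_MBUF_POOL_BASE5705); see tg3_load_tso_firmware().
      */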
4826 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4827 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4828 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4829 #define TG3_TSO5_FW_START_ADDR          0x00010000
4830 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4831 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4832 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4833 #define TG3_TSO5_FW_RODATA_LEN          0x50
4834 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4835 #define TG3_TSO5_FW_DATA_LEN            0x20
4836 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4837 #define TG3_TSO5_FW_SBSS_LEN            0x28
4838 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4839 #define TG3_TSO5_FW_BSS_LEN             0x88
4840
4841 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4842         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4843         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4844         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4845         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4846         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4847         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4848         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4849         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4850         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4851         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4852         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4853         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4854         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4855         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4856         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4857         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4858         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4859         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4860         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4861         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4862         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4863         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4864         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4865         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4866         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4867         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4868         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4869         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4870         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4871         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4872         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4873         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4874         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4875         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4876         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4877         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4878         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4879         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4880         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4881         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4882         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4883         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4884         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4885         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4886         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4887         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4888         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4889         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4890         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4891         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4892         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4893         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4894         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4895         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4896         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4897         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4898         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4899         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4900         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4901         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4902         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4903         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4904         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4905         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4906         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4907         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4908         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4909         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4910         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4911         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4912         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4913         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4914         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4915         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4916         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4917         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4918         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4919         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4920         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4921         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4922         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4923         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4924         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4925         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4926         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4927         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4928         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4929         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4930         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4931         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4932         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4933         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4934         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4935         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4936         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4937         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4938         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4939         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4940         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4941         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4942         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4943         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4944         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4945         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4946         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4947         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4948         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4949         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4950         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4951         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4952         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4953         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4954         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4955         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4956         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4957         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4958         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4959         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4960         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4961         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4962         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4963         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4964         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4965         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4966         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4967         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4968         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4969         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4970         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4971         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4972         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4973         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4974         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4975         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4976         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4977         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4978         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4979         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4980         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4981         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4982         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4983         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4984         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4985         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4986         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4987         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4988         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4989         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4990         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4991         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4992         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4993         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4994         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4995         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4996         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4997         0x00000000, 0x00000000, 0x00000000,
4998 };
4999
5000 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5001         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5002         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5003         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5004         0x00000000, 0x00000000, 0x00000000,
5005 };
5006
5007 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5008         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5009         0x00000000, 0x00000000, 0x00000000,
5010 };
5011
5012 /* tp->lock is held. */
5013 static int tg3_load_tso_firmware(struct tg3 *tp)
5014 {
5015         struct fw_info info;
5016         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5017         int err, i;
5018
5019         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5020                 return 0;
5021
5022         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5023                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5024                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5025                 info.text_data = &tg3Tso5FwText[0];
5026                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5027                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5028                 info.rodata_data = &tg3Tso5FwRodata[0];
5029                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5030                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5031                 info.data_data = &tg3Tso5FwData[0];
5032                 cpu_base = RX_CPU_BASE;
5033                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5034                 cpu_scratch_size = (info.text_len +
5035                                     info.rodata_len +
5036                                     info.data_len +
5037                                     TG3_TSO5_FW_SBSS_LEN +
5038                                     TG3_TSO5_FW_BSS_LEN);
5039         } else {
5040                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5041                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5042                 info.text_data = &tg3TsoFwText[0];
5043                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5044                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5045                 info.rodata_data = &tg3TsoFwRodata[0];
5046                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5047                 info.data_len = TG3_TSO_FW_DATA_LEN;
5048                 info.data_data = &tg3TsoFwData[0];
5049                 cpu_base = TX_CPU_BASE;
5050                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5051                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5052         }
5053
5054         err = tg3_load_firmware_cpu(tp, cpu_base,
5055                                     cpu_scratch_base, cpu_scratch_size,
5056                                     &info);
5057         if (err)
5058                 return err;
5059
5060         /* Now start up the CPU. */
5061         tw32(cpu_base + CPU_STATE, 0xffffffff);
5062         tw32_f(cpu_base + CPU_PC,    info.text_base);
5063
5064         for (i = 0; i < 5; i++) {
5065                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5066                         break;
5067                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5068                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5069                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5070                 udelay(1000);
5071         }
5072         if (i >= 5) {
5073                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5074                        "to set CPU PC, is %08x should be %08x\n",
5075                        tp->dev->name, tr32(cpu_base + CPU_PC),
5076                        info.text_base);
5077                 return -ENODEV;
5078         }
5079         tw32(cpu_base + CPU_STATE, 0xffffffff);
5080         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5081         return 0;
5082 }
5083
5084 #endif /* TG3_TSO_SUPPORT != 0 */
5085
5086 /* tp->lock is held. */
5087 static void __tg3_set_mac_addr(struct tg3 *tp)
5088 {
5089         u32 addr_high, addr_low;
5090         int i;
5091
5092         addr_high = ((tp->dev->dev_addr[0] << 8) |
5093                      tp->dev->dev_addr[1]);
5094         addr_low = ((tp->dev->dev_addr[2] << 24) |
5095                     (tp->dev->dev_addr[3] << 16) |
5096                     (tp->dev->dev_addr[4] <<  8) |
5097                     (tp->dev->dev_addr[5] <<  0));
5098         for (i = 0; i < 4; i++) {
5099                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5100                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5101         }
5102
5103         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5104             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5105                 for (i = 0; i < 12; i++) {
5106                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5107                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5108                 }
5109         }
5110
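        /* The TX backoff seed is derived from the byte sum of the MAC
         * address (masked to the seed field) so that stations with different
         * addresses presumably start from different random backoff sequences
         * after a collision.
         */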
5111         addr_high = (tp->dev->dev_addr[0] +
5112                      tp->dev->dev_addr[1] +
5113                      tp->dev->dev_addr[2] +
5114                      tp->dev->dev_addr[3] +
5115                      tp->dev->dev_addr[4] +
5116                      tp->dev->dev_addr[5]) &
5117                 TX_BACKOFF_SEED_MASK;
5118         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5119 }
5120
5121 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5122 {
5123         struct tg3 *tp = netdev_priv(dev);
5124         struct sockaddr *addr = p;
5125
5126         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5127
5128         spin_lock_bh(&tp->lock);
5129         __tg3_set_mac_addr(tp);
5130         spin_unlock_bh(&tp->lock);
5131
5132         return 0;
5133 }
5134
5135 /* tp->lock is held. */
5136 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5137                            dma_addr_t mapping, u32 maxlen_flags,
5138                            u32 nic_addr)
5139 {
5140         tg3_write_mem(tp,
5141                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5142                       ((u64) mapping >> 32));
5143         tg3_write_mem(tp,
5144                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5145                       ((u64) mapping & 0xffffffff));
5146         tg3_write_mem(tp,
5147                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5148                        maxlen_flags);
5149
5150         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5151                 tg3_write_mem(tp,
5152                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5153                               nic_addr);
5154 }
5155
5156 static void __tg3_set_rx_mode(struct net_device *);
5157 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5158 {
5159         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5160         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5161         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5162         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5163         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5164                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5165                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5166         }
5167         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5168         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5169         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5170                 u32 val = ec->stats_block_coalesce_usecs;
5171
5172                 if (!netif_carrier_ok(tp->dev))
5173                         val = 0;
5174
5175                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5176         }
5177 }
5178
5179 /* tp->lock is held. */
5180 static int tg3_reset_hw(struct tg3 *tp)
5181 {
5182         u32 val, rdmac_mode;
5183         int i, err, limit;
5184
5185         tg3_disable_ints(tp);
5186
5187         tg3_stop_fw(tp);
5188
5189         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5190
5191         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5192                 tg3_abort_hw(tp, 1);
5193         }
5194
5195         err = tg3_chip_reset(tp);
5196         if (err)
5197                 return err;
5198
5199         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5200
5201         /* This works around an issue with Athlon chipsets on
5202          * B3 tigon3 silicon.  This bit has no effect on any
5203          * other revision.  But do not set this on PCI Express
5204          * chips.
5205          */
5206         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5207                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5208         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5209
5210         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5211             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5212                 val = tr32(TG3PCI_PCISTATE);
5213                 val |= PCISTATE_RETRY_SAME_DMA;
5214                 tw32(TG3PCI_PCISTATE, val);
5215         }
5216
5217         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5218                 /* Enable some hw fixes.  */
5219                 val = tr32(TG3PCI_MSI_DATA);
5220                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5221                 tw32(TG3PCI_MSI_DATA, val);
5222         }
5223
5224         /* Descriptor ring init may make accesses to the
5225          * NIC SRAM area to setup the TX descriptors, so we
5226          * can only do this after the hardware has been
5227          * successfully reset.
5228          */
5229         tg3_init_rings(tp);
5230
5231         /* This value is determined during the probe time DMA
5232          * engine test, tg3_test_dma.
5233          */
5234         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5235
5236         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5237                           GRC_MODE_4X_NIC_SEND_RINGS |
5238                           GRC_MODE_NO_TX_PHDR_CSUM |
5239                           GRC_MODE_NO_RX_PHDR_CSUM);
5240         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5241         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5242                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5243         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5244                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5245
5246         tw32(GRC_MODE,
5247              tp->grc_mode |
5248              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5249
5250         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
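        /* (A prescaler value of 65 presumably divides the 66 MHz core clock
         * by 66, giving timer ticks of roughly 1 usec.)
         */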
5251         val = tr32(GRC_MISC_CFG);
5252         val &= ~0xff;
5253         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5254         tw32(GRC_MISC_CFG, val);
5255
5256         /* Initialize MBUF/DESC pool. */
5257         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5258                 /* Do nothing.  */
5259         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5260                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5261                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5262                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5263                 else
5264                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5265                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5266                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5267         }
5268 #if TG3_TSO_SUPPORT != 0
5269         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5270                 int fw_len;
5271
5272                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5273                           TG3_TSO5_FW_RODATA_LEN +
5274                           TG3_TSO5_FW_DATA_LEN +
5275                           TG3_TSO5_FW_SBSS_LEN +
5276                           TG3_TSO5_FW_BSS_LEN);
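                /* Round the firmware footprint up to the next 128-byte
                 * boundary; the rounded region is then reserved out of the
                 * 5705 mbuf pool below.
                 */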
5277                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5278                 tw32(BUFMGR_MB_POOL_ADDR,
5279                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5280                 tw32(BUFMGR_MB_POOL_SIZE,
5281                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5282         }
5283 #endif
5284
5285         if (tp->dev->mtu <= ETH_DATA_LEN) {
5286                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5287                      tp->bufmgr_config.mbuf_read_dma_low_water);
5288                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5289                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5290                 tw32(BUFMGR_MB_HIGH_WATER,
5291                      tp->bufmgr_config.mbuf_high_water);
5292         } else {
5293                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5294                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5295                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5296                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5297                 tw32(BUFMGR_MB_HIGH_WATER,
5298                      tp->bufmgr_config.mbuf_high_water_jumbo);
5299         }
5300         tw32(BUFMGR_DMA_LOW_WATER,
5301              tp->bufmgr_config.dma_low_water);
5302         tw32(BUFMGR_DMA_HIGH_WATER,
5303              tp->bufmgr_config.dma_high_water);
5304
5305         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5306         for (i = 0; i < 2000; i++) {
5307                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5308                         break;
5309                 udelay(10);
5310         }
5311         if (i >= 2000) {
5312                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5313                        tp->dev->name);
5314                 return -ENODEV;
5315         }
5316
5317         /* Setup replenish threshold. */
5318         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5319
5320         /* Initialize TG3_BDINFO's at:
5321          *  RCVDBDI_STD_BD:     standard eth size rx ring
5322          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5323          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5324          *
5325          * like so:
5326          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5327          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5328          *                              ring attribute flags
5329          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5330          *
5331          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5332          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5333          *
5334          * The size of each ring is fixed in the firmware, but the location is
5335          * configurable.
5336          */
5337         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5338              ((u64) tp->rx_std_mapping >> 32));
5339         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5340              ((u64) tp->rx_std_mapping & 0xffffffff));
5341         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5342              NIC_SRAM_RX_BUFFER_DESC);
5343
5344         /* Don't even try to program the JUMBO/MINI buffer descriptor
5345          * configs on 5705.
5346          */
5347         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5348                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5349                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5350         } else {
5351                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5352                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5353
5354                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5355                      BDINFO_FLAGS_DISABLED);
5356
5357                 /* Setup replenish threshold. */
5358                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5359
5360                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5361                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5362                              ((u64) tp->rx_jumbo_mapping >> 32));
5363                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5364                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5365                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5366                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5367                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5368                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5369                 } else {
5370                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5371                              BDINFO_FLAGS_DISABLED);
5372                 }
5373
5374         }
5375
5376         /* There is only one send ring on 5705/5750, no need to explicitly
5377          * disable the others.
5378          */
5379         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5380                 /* Clear out send RCB ring in SRAM. */
5381                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5382                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5383                                       BDINFO_FLAGS_DISABLED);
5384         }
5385
5386         tp->tx_prod = 0;
5387         tp->tx_cons = 0;
5388         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5389         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5390
5391         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5392                        tp->tx_desc_mapping,
5393                        (TG3_TX_RING_SIZE <<
5394                         BDINFO_FLAGS_MAXLEN_SHIFT),
5395                        NIC_SRAM_TX_BUFFER_DESC);
5396
5397         /* There is only one receive return ring on 5705/5750, no need
5398          * to explicitly disable the others.
5399          */
5400         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5401                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5402                      i += TG3_BDINFO_SIZE) {
5403                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5404                                       BDINFO_FLAGS_DISABLED);
5405                 }
5406         }
5407
5408         tp->rx_rcb_ptr = 0;
5409         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5410
5411         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5412                        tp->rx_rcb_mapping,
5413                        (TG3_RX_RCB_RING_SIZE(tp) <<
5414                         BDINFO_FLAGS_MAXLEN_SHIFT),
5415                        0);
5416
5417         tp->rx_std_ptr = tp->rx_pending;
5418         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5419                      tp->rx_std_ptr);
5420
5421         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5422                                                 tp->rx_jumbo_pending : 0;
5423         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5424                      tp->rx_jumbo_ptr);
5425
5426         /* Initialize MAC address and backoff seed. */
5427         __tg3_set_mac_addr(tp);
5428
5429         /* MTU + ethernet header + FCS + optional VLAN tag */
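        /* (ETH_HLEN is the 14-byte Ethernet header; the extra 8 bytes cover
         * the 4-byte FCS plus a 4-byte 802.1Q VLAN tag.)
         */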
5430         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5431
5432         /* The slot time is changed by tg3_setup_phy if we
5433          * run at gigabit with half duplex.
5434          */
5435         tw32(MAC_TX_LENGTHS,
5436              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5437              (6 << TX_LENGTHS_IPG_SHIFT) |
5438              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5439
5440         /* Receive rules. */
5441         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5442         tw32(RCVLPC_CONFIG, 0x0181);
5443
5444         /* Calculate RDMAC_MODE setting early, we need it to determine
5445          * the RCVLPC_STATE_ENABLE mask.
5446          */
5447         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5448                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5449                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5450                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5451                       RDMAC_MODE_LNGREAD_ENAB);
5452         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5453                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5454
5455         /* If statement applies to 5705 and 5750 PCI devices only */
5456         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5457              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5458             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5459                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5460                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5461                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5462                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5463                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5464                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5465                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5466                 }
5467         }
5468
5469         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5470                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5471
5472 #if TG3_TSO_SUPPORT != 0
5473         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5474                 rdmac_mode |= (1 << 27);
5475 #endif
5476
5477         /* Receive/send statistics. */
5478         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5479             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5480                 val = tr32(RCVLPC_STATS_ENABLE);
5481                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5482                 tw32(RCVLPC_STATS_ENABLE, val);
5483         } else {
5484                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5485         }
5486         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5487         tw32(SNDDATAI_STATSENAB, 0xffffff);
5488         tw32(SNDDATAI_STATSCTRL,
5489              (SNDDATAI_SCTRL_ENABLE |
5490               SNDDATAI_SCTRL_FASTUPD));
5491
5492         /* Setup host coalescing engine. */
5493         tw32(HOSTCC_MODE, 0);
5494         for (i = 0; i < 2000; i++) {
5495                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5496                         break;
5497                 udelay(10);
5498         }
5499
5500         __tg3_set_coalesce(tp, &tp->coal);
5501
5502         /* set status block DMA address */
5503         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5504              ((u64) tp->status_mapping >> 32));
5505         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5506              ((u64) tp->status_mapping & 0xffffffff));
5507
5508         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5509                 /* Status/statistics block address.  See tg3_timer,
5510                  * the tg3_periodic_fetch_stats call there, and
5511                  * tg3_get_stats to see how this works for 5705/5750 chips.
5512                  */
5513                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5514                      ((u64) tp->stats_mapping >> 32));
5515                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5516                      ((u64) tp->stats_mapping & 0xffffffff));
5517                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5518                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5519         }
5520
5521         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5522
5523         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5524         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5525         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5526                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5527
5528         /* Clear statistics/status block in chip, and status block in ram. */
5529         for (i = NIC_SRAM_STATS_BLK;
5530              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5531              i += sizeof(u32)) {
5532                 tg3_write_mem(tp, i, 0);
5533                 udelay(40);
5534         }
5535         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5536
5537         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5538                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5539         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5540         udelay(40);
5541
5542         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5543          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5544          * register to preserve the GPIO settings for LOMs. The GPIOs,
5545          * whether used as inputs or outputs, are set by boot code after
5546          * reset.
5547          */
5548         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5549                 u32 gpio_mask;
5550
5551                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5552                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5553
5554                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5555                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5556                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5557
5558                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5559
5560                 /* GPIO1 must be driven high for eeprom write protect */
5561                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5562                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5563         }
5564         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5565         udelay(100);
5566
5567         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5568         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5569         tp->last_tag = 0;
5570
5571         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5572                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5573                 udelay(40);
5574         }
5575
5576         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5577                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5578                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5579                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5580                WDMAC_MODE_LNGREAD_ENAB);
5581
5582         /* If statement applies to 5705 and 5750 PCI devices only */
5583         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5584              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5585             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5586                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5587                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5588                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5589                         /* nothing */
5590                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5591                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5592                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5593                         val |= WDMAC_MODE_RX_ACCEL;
5594                 }
5595         }
5596
5597         tw32_f(WDMAC_MODE, val);
5598         udelay(40);
5599
5600         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5601                 val = tr32(TG3PCI_X_CAPS);
5602                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5603                         val &= ~PCIX_CAPS_BURST_MASK;
5604                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5605                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5606                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5607                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5608                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5609                                 val |= (tp->split_mode_max_reqs <<
5610                                         PCIX_CAPS_SPLIT_SHIFT);
5611                 }
5612                 tw32(TG3PCI_X_CAPS, val);
5613         }
5614
5615         tw32_f(RDMAC_MODE, rdmac_mode);
5616         udelay(40);
5617
5618         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5619         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5620                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5621         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5622         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5623         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5624         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5625         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5626 #if TG3_TSO_SUPPORT != 0
5627         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5628                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5629 #endif
5630         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5631         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5632
5633         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5634                 err = tg3_load_5701_a0_firmware_fix(tp);
5635                 if (err)
5636                         return err;
5637         }
5638
5639 #if TG3_TSO_SUPPORT != 0
5640         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5641                 err = tg3_load_tso_firmware(tp);
5642                 if (err)
5643                         return err;
5644         }
5645 #endif
5646
5647         tp->tx_mode = TX_MODE_ENABLE;
5648         tw32_f(MAC_TX_MODE, tp->tx_mode);
5649         udelay(100);
5650
5651         tp->rx_mode = RX_MODE_ENABLE;
5652         tw32_f(MAC_RX_MODE, tp->rx_mode);
5653         udelay(10);
5654
5655         if (tp->link_config.phy_is_low_power) {
5656                 tp->link_config.phy_is_low_power = 0;
5657                 tp->link_config.speed = tp->link_config.orig_speed;
5658                 tp->link_config.duplex = tp->link_config.orig_duplex;
5659                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5660         }
5661
5662         tp->mi_mode = MAC_MI_MODE_BASE;
5663         tw32_f(MAC_MI_MODE, tp->mi_mode);
5664         udelay(80);
5665
5666         tw32(MAC_LED_CTRL, tp->led_ctrl);
5667
5668         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5669         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5670                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5671                 udelay(10);
5672         }
5673         tw32_f(MAC_RX_MODE, tp->rx_mode);
5674         udelay(10);
5675
5676         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5677                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5678                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5679                         /* Set drive transmission level to 1.2V  */
5680                         /* only if the signal pre-emphasis bit is not set  */
5681                         val = tr32(MAC_SERDES_CFG);
5682                         val &= 0xfffff000;
5683                         val |= 0x880;
5684                         tw32(MAC_SERDES_CFG, val);
5685                 }
5686                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5687                         tw32(MAC_SERDES_CFG, 0x616000);
5688         }
5689
5690         /* Prevent chip from dropping frames when flow control
5691          * is enabled.
5692          */
5693         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5694
5695         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5696             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5697                 /* Use hardware link auto-negotiation */
5698                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5699         }
5700
5701         err = tg3_setup_phy(tp, 1);
5702         if (err)
5703                 return err;
5704
5705         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5706                 u32 tmp;
5707
5708                 /* Clear CRC stats. */
5709                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5710                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5711                         tg3_readphy(tp, 0x14, &tmp);
5712                 }
5713         }
5714
5715         __tg3_set_rx_mode(tp->dev);
5716
5717         /* Initialize receive rules. */
5718         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5719         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5720         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5721         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5722
5723         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
5724             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780))
5725                 limit = 8;
5726         else
5727                 limit = 16;
5728         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5729                 limit -= 4;
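        /* The switch below falls through intentionally: every receive rule
         * from 'limit - 1' down to 4 is cleared, while rules 0 and 1 keep the
         * values programmed above (rules 2 and 3 are left alone).
         */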
5730         switch (limit) {
5731         case 16:
5732                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5733         case 15:
5734                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5735         case 14:
5736                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5737         case 13:
5738                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5739         case 12:
5740                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5741         case 11:
5742                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5743         case 10:
5744                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5745         case 9:
5746                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5747         case 8:
5748                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5749         case 7:
5750                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5751         case 6:
5752                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5753         case 5:
5754                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5755         case 4:
5756                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5757         case 3:
5758                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5759         case 2:
5760         case 1:
5761
5762         default:
5763                 break;
5764         }
5765
5766         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5767
5768         return 0;
5769 }
5770
5771 /* Called at device open time to get the chip ready for
5772  * packet processing.  Invoked with tp->lock held.
5773  */
5774 static int tg3_init_hw(struct tg3 *tp)
5775 {
5776         int err;
5777
5778         /* Force the chip into D0. */
5779         err = tg3_set_power_state(tp, 0);
5780         if (err)
5781                 goto out;
5782
5783         tg3_switch_clocks(tp);
5784
5785         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5786
5787         err = tg3_reset_hw(tp);
5788
5789 out:
5790         return err;
5791 }
5792
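/* Accumulate a 32-bit hardware statistics register into a 64-bit high/low
 * software counter.  Unsigned overflow of the low word shows up as the new
 * sum being smaller than the value just added, in which case a carry is
 * propagated into the high word.
 */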
5793 #define TG3_STAT_ADD32(PSTAT, REG) \
5794 do {    u32 __val = tr32(REG); \
5795         (PSTAT)->low += __val; \
5796         if ((PSTAT)->low < __val) \
5797                 (PSTAT)->high += 1; \
5798 } while (0)
5799
5800 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5801 {
5802         struct tg3_hw_stats *sp = tp->hw_stats;
5803
5804         if (!netif_carrier_ok(tp->dev))
5805                 return;
5806
5807         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5808         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5809         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5810         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5811         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5812         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5813         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5814         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5815         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5816         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5817         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5818         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5819         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5820
5821         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5822         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5823         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5824         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5825         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5826         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5827         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5828         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5829         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5830         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5831         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5832         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5833         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5834         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5835 }
5836
5837 static void tg3_timer(unsigned long __opaque)
5838 {
5839         struct tg3 *tp = (struct tg3 *) __opaque;
5840
5841         spin_lock(&tp->lock);
5842
5843         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
5844                 /* All of this garbage is because, when using non-tagged
5845                  * IRQ status, the mailbox/status_block protocol the chip
5846                  * uses with the CPU is race prone.
5847                  */
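                /* If the status block still reports an update, force a fresh
                 * interrupt with GRC_LCLCTRL_SETINT so the handler runs;
                 * otherwise kick the coalescing engine with HOSTCC_MODE_NOW
                 * to have it regenerate the status update.
                 */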
5848                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
5849                         tw32(GRC_LOCAL_CTRL,
5850                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5851                 } else {
5852                         tw32(HOSTCC_MODE, tp->coalesce_mode |
5853                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5854                 }
5855
5856                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5857                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5858                         spin_unlock(&tp->lock);
5859                         schedule_work(&tp->reset_task);
5860                         return;
5861                 }
5862         }
5863
5864         /* This part only runs once per second. */
5865         if (!--tp->timer_counter) {
5866                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5867                         tg3_periodic_fetch_stats(tp);
5868
5869                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5870                         u32 mac_stat;
5871                         int phy_event;
5872
5873                         mac_stat = tr32(MAC_STATUS);
5874
5875                         phy_event = 0;
5876                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5877                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5878                                         phy_event = 1;
5879                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5880                                 phy_event = 1;
5881
5882                         if (phy_event)
5883                                 tg3_setup_phy(tp, 0);
5884                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5885                         u32 mac_stat = tr32(MAC_STATUS);
5886                         int need_setup = 0;
5887
5888                         if (netif_carrier_ok(tp->dev) &&
5889                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5890                                 need_setup = 1;
5891                         }
5892                         if (! netif_carrier_ok(tp->dev) &&
5893                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5894                                          MAC_STATUS_SIGNAL_DET))) {
5895                                 need_setup = 1;
5896                         }
5897                         if (need_setup) {
5898                                 tw32_f(MAC_MODE,
5899                                      (tp->mac_mode &
5900                                       ~MAC_MODE_PORT_MODE_MASK));
5901                                 udelay(40);
5902                                 tw32_f(MAC_MODE, tp->mac_mode);
5903                                 udelay(40);
5904                                 tg3_setup_phy(tp, 0);
5905                         }
5906                 }
5907
5908                 tp->timer_counter = tp->timer_multiplier;
5909         }
5910
5911         /* Heartbeat is only sent once every 120 seconds.  */
5912         if (!--tp->asf_counter) {
5913                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5914                         u32 val;
5915
5916                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5917                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5918                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5919                         val = tr32(GRC_RX_CPU_EVENT);
5920                         val |= (1 << 14);
5921                         tw32(GRC_RX_CPU_EVENT, val);
5922                 }
5923                 tp->asf_counter = tp->asf_multiplier;
5924         }
5925
5926         spin_unlock(&tp->lock);
5927
5928         tp->timer.expires = jiffies + tp->timer_offset;
5929         add_timer(&tp->timer);
5930 }
5931
5932 static int tg3_test_interrupt(struct tg3 *tp)
5933 {
5934         struct net_device *dev = tp->dev;
5935         int err, i;
5936         u32 int_mbox = 0;
5937
5938         if (!netif_running(dev))
5939                 return -ENODEV;
5940
5941         tg3_disable_ints(tp);
5942
5943         free_irq(tp->pdev->irq, dev);
5944
5945         err = request_irq(tp->pdev->irq, tg3_test_isr,
5946                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5947         if (err)
5948                 return err;
5949
5950         tg3_enable_ints(tp);
5951
5952         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
5953                HOSTCC_MODE_NOW);
5954
5955         for (i = 0; i < 5; i++) {
5956                 int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5957                 if (int_mbox != 0)
5958                         break;
5959                 msleep(10);
5960         }
5961
5962         tg3_disable_ints(tp);
5963
5964         free_irq(tp->pdev->irq, dev);
5965         
5966         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5967                 err = request_irq(tp->pdev->irq, tg3_msi,
5968                                   SA_SAMPLE_RANDOM, dev->name, dev);
5969         else {
5970                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
5971                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
5972                         fn = tg3_interrupt_tagged;
5973                 err = request_irq(tp->pdev->irq, fn,
5974                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5975         }
5976
5977         if (err)
5978                 return err;
5979
5980         if (int_mbox != 0)
5981                 return 0;
5982
5983         return -EIO;
5984 }
5985
5986 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
5987  * mode is successfully restored.
5988  */
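/* Note: if this returns an error, tg3_open undoes the MSI setup, halts the
 * chip and frees the rings before aborting the open.
 */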
5989 static int tg3_test_msi(struct tg3 *tp)
5990 {
5991         struct net_device *dev = tp->dev;
5992         int err;
5993         u16 pci_cmd;
5994
5995         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
5996                 return 0;
5997
5998         /* Turn off SERR reporting in case MSI terminates with Master
5999          * Abort.
6000          */
6001         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6002         pci_write_config_word(tp->pdev, PCI_COMMAND,
6003                               pci_cmd & ~PCI_COMMAND_SERR);
6004
6005         err = tg3_test_interrupt(tp);
6006
6007         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6008
6009         if (!err)
6010                 return 0;
6011
6012         /* other failures */
6013         if (err != -EIO)
6014                 return err;
6015
6016         /* MSI test failed, go back to INTx mode */
6017         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6018                "switching to INTx mode. Please report this failure to "
6019                "the PCI maintainer and include system chipset information.\n",
6020                        tp->dev->name);
6021
6022         free_irq(tp->pdev->irq, dev);
6023         pci_disable_msi(tp->pdev);
6024
6025         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6026
6027         {
6028                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6029                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6030                         fn = tg3_interrupt_tagged;
6031
6032                 err = request_irq(tp->pdev->irq, fn,
6033                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6034         }
6035         if (err)
6036                 return err;
6037
6038         /* Need to reset the chip because the MSI cycle may have terminated
6039          * with Master Abort.
6040          */
6041         tg3_full_lock(tp, 1);
6042
6043         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6044         err = tg3_init_hw(tp);
6045
6046         tg3_full_unlock(tp);
6047
6048         if (err)
6049                 free_irq(tp->pdev->irq, dev);
6050
6051         return err;
6052 }
6053
6054 static int tg3_open(struct net_device *dev)
6055 {
6056         struct tg3 *tp = netdev_priv(dev);
6057         int err;
6058
6059         tg3_full_lock(tp, 0);
6060
6061         tg3_disable_ints(tp);
6062         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6063
6064         tg3_full_unlock(tp);
6065
6066         /* The placement of this call is tied
6067          * to the setup and use of Host TX descriptors.
6068          */
6069         err = tg3_alloc_consistent(tp);
6070         if (err)
6071                 return err;
6072
6073         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6074             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6075             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6076                 /* All MSI supporting chips should support tagged
6077                  * status.  Assert that this is the case.
6078                  */
6079                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6080                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6081                                "Not using MSI.\n", tp->dev->name);
6082                 } else if (pci_enable_msi(tp->pdev) == 0) {
6083                         u32 msi_mode;
6084
6085                         msi_mode = tr32(MSGINT_MODE);
6086                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6087                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6088                 }
6089         }
6090         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6091                 err = request_irq(tp->pdev->irq, tg3_msi,
6092                                   SA_SAMPLE_RANDOM, dev->name, dev);
6093         else {
6094                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6095                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6096                         fn = tg3_interrupt_tagged;
6097
6098                 err = request_irq(tp->pdev->irq, fn,
6099                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6100         }
6101
6102         if (err) {
6103                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6104                         pci_disable_msi(tp->pdev);
6105                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6106                 }
6107                 tg3_free_consistent(tp);
6108                 return err;
6109         }
6110
6111         tg3_full_lock(tp, 0);
6112
6113         err = tg3_init_hw(tp);
6114         if (err) {
6115                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6116                 tg3_free_rings(tp);
6117         } else {
6118                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6119                         tp->timer_offset = HZ;
6120                 else
6121                         tp->timer_offset = HZ / 10;
6122
6123                 BUG_ON(tp->timer_offset > HZ);
6124                 tp->timer_counter = tp->timer_multiplier =
6125                         (HZ / tp->timer_offset);
6126                 tp->asf_counter = tp->asf_multiplier =
6127                         ((HZ / tp->timer_offset) * 120);
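                /* With tagged status the timer fires once a second, otherwise
                 * ten times a second; timer_counter scales that back so the
                 * once-per-second work in tg3_timer still runs at 1 Hz, and
                 * asf_counter spaces the ASF heartbeat 120 seconds apart.
                 */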
6128
6129                 init_timer(&tp->timer);
6130                 tp->timer.expires = jiffies + tp->timer_offset;
6131                 tp->timer.data = (unsigned long) tp;
6132                 tp->timer.function = tg3_timer;
6133         }
6134
6135         tg3_full_unlock(tp);
6136
6137         if (err) {
6138                 free_irq(tp->pdev->irq, dev);
6139                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6140                         pci_disable_msi(tp->pdev);
6141                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6142                 }
6143                 tg3_free_consistent(tp);
6144                 return err;
6145         }
6146
6147         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6148                 err = tg3_test_msi(tp);
6149
6150                 if (err) {
6151                         tg3_full_lock(tp, 0);
6152
6153                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6154                                 pci_disable_msi(tp->pdev);
6155                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6156                         }
6157                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6158                         tg3_free_rings(tp);
6159                         tg3_free_consistent(tp);
6160
6161                         tg3_full_unlock(tp);
6162
6163                         return err;
6164                 }
6165         }
6166
6167         tg3_full_lock(tp, 0);
6168
6169         add_timer(&tp->timer);
6170         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6171         tg3_enable_ints(tp);
6172
6173         tg3_full_unlock(tp);
6174
6175         netif_start_queue(dev);
6176
6177         return 0;
6178 }
6179
6180 #if 0
6181 /*static*/ void tg3_dump_state(struct tg3 *tp)
6182 {
6183         u32 val32, val32_2, val32_3, val32_4, val32_5;
6184         u16 val16;
6185         int i;
6186
6187         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6188         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6189         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6190                val16, val32);
6191
6192         /* MAC block */
6193         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6194                tr32(MAC_MODE), tr32(MAC_STATUS));
6195         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6196                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6197         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6198                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6199         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6200                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6201
6202         /* Send data initiator control block */
6203         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6204                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6205         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6206                tr32(SNDDATAI_STATSCTRL));
6207
6208         /* Send data completion control block */
6209         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6210
6211         /* Send BD ring selector block */
6212         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6213                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6214
6215         /* Send BD initiator control block */
6216         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6217                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6218
6219         /* Send BD completion control block */
6220         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6221
6222         /* Receive list placement control block */
6223         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6224                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6225         printk("       RCVLPC_STATSCTRL[%08x]\n",
6226                tr32(RCVLPC_STATSCTRL));
6227
6228         /* Receive data and receive BD initiator control block */
6229         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6230                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6231
6232         /* Receive data completion control block */
6233         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6234                tr32(RCVDCC_MODE));
6235
6236         /* Receive BD initiator control block */
6237         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6238                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6239
6240         /* Receive BD completion control block */
6241         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6242                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6243
6244         /* Receive list selector control block */
6245         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6246                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6247
6248         /* Mbuf cluster free block */
6249         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6250                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6251
6252         /* Host coalescing control block */
6253         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6254                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6255         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6256                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6257                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6258         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6259                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6260                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6261         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6262                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6263         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6264                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6265
6266         /* Memory arbiter control block */
6267         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6268                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6269
6270         /* Buffer manager control block */
6271         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6272                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6273         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6274                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6275         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6276                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6277                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6278                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6279
6280         /* Read DMA control block */
6281         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6282                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6283
6284         /* Write DMA control block */
6285         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6286                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6287
6288         /* DMA completion block */
6289         printk("DEBUG: DMAC_MODE[%08x]\n",
6290                tr32(DMAC_MODE));
6291
6292         /* GRC block */
6293         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6294                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6295         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6296                tr32(GRC_LOCAL_CTRL));
6297
6298         /* TG3_BDINFOs */
6299         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6300                tr32(RCVDBDI_JUMBO_BD + 0x0),
6301                tr32(RCVDBDI_JUMBO_BD + 0x4),
6302                tr32(RCVDBDI_JUMBO_BD + 0x8),
6303                tr32(RCVDBDI_JUMBO_BD + 0xc));
6304         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6305                tr32(RCVDBDI_STD_BD + 0x0),
6306                tr32(RCVDBDI_STD_BD + 0x4),
6307                tr32(RCVDBDI_STD_BD + 0x8),
6308                tr32(RCVDBDI_STD_BD + 0xc));
6309         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6310                tr32(RCVDBDI_MINI_BD + 0x0),
6311                tr32(RCVDBDI_MINI_BD + 0x4),
6312                tr32(RCVDBDI_MINI_BD + 0x8),
6313                tr32(RCVDBDI_MINI_BD + 0xc));
6314
6315         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6316         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6317         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6318         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6319         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6320                val32, val32_2, val32_3, val32_4);
6321
6322         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6323         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6324         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6325         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6326         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6327                val32, val32_2, val32_3, val32_4);
6328
6329         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6330         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6331         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6332         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6333         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6334         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6335                val32, val32_2, val32_3, val32_4, val32_5);
6336
6337         /* SW status block */
6338         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6339                tp->hw_status->status,
6340                tp->hw_status->status_tag,
6341                tp->hw_status->rx_jumbo_consumer,
6342                tp->hw_status->rx_consumer,
6343                tp->hw_status->rx_mini_consumer,
6344                tp->hw_status->idx[0].rx_producer,
6345                tp->hw_status->idx[0].tx_consumer);
6346
6347         /* SW statistics block */
6348         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6349                ((u32 *)tp->hw_stats)[0],
6350                ((u32 *)tp->hw_stats)[1],
6351                ((u32 *)tp->hw_stats)[2],
6352                ((u32 *)tp->hw_stats)[3]);
6353
6354         /* Mailboxes */
6355         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6356                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6357                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6358                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6359                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6360
6361         /* NIC side send descriptors. */
6362         for (i = 0; i < 6; i++) {
6363                 unsigned long txd;
6364
6365                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6366                         + (i * sizeof(struct tg3_tx_buffer_desc));
6367                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6368                        i,
6369                        readl(txd + 0x0), readl(txd + 0x4),
6370                        readl(txd + 0x8), readl(txd + 0xc));
6371         }
6372
6373         /* NIC side RX descriptors. */
6374         for (i = 0; i < 6; i++) {
6375                 unsigned long rxd;
6376
6377                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6378                         + (i * sizeof(struct tg3_rx_buffer_desc));
6379                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6380                        i,
6381                        readl(rxd + 0x0), readl(rxd + 0x4),
6382                        readl(rxd + 0x8), readl(rxd + 0xc));
6383                 rxd += (4 * sizeof(u32));
6384                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6385                        i,
6386                        readl(rxd + 0x0), readl(rxd + 0x4),
6387                        readl(rxd + 0x8), readl(rxd + 0xc));
6388         }
6389
6390         for (i = 0; i < 6; i++) {
6391                 unsigned long rxd;
6392
6393                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6394                         + (i * sizeof(struct tg3_rx_buffer_desc));
6395                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6396                        i,
6397                        readl(rxd + 0x0), readl(rxd + 0x4),
6398                        readl(rxd + 0x8), readl(rxd + 0xc));
6399                 rxd += (4 * sizeof(u32));
6400                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6401                        i,
6402                        readl(rxd + 0x0), readl(rxd + 0x4),
6403                        readl(rxd + 0x8), readl(rxd + 0xc));
6404         }
6405 }
6406 #endif
6407
6408 static struct net_device_stats *tg3_get_stats(struct net_device *);
6409 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6410
6411 static int tg3_close(struct net_device *dev)
6412 {
6413         struct tg3 *tp = netdev_priv(dev);
6414
6415         netif_stop_queue(dev);
6416
6417         del_timer_sync(&tp->timer);
6418
6419         tg3_full_lock(tp, 1);
6420 #if 0
6421         tg3_dump_state(tp);
6422 #endif
6423
6424         tg3_disable_ints(tp);
6425
6426         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6427         tg3_free_rings(tp);
6428         tp->tg3_flags &=
6429                 ~(TG3_FLAG_INIT_COMPLETE |
6430                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6431         netif_carrier_off(tp->dev);
6432
6433         tg3_full_unlock(tp);
6434
6435         free_irq(tp->pdev->irq, dev);
6436         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6437                 pci_disable_msi(tp->pdev);
6438                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6439         }
6440
6441         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6442                sizeof(tp->net_stats_prev));
6443         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6444                sizeof(tp->estats_prev));
6445
6446         tg3_free_consistent(tp);
6447
6448         return 0;
6449 }
6450
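/* Hardware statistics counters are 64-bit values split into high/low
 * 32-bit words (tg3_stat64_t); on a 32-bit host only the low word fits
 * in the unsigned long returned here.
 */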
6451 static inline unsigned long get_stat64(tg3_stat64_t *val)
6452 {
6453         unsigned long ret;
6454
6455 #if (BITS_PER_LONG == 32)
6456         ret = val->low;
6457 #else
6458         ret = ((u64)val->high << 32) | ((u64)val->low);
6459 #endif
6460         return ret;
6461 }
6462
6463 static unsigned long calc_crc_errors(struct tg3 *tp)
6464 {
6465         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6466
6467         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6468             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6469              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6470                 u32 val;
6471
6472                 spin_lock_bh(&tp->lock);
6473                 if (!tg3_readphy(tp, 0x1e, &val)) {
6474                         tg3_writephy(tp, 0x1e, val | 0x8000);
6475                         tg3_readphy(tp, 0x14, &val);
6476                 } else
6477                         val = 0;
6478                 spin_unlock_bh(&tp->lock);
6479
6480                 tp->phy_crc_errors += val;
6481
6482                 return tp->phy_crc_errors;
6483         }
6484
6485         return get_stat64(&hw_stats->rx_fcs_errors);
6486 }
6487
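/* Ethtool statistics are reported as running totals: the snapshot saved
 * in estats_prev at the last tg3_close() is added to the live hardware
 * counters, so the totals survive chip resets and down/up cycles.
 */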
6488 #define ESTAT_ADD(member) \
6489         estats->member =        old_estats->member + \
6490                                 get_stat64(&hw_stats->member)
6491
6492 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6493 {
6494         struct tg3_ethtool_stats *estats = &tp->estats;
6495         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6496         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6497
6498         if (!hw_stats)
6499                 return old_estats;
6500
6501         ESTAT_ADD(rx_octets);
6502         ESTAT_ADD(rx_fragments);
6503         ESTAT_ADD(rx_ucast_packets);
6504         ESTAT_ADD(rx_mcast_packets);
6505         ESTAT_ADD(rx_bcast_packets);
6506         ESTAT_ADD(rx_fcs_errors);
6507         ESTAT_ADD(rx_align_errors);
6508         ESTAT_ADD(rx_xon_pause_rcvd);
6509         ESTAT_ADD(rx_xoff_pause_rcvd);
6510         ESTAT_ADD(rx_mac_ctrl_rcvd);
6511         ESTAT_ADD(rx_xoff_entered);
6512         ESTAT_ADD(rx_frame_too_long_errors);
6513         ESTAT_ADD(rx_jabbers);
6514         ESTAT_ADD(rx_undersize_packets);
6515         ESTAT_ADD(rx_in_length_errors);
6516         ESTAT_ADD(rx_out_length_errors);
6517         ESTAT_ADD(rx_64_or_less_octet_packets);
6518         ESTAT_ADD(rx_65_to_127_octet_packets);
6519         ESTAT_ADD(rx_128_to_255_octet_packets);
6520         ESTAT_ADD(rx_256_to_511_octet_packets);
6521         ESTAT_ADD(rx_512_to_1023_octet_packets);
6522         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6523         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6524         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6525         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6526         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6527
6528         ESTAT_ADD(tx_octets);
6529         ESTAT_ADD(tx_collisions);
6530         ESTAT_ADD(tx_xon_sent);
6531         ESTAT_ADD(tx_xoff_sent);
6532         ESTAT_ADD(tx_flow_control);
6533         ESTAT_ADD(tx_mac_errors);
6534         ESTAT_ADD(tx_single_collisions);
6535         ESTAT_ADD(tx_mult_collisions);
6536         ESTAT_ADD(tx_deferred);
6537         ESTAT_ADD(tx_excessive_collisions);
6538         ESTAT_ADD(tx_late_collisions);
6539         ESTAT_ADD(tx_collide_2times);
6540         ESTAT_ADD(tx_collide_3times);
6541         ESTAT_ADD(tx_collide_4times);
6542         ESTAT_ADD(tx_collide_5times);
6543         ESTAT_ADD(tx_collide_6times);
6544         ESTAT_ADD(tx_collide_7times);
6545         ESTAT_ADD(tx_collide_8times);
6546         ESTAT_ADD(tx_collide_9times);
6547         ESTAT_ADD(tx_collide_10times);
6548         ESTAT_ADD(tx_collide_11times);
6549         ESTAT_ADD(tx_collide_12times);
6550         ESTAT_ADD(tx_collide_13times);
6551         ESTAT_ADD(tx_collide_14times);
6552         ESTAT_ADD(tx_collide_15times);
6553         ESTAT_ADD(tx_ucast_packets);
6554         ESTAT_ADD(tx_mcast_packets);
6555         ESTAT_ADD(tx_bcast_packets);
6556         ESTAT_ADD(tx_carrier_sense_errors);
6557         ESTAT_ADD(tx_discards);
6558         ESTAT_ADD(tx_errors);
6559
6560         ESTAT_ADD(dma_writeq_full);
6561         ESTAT_ADD(dma_write_prioq_full);
6562         ESTAT_ADD(rxbds_empty);
6563         ESTAT_ADD(rx_discards);
6564         ESTAT_ADD(rx_errors);
6565         ESTAT_ADD(rx_threshold_hit);
6566
6567         ESTAT_ADD(dma_readq_full);
6568         ESTAT_ADD(dma_read_prioq_full);
6569         ESTAT_ADD(tx_comp_queue_full);
6570
6571         ESTAT_ADD(ring_set_send_prod_index);
6572         ESTAT_ADD(ring_status_update);
6573         ESTAT_ADD(nic_irqs);
6574         ESTAT_ADD(nic_avoided_irqs);
6575         ESTAT_ADD(nic_tx_threshold_hit);
6576
6577         return estats;
6578 }
6579
6580 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6581 {
6582         struct tg3 *tp = netdev_priv(dev);
6583         struct net_device_stats *stats = &tp->net_stats;
6584         struct net_device_stats *old_stats = &tp->net_stats_prev;
6585         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6586
6587         if (!hw_stats)
6588                 return old_stats;
6589
6590         stats->rx_packets = old_stats->rx_packets +
6591                 get_stat64(&hw_stats->rx_ucast_packets) +
6592                 get_stat64(&hw_stats->rx_mcast_packets) +
6593                 get_stat64(&hw_stats->rx_bcast_packets);
6594                 
6595         stats->tx_packets = old_stats->tx_packets +
6596                 get_stat64(&hw_stats->tx_ucast_packets) +
6597                 get_stat64(&hw_stats->tx_mcast_packets) +
6598                 get_stat64(&hw_stats->tx_bcast_packets);
6599
6600         stats->rx_bytes = old_stats->rx_bytes +
6601                 get_stat64(&hw_stats->rx_octets);
6602         stats->tx_bytes = old_stats->tx_bytes +
6603                 get_stat64(&hw_stats->tx_octets);
6604
6605         stats->rx_errors = old_stats->rx_errors +
6606                 get_stat64(&hw_stats->rx_errors) +
6607                 get_stat64(&hw_stats->rx_discards);
6608         stats->tx_errors = old_stats->tx_errors +
6609                 get_stat64(&hw_stats->tx_errors) +
6610                 get_stat64(&hw_stats->tx_mac_errors) +
6611                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6612                 get_stat64(&hw_stats->tx_discards);
6613
6614         stats->multicast = old_stats->multicast +
6615                 get_stat64(&hw_stats->rx_mcast_packets);
6616         stats->collisions = old_stats->collisions +
6617                 get_stat64(&hw_stats->tx_collisions);
6618
6619         stats->rx_length_errors = old_stats->rx_length_errors +
6620                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6621                 get_stat64(&hw_stats->rx_undersize_packets);
6622
6623         stats->rx_over_errors = old_stats->rx_over_errors +
6624                 get_stat64(&hw_stats->rxbds_empty);
6625         stats->rx_frame_errors = old_stats->rx_frame_errors +
6626                 get_stat64(&hw_stats->rx_align_errors);
6627         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6628                 get_stat64(&hw_stats->tx_discards);
6629         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6630                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6631
6632         stats->rx_crc_errors = old_stats->rx_crc_errors +
6633                 calc_crc_errors(tp);
6634
6635         return stats;
6636 }
6637
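/* calc_crc() is the reflected CRC-32 (polynomial 0xedb88320) computed
 * bit by bit; __tg3_set_rx_mode() below uses seven bits derived from the
 * result to select one of the 128 multicast hash filter bits spread
 * across MAC_HASH_REG_0..3.
 */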
6638 static inline u32 calc_crc(unsigned char *buf, int len)
6639 {
6640         u32 reg;
6641         u32 tmp;
6642         int j, k;
6643
6644         reg = 0xffffffff;
6645
6646         for (j = 0; j < len; j++) {
6647                 reg ^= buf[j];
6648
6649                 for (k = 0; k < 8; k++) {
6650                         tmp = reg & 0x01;
6651
6652                         reg >>= 1;
6653
6654                         if (tmp) {
6655                                 reg ^= 0xedb88320;
6656                         }
6657                 }
6658         }
6659
6660         return ~reg;
6661 }
6662
6663 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6664 {
6665         /* accept or reject all multicast frames */
6666         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6667         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6668         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6669         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6670 }
6671
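/* Rebuild the receive filter from dev->flags: IFF_PROMISC bypasses
 * filtering entirely, IFF_ALLMULTI opens the multicast hash completely,
 * an empty multicast list closes it, and otherwise the hash registers
 * are programmed from the current multicast list via calc_crc().
 */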
6672 static void __tg3_set_rx_mode(struct net_device *dev)
6673 {
6674         struct tg3 *tp = netdev_priv(dev);
6675         u32 rx_mode;
6676
6677         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6678                                   RX_MODE_KEEP_VLAN_TAG);
6679
6680         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6681          * flag clear.
6682          */
6683 #if TG3_VLAN_TAG_USED
6684         if (!tp->vlgrp &&
6685             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6686                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6687 #else
6688         /* By definition, VLAN is always disabled in this
6689          * case.
6690          */
6691         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6692                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6693 #endif
6694
6695         if (dev->flags & IFF_PROMISC) {
6696                 /* Promiscuous mode. */
6697                 rx_mode |= RX_MODE_PROMISC;
6698         } else if (dev->flags & IFF_ALLMULTI) {
6699                 /* Accept all multicast. */
6700                 tg3_set_multi (tp, 1);
6701         } else if (dev->mc_count < 1) {
6702                 /* Reject all multicast. */
6703                 tg3_set_multi (tp, 0);
6704         } else {
6705                 /* Accept one or more multicast(s). */
6706                 struct dev_mc_list *mclist;
6707                 unsigned int i;
6708                 u32 mc_filter[4] = { 0, };
6709                 u32 regidx;
6710                 u32 bit;
6711                 u32 crc;
6712
6713                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6714                      i++, mclist = mclist->next) {
6715
6716                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6717                         bit = ~crc & 0x7f;
6718                         regidx = (bit & 0x60) >> 5;
6719                         bit &= 0x1f;
6720                         mc_filter[regidx] |= (1 << bit);
6721                 }
6722
6723                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6724                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6725                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6726                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6727         }
6728
6729         if (rx_mode != tp->rx_mode) {
6730                 tp->rx_mode = rx_mode;
6731                 tw32_f(MAC_RX_MODE, rx_mode);
6732                 udelay(10);
6733         }
6734 }
6735
6736 static void tg3_set_rx_mode(struct net_device *dev)
6737 {
6738         struct tg3 *tp = netdev_priv(dev);
6739
6740         tg3_full_lock(tp, 0);
6741         __tg3_set_rx_mode(dev);
6742         tg3_full_unlock(tp);
6743 }
6744
6745 #define TG3_REGDUMP_LEN         (32 * 1024)
6746
6747 static int tg3_get_regs_len(struct net_device *dev)
6748 {
6749         return TG3_REGDUMP_LEN;
6750 }
6751
6752 static void tg3_get_regs(struct net_device *dev,
6753                 struct ethtool_regs *regs, void *_p)
6754 {
6755         u32 *p = _p;
6756         struct tg3 *tp = netdev_priv(dev);
6757         u8 *orig_p = _p;
6758         int i;
6759
6760         regs->version = 0;
6761
6762         memset(p, 0, TG3_REGDUMP_LEN);
6763
6764         tg3_full_lock(tp, 0);
6765
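/* Each register block is copied to its natural offset within the 32 KB
 * dump buffer (orig_p + base), so registers land at the same offsets as
 * in the chip's register space; the memset() above leaves the gaps
 * between blocks zeroed.
 */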
6766 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6767 #define GET_REG32_LOOP(base,len)                \
6768 do {    p = (u32 *)(orig_p + (base));           \
6769         for (i = 0; i < len; i += 4)            \
6770                 __GET_REG32((base) + i);        \
6771 } while (0)
6772 #define GET_REG32_1(reg)                        \
6773 do {    p = (u32 *)(orig_p + (reg));            \
6774         __GET_REG32((reg));                     \
6775 } while (0)
6776
6777         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6778         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6779         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6780         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6781         GET_REG32_1(SNDDATAC_MODE);
6782         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6783         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6784         GET_REG32_1(SNDBDC_MODE);
6785         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6786         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6787         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6788         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6789         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6790         GET_REG32_1(RCVDCC_MODE);
6791         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6792         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6793         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6794         GET_REG32_1(MBFREE_MODE);
6795         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6796         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6797         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6798         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6799         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6800         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6801         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6802         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6803         GET_REG32_LOOP(FTQ_RESET, 0x120);
6804         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6805         GET_REG32_1(DMAC_MODE);
6806         GET_REG32_LOOP(GRC_MODE, 0x4c);
6807         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6808                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6809
6810 #undef __GET_REG32
6811 #undef GET_REG32_LOOP
6812 #undef GET_REG32_1
6813
6814         tg3_full_unlock(tp);
6815 }
6816
6817 static int tg3_get_eeprom_len(struct net_device *dev)
6818 {
6819         struct tg3 *tp = netdev_priv(dev);
6820
6821         return tp->nvram_size;
6822 }
6823
6824 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6825
6826 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6827 {
6828         struct tg3 *tp = netdev_priv(dev);
6829         int ret;
6830         u8  *pd;
6831         u32 i, offset, len, val, b_offset, b_count;
6832
6833         offset = eeprom->offset;
6834         len = eeprom->len;
6835         eeprom->len = 0;
6836
6837         eeprom->magic = TG3_EEPROM_MAGIC;
6838
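             /* NVRAM is read in 4-byte words, so the request is split into
              * an unaligned head, an aligned middle and an unaligned tail.
              */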
6839         if (offset & 3) {
6840                 /* adjustments to start on required 4 byte boundary */
6841                 b_offset = offset & 3;
6842                 b_count = 4 - b_offset;
6843                 if (b_count > len) {
6844                         /* i.e. offset=1 len=2 */
6845                         b_count = len;
6846                 }
6847                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6848                 if (ret)
6849                         return ret;
6850                 val = cpu_to_le32(val);
6851                 memcpy(data, ((char*)&val) + b_offset, b_count);
6852                 len -= b_count;
6853                 offset += b_count;
6854                 eeprom->len += b_count;
6855         }
6856
6857         /* read bytes up to the last 4 byte boundary */
6858         pd = &data[eeprom->len];
6859         for (i = 0; i < (len - (len & 3)); i += 4) {
6860                 ret = tg3_nvram_read(tp, offset + i, &val);
6861                 if (ret) {
6862                         eeprom->len += i;
6863                         return ret;
6864                 }
6865                 val = cpu_to_le32(val);
6866                 memcpy(pd + i, &val, 4);
6867         }
6868         eeprom->len += i;
6869
6870         if (len & 3) {
6871                 /* read last bytes not ending on 4 byte boundary */
6872                 pd = &data[eeprom->len];
6873                 b_count = len & 3;
6874                 b_offset = offset + len - b_count;
6875                 ret = tg3_nvram_read(tp, b_offset, &val);
6876                 if (ret)
6877                         return ret;
6878                 val = cpu_to_le32(val);
6879                 memcpy(pd, ((char*)&val), b_count);
6880                 eeprom->len += b_count;
6881         }
6882         return 0;
6883 }
6884
6885 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
6886
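/* NVRAM writes must also be 4-byte aligned: unaligned head and tail
 * bytes are widened by reading back the surrounding words (start/end),
 * merged with the caller's data, and written in one piece via
 * tg3_nvram_write_block().
 */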
6887 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6888 {
6889         struct tg3 *tp = netdev_priv(dev);
6890         int ret;
6891         u32 offset, len, b_offset, odd_len, start, end;
6892         u8 *buf;
6893
6894         if (eeprom->magic != TG3_EEPROM_MAGIC)
6895                 return -EINVAL;
6896
6897         offset = eeprom->offset;
6898         len = eeprom->len;
6899
6900         if ((b_offset = (offset & 3))) {
6901                 /* adjustments to start on required 4 byte boundary */
6902                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6903                 if (ret)
6904                         return ret;
6905                 start = cpu_to_le32(start);
6906                 len += b_offset;
6907                 offset &= ~3;
6908                 if (len < 4)
6909                         len = 4;
6910         }
6911
6912         odd_len = 0;
6913         if (len & 3) {
6914                 /* adjustments to end on required 4 byte boundary */
6915                 odd_len = 1;
6916                 len = (len + 3) & ~3;
6917                 ret = tg3_nvram_read(tp, offset+len-4, &end);
6918                 if (ret)
6919                         return ret;
6920                 end = cpu_to_le32(end);
6921         }
6922
6923         buf = data;
6924         if (b_offset || odd_len) {
6925                 buf = kmalloc(len, GFP_KERNEL);
6926                 if (!buf)
6927                         return -ENOMEM;
6928                 if (b_offset)
6929                         memcpy(buf, &start, 4);
6930                 if (odd_len)
6931                         memcpy(buf+len-4, &end, 4);
6932                 memcpy(buf + b_offset, data, eeprom->len);
6933         }
6934
6935         ret = tg3_nvram_write_block(tp, offset, len, buf);
6936
6937         if (buf != data)
6938                 kfree(buf);
6939
6940         return ret;
6941 }
6942
6943 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6944 {
6945         struct tg3 *tp = netdev_priv(dev);
6946   
6947         cmd->supported = (SUPPORTED_Autoneg);
6948
6949         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6950                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6951                                    SUPPORTED_1000baseT_Full);
6952
6953         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6954                 cmd->supported |= (SUPPORTED_100baseT_Half |
6955                                   SUPPORTED_100baseT_Full |
6956                                   SUPPORTED_10baseT_Half |
6957                                   SUPPORTED_10baseT_Full |
6958                                   SUPPORTED_MII);
6959         else
6960                 cmd->supported |= SUPPORTED_FIBRE;
6961   
6962         cmd->advertising = tp->link_config.advertising;
6963         if (netif_running(dev)) {
6964                 cmd->speed = tp->link_config.active_speed;
6965                 cmd->duplex = tp->link_config.active_duplex;
6966         }
6967         cmd->port = 0;
6968         cmd->phy_address = PHY_ADDR;
6969         cmd->transceiver = 0;
6970         cmd->autoneg = tp->link_config.autoneg;
6971         cmd->maxtxpkt = 0;
6972         cmd->maxrxpkt = 0;
6973         return 0;
6974 }
6975   
6976 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6977 {
6978         struct tg3 *tp = netdev_priv(dev);
6979   
6980         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6981                 /* These are the only advertisement bits allowed.  */
6982                 if (cmd->autoneg == AUTONEG_ENABLE &&
6983                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6984                                           ADVERTISED_1000baseT_Full |
6985                                           ADVERTISED_Autoneg |
6986                                           ADVERTISED_FIBRE)))
6987                         return -EINVAL;
6988         }
6989
6990         tg3_full_lock(tp, 0);
6991
6992         tp->link_config.autoneg = cmd->autoneg;
6993         if (cmd->autoneg == AUTONEG_ENABLE) {
6994                 tp->link_config.advertising = cmd->advertising;
6995                 tp->link_config.speed = SPEED_INVALID;
6996                 tp->link_config.duplex = DUPLEX_INVALID;
6997         } else {
6998                 tp->link_config.advertising = 0;
6999                 tp->link_config.speed = cmd->speed;
7000                 tp->link_config.duplex = cmd->duplex;
7001         }
7002   
7003         if (netif_running(dev))
7004                 tg3_setup_phy(tp, 1);
7005
7006         tg3_full_unlock(tp);
7007   
7008         return 0;
7009 }
7010   
7011 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7012 {
7013         struct tg3 *tp = netdev_priv(dev);
7014   
7015         strcpy(info->driver, DRV_MODULE_NAME);
7016         strcpy(info->version, DRV_MODULE_VERSION);
7017         strcpy(info->bus_info, pci_name(tp->pdev));
7018 }
7019   
7020 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7021 {
7022         struct tg3 *tp = netdev_priv(dev);
7023   
7024         wol->supported = WAKE_MAGIC;
7025         wol->wolopts = 0;
7026         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7027                 wol->wolopts = WAKE_MAGIC;
7028         memset(&wol->sopass, 0, sizeof(wol->sopass));
7029 }
7030   
7031 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7032 {
7033         struct tg3 *tp = netdev_priv(dev);
7034   
7035         if (wol->wolopts & ~WAKE_MAGIC)
7036                 return -EINVAL;
7037         if ((wol->wolopts & WAKE_MAGIC) &&
7038             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7039             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7040                 return -EINVAL;
7041   
7042         spin_lock_bh(&tp->lock);
7043         if (wol->wolopts & WAKE_MAGIC)
7044                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7045         else
7046                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7047         spin_unlock_bh(&tp->lock);
7048   
7049         return 0;
7050 }
7051   
7052 static u32 tg3_get_msglevel(struct net_device *dev)
7053 {
7054         struct tg3 *tp = netdev_priv(dev);
7055         return tp->msg_enable;
7056 }
7057   
7058 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7059 {
7060         struct tg3 *tp = netdev_priv(dev);
7061         tp->msg_enable = value;
7062 }
7063   
7064 #if TG3_TSO_SUPPORT != 0
7065 static int tg3_set_tso(struct net_device *dev, u32 value)
7066 {
7067         struct tg3 *tp = netdev_priv(dev);
7068
7069         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7070                 if (value)
7071                         return -EINVAL;
7072                 return 0;
7073         }
7074         return ethtool_op_set_tso(dev, value);
7075 }
7076 #endif
7077   
7078 static int tg3_nway_reset(struct net_device *dev)
7079 {
7080         struct tg3 *tp = netdev_priv(dev);
7081         u32 bmcr;
7082         int r;
7083   
7084         if (!netif_running(dev))
7085                 return -EAGAIN;
7086
7087         spin_lock_bh(&tp->lock);
7088         r = -EINVAL;
7089         tg3_readphy(tp, MII_BMCR, &bmcr);
7090         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7091             (bmcr & BMCR_ANENABLE)) {
7092                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
7093                 r = 0;
7094         }
7095         spin_unlock_bh(&tp->lock);
7096   
7097         return r;
7098 }
7099   
7100 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7101 {
7102         struct tg3 *tp = netdev_priv(dev);
7103   
7104         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7105         ering->rx_mini_max_pending = 0;
7106         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7107
7108         ering->rx_pending = tp->rx_pending;
7109         ering->rx_mini_pending = 0;
7110         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7111         ering->tx_pending = tp->tx_pending;
7112 }
7113   
7114 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7115 {
7116         struct tg3 *tp = netdev_priv(dev);
7117         int irq_sync = 0;
7118   
7119         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7120             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7121             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7122                 return -EINVAL;
7123   
7124         if (netif_running(dev)) {
7125                 tg3_netif_stop(tp);
7126                 irq_sync = 1;
7127         }
7128
7129         tg3_full_lock(tp, irq_sync);
7130   
7131         tp->rx_pending = ering->rx_pending;
7132
7133         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7134             tp->rx_pending > 63)
7135                 tp->rx_pending = 63;
7136         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7137         tp->tx_pending = ering->tx_pending;
7138
7139         if (netif_running(dev)) {
7140                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7141                 tg3_init_hw(tp);
7142                 tg3_netif_start(tp);
7143         }
7144
7145         tg3_full_unlock(tp);
7146   
7147         return 0;
7148 }
7149   
7150 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7151 {
7152         struct tg3 *tp = netdev_priv(dev);
7153   
7154         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7155         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7156         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7157 }
7158   
7159 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7160 {
7161         struct tg3 *tp = netdev_priv(dev);
7162         int irq_sync = 0;
7163   
7164         if (netif_running(dev)) {
7165                 tg3_netif_stop(tp);
7166                 irq_sync = 1;
7167         }
7168
7169         tg3_full_lock(tp, irq_sync);
7170
7171         if (epause->autoneg)
7172                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7173         else
7174                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7175         if (epause->rx_pause)
7176                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7177         else
7178                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7179         if (epause->tx_pause)
7180                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7181         else
7182                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7183
7184         if (netif_running(dev)) {
7185                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7186                 tg3_init_hw(tp);
7187                 tg3_netif_start(tp);
7188         }
7189
7190         tg3_full_unlock(tp);
7191   
7192         return 0;
7193 }
7194   
7195 static u32 tg3_get_rx_csum(struct net_device *dev)
7196 {
7197         struct tg3 *tp = netdev_priv(dev);
7198         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7199 }
7200   
7201 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7202 {
7203         struct tg3 *tp = netdev_priv(dev);
7204   
7205         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7206                 if (data != 0)
7207                         return -EINVAL;
7208                 return 0;
7209         }
7210   
7211         spin_lock_bh(&tp->lock);
7212         if (data)
7213                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7214         else
7215                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7216         spin_unlock_bh(&tp->lock);
7217   
7218         return 0;
7219 }
7220   
7221 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7222 {
7223         struct tg3 *tp = netdev_priv(dev);
7224   
7225         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7226                 if (data != 0)
7227                         return -EINVAL;
7228                 return 0;
7229         }
7230   
7231         if (data)
7232                 dev->features |= NETIF_F_IP_CSUM;
7233         else
7234                 dev->features &= ~NETIF_F_IP_CSUM;
7235
7236         return 0;
7237 }
7238
7239 static int tg3_get_stats_count (struct net_device *dev)
7240 {
7241         return TG3_NUM_STATS;
7242 }
7243
7244 static int tg3_get_test_count (struct net_device *dev)
7245 {
7246         return TG3_NUM_TEST;
7247 }
7248
7249 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7250 {
7251         switch (stringset) {
7252         case ETH_SS_STATS:
7253                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7254                 break;
7255         case ETH_SS_TEST:
7256                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7257                 break;
7258         default:
7259                 WARN_ON(1);     /* we need a WARN() */
7260                 break;
7261         }
7262 }
7263
7264 static void tg3_get_ethtool_stats (struct net_device *dev,
7265                                    struct ethtool_stats *estats, u64 *tmp_stats)
7266 {
7267         struct tg3 *tp = netdev_priv(dev);
7268         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7269 }
7270
7271 #define NVRAM_TEST_SIZE 0x100
7272
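/* The NVRAM self-test reads the first 256 bytes and verifies the magic
 * word plus two CRC-32 checksums: the bootstrap header (bytes 0x00-0x0f,
 * checksum stored at 0x10) and the manufacturing block (bytes 0x74-0xfb,
 * checksum stored at 0xfc).
 */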
7273 static int tg3_test_nvram(struct tg3 *tp)
7274 {
7275         u32 *buf, csum;
7276         int i, j, err = 0;
7277
7278         buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7279         if (buf == NULL)
7280                 return -ENOMEM;
7281
7282         for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7283                 u32 val;
7284
7285                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7286                         break;
7287                 buf[j] = cpu_to_le32(val);
7288         }
7289         if (i < NVRAM_TEST_SIZE)
7290                 goto out;
7291
7292         err = -EIO;
7293         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7294                 goto out;
7295
7296         /* Bootstrap checksum at offset 0x10 */
7297         csum = calc_crc((unsigned char *) buf, 0x10);
7298         if (csum != cpu_to_le32(buf[0x10/4]))
7299                 goto out;
7300
7301         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7302         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7303         if (csum != cpu_to_le32(buf[0xfc/4]))
7304                  goto out;
7305
7306         err = 0;
7307
7308 out:
7309         kfree(buf);
7310         return err;
7311 }
7312
7313 #define TG3_SERDES_TIMEOUT_SEC  2
7314 #define TG3_COPPER_TIMEOUT_SEC  6
7315
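/* The link self-test simply waits for carrier, allowing up to 2 seconds
 * on SerDes parts and 6 seconds on copper PHYs.
 */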
7316 static int tg3_test_link(struct tg3 *tp)
7317 {
7318         int i, max;
7319
7320         if (!netif_running(tp->dev))
7321                 return -ENODEV;
7322
7323         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7324                 max = TG3_SERDES_TIMEOUT_SEC;
7325         else
7326                 max = TG3_COPPER_TIMEOUT_SEC;
7327
7328         for (i = 0; i < max; i++) {
7329                 if (netif_carrier_ok(tp->dev))
7330                         return 0;
7331
7332                 if (msleep_interruptible(1000))
7333                         break;
7334         }
7335
7336         return -EIO;
7337 }
7338
7339 /* Only test the commonly used registers */
7340 static int tg3_test_registers(struct tg3 *tp)
7341 {
7342         int i, is_5705;
7343         u32 offset, read_mask, write_mask, val, save_val, read_val;
7344         static struct {
7345                 u16 offset;
7346                 u16 flags;
7347 #define TG3_FL_5705     0x1
7348 #define TG3_FL_NOT_5705 0x2
7349 #define TG3_FL_NOT_5788 0x4
7350                 u32 read_mask;
7351                 u32 write_mask;
7352         } reg_tbl[] = {
7353                 /* MAC Control Registers */
7354                 { MAC_MODE, TG3_FL_NOT_5705,
7355                         0x00000000, 0x00ef6f8c },
7356                 { MAC_MODE, TG3_FL_5705,
7357                         0x00000000, 0x01ef6b8c },
7358                 { MAC_STATUS, TG3_FL_NOT_5705,
7359                         0x03800107, 0x00000000 },
7360                 { MAC_STATUS, TG3_FL_5705,
7361                         0x03800100, 0x00000000 },
7362                 { MAC_ADDR_0_HIGH, 0x0000,
7363                         0x00000000, 0x0000ffff },
7364                 { MAC_ADDR_0_LOW, 0x0000,
7365                         0x00000000, 0xffffffff },
7366                 { MAC_RX_MTU_SIZE, 0x0000,
7367                         0x00000000, 0x0000ffff },
7368                 { MAC_TX_MODE, 0x0000,
7369                         0x00000000, 0x00000070 },
7370                 { MAC_TX_LENGTHS, 0x0000,
7371                         0x00000000, 0x00003fff },
7372                 { MAC_RX_MODE, TG3_FL_NOT_5705,
7373                         0x00000000, 0x000007fc },
7374                 { MAC_RX_MODE, TG3_FL_5705,
7375                         0x00000000, 0x000007dc },
7376                 { MAC_HASH_REG_0, 0x0000,
7377                         0x00000000, 0xffffffff },
7378                 { MAC_HASH_REG_1, 0x0000,
7379                         0x00000000, 0xffffffff },
7380                 { MAC_HASH_REG_2, 0x0000,
7381                         0x00000000, 0xffffffff },
7382                 { MAC_HASH_REG_3, 0x0000,
7383                         0x00000000, 0xffffffff },
7384
7385                 /* Receive Data and Receive BD Initiator Control Registers. */
7386                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7387                         0x00000000, 0xffffffff },
7388                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7389                         0x00000000, 0xffffffff },
7390                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7391                         0x00000000, 0x00000003 },
7392                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7393                         0x00000000, 0xffffffff },
7394                 { RCVDBDI_STD_BD+0, 0x0000,
7395                         0x00000000, 0xffffffff },
7396                 { RCVDBDI_STD_BD+4, 0x0000,
7397                         0x00000000, 0xffffffff },
7398                 { RCVDBDI_STD_BD+8, 0x0000,
7399                         0x00000000, 0xffff0002 },
7400                 { RCVDBDI_STD_BD+0xc, 0x0000,
7401                         0x00000000, 0xffffffff },
7402         
7403                 /* Receive BD Initiator Control Registers. */
7404                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7405                         0x00000000, 0xffffffff },
7406                 { RCVBDI_STD_THRESH, TG3_FL_5705,
7407                         0x00000000, 0x000003ff },
7408                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7409                         0x00000000, 0xffffffff },
7410         
7411                 /* Host Coalescing Control Registers. */
7412                 { HOSTCC_MODE, TG3_FL_NOT_5705,
7413                         0x00000000, 0x00000004 },
7414                 { HOSTCC_MODE, TG3_FL_5705,
7415                         0x00000000, 0x000000f6 },
7416                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7417                         0x00000000, 0xffffffff },
7418                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7419                         0x00000000, 0x000003ff },
7420                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7421                         0x00000000, 0xffffffff },
7422                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7423                         0x00000000, 0x000003ff },
7424                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7425                         0x00000000, 0xffffffff },
7426                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7427                         0x00000000, 0x000000ff },
7428                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7429                         0x00000000, 0xffffffff },
7430                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7431                         0x00000000, 0x000000ff },
7432                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7433                         0x00000000, 0xffffffff },
7434                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7435                         0x00000000, 0xffffffff },
7436                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7437                         0x00000000, 0xffffffff },
7438                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7439                         0x00000000, 0x000000ff },
7440                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7441                         0x00000000, 0xffffffff },
7442                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7443                         0x00000000, 0x000000ff },
7444                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7445                         0x00000000, 0xffffffff },
7446                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7447                         0x00000000, 0xffffffff },
7448                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7449                         0x00000000, 0xffffffff },
7450                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7451                         0x00000000, 0xffffffff },
7452                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7453                         0x00000000, 0xffffffff },
7454                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7455                         0xffffffff, 0x00000000 },
7456                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7457                         0xffffffff, 0x00000000 },
7458
7459                 /* Buffer Manager Control Registers. */
7460                 { BUFMGR_MB_POOL_ADDR, 0x0000,
7461                         0x00000000, 0x007fff80 },
7462                 { BUFMGR_MB_POOL_SIZE, 0x0000,
7463                         0x00000000, 0x007fffff },
7464                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7465                         0x00000000, 0x0000003f },
7466                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7467                         0x00000000, 0x000001ff },
7468                 { BUFMGR_MB_HIGH_WATER, 0x0000,
7469                         0x00000000, 0x000001ff },
7470                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7471                         0xffffffff, 0x00000000 },
7472                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7473                         0xffffffff, 0x00000000 },
7474         
7475                 /* Mailbox Registers */
7476                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7477                         0x00000000, 0x000001ff },
7478                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7479                         0x00000000, 0x000001ff },
7480                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7481                         0x00000000, 0x000007ff },
7482                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7483                         0x00000000, 0x000001ff },
7484
7485                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7486         };
7487
7488         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7489                 is_5705 = 1;
7490         else
7491                 is_5705 = 0;
7492
7493         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7494                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7495                         continue;
7496
7497                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7498                         continue;
7499
7500                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7501                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
7502                         continue;
7503
7504                 offset = (u32) reg_tbl[i].offset;
7505                 read_mask = reg_tbl[i].read_mask;
7506                 write_mask = reg_tbl[i].write_mask;
7507
7508                 /* Save the original register content */
7509                 save_val = tr32(offset);
7510
7511                 /* Determine the read-only value. */
7512                 read_val = save_val & read_mask;
7513
7514                 /* Write zero to the register, then make sure the read-only bits
7515                  * are not changed and the read/write bits are all zeros.
7516                  */
7517                 tw32(offset, 0);
7518
7519                 val = tr32(offset);
7520
7521                 /* Test the read-only and read/write bits. */
7522                 if (((val & read_mask) != read_val) || (val & write_mask))
7523                         goto out;
7524
7525                 /* Write ones to all the bits defined by RdMask and WrMask, then
7526                  * make sure the read-only bits are not changed and the
7527                  * read/write bits are all ones.
7528                  */
7529                 tw32(offset, read_mask | write_mask);
7530
7531                 val = tr32(offset);
7532
7533                 /* Test the read-only bits. */
7534                 if ((val & read_mask) != read_val)
7535                         goto out;
7536
7537                 /* Test the read/write bits. */
7538                 if ((val & write_mask) != write_mask)
7539                         goto out;
7540
7541                 tw32(offset, save_val);
7542         }
7543
7544         return 0;
7545
7546 out:
7547         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7548         tw32(offset, save_val);
7549         return -EIO;
7550 }
7551
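/* Memory self-test: each test pattern (all zeroes, all ones, 0xaa55a55a)
 * is written through the SRAM window and read back word by word over
 * every region listed in the mem_tbl below.
 */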
7552 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7553 {
7554         static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7555         int i;
7556         u32 j;
7557
7558         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
7559                 for (j = 0; j < len; j += 4) {
7560                         u32 val;
7561
7562                         tg3_write_mem(tp, offset + j, test_pattern[i]);
7563                         tg3_read_mem(tp, offset + j, &val);
7564                         if (val != test_pattern[i])
7565                                 return -EIO;
7566                 }
7567         }
7568         return 0;
7569 }
7570
7571 static int tg3_test_memory(struct tg3 *tp)
7572 {
7573         static struct mem_entry {
7574                 u32 offset;
7575                 u32 len;
7576         } mem_tbl_570x[] = {
7577                 { 0x00000000, 0x01000},
7578                 { 0x00002000, 0x1c000},
7579                 { 0xffffffff, 0x00000}
7580         }, mem_tbl_5705[] = {
7581                 { 0x00000100, 0x0000c},
7582                 { 0x00000200, 0x00008},
7583                 { 0x00000b50, 0x00400},
7584                 { 0x00004000, 0x00800},
7585                 { 0x00006000, 0x01000},
7586                 { 0x00008000, 0x02000},
7587                 { 0x00010000, 0x0e000},
7588                 { 0xffffffff, 0x00000}
7589         };
7590         struct mem_entry *mem_tbl;
7591         int err = 0;
7592         int i;
7593
7594         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7595                 mem_tbl = mem_tbl_5705;
7596         else
7597                 mem_tbl = mem_tbl_570x;
7598
7599         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7600                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7601                     mem_tbl[i].len)) != 0)
7602                         break;
7603         }
7604         
7605         return err;
7606 }
7607
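/* Loopback self-test: with the MAC in internal GMII loopback, a single
 * 1514-byte frame is queued on the send ring, the host coalescing engine
 * is kicked, and the status block is polled until the frame shows up on
 * the standard receive return ring; the payload is then compared byte by
 * byte against what was sent.
 */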
7608 static int tg3_test_loopback(struct tg3 *tp)
7609 {
7610         u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
7611         u32 desc_idx;
7612         struct sk_buff *skb, *rx_skb;
7613         u8 *tx_data;
7614         dma_addr_t map;
7615         int num_pkts, tx_len, rx_len, i, err;
7616         struct tg3_rx_buffer_desc *desc;
7617
7618         if (!netif_running(tp->dev))
7619                 return -ENODEV;
7620
7621         err = -EIO;
7622
7623         tg3_abort_hw(tp, 1);
7624
7625         tg3_reset_hw(tp);
7626
7627         mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7628                    MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7629                    MAC_MODE_PORT_MODE_GMII;
7630         tw32(MAC_MODE, mac_mode);
7631
7632         tx_len = 1514;
7633         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
7634         tx_data = skb_put(skb, tx_len);
7635         memcpy(tx_data, tp->dev->dev_addr, 6);
7636         memset(tx_data + 6, 0x0, 8);
7637
7638         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
7639
7640         for (i = 14; i < tx_len; i++)
7641                 tx_data[i] = (u8) (i & 0xff);
7642
7643         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
7644
7645         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7646              HOSTCC_MODE_NOW);
7647
7648         udelay(10);
7649
7650         rx_start_idx = tp->hw_status->idx[0].rx_producer;
7651
7652         send_idx = 0;
7653         num_pkts = 0;
7654
7655         tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
7656
7657         send_idx++;
7658         num_pkts++;
7659
7660         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
7661         tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
7662
7663         udelay(10);
7664
7665         for (i = 0; i < 10; i++) {
7666                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7667                        HOSTCC_MODE_NOW);
7668
7669                 udelay(10);
7670
7671                 tx_idx = tp->hw_status->idx[0].tx_consumer;
7672                 rx_idx = tp->hw_status->idx[0].rx_producer;
7673                 if ((tx_idx == send_idx) &&
7674                     (rx_idx == (rx_start_idx + num_pkts)))
7675                         break;
7676         }
7677
7678         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
7679         dev_kfree_skb(skb);
7680
7681         if (tx_idx != send_idx)
7682                 goto out;
7683
7684         if (rx_idx != rx_start_idx + num_pkts)
7685                 goto out;
7686
7687         desc = &tp->rx_rcb[rx_start_idx];
7688         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
7689         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
7690         if (opaque_key != RXD_OPAQUE_RING_STD)
7691                 goto out;
7692
7693         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
7694             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
7695                 goto out;
7696
7697         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
7698         if (rx_len != tx_len)
7699                 goto out;
7700
7701         rx_skb = tp->rx_std_buffers[desc_idx].skb;
7702
7703         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
7704         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
7705
7706         for (i = 14; i < tx_len; i++) {
7707                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
7708                         goto out;
7709         }
7710         err = 0;
7711         
7712         /* tg3_free_rings will unmap and free the rx_skb */
7713 out:
7714         return err;
7715 }
7716
7717 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
7718                           u64 *data)
7719 {
7720         struct tg3 *tp = netdev_priv(dev);
7721
7722         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
7723
7724         if (tg3_test_nvram(tp) != 0) {
7725                 etest->flags |= ETH_TEST_FL_FAILED;
7726                 data[0] = 1;
7727         }
7728         if (tg3_test_link(tp) != 0) {
7729                 etest->flags |= ETH_TEST_FL_FAILED;
7730                 data[1] = 1;
7731         }
7732         if (etest->flags & ETH_TEST_FL_OFFLINE) {
7733                 int irq_sync = 0;
7734
7735                 if (netif_running(dev)) {
7736                         tg3_netif_stop(tp);
7737                         irq_sync = 1;
7738                 }
7739
7740                 tg3_full_lock(tp, irq_sync);
7741
7742                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
7743                 tg3_nvram_lock(tp);
7744                 tg3_halt_cpu(tp, RX_CPU_BASE);
7745                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7746                         tg3_halt_cpu(tp, TX_CPU_BASE);
7747                 tg3_nvram_unlock(tp);
7748
7749                 if (tg3_test_registers(tp) != 0) {
7750                         etest->flags |= ETH_TEST_FL_FAILED;
7751                         data[2] = 1;
7752                 }
7753                 if (tg3_test_memory(tp) != 0) {
7754                         etest->flags |= ETH_TEST_FL_FAILED;
7755                         data[3] = 1;
7756                 }
7757                 if (tg3_test_loopback(tp) != 0) {
7758                         etest->flags |= ETH_TEST_FL_FAILED;
7759                         data[4] = 1;
7760                 }
7761
7762                 tg3_full_unlock(tp);
7763
7764                 if (tg3_test_interrupt(tp) != 0) {
7765                         etest->flags |= ETH_TEST_FL_FAILED;
7766                         data[5] = 1;
7767                 }
7768
7769                 tg3_full_lock(tp, 0);
7770
7771                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7772                 if (netif_running(dev)) {
7773                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7774                         tg3_init_hw(tp);
7775                         tg3_netif_start(tp);
7776                 }
7777
7778                 tg3_full_unlock(tp);
7779         }
7780 }
7781
7782 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7783 {
7784         struct mii_ioctl_data *data = if_mii(ifr);
7785         struct tg3 *tp = netdev_priv(dev);
7786         int err;
7787
7788         switch(cmd) {
7789         case SIOCGMIIPHY:
7790                 data->phy_id = PHY_ADDR;
7791
7792                 /* fallthru */
7793         case SIOCGMIIREG: {
7794                 u32 mii_regval;
7795
7796                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7797                         break;                  /* We have no PHY */
7798
7799                 spin_lock_bh(&tp->lock);
7800                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
7801                 spin_unlock_bh(&tp->lock);
7802
7803                 data->val_out = mii_regval;
7804
7805                 return err;
7806         }
7807
7808         case SIOCSMIIREG:
7809                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7810                         break;                  /* We have no PHY */
7811
7812                 if (!capable(CAP_NET_ADMIN))
7813                         return -EPERM;
7814
7815                 spin_lock_bh(&tp->lock);
7816                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
7817                 spin_unlock_bh(&tp->lock);
7818
7819                 return err;
7820
7821         default:
7822                 /* do nothing */
7823                 break;
7824         }
7825         return -EOPNOTSUPP;
7826 }
7827
7828 #if TG3_VLAN_TAG_USED
7829 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
7830 {
7831         struct tg3 *tp = netdev_priv(dev);
7832
7833         tg3_full_lock(tp, 0);
7834
7835         tp->vlgrp = grp;
7836
7837         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
7838         __tg3_set_rx_mode(dev);
7839
7840         tg3_full_unlock(tp);
7841 }
7842
7843 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
7844 {
7845         struct tg3 *tp = netdev_priv(dev);
7846
7847         tg3_full_lock(tp, 0);
7848         if (tp->vlgrp)
7849                 tp->vlgrp->vlan_devices[vid] = NULL;
7850         tg3_full_unlock(tp);
7851 }
7852 #endif
7853
7854 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
7855 {
7856         struct tg3 *tp = netdev_priv(dev);
7857
7858         memcpy(ec, &tp->coal, sizeof(*ec));
7859         return 0;
7860 }
7861
7862 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
7863 {
7864         struct tg3 *tp = netdev_priv(dev);
7865         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
7866         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
7867
7868         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7869                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
7870                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
7871                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
7872                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
7873         }
7874
7875         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
7876             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
7877             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
7878             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
7879             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
7880             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
7881             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
7882             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
7883             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
7884             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
7885                 return -EINVAL;
7886
7887         /* No rx interrupts will be generated if both are zero */
7888         if ((ec->rx_coalesce_usecs == 0) &&
7889             (ec->rx_max_coalesced_frames == 0))
7890                 return -EINVAL;
7891
7892         /* No tx interrupts will be generated if both are zero */
7893         if ((ec->tx_coalesce_usecs == 0) &&
7894             (ec->tx_max_coalesced_frames == 0))
7895                 return -EINVAL;
7896
7897         /* Only copy relevant parameters, ignore all others. */
7898         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
7899         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
7900         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
7901         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
7902         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
7903         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
7904         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
7905         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
7906         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
7907
7908         if (netif_running(dev)) {
7909                 tg3_full_lock(tp, 0);
7910                 __tg3_set_coalesce(tp, &tp->coal);
7911                 tg3_full_unlock(tp);
7912         }
7913         return 0;
7914 }
7915
7916 static struct ethtool_ops tg3_ethtool_ops = {
7917         .get_settings           = tg3_get_settings,
7918         .set_settings           = tg3_set_settings,
7919         .get_drvinfo            = tg3_get_drvinfo,
7920         .get_regs_len           = tg3_get_regs_len,
7921         .get_regs               = tg3_get_regs,
7922         .get_wol                = tg3_get_wol,
7923         .set_wol                = tg3_set_wol,
7924         .get_msglevel           = tg3_get_msglevel,
7925         .set_msglevel           = tg3_set_msglevel,
7926         .nway_reset             = tg3_nway_reset,
7927         .get_link               = ethtool_op_get_link,
7928         .get_eeprom_len         = tg3_get_eeprom_len,
7929         .get_eeprom             = tg3_get_eeprom,
7930         .set_eeprom             = tg3_set_eeprom,
7931         .get_ringparam          = tg3_get_ringparam,
7932         .set_ringparam          = tg3_set_ringparam,
7933         .get_pauseparam         = tg3_get_pauseparam,
7934         .set_pauseparam         = tg3_set_pauseparam,
7935         .get_rx_csum            = tg3_get_rx_csum,
7936         .set_rx_csum            = tg3_set_rx_csum,
7937         .get_tx_csum            = ethtool_op_get_tx_csum,
7938         .set_tx_csum            = tg3_set_tx_csum,
7939         .get_sg                 = ethtool_op_get_sg,
7940         .set_sg                 = ethtool_op_set_sg,
7941 #if TG3_TSO_SUPPORT != 0
7942         .get_tso                = ethtool_op_get_tso,
7943         .set_tso                = tg3_set_tso,
7944 #endif
7945         .self_test_count        = tg3_get_test_count,
7946         .self_test              = tg3_self_test,
7947         .get_strings            = tg3_get_strings,
7948         .get_stats_count        = tg3_get_stats_count,
7949         .get_ethtool_stats      = tg3_get_ethtool_stats,
7950         .get_coalesce           = tg3_get_coalesce,
7951         .set_coalesce           = tg3_set_coalesce,
7952 };
7953
7954 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
7955 {
7956         u32 cursize, val;
7957
7958         tp->nvram_size = EEPROM_CHIP_SIZE;
7959
7960         if (tg3_nvram_read(tp, 0, &val) != 0)
7961                 return;
7962
7963         if (swab32(val) != TG3_EEPROM_MAGIC)
7964                 return;
7965
7966         /*
7967          * Size the chip by reading offsets at increasing powers of two.
7968          * When we encounter our validation signature, we know the addressing
7969          * has wrapped around, and thus have our chip size.
7970          */
7971         cursize = 0x800;
7972
7973         while (cursize < tp->nvram_size) {
7974                 if (tg3_nvram_read(tp, cursize, &val) != 0)
7975                         return;
7976
7977                 if (swab32(val) == TG3_EEPROM_MAGIC)
7978                         break;
7979
7980                 cursize <<= 1;
7981         }
7982
7983         tp->nvram_size = cursize;
7984 }
7985                 
7986 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
7987 {
7988         u32 val;
7989
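             /* The word at NVRAM offset 0xf0 encodes the device size, in KB,
              * in its upper 16 bits; if it reads as zero, fall back to the
              * 128KB (0x20000 byte) default.
              */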
7990         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
7991                 if (val != 0) {
7992                         tp->nvram_size = (val >> 16) * 1024;
7993                         return;
7994                 }
7995         }
7996         tp->nvram_size = 0x20000;
7997 }
7998
7999 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8000 {
8001         u32 nvcfg1;
8002
8003         nvcfg1 = tr32(NVRAM_CFG1);
8004         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8005                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8006         }
8007         else {
8008                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8009                 tw32(NVRAM_CFG1, nvcfg1);
8010         }
8011
8012         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8013                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8014                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8015                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8016                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8017                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8018                                 break;
8019                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8020                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8021                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8022                                 break;
8023                         case FLASH_VENDOR_ATMEL_EEPROM:
8024                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8025                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8026                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8027                                 break;
8028                         case FLASH_VENDOR_ST:
8029                                 tp->nvram_jedecnum = JEDEC_ST;
8030                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8031                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8032                                 break;
8033                         case FLASH_VENDOR_SAIFUN:
8034                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8035                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8036                                 break;
8037                         case FLASH_VENDOR_SST_SMALL:
8038                         case FLASH_VENDOR_SST_LARGE:
8039                                 tp->nvram_jedecnum = JEDEC_SST;
8040                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8041                                 break;
8042                 }
8043         }
8044         else {
8045                 tp->nvram_jedecnum = JEDEC_ATMEL;
8046                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8047                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8048         }
8049 }
8050
8051 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8052 {
8053         u32 nvcfg1;
8054
8055         nvcfg1 = tr32(NVRAM_CFG1);
8056
8057         /* NVRAM protection for TPM */
8058         if (nvcfg1 & (1 << 27))
8059                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8060
8061         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8062                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8063                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8064                         tp->nvram_jedecnum = JEDEC_ATMEL;
8065                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8066                         break;
8067                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8068                         tp->nvram_jedecnum = JEDEC_ATMEL;
8069                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8070                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8071                         break;
8072                 case FLASH_5752VENDOR_ST_M45PE10:
8073                 case FLASH_5752VENDOR_ST_M45PE20:
8074                 case FLASH_5752VENDOR_ST_M45PE40:
8075                         tp->nvram_jedecnum = JEDEC_ST;
8076                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8077                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8078                         break;
8079         }
8080
8081         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8082                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8083                         case FLASH_5752PAGE_SIZE_256:
8084                                 tp->nvram_pagesize = 256;
8085                                 break;
8086                         case FLASH_5752PAGE_SIZE_512:
8087                                 tp->nvram_pagesize = 512;
8088                                 break;
8089                         case FLASH_5752PAGE_SIZE_1K:
8090                                 tp->nvram_pagesize = 1024;
8091                                 break;
8092                         case FLASH_5752PAGE_SIZE_2K:
8093                                 tp->nvram_pagesize = 2048;
8094                                 break;
8095                         case FLASH_5752PAGE_SIZE_4K:
8096                                 tp->nvram_pagesize = 4096;
8097                                 break;
8098                         case FLASH_5752PAGE_SIZE_264:
8099                                 tp->nvram_pagesize = 264;
8100                                 break;
8101                 }
8102         }
8103         else {
8104                 /* For eeprom, set pagesize to maximum eeprom size */
8105                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8106
8107                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8108                 tw32(NVRAM_CFG1, nvcfg1);
8109         }
8110 }
8111
8112 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8113 static void __devinit tg3_nvram_init(struct tg3 *tp)
8114 {
8115         int j;
8116
8117         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8118                 return;
8119
8120         tw32_f(GRC_EEPROM_ADDR,
8121              (EEPROM_ADDR_FSM_RESET |
8122               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8123                EEPROM_ADDR_CLKPERD_SHIFT)));
8124
8125         /* XXX schedule_timeout() ... */
8126         for (j = 0; j < 100; j++)
8127                 udelay(10);
8128
8129         /* Enable seeprom accesses. */
8130         tw32_f(GRC_LOCAL_CTRL,
8131              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8132         udelay(100);
8133
8134         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8135             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8136                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8137
8138                 tg3_enable_nvram_access(tp);
8139
8140                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8141                         tg3_get_5752_nvram_info(tp);
8142                 else
8143                         tg3_get_nvram_info(tp);
8144
8145                 tg3_get_nvram_size(tp);
8146
8147                 tg3_disable_nvram_access(tp);
8148
8149         } else {
8150                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8151
8152                 tg3_get_eeprom_size(tp);
8153         }
8154 }
8155
8156 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8157                                         u32 offset, u32 *val)
8158 {
8159         u32 tmp;
8160         int i;
8161
8162         if (offset > EEPROM_ADDR_ADDR_MASK ||
8163             (offset % 4) != 0)
8164                 return -EINVAL;
8165
8166         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8167                                         EEPROM_ADDR_DEVID_MASK |
8168                                         EEPROM_ADDR_READ);
8169         tw32(GRC_EEPROM_ADDR,
8170              tmp |
8171              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8172              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8173               EEPROM_ADDR_ADDR_MASK) |
8174              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8175
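             /* Poll for completion; 10000 iterations of udelay(100) gives
              * the EEPROM roughly one second to finish the read.
              */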
8176         for (i = 0; i < 10000; i++) {
8177                 tmp = tr32(GRC_EEPROM_ADDR);
8178
8179                 if (tmp & EEPROM_ADDR_COMPLETE)
8180                         break;
8181                 udelay(100);
8182         }
8183         if (!(tmp & EEPROM_ADDR_COMPLETE))
8184                 return -EBUSY;
8185
8186         *val = tr32(GRC_EEPROM_DATA);
8187         return 0;
8188 }
8189
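     /* tg3_nvram_exec_cmd() polls NVRAM_CMD_DONE in 10 usec steps, so this
      * timeout corresponds to roughly 100 msec per NVRAM command.
      */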
8190 #define NVRAM_CMD_TIMEOUT 10000
8191
8192 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8193 {
8194         int i;
8195
8196         tw32(NVRAM_CMD, nvram_cmd);
8197         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8198                 udelay(10);
8199                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8200                         udelay(10);
8201                         break;
8202                 }
8203         }
8204         if (i == NVRAM_CMD_TIMEOUT) {
8205                 return -EBUSY;
8206         }
8207         return 0;
8208 }
8209
8210 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8211 {
8212         int ret;
8213
8214         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8215                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8216                 return -EINVAL;
8217         }
8218
8219         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8220                 return tg3_nvram_read_using_eeprom(tp, offset, val);
8221
8222         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8223                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8224                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8225
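                     /* Buffered Atmel (AT45DB) parts are addressed by page
                      * number and byte-within-page rather than by a flat byte
                      * offset, so split the linear offset into those fields.
                      */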
8226                 offset = ((offset / tp->nvram_pagesize) <<
8227                           ATMEL_AT45DB0X1B_PAGE_POS) +
8228                         (offset % tp->nvram_pagesize);
8229         }
8230
8231         if (offset > NVRAM_ADDR_MSK)
8232                 return -EINVAL;
8233
8234         tg3_nvram_lock(tp);
8235
8236         tg3_enable_nvram_access(tp);
8237
8238         tw32(NVRAM_ADDR, offset);
8239         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8240                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8241
8242         if (ret == 0)
8243                 *val = swab32(tr32(NVRAM_RDDATA));
8244
8245         tg3_nvram_unlock(tp);
8246
8247         tg3_disable_nvram_access(tp);
8248
8249         return ret;
8250 }
8251
8252 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8253                                     u32 offset, u32 len, u8 *buf)
8254 {
8255         int i, j, rc = 0;
8256         u32 val;
8257
8258         for (i = 0; i < len; i += 4) {
8259                 u32 addr, data;
8260
8261                 addr = offset + i;
8262
8263                 memcpy(&data, buf + i, 4);
8264
8265                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8266
8267                 val = tr32(GRC_EEPROM_ADDR);
8268                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8269
8270                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8271                         EEPROM_ADDR_READ);
8272                 tw32(GRC_EEPROM_ADDR, val |
8273                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
8274                         (addr & EEPROM_ADDR_ADDR_MASK) |
8275                         EEPROM_ADDR_START |
8276                         EEPROM_ADDR_WRITE);
8277                 
8278                 for (j = 0; j < 10000; j++) {
8279                         val = tr32(GRC_EEPROM_ADDR);
8280
8281                         if (val & EEPROM_ADDR_COMPLETE)
8282                                 break;
8283                         udelay(100);
8284                 }
8285                 if (!(val & EEPROM_ADDR_COMPLETE)) {
8286                         rc = -EBUSY;
8287                         break;
8288                 }
8289         }
8290
8291         return rc;
8292 }
8293
8294 /* offset and length are dword aligned */
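     /* Unbuffered flash must be erased a whole page at a time, so each page
      * is handled read-modify-write: read it into a scratch buffer, merge in
      * the caller's data, erase the page, then program it back word by word.
      */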
8295 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8296                 u8 *buf)
8297 {
8298         int ret = 0;
8299         u32 pagesize = tp->nvram_pagesize;
8300         u32 pagemask = pagesize - 1;
8301         u32 nvram_cmd;
8302         u8 *tmp;
8303
8304         tmp = kmalloc(pagesize, GFP_KERNEL);
8305         if (tmp == NULL)
8306                 return -ENOMEM;
8307
8308         while (len) {
8309                 int j;
8310                 u32 phy_addr, page_off, size;
8311
8312                 phy_addr = offset & ~pagemask;
8313         
8314                 for (j = 0; j < pagesize; j += 4) {
8315                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
8316                                                 (u32 *) (tmp + j))))
8317                                 break;
8318                 }
8319                 if (ret)
8320                         break;
8321
8322                 page_off = offset & pagemask;
8323                 size = pagesize;
8324                 if (len < size)
8325                         size = len;
8326
8327                 len -= size;
8328
8329                 memcpy(tmp + page_off, buf, size);
8330
8331                 offset = offset + (pagesize - page_off);
8332
8333                 tg3_enable_nvram_access(tp);
8334
8335                 /*
8336                  * Before we can erase the flash page, we need
8337                  * to issue a special "write enable" command.
8338                  */
8339                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8340
8341                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8342                         break;
8343
8344                 /* Erase the target page */
8345                 tw32(NVRAM_ADDR, phy_addr);
8346
8347                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8348                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8349
8350                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8351                         break;
8352
8353                 /* Issue another write enable to start the write. */
8354                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8355
8356                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8357                         break;
8358
8359                 for (j = 0; j < pagesize; j += 4) {
8360                         u32 data;
8361
8362                         data = *((u32 *) (tmp + j));
8363                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
8364
8365                         tw32(NVRAM_ADDR, phy_addr + j);
8366
8367                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8368                                 NVRAM_CMD_WR;
8369
8370                         if (j == 0)
8371                                 nvram_cmd |= NVRAM_CMD_FIRST;
8372                         else if (j == (pagesize - 4))
8373                                 nvram_cmd |= NVRAM_CMD_LAST;
8374
8375                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8376                                 break;
8377                 }
8378                 if (ret)
8379                         break;
8380         }
8381
8382         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8383         tg3_nvram_exec_cmd(tp, nvram_cmd);
8384
8385         kfree(tmp);
8386
8387         return ret;
8388 }
8389
8390 /* offset and length are dword aligned */
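     /* Buffered parts take writes directly: each burst is framed with
      * NVRAM_CMD_FIRST on its first word and NVRAM_CMD_LAST on the last word
      * of a page (or of the whole transfer); ST parts also need a
      * write-enable command before each burst.
      */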
8391 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8392                 u8 *buf)
8393 {
8394         int i, ret = 0;
8395
8396         for (i = 0; i < len; i += 4, offset += 4) {
8397                 u32 data, page_off, phy_addr, nvram_cmd;
8398
8399                 memcpy(&data, buf + i, 4);
8400                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8401
8402                 page_off = offset % tp->nvram_pagesize;
8403
8404                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8405                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8406
8407                         phy_addr = ((offset / tp->nvram_pagesize) <<
8408                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8409                 }
8410                 else {
8411                         phy_addr = offset;
8412                 }
8413
8414                 tw32(NVRAM_ADDR, phy_addr);
8415
8416                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8417
8418                 if ((page_off == 0) || (i == 0))
8419                         nvram_cmd |= NVRAM_CMD_FIRST;
8420                 else if (page_off == (tp->nvram_pagesize - 4))
8421                         nvram_cmd |= NVRAM_CMD_LAST;
8422
8423                 if (i == (len - 4))
8424                         nvram_cmd |= NVRAM_CMD_LAST;
8425
8426                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
8427                         (nvram_cmd & NVRAM_CMD_FIRST)) {
8428
8429                         if ((ret = tg3_nvram_exec_cmd(tp,
8430                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8431                                 NVRAM_CMD_DONE)))
8432
8433                                 break;
8434                 }
8435                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8436                         /* We always do complete word writes to eeprom. */
8437                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8438                 }
8439
8440                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8441                         break;
8442         }
8443         return ret;
8444 }
8445
8446 /* offset and length are dword aligned */
8447 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8448 {
8449         int ret;
8450
8451         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8452                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8453                 return -EINVAL;
8454         }
8455
8456         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8457                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8458                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
8459                 udelay(40);
8460         }
8461
8462         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8463                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8464         }
8465         else {
8466                 u32 grc_mode;
8467
8468                 tg3_nvram_lock(tp);
8469
8470                 tg3_enable_nvram_access(tp);
8471                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8472                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
8473                         tw32(NVRAM_WRITE1, 0x406);
8474
8475                 grc_mode = tr32(GRC_MODE);
8476                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8477
8478                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8479                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8480
8481                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
8482                                 buf);
8483                 }
8484                 else {
8485                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8486                                 buf);
8487                 }
8488
8489                 grc_mode = tr32(GRC_MODE);
8490                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8491
8492                 tg3_disable_nvram_access(tp);
8493                 tg3_nvram_unlock(tp);
8494         }
8495
8496         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8497                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8498                 udelay(40);
8499         }
8500
8501         return ret;
8502 }
8503
8504 struct subsys_tbl_ent {
8505         u16 subsys_vendor, subsys_devid;
8506         u32 phy_id;
8507 };
8508
8509 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8510         /* Broadcom boards. */
8511         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8512         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8513         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8514         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
8515         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8516         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8517         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
8518         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8519         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8520         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8521         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8522
8523         /* 3com boards. */
8524         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8525         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8526         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
8527         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8528         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8529
8530         /* DELL boards. */
8531         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8532         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8533         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8534         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8535
8536         /* Compaq boards. */
8537         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8538         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8539         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
8540         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8541         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8542
8543         /* IBM boards. */
8544         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
8545 };
8546
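     /* Match the PCI subsystem vendor/device IDs against the table above;
      * tg3_phy_probe() falls back to this when neither the PHY registers nor
      * the NVRAM yield a usable PHY ID.
      */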
8547 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8548 {
8549         int i;
8550
8551         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8552                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8553                      tp->pdev->subsystem_vendor) &&
8554                     (subsys_id_to_phy_id[i].subsys_devid ==
8555                      tp->pdev->subsystem_device))
8556                         return &subsys_id_to_phy_id[i];
8557         }
8558         return NULL;
8559 }
8560
8561 /* Since this function may be called in D3-hot power state during
8562  * tg3_init_one(), only config cycles are allowed.
8563  */
8564 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
8565 {
8566         u32 val;
8567
8568         /* Make sure register accesses (indirect or otherwise)
8569          * will function correctly.
8570          */
8571         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8572                                tp->misc_host_ctrl);
8573
8574         tp->phy_id = PHY_ID_INVALID;
8575         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8576
8577         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8578         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8579                 u32 nic_cfg, led_cfg;
8580                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8581                 int eeprom_phy_serdes = 0;
8582
8583                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8584                 tp->nic_sram_data_cfg = nic_cfg;
8585
8586                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8587                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8588                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8589                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8590                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8591                     (ver > 0) && (ver < 0x100))
8592                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8593
8594                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8595                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8596                         eeprom_phy_serdes = 1;
8597
8598                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
8599                 if (nic_phy_id != 0) {
8600                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
8601                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
8602
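                             /* Pack the two 16-bit halves stored in NVRAM
                              * into the same phy_id layout that
                              * tg3_phy_probe() builds from MII_PHYSID1/2.
                              */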
8603                         eeprom_phy_id  = (id1 >> 16) << 10;
8604                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
8605                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
8606                 } else
8607                         eeprom_phy_id = 0;
8608
8609                 tp->phy_id = eeprom_phy_id;
8610                 if (eeprom_phy_serdes)
8611                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8612
8613                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8614                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
8615                                     SHASTA_EXT_LED_MODE_MASK);
8616                 else
8617                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
8618
8619                 switch (led_cfg) {
8620                 default:
8621                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
8622                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8623                         break;
8624
8625                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
8626                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8627                         break;
8628
8629                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
8630                         tp->led_ctrl = LED_CTRL_MODE_MAC;
8631
8632                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
8633                          * read on some older 5700/5701 bootcode.
8634                          */
8635                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
8636                             ASIC_REV_5700 ||
8637                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
8638                             ASIC_REV_5701)
8639                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8640
8641                         break;
8642
8643                 case SHASTA_EXT_LED_SHARED:
8644                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
8645                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8646                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
8647                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8648                                                  LED_CTRL_MODE_PHY_2);
8649                         break;
8650
8651                 case SHASTA_EXT_LED_MAC:
8652                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
8653                         break;
8654
8655                 case SHASTA_EXT_LED_COMBO:
8656                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
8657                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
8658                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8659                                                  LED_CTRL_MODE_PHY_2);
8660                         break;
8661
8662                 }
8663
8664                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8665                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
8666                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
8667                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8668
8669                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8670                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8671                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
8672                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
8673
8674                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8675                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
8676                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8677                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
8678                 }
8679                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
8680                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
8681
8682                 if (cfg2 & (1 << 17))
8683                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
8684
8685                 /* serdes signal pre-emphasis in register 0x590 is set by
8686                  * the bootcode if bit 18 is set. */
8687                 if (cfg2 & (1 << 18))
8688                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
8689         }
8690 }
8691
8692 static int __devinit tg3_phy_probe(struct tg3 *tp)
8693 {
8694         u32 hw_phy_id_1, hw_phy_id_2;
8695         u32 hw_phy_id, hw_phy_id_masked;
8696         int err;
8697
8698         /* Reading the PHY ID register can conflict with ASF
8699          * firmware access to the PHY hardware.
8700          */
8701         err = 0;
8702         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
8703                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
8704         } else {
8705                 /* Now read the physical PHY_ID from the chip and verify
8706                  * that it is sane.  If it doesn't look good, we fall back
8707                  * to the PHY ID found in the eeprom area and, failing
8708                  * that, to the hard-coded subsystem-ID table.
8709                  */
8710                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
8711                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
8712
8713                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
8714                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
8715                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
8716
8717                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
8718         }
8719
8720         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
8721                 tp->phy_id = hw_phy_id;
8722                 if (hw_phy_id_masked == PHY_ID_BCM8002)
8723                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8724         } else {
8725                 if (tp->phy_id != PHY_ID_INVALID) {
8726                         /* Do nothing, phy ID already set up in
8727                          * tg3_get_eeprom_hw_cfg().
8728                          */
8729                 } else {
8730                         struct subsys_tbl_ent *p;
8731
8732                         /* No eeprom signature?  Try the hardcoded
8733                          * subsys device table.
8734                          */
8735                         p = lookup_by_subsys(tp);
8736                         if (!p)
8737                                 return -ENODEV;
8738
8739                         tp->phy_id = p->phy_id;
8740                         if (!tp->phy_id ||
8741                             tp->phy_id == PHY_ID_BCM8002)
8742                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8743                 }
8744         }
8745
8746         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8747             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
8748                 u32 bmsr, adv_reg, tg3_ctrl;
8749
8750                 tg3_readphy(tp, MII_BMSR, &bmsr);
8751                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
8752                     (bmsr & BMSR_LSTATUS))
8753                         goto skip_phy_reset;
8754                     
8755                 err = tg3_phy_reset(tp);
8756                 if (err)
8757                         return err;
8758
8759                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
8760                            ADVERTISE_100HALF | ADVERTISE_100FULL |
8761                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
8762                 tg3_ctrl = 0;
8763                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
8764                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
8765                                     MII_TG3_CTRL_ADV_1000_FULL);
8766                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8767                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
8768                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
8769                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
8770                 }
8771
8772                 if (!tg3_copper_is_advertising_all(tp)) {
8773                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8774
8775                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8776                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8777
8778                         tg3_writephy(tp, MII_BMCR,
8779                                      BMCR_ANENABLE | BMCR_ANRESTART);
8780                 }
8781                 tg3_phy_set_wirespeed(tp);
8782
8783                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8784                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8785                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8786         }
8787
8788 skip_phy_reset:
8789         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8790                 err = tg3_init_5401phy_dsp(tp);
8791                 if (err)
8792                         return err;
8793         }
8794
8795         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
8796                 err = tg3_init_5401phy_dsp(tp);
8797         }
8798
8799         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8800                 tp->link_config.advertising =
8801                         (ADVERTISED_1000baseT_Half |
8802                          ADVERTISED_1000baseT_Full |
8803                          ADVERTISED_Autoneg |
8804                          ADVERTISED_FIBRE);
8805         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8806                 tp->link_config.advertising &=
8807                         ~(ADVERTISED_1000baseT_Half |
8808                           ADVERTISED_1000baseT_Full);
8809
8810         return err;
8811 }
8812
8813 static void __devinit tg3_read_partno(struct tg3 *tp)
8814 {
8815         unsigned char vpd_data[256];
8816         int i;
8817
8818         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8819                 /* Sun decided not to put the necessary bits in the
8820                  * NVRAM of their onboard tg3 parts :(
8821                  */
8822                 strcpy(tp->board_part_number, "Sun 570X");
8823                 return;
8824         }
8825
8826         for (i = 0; i < 256; i += 4) {
8827                 u32 tmp;
8828
8829                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
8830                         goto out_not_found;
8831
8832                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
8833                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
8834                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
8835                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
8836         }
8837
8838         /* Now parse and find the part number. */
8839         for (i = 0; i < 256; ) {
8840                 unsigned char val = vpd_data[i];
8841                 int block_end;
8842
8843                 if (val == 0x82 || val == 0x91) {
8844                         i = (i + 3 +
8845                              (vpd_data[i + 1] +
8846                               (vpd_data[i + 2] << 8)));
8847                         continue;
8848                 }
8849
8850                 if (val != 0x90)
8851                         goto out_not_found;
8852
8853                 block_end = (i + 3 +
8854                              (vpd_data[i + 1] +
8855                               (vpd_data[i + 2] << 8)));
8856                 i += 3;
8857                 while (i < block_end) {
8858                         if (vpd_data[i + 0] == 'P' &&
8859                             vpd_data[i + 1] == 'N') {
8860                                 int partno_len = vpd_data[i + 2];
8861
8862                                 if (partno_len > 24)
8863                                         goto out_not_found;
8864
8865                                 memcpy(tp->board_part_number,
8866                                        &vpd_data[i + 3],
8867                                        partno_len);
8868
8869                                 /* Success. */
8870                                 return;
8871                         }
                             /* Not the PN field; skip it: 2-byte keyword, 1-byte length, data. */
                             i += 3 + vpd_data[i + 2];
8872                 }
8873
8874                 /* Part number not found. */
8875                 goto out_not_found;
8876         }
8877
8878 out_not_found:
8879         strcpy(tp->board_part_number, "none");
8880 }
8881
8882 #ifdef CONFIG_SPARC64
8883 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
8884 {
8885         struct pci_dev *pdev = tp->pdev;
8886         struct pcidev_cookie *pcp = pdev->sysdata;
8887
8888         if (pcp != NULL) {
8889                 int node = pcp->prom_node;
8890                 u32 venid;
8891                 int err;
8892
8893                 err = prom_getproperty(node, "subsystem-vendor-id",
8894                                        (char *) &venid, sizeof(venid));
8895                 if (err == 0 || err == -1)
8896                         return 0;
8897                 if (venid == PCI_VENDOR_ID_SUN)
8898                         return 1;
8899         }
8900         return 0;
8901 }
8902 #endif
8903
8904 static int __devinit tg3_get_invariants(struct tg3 *tp)
8905 {
8906         static struct pci_device_id write_reorder_chipsets[] = {
8907                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8908                              PCI_DEVICE_ID_INTEL_82801AA_8) },
8909                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8910                              PCI_DEVICE_ID_INTEL_82801AB_8) },
8911                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8912                              PCI_DEVICE_ID_INTEL_82801BA_11) },
8913                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8914                              PCI_DEVICE_ID_INTEL_82801BA_6) },
8915                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
8916                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
8917                 { },
8918         };
8919         u32 misc_ctrl_reg;
8920         u32 cacheline_sz_reg;
8921         u32 pci_state_reg, grc_misc_cfg;
8922         u32 val;
8923         u16 pci_cmd;
8924         int err;
8925
8926 #ifdef CONFIG_SPARC64
8927         if (tg3_is_sun_570X(tp))
8928                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
8929 #endif
8930
8931         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
8932          * reordering to the mailbox registers done by the host
8933          * controller can cause major troubles.  We read back from
8934          * every mailbox register write to force the writes to be
8935          * posted to the chip in order.
8936          */
8937         if (pci_dev_present(write_reorder_chipsets))
8938                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
8939
8940         /* Force memory write invalidate off.  If we leave it on,
8941          * then on 5700_BX chips we have to enable a workaround.
8942          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
8943          * to match the cacheline size.  The Broadcom driver has this
8944          * workaround but turns MWI off all the time, so it never uses
8945          * it.  This seems to suggest that the workaround is insufficient.
8946          */
8947         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8948         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
8949         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8950
8951         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
8952          * has the register indirect write enable bit set before
8953          * we try to access any of the MMIO registers.  It is also
8954          * critical that the PCI-X hw workaround situation is decided
8955          * before that.
8956          */
8957         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8958                               &misc_ctrl_reg);
8959
8960         tp->pci_chip_rev_id = (misc_ctrl_reg >>
8961                                MISC_HOST_CTRL_CHIPREV_SHIFT);
8962
8963         /* Wrong chip ID in 5752 A0. This code can be removed later
8964          * as A0 is not in production.
8965          */
8966         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
8967                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
8968
8969         /* Find msi capability. */
8970         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8971                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
8972
8973         /* Initialize misc host control in PCI block. */
8974         tp->misc_host_ctrl |= (misc_ctrl_reg &
8975                                MISC_HOST_CTRL_CHIPREV);
8976         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8977                                tp->misc_host_ctrl);
8978
8979         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8980                               &cacheline_sz_reg);
8981
8982         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
8983         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
8984         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
8985         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
8986
8987         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8988             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8989             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8990                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
8991
8992         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
8993             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
8994                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
8995
8996         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8997                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
8998
8999         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9000             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9001             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9002                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9003
9004         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9005                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9006
9007         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9008             tp->pci_lat_timer < 64) {
9009                 tp->pci_lat_timer = 64;
9010
9011                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
9012                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
9013                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
9014                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
9015
9016                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9017                                        cacheline_sz_reg);
9018         }
9019
9020         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9021                               &pci_state_reg);
9022
9023         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9024                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9025
9026                 /* If this is a 5700 BX chipset, and we are in PCI-X
9027                  * mode, enable register write workaround.
9028                  *
9029                  * The workaround is to use indirect register accesses
9030                  * for all chip writes not to mailbox registers.
9031                  */
9032                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9033                         u32 pm_reg;
9034                         u16 pci_cmd;
9035
9036                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9037
9038                         /* The chip can have its power management PCI config
9039                          * space registers clobbered due to this bug.
9040                          * So explicitly force the chip into D0 here.
9041                          */
9042                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9043                                               &pm_reg);
9044                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9045                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9046                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9047                                                pm_reg);
9048
9049                         /* Also, force SERR#/PERR# in PCI command. */
9050                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9051                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9052                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9053                 }
9054         }
9055
9056         /* Back to back register writes can cause problems on this chip,
9057          * the workaround is to read back all reg writes except those to
9058          * mailbox regs.  See tg3_write_indirect_reg32().
9059          *
9060          * PCI Express 5750_A0 rev chips need this workaround too.
9061          */
9062         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9063             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9064              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9065                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9066
9067         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9068                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9069         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9070                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9071
9072         /* Chip-specific fixup from Broadcom driver */
9073         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9074             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9075                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9076                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9077         }
9078
9079         /* Get eeprom hw config before calling tg3_set_power_state().
9080          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9081          * determined before calling tg3_set_power_state() so that
9082          * we know whether or not to switch out of Vaux power.
9083          * When the flag is set, it means that GPIO1 is used for eeprom
9084          * write protect and also implies that it is a LOM where GPIOs
9085          * are not used to switch power.
9086          */ 
9087         tg3_get_eeprom_hw_cfg(tp);
9088
9089         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9090          * GPIO1 driven high will bring 5700's external PHY out of reset.
9091          * It is also used as eeprom write protect on LOMs.
9092          */
9093         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9094         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9095             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9096                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9097                                        GRC_LCLCTRL_GPIO_OUTPUT1);
9098         /* Unused GPIO3 must be driven as output on 5752 because there
9099          * are no pull-up resistors on unused GPIO pins.
9100          */
9101         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9102                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9103
9104         /* Force the chip into D0. */
9105         err = tg3_set_power_state(tp, 0);
9106         if (err) {
9107                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9108                        pci_name(tp->pdev));
9109                 return err;
9110         }
9111
9112         /* 5700 B0 chips do not support checksumming correctly due
9113          * to hardware bugs.
9114          */
9115         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9116                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9117
9118         /* Pseudo-header checksum is done by hardware logic and not
9119          * the offload processors, so make the chip do the pseudo-
9120          * header checksums on receive.  For transmit it is more
9121          * convenient to do the pseudo-header checksum in software
9122          * as Linux does that on transmit for us in all cases.
9123          */
9124         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9125         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9126
9127         /* Derive initial jumbo mode from MTU assigned in
9128          * ether_setup() via the alloc_etherdev() call
9129          */
9130         if (tp->dev->mtu > ETH_DATA_LEN &&
9131             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780)
9132                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9133
9134         /* Determine WakeOnLan speed to use. */
9135         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9136             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9137             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9138             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9139                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9140         } else {
9141                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9142         }
9143
9144         /* A few boards don't want Ethernet@WireSpeed phy feature */
9145         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9146             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9147              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
9148              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
9149                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9150
9151         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9152             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9153                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9154         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9155                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9156
9157         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9158                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9159
9160         tp->coalesce_mode = 0;
9161         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9162             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9163                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9164
9165         /* Initialize MAC MI mode, polling disabled. */
9166         tw32_f(MAC_MI_MODE, tp->mi_mode);
9167         udelay(80);
9168
9169         /* Initialize data/descriptor byte/word swapping. */
9170         val = tr32(GRC_MODE);
9171         val &= GRC_MODE_HOST_STACKUP;
9172         tw32(GRC_MODE, val | tp->grc_mode);
9173
9174         tg3_switch_clocks(tp);
9175
9176         /* Clear this out for sanity. */
9177         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9178
9179         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9180                               &pci_state_reg);
9181         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9182             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9183                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9184
9185                 if (chiprevid == CHIPREV_ID_5701_A0 ||
9186                     chiprevid == CHIPREV_ID_5701_B0 ||
9187                     chiprevid == CHIPREV_ID_5701_B2 ||
9188                     chiprevid == CHIPREV_ID_5701_B5) {
9189                         void __iomem *sram_base;
9190
9191                         /* Write some dummy words into the SRAM status block
9192                          * area, see if it reads back correctly.  If the return
9193                          * value is bad, force enable the PCIX workaround.
9194                          */
9195                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9196
9197                         writel(0x00000000, sram_base);
9198                         writel(0x00000000, sram_base + 4);
9199                         writel(0xffffffff, sram_base + 4);
9200                         if (readl(sram_base) != 0x00000000)
9201                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9202                 }
9203         }
9204
9205         udelay(50);
9206         tg3_nvram_init(tp);
9207
9208         grc_misc_cfg = tr32(GRC_MISC_CFG);
9209         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9210
9211         /* Broadcom's driver says that CIOBE multisplit has a bug */
9212 #if 0
9213         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9214             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9215                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9216                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9217         }
9218 #endif
9219         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9220             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9221              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9222                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9223
9224         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9225             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9226                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9227         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9228                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9229                                       HOSTCC_MODE_CLRTICK_TXBD);
9230
9231                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9232                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9233                                        tp->misc_host_ctrl);
9234         }
9235
9236         /* these are limited to 10/100 only */
9237         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9238              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9239             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9240              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9241              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9242               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9243               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9244             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9245              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9246               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9247                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9248
9249         err = tg3_phy_probe(tp);
9250         if (err) {
9251                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9252                        pci_name(tp->pdev), err);
9253                 /* ... but do not return immediately ... */
9254         }
9255
9256         tg3_read_partno(tp);
9257
9258         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9259                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9260         } else {
9261                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9262                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9263                 else
9264                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9265         }
9266
9267         /* 5700 {AX,BX} chips have a broken status block link
9268          * change bit implementation, so we must use the
9269          * status register in those cases.
9270          */
9271         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9272                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9273         else
9274                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9275
9276         /* The led_ctrl is set during tg3_phy_probe, here we might
9277          * have to force the link status polling mechanism based
9278          * upon subsystem IDs.
9279          */
9280         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9281             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9282                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9283                                   TG3_FLAG_USE_LINKCHG_REG);
9284         }
9285
9286         /* For all SERDES we poll the MAC status register. */
9287         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9288                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9289         else
9290                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9291
9292         /* 5700 BX chips need to have their TX producer index mailboxes
9293          * written twice to workaround a bug.
9294          */
9295         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9296                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9297         else
9298                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
9299
9300         /* It seems all chips can get confused if TX buffers
9301          * straddle the 4GB address boundary in some cases.
9302          */
9303         tp->dev->hard_start_xmit = tg3_start_xmit;
9304
9305         tp->rx_offset = 2;
9306         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9307             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9308                 tp->rx_offset = 0;
9309
9310         /* By default, disable wake-on-lan.  User can change this
9311          * using ETHTOOL_SWOL.
9312          */
9313         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
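              /* For reference: once the driver is up, WOL can be turned back
               * on from userspace via the ETHTOOL_SWOL ioctl, e.g. with
               * "ethtool -s eth0 wol g" (the interface name "eth0" is only an
               * example).
               */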
9314
9315         return err;
9316 }
9317
9318 #ifdef CONFIG_SPARC64
9319 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9320 {
9321         struct net_device *dev = tp->dev;
9322         struct pci_dev *pdev = tp->pdev;
9323         struct pcidev_cookie *pcp = pdev->sysdata;
9324
9325         if (pcp != NULL) {
9326                 int node = pcp->prom_node;
9327
9328                 if (prom_getproplen(node, "local-mac-address") == 6) {
9329                         prom_getproperty(node, "local-mac-address",
9330                                          dev->dev_addr, 6);
9331                         return 0;
9332                 }
9333         }
9334         return -ENODEV;
9335 }
9336
9337 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9338 {
9339         struct net_device *dev = tp->dev;
9340
9341         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
9342         return 0;
9343 }
9344 #endif
9345
9346 static int __devinit tg3_get_device_address(struct tg3 *tp)
9347 {
9348         struct net_device *dev = tp->dev;
9349         u32 hi, lo, mac_offset;
9350
9351 #ifdef CONFIG_SPARC64
9352         if (!tg3_get_macaddr_sparc(tp))
9353                 return 0;
9354 #endif
9355
9356         mac_offset = 0x7c;
9357         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9358              !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
9359             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
9360                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9361                         mac_offset = 0xcc;
9362                 if (tg3_nvram_lock(tp))
9363                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9364                 else
9365                         tg3_nvram_unlock(tp);
9366         }
9367
9368         /* First try to get it from MAC address mailbox. */
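              /* The check below treats 0x484b (ASCII "HK") in the upper half
               * of the high word as a marker that the mailbox holds a valid
               * address: the first two MAC bytes sit in the low half of the
               * high word, the remaining four in the low word.
               */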
9369         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
9370         if ((hi >> 16) == 0x484b) {
9371                 dev->dev_addr[0] = (hi >>  8) & 0xff;
9372                 dev->dev_addr[1] = (hi >>  0) & 0xff;
9373
9374                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9375                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9376                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9377                 dev->dev_addr[4] = (lo >>  8) & 0xff;
9378                 dev->dev_addr[5] = (lo >>  0) & 0xff;
9379         }
9380         /* Next, try NVRAM. */
9381         else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
9382                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9383                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9384                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9385                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9386                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
9387                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
9388                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9389                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9390         }
9391         /* Finally just fetch it out of the MAC control regs. */
9392         else {
9393                 hi = tr32(MAC_ADDR_0_HIGH);
9394                 lo = tr32(MAC_ADDR_0_LOW);
9395
9396                 dev->dev_addr[5] = lo & 0xff;
9397                 dev->dev_addr[4] = (lo >> 8) & 0xff;
9398                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9399                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9400                 dev->dev_addr[1] = hi & 0xff;
9401                 dev->dev_addr[0] = (hi >> 8) & 0xff;
9402         }
9403
9404         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9405 #ifdef CONFIG_SPARC64
9406                 if (!tg3_get_default_macaddr_sparc(tp))
9407                         return 0;
9408 #endif
9409                 return -EINVAL;
9410         }
9411         return 0;
9412 }
9413
9414 #define BOUNDARY_SINGLE_CACHELINE       1
9415 #define BOUNDARY_MULTI_CACHELINE        2
9416
9417 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9418 {
9419         int cacheline_size;
9420         u8 byte;
9421         int goal;
9422
9423         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
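              /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, so multiply
               * by four to get bytes (e.g. a value of 16 means 64-byte cache
               * lines).  A value of zero means the size was never programmed,
               * so the worst case (1024 bytes) is assumed below.
               */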
9424         if (byte == 0)
9425                 cacheline_size = 1024;
9426         else
9427                 cacheline_size = (int) byte * 4;
9428
9429         /* On 5703 and later chips, the boundary bits have no
9430          * effect.
9431          */
9432         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9433             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9434             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9435                 goto out;
9436
9437 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9438         goal = BOUNDARY_MULTI_CACHELINE;
9439 #else
9440 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9441         goal = BOUNDARY_SINGLE_CACHELINE;
9442 #else
9443         goal = 0;
9444 #endif
9445 #endif
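              /* Summary of the selection above: ppc64, ia64 and parisc aim
               * for a multi-cacheline boundary, sparc64 and alpha limit
               * bursts to a single cacheline, and all other architectures
               * (goal == 0) keep the chip's default boundary settings.
               */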
9446
9447         if (!goal)
9448                 goto out;
9449
9450         /* PCI controllers on most RISC systems tend to disconnect
9451          * when a device tries to burst across a cache-line boundary.
9452          * Therefore, letting tg3 do so just wastes PCI bandwidth.
9453          *
9454          * Unfortunately, for PCI-E there are only limited
9455          * write-side controls for this, and thus for reads
9456          * we will still get the disconnects.  We'll also waste
9457          * these PCI cycles for both read and write for chips
9458          * other than 5700 and 5701 which do not implement the
9459          * boundary bits.
9460          */
9461         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9462             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9463                 switch (cacheline_size) {
9464                 case 16:
9465                 case 32:
9466                 case 64:
9467                 case 128:
9468                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9469                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9470                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9471                         } else {
9472                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9473                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9474                         }
9475                         break;
9476
9477                 case 256:
9478                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9479                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9480                         break;
9481
9482                 default:
9483                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9484                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9485                         break;
9486                 };
9487         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9488                 switch (cacheline_size) {
9489                 case 16:
9490                 case 32:
9491                 case 64:
9492                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9493                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9494                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9495                                 break;
9496                         }
9497                         /* fallthrough */
9498                 case 128:
9499                 default:
9500                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9501                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
9502                         break;
9503                 };
9504         } else {
9505                 switch (cacheline_size) {
9506                 case 16:
9507                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9508                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
9509                                         DMA_RWCTRL_WRITE_BNDRY_16);
9510                                 break;
9511                         }
9512                         /* fallthrough */
9513                 case 32:
9514                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9515                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
9516                                         DMA_RWCTRL_WRITE_BNDRY_32);
9517                                 break;
9518                         }
9519                         /* fallthrough */
9520                 case 64:
9521                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9522                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
9523                                         DMA_RWCTRL_WRITE_BNDRY_64);
9524                                 break;
9525                         }
9526                         /* fallthrough */
9527                 case 128:
9528                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9529                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
9530                                         DMA_RWCTRL_WRITE_BNDRY_128);
9531                                 break;
9532                         }
9533                         /* fallthrough */
9534                 case 256:
9535                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
9536                                 DMA_RWCTRL_WRITE_BNDRY_256);
9537                         break;
9538                 case 512:
9539                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
9540                                 DMA_RWCTRL_WRITE_BNDRY_512);
9541                         break;
9542                 case 1024:
9543                 default:
9544                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
9545                                 DMA_RWCTRL_WRITE_BNDRY_1024);
9546                         break;
9547                 };
9548         }
9549
9550 out:
9551         return val;
9552 }
9553
9554 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
9555 {
9556         struct tg3_internal_buffer_desc test_desc;
9557         u32 sram_dma_descs;
9558         int i, ret;
9559
9560         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
9561
9562         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
9563         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
9564         tw32(RDMAC_STATUS, 0);
9565         tw32(WDMAC_STATUS, 0);
9566
9567         tw32(BUFMGR_MODE, 0);
9568         tw32(FTQ_RESET, 0);
9569
9570         test_desc.addr_hi = ((u64) buf_dma) >> 32;
9571         test_desc.addr_lo = buf_dma & 0xffffffff;
9572         test_desc.nic_mbuf = 0x00002100;
9573         test_desc.len = size;
9574
9575         /*
9576          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
9577          * the *second* time the tg3 driver was getting loaded after an
9578          * initial scan.
9579          *
9580          * Broadcom tells me:
9581          *   ...the DMA engine is connected to the GRC block and a DMA
9582          *   reset may affect the GRC block in some unpredictable way...
9583          *   The behavior of resets to individual blocks has not been tested.
9584          *
9585          * Broadcom noted the GRC reset will also reset all sub-components.
9586          */
9587         if (to_device) {
9588                 test_desc.cqid_sqid = (13 << 8) | 2;
9589
9590                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
9591                 udelay(40);
9592         } else {
9593                 test_desc.cqid_sqid = (16 << 8) | 7;
9594
9595                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
9596                 udelay(40);
9597         }
9598         test_desc.flags = 0x00000005;
9599
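              /* Copy the test descriptor into NIC SRAM one 32-bit word at a
               * time through the indirect memory window: point the window at
               * the target SRAM address, then write the data register.
               */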
9600         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
9601                 u32 val;
9602
9603                 val = *(((u32 *)&test_desc) + i);
9604                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
9605                                        sram_dma_descs + (i * sizeof(u32)));
9606                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
9607         }
9608         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
9609
9610         if (to_device) {
9611                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
9612         } else {
9613                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
9614         }
9615
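              /* Poll the matching completion FIFO for up to ~4 ms
               * (40 iterations x 100 us) until the descriptor address shows
               * up, indicating the transfer completed.
               */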
9616         ret = -ENODEV;
9617         for (i = 0; i < 40; i++) {
9618                 u32 val;
9619
9620                 if (to_device)
9621                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
9622                 else
9623                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
9624                 if ((val & 0xffff) == sram_dma_descs) {
9625                         ret = 0;
9626                         break;
9627                 }
9628
9629                 udelay(100);
9630         }
9631
9632         return ret;
9633 }
9634
9635 #define TEST_BUFFER_SIZE        0x2000
9636
9637 static int __devinit tg3_test_dma(struct tg3 *tp)
9638 {
9639         dma_addr_t buf_dma;
9640         u32 *buf, saved_dma_rwctrl;
9641         int ret;
9642
9643         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
9644         if (!buf) {
9645                 ret = -ENOMEM;
9646                 goto out_nofree;
9647         }
9648
9649         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
9650                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
9651
9652         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
9653
9654         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9655                 /* DMA read watermark not used on PCIE */
9656                 tp->dma_rwctrl |= 0x00180000;
9657         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
9658                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
9659                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
9660                         tp->dma_rwctrl |= 0x003f0000;
9661                 else
9662                         tp->dma_rwctrl |= 0x003f000f;
9663         } else {
9664                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
9665                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9666                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
9667
9668                         if (ccval == 0x6 || ccval == 0x7)
9669                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
9670
9671                         /* Set bit 23 to enable PCIX hw bug fix */
9672                         tp->dma_rwctrl |= 0x009f0000;
9673                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
9674                         /* 5780 always in PCIX mode */
9675                         tp->dma_rwctrl |= 0x00144000;
9676                 } else {
9677                         tp->dma_rwctrl |= 0x001b000f;
9678                 }
9679         }
9680
9681         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
9682             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9683                 tp->dma_rwctrl &= 0xfffffff0;
9684
9685         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9686             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
9687                 /* Remove this if it causes problems for some boards. */
9688                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
9689
9690                 /* On 5700/5701 chips, we need to set this bit.
9691                  * Otherwise the chip will issue cacheline transactions
9692                  * to streamable DMA memory with not all the byte
9693                  * enables turned on.  This is an error on several
9694                  * RISC PCI controllers, in particular sparc64.
9695                  *
9696                  * On 5703/5704 chips, this bit has been reassigned
9697                  * a different meaning.  In particular, it is used
9698                  * on those chips to enable a PCI-X workaround.
9699                  */
9700                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
9701         }
9702
9703         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9704
9705 #if 0
9706         /* Unneeded, already done by tg3_get_invariants.  */
9707         tg3_switch_clocks(tp);
9708 #endif
9709
9710         ret = 0;
9711         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9712             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
9713                 goto out;
9714
9715         /* It is best to perform DMA test with maximum write burst size
9716          * to expose the 5700/5701 write DMA bug.
9717          */
9718         saved_dma_rwctrl = tp->dma_rwctrl;
9719         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9720         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9721
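              /* The loop below fills the buffer with a known pattern, DMAs it
               * to the chip, DMAs it back, and compares.  If a mismatch is
               * seen while the write boundary is not already 16 bytes, the
               * boundary is dropped to 16 and the test retried; a mismatch at
               * the 16-byte boundary means the hardware cannot be trusted.
               */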
9722         while (1) {
9723                 u32 *p = buf, i;
9724
9725                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
9726                         p[i] = i;
9727
9728                 /* Send the buffer to the chip. */
9729                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
9730                 if (ret) {
9731                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
9732                         break;
9733                 }
9734
9735 #if 0
9736                 /* validate data reached card RAM correctly. */
9737                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
9738                         u32 val;
9739                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
9740                         if (le32_to_cpu(val) != p[i]) {
9741                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
9742                                 /* ret = -ENODEV here? */
9743                         }
9744                         p[i] = 0;
9745                 }
9746 #endif
9747                 /* Now read it back. */
9748                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
9749                 if (ret) {
9750                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
9751
9752                         break;
9753                 }
9754
9755                 /* Verify it. */
9756                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
9757                         if (p[i] == i)
9758                                 continue;
9759
9760                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
9761                             DMA_RWCTRL_WRITE_BNDRY_16) {
9762                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9763                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
9764                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9765                                 break;
9766                         } else {
9767                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
9768                                 ret = -ENODEV;
9769                                 goto out;
9770                         }
9771                 }
9772
9773                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
9774                         /* Success. */
9775                         ret = 0;
9776                         break;
9777                 }
9778         }
9779         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
9780             DMA_RWCTRL_WRITE_BNDRY_16) {
9781                 static struct pci_device_id dma_wait_state_chipsets[] = {
9782                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
9783                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
9784                         { },
9785                 };
9786
9787                 /* DMA test passed without adjusting DMA boundary,
9788                  * now look for chipsets that are known to expose the
9789                  * DMA bug without failing the test.
9790                  */
9791                 if (pci_dev_present(dma_wait_state_chipsets)) {
9792                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9793                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
9794                 }
9795                 else
9796                         /* Safe to use the calculated DMA boundary. */
9797                         tp->dma_rwctrl = saved_dma_rwctrl;
9798
9799                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9800         }
9801
9802 out:
9803         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
9804 out_nofree:
9805         return ret;
9806 }
9807
9808 static void __devinit tg3_init_link_config(struct tg3 *tp)
9809 {
9810         tp->link_config.advertising =
9811                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
9812                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
9813                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
9814                  ADVERTISED_Autoneg | ADVERTISED_MII);
9815         tp->link_config.speed = SPEED_INVALID;
9816         tp->link_config.duplex = DUPLEX_INVALID;
9817         tp->link_config.autoneg = AUTONEG_ENABLE;
9818         netif_carrier_off(tp->dev);
9819         tp->link_config.active_speed = SPEED_INVALID;
9820         tp->link_config.active_duplex = DUPLEX_INVALID;
9821         tp->link_config.phy_is_low_power = 0;
9822         tp->link_config.orig_speed = SPEED_INVALID;
9823         tp->link_config.orig_duplex = DUPLEX_INVALID;
9824         tp->link_config.orig_autoneg = AUTONEG_INVALID;
9825 }
9826
9827 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
9828 {
9829         tp->bufmgr_config.mbuf_read_dma_low_water =
9830                 DEFAULT_MB_RDMA_LOW_WATER;
9831         tp->bufmgr_config.mbuf_mac_rx_low_water =
9832                 DEFAULT_MB_MACRX_LOW_WATER;
9833         tp->bufmgr_config.mbuf_high_water =
9834                 DEFAULT_MB_HIGH_WATER;
9835
9836         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
9837                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
9838         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
9839                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
9840         tp->bufmgr_config.mbuf_high_water_jumbo =
9841                 DEFAULT_MB_HIGH_WATER_JUMBO;
9842
9843         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
9844         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
9845 }
9846
9847 static char * __devinit tg3_phy_string(struct tg3 *tp)
9848 {
9849         switch (tp->phy_id & PHY_ID_MASK) {
9850         case PHY_ID_BCM5400:    return "5400";
9851         case PHY_ID_BCM5401:    return "5401";
9852         case PHY_ID_BCM5411:    return "5411";
9853         case PHY_ID_BCM5701:    return "5701";
9854         case PHY_ID_BCM5703:    return "5703";
9855         case PHY_ID_BCM5704:    return "5704";
9856         case PHY_ID_BCM5705:    return "5705";
9857         case PHY_ID_BCM5750:    return "5750";
9858         case PHY_ID_BCM5752:    return "5752";
9859         case PHY_ID_BCM5780:    return "5780";
9860         case PHY_ID_BCM8002:    return "8002/serdes";
9861         case 0:                 return "serdes";
9862         default:                return "unknown";
9863         };
9864 }
9865
9866 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
9867 {
9868         struct pci_dev *peer;
9869         unsigned int func, devnr = tp->pdev->devfn & ~7;
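              /* The 5704 is a dual-function device.  Masking off the low
               * three bits of devfn gives function 0 of this slot, so the
               * loop below scans functions 0-7 looking for the sibling port.
               */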
9870
9871         for (func = 0; func < 8; func++) {
9872                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
9873                 if (peer && peer != tp->pdev)
9874                         break;
9875                 pci_dev_put(peer);
9876         }
9877         if (!peer || peer == tp->pdev)
9878                 BUG();
9879
9880         /*
9881          * We don't need to keep the refcount elevated; there's no way
9882          * to remove one half of this device without removing the other
9883          */
9884         pci_dev_put(peer);
9885
9886         return peer;
9887 }
9888
9889 static void __devinit tg3_init_coal(struct tg3 *tp)
9890 {
9891         struct ethtool_coalesce *ec = &tp->coal;
9892
9893         memset(ec, 0, sizeof(*ec));
9894         ec->cmd = ETHTOOL_GCOALESCE;
9895         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
9896         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
9897         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
9898         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
9899         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
9900         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
9901         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
9902         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
9903         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
9904
9905         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
9906                                  HOSTCC_MODE_CLRTICK_TXBD)) {
9907                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
9908                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
9909                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
9910                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
9911         }
9912
9913         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9914                 ec->rx_coalesce_usecs_irq = 0;
9915                 ec->tx_coalesce_usecs_irq = 0;
9916                 ec->stats_block_coalesce_usecs = 0;
9917         }
9918 }
9919
9920 static int __devinit tg3_init_one(struct pci_dev *pdev,
9921                                   const struct pci_device_id *ent)
9922 {
9923         static int tg3_version_printed = 0;
9924         unsigned long tg3reg_base, tg3reg_len;
9925         struct net_device *dev;
9926         struct tg3 *tp;
9927         int i, err, pci_using_dac, pm_cap;
9928
9929         if (tg3_version_printed++ == 0)
9930                 printk(KERN_INFO "%s", version);
9931
9932         err = pci_enable_device(pdev);
9933         if (err) {
9934                 printk(KERN_ERR PFX "Cannot enable PCI device, "
9935                        "aborting.\n");
9936                 return err;
9937         }
9938
9939         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9940                 printk(KERN_ERR PFX "Cannot find proper PCI device "
9941                        "base address, aborting.\n");
9942                 err = -ENODEV;
9943                 goto err_out_disable_pdev;
9944         }
9945
9946         err = pci_request_regions(pdev, DRV_MODULE_NAME);
9947         if (err) {
9948                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
9949                        "aborting.\n");
9950                 goto err_out_disable_pdev;
9951         }
9952
9953         pci_set_master(pdev);
9954
9955         /* Find power-management capability. */
9956         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9957         if (pm_cap == 0) {
9958                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
9959                        "aborting.\n");
9960                 err = -EIO;
9961                 goto err_out_free_res;
9962         }
9963
9964         /* Configure DMA attributes. */
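              /* Try a full 64-bit DMA mask first; if the platform supports it
               * the device can use dual-address-cycle transfers and the
               * netdev gets NETIF_F_HIGHDMA further down.  Otherwise fall
               * back to a 32-bit mask.
               */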
9965         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
9966         if (!err) {
9967                 pci_using_dac = 1;
9968                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
9969                 if (err < 0) {
9970                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
9971                                "for consistent allocations\n");
9972                         goto err_out_free_res;
9973                 }
9974         } else {
9975                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
9976                 if (err) {
9977                         printk(KERN_ERR PFX "No usable DMA configuration, "
9978                                "aborting.\n");
9979                         goto err_out_free_res;
9980                 }
9981                 pci_using_dac = 0;
9982         }
9983
9984         tg3reg_base = pci_resource_start(pdev, 0);
9985         tg3reg_len = pci_resource_len(pdev, 0);
9986
9987         dev = alloc_etherdev(sizeof(*tp));
9988         if (!dev) {
9989                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
9990                 err = -ENOMEM;
9991                 goto err_out_free_res;
9992         }
9993
9994         SET_MODULE_OWNER(dev);
9995         SET_NETDEV_DEV(dev, &pdev->dev);
9996
9997         if (pci_using_dac)
9998                 dev->features |= NETIF_F_HIGHDMA;
9999         dev->features |= NETIF_F_LLTX;
10000 #if TG3_VLAN_TAG_USED
10001         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10002         dev->vlan_rx_register = tg3_vlan_rx_register;
10003         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10004 #endif
10005
10006         tp = netdev_priv(dev);
10007         tp->pdev = pdev;
10008         tp->dev = dev;
10009         tp->pm_cap = pm_cap;
10010         tp->mac_mode = TG3_DEF_MAC_MODE;
10011         tp->rx_mode = TG3_DEF_RX_MODE;
10012         tp->tx_mode = TG3_DEF_TX_MODE;
10013         tp->mi_mode = MAC_MI_MODE_BASE;
10014         if (tg3_debug > 0)
10015                 tp->msg_enable = tg3_debug;
10016         else
10017                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10018
10019         /* The word/byte swap controls here control register access byte
10020          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
10021          * setting below.
10022          */
10023         tp->misc_host_ctrl =
10024                 MISC_HOST_CTRL_MASK_PCI_INT |
10025                 MISC_HOST_CTRL_WORD_SWAP |
10026                 MISC_HOST_CTRL_INDIR_ACCESS |
10027                 MISC_HOST_CTRL_PCISTATE_RW;
10028
10029         /* The NONFRM (non-frame) byte/word swap controls take effect
10030          * on descriptor entries, anything which isn't packet data.
10031          *
10032          * The StrongARM chips on the board (one for tx, one for rx)
10033          * are running in big-endian mode.
10034          */
10035         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10036                         GRC_MODE_WSWAP_NONFRM_DATA);
10037 #ifdef __BIG_ENDIAN
10038         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10039 #endif
10040         spin_lock_init(&tp->lock);
10041         spin_lock_init(&tp->tx_lock);
10042         spin_lock_init(&tp->indirect_lock);
10043         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10044
10045         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10046         if (tp->regs == 0UL) {
10047                 printk(KERN_ERR PFX "Cannot map device registers, "
10048                        "aborting.\n");
10049                 err = -ENOMEM;
10050                 goto err_out_free_dev;
10051         }
10052
10053         tg3_init_link_config(tp);
10054
10055         tg3_init_bufmgr_config(tp);
10056
10057         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10058         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10059         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10060
10061         dev->open = tg3_open;
10062         dev->stop = tg3_close;
10063         dev->get_stats = tg3_get_stats;
10064         dev->set_multicast_list = tg3_set_rx_mode;
10065         dev->set_mac_address = tg3_set_mac_addr;
10066         dev->do_ioctl = tg3_ioctl;
10067         dev->tx_timeout = tg3_tx_timeout;
10068         dev->poll = tg3_poll;
10069         dev->ethtool_ops = &tg3_ethtool_ops;
10070         dev->weight = 64;
10071         dev->watchdog_timeo = TG3_TX_TIMEOUT;
10072         dev->change_mtu = tg3_change_mtu;
10073         dev->irq = pdev->irq;
10074 #ifdef CONFIG_NET_POLL_CONTROLLER
10075         dev->poll_controller = tg3_poll_controller;
10076 #endif
10077
10078         err = tg3_get_invariants(tp);
10079         if (err) {
10080                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10081                        "aborting.\n");
10082                 goto err_out_iounmap;
10083         }
10084
10085         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10086                 tp->bufmgr_config.mbuf_read_dma_low_water =
10087                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10088                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10089                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10090                 tp->bufmgr_config.mbuf_high_water =
10091                         DEFAULT_MB_HIGH_WATER_5705;
10092         }
10093
10094 #if TG3_TSO_SUPPORT != 0
10095         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10096                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10097         }
10098         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10099             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10100             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10101             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10102                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10103         } else {
10104                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10105         }
10106
10107         /* TSO is off by default, user can enable using ethtool.  */
10108 #if 0
10109         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10110                 dev->features |= NETIF_F_TSO;
10111 #endif
10112
10113 #endif
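              /* For reference: when the chip is TSO-capable, segmentation
               * offload can be enabled from userspace with e.g.
               * "ethtool -K eth0 tso on" (interface name is only an example).
               */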
10114
10115         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10116             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10117             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10118                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10119                 tp->rx_pending = 63;
10120         }
10121
10122         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10123                 tp->pdev_peer = tg3_find_5704_peer(tp);
10124
10125         err = tg3_get_device_address(tp);
10126         if (err) {
10127                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10128                        "aborting.\n");
10129                 goto err_out_iounmap;
10130         }
10131
10132         /*
10133          * Reset the chip in case the UNDI or EFI driver did not shut it
10134          * down cleanly.  The DMA self test will enable WDMAC, and we would
10135          * otherwise see (spurious) pending DMA on the PCI bus at that point.
10136          */
10137         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10138             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10139                 pci_save_state(tp->pdev);
10140                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
10141                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10142         }
10143
10144         err = tg3_test_dma(tp);
10145         if (err) {
10146                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10147                 goto err_out_iounmap;
10148         }
10149
10150         /* Tigon3 can do ipv4 only... and some chips have buggy
10151          * checksumming.
10152          */
10153         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10154                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10155                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10156         } else
10157                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10158
10159         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10160                 dev->features &= ~NETIF_F_HIGHDMA;
10161
10162         /* flow control autonegotiation is default behavior */
10163         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10164
10165         tg3_init_coal(tp);
10166
10167         err = register_netdev(dev);
10168         if (err) {
10169                 printk(KERN_ERR PFX "Cannot register net device, "
10170                        "aborting.\n");
10171                 goto err_out_iounmap;
10172         }
10173
10174         pci_set_drvdata(pdev, dev);
10175
10176         /* Now that we have fully setup the chip, save away a snapshot
10177          * of the PCI config space.  We need to restore this after
10178          * GRC_MISC_CFG core clock resets and some resume events.
10179          */
10180         pci_save_state(tp->pdev);
10181
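              /* The bus description below decodes the PCI state flags:
               * PCI-X at high speed reports 133MHz, PCI-X otherwise 100MHz,
               * conventional PCI reports 66MHz or 33MHz, and the bus width
               * comes from the 32BIT flag.
               */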
10182         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
10183                dev->name,
10184                tp->board_part_number,
10185                tp->pci_chip_rev_id,
10186                tg3_phy_string(tp),
10187                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
10188                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
10189                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
10190                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
10191                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
10192                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10193
10194         for (i = 0; i < 6; i++)
10195                 printk("%2.2x%c", dev->dev_addr[i],
10196                        i == 5 ? '\n' : ':');
10197
10198         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10199                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10200                "TSOcap[%d] \n",
10201                dev->name,
10202                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10203                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10204                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10205                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10206                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10207                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10208                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
10209         printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10210                dev->name, tp->dma_rwctrl);
10211
10212         return 0;
10213
10214 err_out_iounmap:
10215         iounmap(tp->regs);
10216
10217 err_out_free_dev:
10218         free_netdev(dev);
10219
10220 err_out_free_res:
10221         pci_release_regions(pdev);
10222
10223 err_out_disable_pdev:
10224         pci_disable_device(pdev);
10225         pci_set_drvdata(pdev, NULL);
10226         return err;
10227 }
10228
10229 static void __devexit tg3_remove_one(struct pci_dev *pdev)
10230 {
10231         struct net_device *dev = pci_get_drvdata(pdev);
10232
10233         if (dev) {
10234                 struct tg3 *tp = netdev_priv(dev);
10235
10236                 unregister_netdev(dev);
10237                 iounmap(tp->regs);
10238                 free_netdev(dev);
10239                 pci_release_regions(pdev);
10240                 pci_disable_device(pdev);
10241                 pci_set_drvdata(pdev, NULL);
10242         }
10243 }
10244
10245 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10246 {
10247         struct net_device *dev = pci_get_drvdata(pdev);
10248         struct tg3 *tp = netdev_priv(dev);
10249         int err;
10250
10251         if (!netif_running(dev))
10252                 return 0;
10253
10254         tg3_netif_stop(tp);
10255
10256         del_timer_sync(&tp->timer);
10257
10258         tg3_full_lock(tp, 1);
10259         tg3_disable_ints(tp);
10260         tg3_full_unlock(tp);
10261
10262         netif_device_detach(dev);
10263
10264         tg3_full_lock(tp, 0);
10265         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10266         tg3_full_unlock(tp);
10267
10268         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
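              /* If the low-power transition failed, bring the hardware and
               * the interface back up so the device is left in a usable
               * state rather than half-suspended.
               */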
10269         if (err) {
10270                 tg3_full_lock(tp, 0);
10271
10272                 tg3_init_hw(tp);
10273
10274                 tp->timer.expires = jiffies + tp->timer_offset;
10275                 add_timer(&tp->timer);
10276
10277                 netif_device_attach(dev);
10278                 tg3_netif_start(tp);
10279
10280                 tg3_full_unlock(tp);
10281         }
10282
10283         return err;
10284 }
10285
10286 static int tg3_resume(struct pci_dev *pdev)
10287 {
10288         struct net_device *dev = pci_get_drvdata(pdev);
10289         struct tg3 *tp = netdev_priv(dev);
10290         int err;
10291
10292         if (!netif_running(dev))
10293                 return 0;
10294
10295         pci_restore_state(tp->pdev);
10296
10297         err = tg3_set_power_state(tp, 0);
10298         if (err)
10299                 return err;
10300
10301         netif_device_attach(dev);
10302
10303         tg3_full_lock(tp, 0);
10304
10305         tg3_init_hw(tp);
10306
10307         tp->timer.expires = jiffies + tp->timer_offset;
10308         add_timer(&tp->timer);
10309
10310         tg3_netif_start(tp);
10311
10312         tg3_full_unlock(tp);
10313
10314         return 0;
10315 }
10316
10317 static struct pci_driver tg3_driver = {
10318         .name           = DRV_MODULE_NAME,
10319         .id_table       = tg3_pci_tbl,
10320         .probe          = tg3_init_one,
10321         .remove         = __devexit_p(tg3_remove_one),
10322         .suspend        = tg3_suspend,
10323         .resume         = tg3_resume
10324 };
10325
10326 static int __init tg3_init(void)
10327 {
10328         return pci_module_init(&tg3_driver);
10329 }
10330
10331 static void __exit tg3_cleanup(void)
10332 {
10333         pci_unregister_driver(&tg3_driver);
10334 }
10335
10336 module_init(tg3_init);
10337 module_exit(tg3_cleanup);