2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2007 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
42 #include <net/checksum.h>
45 #include <asm/system.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
51 #include <asm/idprom.h>
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
58 #define TG3_VLAN_TAG_USED 0
61 #define TG3_TSO_SUPPORT 1
65 #define DRV_MODULE_NAME "tg3"
66 #define PFX DRV_MODULE_NAME ": "
67 #define DRV_MODULE_VERSION "3.91"
68 #define DRV_MODULE_RELDATE "April 18, 2008"
70 #define TG3_DEF_MAC_MODE 0
71 #define TG3_DEF_RX_MODE 0
72 #define TG3_DEF_TX_MODE 0
73 #define TG3_DEF_MSG_ENABLE \
83 /* length of time before we decide the hardware is borked,
84 * and dev->tx_timeout() should be called to fix the problem
86 #define TG3_TX_TIMEOUT (5 * HZ)
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU 60
90 #define TG3_MAX_MTU(tp) \
91 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94 * You can't change the ring sizes, but you can change where you place
95 * them in the NIC onboard memory.
97 #define TG3_RX_RING_SIZE 512
98 #define TG3_DEF_RX_RING_PENDING 200
99 #define TG3_RX_JUMBO_RING_SIZE 256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
102 /* Do not place this n-ring entries value into the tp struct itself,
103 * we really want to expose these constants to GCC so that modulo et
104 * al. operations are done with shifts and masks instead of with
105 * hw multiply/modulo instructions. Another solution would be to
106 * replace things like '% foo' with '& (foo - 1)'.
108 #define TG3_RX_RCB_RING_SIZE(tp) \
109 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
111 #define TG3_TX_RING_SIZE 512
112 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
114 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117 TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
122 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
124 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
133 #define TG3_NUM_TEST 6
135 static char version[] __devinitdata =
136 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140 MODULE_LICENSE("GPL");
141 MODULE_VERSION(DRV_MODULE_VERSION);
143 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
144 module_param(tg3_debug, int, 0);
145 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
147 static struct pci_device_id tg3_pci_tbl[] = {
148 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
206 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
207 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
208 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
209 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
210 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
211 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
212 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
216 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
218 static const struct {
219 const char string[ETH_GSTRING_LEN];
220 } ethtool_stats_keys[TG3_NUM_STATS] = {
223 { "rx_ucast_packets" },
224 { "rx_mcast_packets" },
225 { "rx_bcast_packets" },
227 { "rx_align_errors" },
228 { "rx_xon_pause_rcvd" },
229 { "rx_xoff_pause_rcvd" },
230 { "rx_mac_ctrl_rcvd" },
231 { "rx_xoff_entered" },
232 { "rx_frame_too_long_errors" },
234 { "rx_undersize_packets" },
235 { "rx_in_length_errors" },
236 { "rx_out_length_errors" },
237 { "rx_64_or_less_octet_packets" },
238 { "rx_65_to_127_octet_packets" },
239 { "rx_128_to_255_octet_packets" },
240 { "rx_256_to_511_octet_packets" },
241 { "rx_512_to_1023_octet_packets" },
242 { "rx_1024_to_1522_octet_packets" },
243 { "rx_1523_to_2047_octet_packets" },
244 { "rx_2048_to_4095_octet_packets" },
245 { "rx_4096_to_8191_octet_packets" },
246 { "rx_8192_to_9022_octet_packets" },
253 { "tx_flow_control" },
255 { "tx_single_collisions" },
256 { "tx_mult_collisions" },
258 { "tx_excessive_collisions" },
259 { "tx_late_collisions" },
260 { "tx_collide_2times" },
261 { "tx_collide_3times" },
262 { "tx_collide_4times" },
263 { "tx_collide_5times" },
264 { "tx_collide_6times" },
265 { "tx_collide_7times" },
266 { "tx_collide_8times" },
267 { "tx_collide_9times" },
268 { "tx_collide_10times" },
269 { "tx_collide_11times" },
270 { "tx_collide_12times" },
271 { "tx_collide_13times" },
272 { "tx_collide_14times" },
273 { "tx_collide_15times" },
274 { "tx_ucast_packets" },
275 { "tx_mcast_packets" },
276 { "tx_bcast_packets" },
277 { "tx_carrier_sense_errors" },
281 { "dma_writeq_full" },
282 { "dma_write_prioq_full" },
286 { "rx_threshold_hit" },
288 { "dma_readq_full" },
289 { "dma_read_prioq_full" },
290 { "tx_comp_queue_full" },
292 { "ring_set_send_prod_index" },
293 { "ring_status_update" },
295 { "nic_avoided_irqs" },
296 { "nic_tx_threshold_hit" }
299 static const struct {
300 const char string[ETH_GSTRING_LEN];
301 } ethtool_test_keys[TG3_NUM_TEST] = {
302 { "nvram test (online) " },
303 { "link test (online) " },
304 { "register test (offline)" },
305 { "memory test (offline)" },
306 { "loopback test (offline)" },
307 { "interrupt test (offline)" },
310 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
312 writel(val, tp->regs + off);
315 static u32 tg3_read32(struct tg3 *tp, u32 off)
317 return (readl(tp->regs + off));
320 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
322 writel(val, tp->aperegs + off);
325 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
327 return (readl(tp->aperegs + off));
330 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
334 spin_lock_irqsave(&tp->indirect_lock, flags);
335 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
336 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
337 spin_unlock_irqrestore(&tp->indirect_lock, flags);
340 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
342 writel(val, tp->regs + off);
343 readl(tp->regs + off);
346 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
351 spin_lock_irqsave(&tp->indirect_lock, flags);
352 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
353 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
354 spin_unlock_irqrestore(&tp->indirect_lock, flags);
358 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
362 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
363 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
364 TG3_64BIT_REG_LOW, val);
367 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
368 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
369 TG3_64BIT_REG_LOW, val);
373 spin_lock_irqsave(&tp->indirect_lock, flags);
374 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
375 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
376 spin_unlock_irqrestore(&tp->indirect_lock, flags);
378 /* In indirect mode when disabling interrupts, we also need
379 * to clear the interrupt bit in the GRC local ctrl register.
381 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
383 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
384 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
388 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
393 spin_lock_irqsave(&tp->indirect_lock, flags);
394 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
395 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
396 spin_unlock_irqrestore(&tp->indirect_lock, flags);
400 /* usec_wait specifies the wait time in usec when writing to certain registers
401 * where it is unsafe to read back the register without some delay.
402 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
403 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
405 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
407 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
408 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
409 /* Non-posted methods */
410 tp->write32(tp, off, val);
413 tg3_write32(tp, off, val);
418 /* Wait again after the read for the posted method to guarantee that
419 * the wait time is met.
425 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
427 tp->write32_mbox(tp, off, val);
428 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
429 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
430 tp->read32_mbox(tp, off);
433 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
435 void __iomem *mbox = tp->regs + off;
437 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
439 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
443 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
445 return (readl(tp->regs + off + GRCMBOX_BASE));
448 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
450 writel(val, tp->regs + off + GRCMBOX_BASE);
453 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
454 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
455 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
456 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
457 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
459 #define tw32(reg,val) tp->write32(tp, reg, val)
460 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
461 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
462 #define tr32(reg) tp->read32(tp, reg)
464 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
468 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
469 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
472 spin_lock_irqsave(&tp->indirect_lock, flags);
473 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
474 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
475 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
477 /* Always leave this as zero. */
478 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
480 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
481 tw32_f(TG3PCI_MEM_WIN_DATA, val);
483 /* Always leave this as zero. */
484 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
486 spin_unlock_irqrestore(&tp->indirect_lock, flags);
489 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
493 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
494 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
499 spin_lock_irqsave(&tp->indirect_lock, flags);
500 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
501 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
502 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
504 /* Always leave this as zero. */
505 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
507 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
508 *val = tr32(TG3PCI_MEM_WIN_DATA);
510 /* Always leave this as zero. */
511 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
513 spin_unlock_irqrestore(&tp->indirect_lock, flags);
516 static void tg3_ape_lock_init(struct tg3 *tp)
520 /* Make sure the driver hasn't any stale locks. */
521 for (i = 0; i < 8; i++)
522 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523 APE_LOCK_GRANT_DRIVER);
526 static int tg3_ape_lock(struct tg3 *tp, int locknum)
532 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
536 case TG3_APE_LOCK_MEM:
544 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
546 /* Wait for up to 1 millisecond to acquire lock. */
547 for (i = 0; i < 100; i++) {
548 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
549 if (status == APE_LOCK_GRANT_DRIVER)
554 if (status != APE_LOCK_GRANT_DRIVER) {
555 /* Revoke the lock request. */
556 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
557 APE_LOCK_GRANT_DRIVER);
565 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
569 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
573 case TG3_APE_LOCK_MEM:
580 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
583 static void tg3_disable_ints(struct tg3 *tp)
585 tw32(TG3PCI_MISC_HOST_CTRL,
586 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
587 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
590 static inline void tg3_cond_int(struct tg3 *tp)
592 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
593 (tp->hw_status->status & SD_STATUS_UPDATED))
594 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
596 tw32(HOSTCC_MODE, tp->coalesce_mode |
597 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
600 static void tg3_enable_ints(struct tg3 *tp)
605 tw32(TG3PCI_MISC_HOST_CTRL,
606 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
607 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
608 (tp->last_tag << 24));
609 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
610 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
611 (tp->last_tag << 24));
615 static inline unsigned int tg3_has_work(struct tg3 *tp)
617 struct tg3_hw_status *sblk = tp->hw_status;
618 unsigned int work_exists = 0;
620 /* check for phy events */
621 if (!(tp->tg3_flags &
622 (TG3_FLAG_USE_LINKCHG_REG |
623 TG3_FLAG_POLL_SERDES))) {
624 if (sblk->status & SD_STATUS_LINK_CHG)
627 /* check for RX/TX work to do */
628 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
636 * similar to tg3_enable_ints, but it accurately determines whether there
637 * is new work pending and can return without flushing the PIO write
638 * which reenables interrupts
640 static void tg3_restart_ints(struct tg3 *tp)
642 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
646 /* When doing tagged status, this work check is unnecessary.
647 * The last_tag we write above tells the chip which piece of
648 * work we've completed.
650 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
652 tw32(HOSTCC_MODE, tp->coalesce_mode |
653 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
656 static inline void tg3_netif_stop(struct tg3 *tp)
658 tp->dev->trans_start = jiffies; /* prevent tx timeout */
659 napi_disable(&tp->napi);
660 netif_tx_disable(tp->dev);
663 static inline void tg3_netif_start(struct tg3 *tp)
665 netif_wake_queue(tp->dev);
666 /* NOTE: unconditional netif_wake_queue is only appropriate
667 * so long as all callers are assured to have free tx slots
668 * (such as after tg3_init_hw)
670 napi_enable(&tp->napi);
671 tp->hw_status->status |= SD_STATUS_UPDATED;
675 static void tg3_switch_clocks(struct tg3 *tp)
677 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
680 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
681 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
684 orig_clock_ctrl = clock_ctrl;
685 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
686 CLOCK_CTRL_CLKRUN_OENABLE |
688 tp->pci_clock_ctrl = clock_ctrl;
690 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
691 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
692 tw32_wait_f(TG3PCI_CLOCK_CTRL,
693 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
695 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
696 tw32_wait_f(TG3PCI_CLOCK_CTRL,
698 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
700 tw32_wait_f(TG3PCI_CLOCK_CTRL,
701 clock_ctrl | (CLOCK_CTRL_ALTCLK),
704 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
707 #define PHY_BUSY_LOOPS 5000
709 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
715 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
717 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
723 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
724 MI_COM_PHY_ADDR_MASK);
725 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
726 MI_COM_REG_ADDR_MASK);
727 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
729 tw32_f(MAC_MI_COM, frame_val);
731 loops = PHY_BUSY_LOOPS;
734 frame_val = tr32(MAC_MI_COM);
736 if ((frame_val & MI_COM_BUSY) == 0) {
738 frame_val = tr32(MAC_MI_COM);
746 *val = frame_val & MI_COM_DATA_MASK;
750 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
751 tw32_f(MAC_MI_MODE, tp->mi_mode);
758 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
764 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
765 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
768 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
770 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
774 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
775 MI_COM_PHY_ADDR_MASK);
776 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
777 MI_COM_REG_ADDR_MASK);
778 frame_val |= (val & MI_COM_DATA_MASK);
779 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
781 tw32_f(MAC_MI_COM, frame_val);
783 loops = PHY_BUSY_LOOPS;
786 frame_val = tr32(MAC_MI_COM);
787 if ((frame_val & MI_COM_BUSY) == 0) {
789 frame_val = tr32(MAC_MI_COM);
799 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
800 tw32_f(MAC_MI_MODE, tp->mi_mode);
807 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
809 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
810 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
813 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
817 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
818 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
821 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
824 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
825 tg3_writephy(tp, MII_TG3_EPHY_TEST,
826 ephy | MII_TG3_EPHY_SHADOW_EN);
827 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
829 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
831 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
832 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
834 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
837 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
838 MII_TG3_AUXCTL_SHDWSEL_MISC;
839 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
840 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
842 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
844 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
845 phy |= MII_TG3_AUXCTL_MISC_WREN;
846 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
851 static void tg3_phy_set_wirespeed(struct tg3 *tp)
855 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
858 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
859 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
860 tg3_writephy(tp, MII_TG3_AUX_CTRL,
861 (val | (1 << 15) | (1 << 4)));
864 static int tg3_bmcr_reset(struct tg3 *tp)
869 /* OK, reset it, and poll the BMCR_RESET bit until it
870 * clears or we time out.
872 phy_control = BMCR_RESET;
873 err = tg3_writephy(tp, MII_BMCR, phy_control);
879 err = tg3_readphy(tp, MII_BMCR, &phy_control);
883 if ((phy_control & BMCR_RESET) == 0) {
895 static void tg3_phy_apply_otp(struct tg3 *tp)
904 /* Enable SM_DSP clock and tx 6dB coding. */
905 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
906 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
907 MII_TG3_AUXCTL_ACTL_TX_6DB;
908 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
910 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
911 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
912 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
914 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
915 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
916 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
918 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
919 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
920 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
922 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
923 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
925 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
926 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
928 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
929 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
930 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
932 /* Turn off SM_DSP clock. */
933 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
934 MII_TG3_AUXCTL_ACTL_TX_6DB;
935 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
938 static int tg3_wait_macro_done(struct tg3 *tp)
945 if (!tg3_readphy(tp, 0x16, &tmp32)) {
946 if ((tmp32 & 0x1000) == 0)
956 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
958 static const u32 test_pat[4][6] = {
959 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
960 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
961 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
962 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
966 for (chan = 0; chan < 4; chan++) {
969 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
970 (chan * 0x2000) | 0x0200);
971 tg3_writephy(tp, 0x16, 0x0002);
973 for (i = 0; i < 6; i++)
974 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
977 tg3_writephy(tp, 0x16, 0x0202);
978 if (tg3_wait_macro_done(tp)) {
983 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
984 (chan * 0x2000) | 0x0200);
985 tg3_writephy(tp, 0x16, 0x0082);
986 if (tg3_wait_macro_done(tp)) {
991 tg3_writephy(tp, 0x16, 0x0802);
992 if (tg3_wait_macro_done(tp)) {
997 for (i = 0; i < 6; i += 2) {
1000 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1001 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1002 tg3_wait_macro_done(tp)) {
1008 if (low != test_pat[chan][i] ||
1009 high != test_pat[chan][i+1]) {
1010 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1011 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1012 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1022 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1026 for (chan = 0; chan < 4; chan++) {
1029 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1030 (chan * 0x2000) | 0x0200);
1031 tg3_writephy(tp, 0x16, 0x0002);
1032 for (i = 0; i < 6; i++)
1033 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1034 tg3_writephy(tp, 0x16, 0x0202);
1035 if (tg3_wait_macro_done(tp))
1042 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1044 u32 reg32, phy9_orig;
1045 int retries, do_phy_reset, err;
1051 err = tg3_bmcr_reset(tp);
1057 /* Disable transmitter and interrupt. */
1058 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
1062 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1064 /* Set full-duplex, 1000 mbps. */
1065 tg3_writephy(tp, MII_BMCR,
1066 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1068 /* Set to master mode. */
1069 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1072 tg3_writephy(tp, MII_TG3_CTRL,
1073 (MII_TG3_CTRL_AS_MASTER |
1074 MII_TG3_CTRL_ENABLE_AS_MASTER));
1076 /* Enable SM_DSP_CLOCK and 6dB. */
1077 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1079 /* Block the PHY control access. */
1080 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1081 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1083 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1086 } while (--retries);
1088 err = tg3_phy_reset_chanpat(tp);
1092 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1093 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1095 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1096 tg3_writephy(tp, 0x16, 0x0000);
1098 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1099 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1100 /* Set Extended packet length bit for jumbo frames */
1101 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1104 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1107 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1109 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
1111 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1118 static void tg3_link_report(struct tg3 *);
1120 /* This will reset the tigon3 PHY if there is no valid
1121 * link unless the FORCE argument is non-zero.
1123 static int tg3_phy_reset(struct tg3 *tp)
1129 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1132 val = tr32(GRC_MISC_CFG);
1133 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1136 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1137 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1141 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1142 netif_carrier_off(tp->dev);
1143 tg3_link_report(tp);
1146 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1147 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1148 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1149 err = tg3_phy_reset_5703_4_5(tp);
1156 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1157 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1158 cpmuctrl = tr32(TG3_CPMU_CTRL);
1159 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1161 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1164 err = tg3_bmcr_reset(tp);
1168 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1171 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1172 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1174 tw32(TG3_CPMU_CTRL, cpmuctrl);
1177 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1180 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1181 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1182 CPMU_LSPD_1000MB_MACCLK_12_5) {
1183 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1185 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1188 /* Disable GPHY autopowerdown. */
1189 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1190 MII_TG3_MISC_SHDW_WREN |
1191 MII_TG3_MISC_SHDW_APD_SEL |
1192 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
1195 tg3_phy_apply_otp(tp);
1198 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1199 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1200 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1201 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1202 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1203 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1204 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1206 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1207 tg3_writephy(tp, 0x1c, 0x8d68);
1208 tg3_writephy(tp, 0x1c, 0x8d68);
1210 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1211 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1212 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1213 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1214 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1215 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1216 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1217 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1218 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1220 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1221 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1222 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1223 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1224 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1225 tg3_writephy(tp, MII_TG3_TEST1,
1226 MII_TG3_TEST1_TRIM_EN | 0x4);
1228 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1229 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1231 /* Set Extended packet length bit (bit 14) on all chips that */
1232 /* support jumbo frames */
1233 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1234 /* Cannot do read-modify-write on 5401 */
1235 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1236 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1239 /* Set bit 14 with read-modify-write to preserve other bits */
1240 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1241 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1242 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1245 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1246 * jumbo frames transmission.
1248 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1251 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1252 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1253 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1256 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1257 /* adjust output voltage */
1258 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1261 tg3_phy_toggle_automdix(tp, 1);
1262 tg3_phy_set_wirespeed(tp);
1266 static void tg3_frob_aux_power(struct tg3 *tp)
1268 struct tg3 *tp_peer = tp;
1270 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1273 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1274 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1275 struct net_device *dev_peer;
1277 dev_peer = pci_get_drvdata(tp->pdev_peer);
1278 /* remove_one() may have been run on the peer. */
1282 tp_peer = netdev_priv(dev_peer);
1285 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1286 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1287 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1288 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1290 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1291 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1292 (GRC_LCLCTRL_GPIO_OE0 |
1293 GRC_LCLCTRL_GPIO_OE1 |
1294 GRC_LCLCTRL_GPIO_OE2 |
1295 GRC_LCLCTRL_GPIO_OUTPUT0 |
1296 GRC_LCLCTRL_GPIO_OUTPUT1),
1300 u32 grc_local_ctrl = 0;
1302 if (tp_peer != tp &&
1303 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1306 /* Workaround to prevent overdrawing Amps. */
1307 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1309 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1310 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1311 grc_local_ctrl, 100);
1314 /* On 5753 and variants, GPIO2 cannot be used. */
1315 no_gpio2 = tp->nic_sram_data_cfg &
1316 NIC_SRAM_DATA_CFG_NO_GPIO2;
1318 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1319 GRC_LCLCTRL_GPIO_OE1 |
1320 GRC_LCLCTRL_GPIO_OE2 |
1321 GRC_LCLCTRL_GPIO_OUTPUT1 |
1322 GRC_LCLCTRL_GPIO_OUTPUT2;
1324 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1325 GRC_LCLCTRL_GPIO_OUTPUT2);
1327 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1328 grc_local_ctrl, 100);
1330 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1332 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1333 grc_local_ctrl, 100);
1336 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1337 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1338 grc_local_ctrl, 100);
1342 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1343 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1344 if (tp_peer != tp &&
1345 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1348 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1349 (GRC_LCLCTRL_GPIO_OE1 |
1350 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1352 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1353 GRC_LCLCTRL_GPIO_OE1, 100);
1355 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1356 (GRC_LCLCTRL_GPIO_OE1 |
1357 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1362 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1364 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1366 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1367 if (speed != SPEED_10)
1369 } else if (speed == SPEED_10)
1375 static int tg3_setup_phy(struct tg3 *, int);
1377 #define RESET_KIND_SHUTDOWN 0
1378 #define RESET_KIND_INIT 1
1379 #define RESET_KIND_SUSPEND 2
1381 static void tg3_write_sig_post_reset(struct tg3 *, int);
1382 static int tg3_halt_cpu(struct tg3 *, u32);
1383 static int tg3_nvram_lock(struct tg3 *);
1384 static void tg3_nvram_unlock(struct tg3 *);
1386 static void tg3_power_down_phy(struct tg3 *tp)
1390 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1391 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1392 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1393 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1396 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1397 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1398 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1403 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1405 val = tr32(GRC_MISC_CFG);
1406 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1410 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1411 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1412 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1415 /* The PHY should not be powered down on some chips because
1418 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1419 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1420 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1421 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1424 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1425 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1426 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1427 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
1428 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1431 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1434 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1437 u16 power_control, power_caps;
1438 int pm = tp->pm_cap;
1440 /* Make sure register accesses (indirect or otherwise)
1441 * will function correctly.
1443 pci_write_config_dword(tp->pdev,
1444 TG3PCI_MISC_HOST_CTRL,
1445 tp->misc_host_ctrl);
1447 pci_read_config_word(tp->pdev,
1450 power_control |= PCI_PM_CTRL_PME_STATUS;
1451 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1455 pci_write_config_word(tp->pdev,
1458 udelay(100); /* Delay after power state change */
1460 /* Switch out of Vaux if it is a NIC */
1461 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1462 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1479 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1481 tp->dev->name, state);
1485 power_control |= PCI_PM_CTRL_PME_ENABLE;
1487 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1488 tw32(TG3PCI_MISC_HOST_CTRL,
1489 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1491 if (tp->link_config.phy_is_low_power == 0) {
1492 tp->link_config.phy_is_low_power = 1;
1493 tp->link_config.orig_speed = tp->link_config.speed;
1494 tp->link_config.orig_duplex = tp->link_config.duplex;
1495 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1498 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1499 tp->link_config.speed = SPEED_10;
1500 tp->link_config.duplex = DUPLEX_HALF;
1501 tp->link_config.autoneg = AUTONEG_ENABLE;
1502 tg3_setup_phy(tp, 0);
1505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1508 val = tr32(GRC_VCPU_EXT_CTRL);
1509 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1510 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1514 for (i = 0; i < 200; i++) {
1515 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1516 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1521 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1522 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1523 WOL_DRV_STATE_SHUTDOWN |
1527 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1529 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1532 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1533 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1536 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1537 mac_mode = MAC_MODE_PORT_MODE_GMII;
1539 mac_mode = MAC_MODE_PORT_MODE_MII;
1541 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1542 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1544 u32 speed = (tp->tg3_flags &
1545 TG3_FLAG_WOL_SPEED_100MB) ?
1546 SPEED_100 : SPEED_10;
1547 if (tg3_5700_link_polarity(tp, speed))
1548 mac_mode |= MAC_MODE_LINK_POLARITY;
1550 mac_mode &= ~MAC_MODE_LINK_POLARITY;
1553 mac_mode = MAC_MODE_PORT_MODE_TBI;
1556 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1557 tw32(MAC_LED_CTRL, tp->led_ctrl);
1559 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1560 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1561 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1563 tw32_f(MAC_MODE, mac_mode);
1566 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1570 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1571 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1572 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1575 base_val = tp->pci_clock_ctrl;
1576 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1577 CLOCK_CTRL_TXCLK_DISABLE);
1579 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1580 CLOCK_CTRL_PWRDOWN_PLL133, 40);
1581 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1582 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1583 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1585 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1586 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1587 u32 newbits1, newbits2;
1589 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1590 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1591 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1592 CLOCK_CTRL_TXCLK_DISABLE |
1594 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1595 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1596 newbits1 = CLOCK_CTRL_625_CORE;
1597 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1599 newbits1 = CLOCK_CTRL_ALTCLK;
1600 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1603 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1606 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1609 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1612 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1613 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1614 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1615 CLOCK_CTRL_TXCLK_DISABLE |
1616 CLOCK_CTRL_44MHZ_CORE);
1618 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1621 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1622 tp->pci_clock_ctrl | newbits3, 40);
1626 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1627 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1628 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1629 tg3_power_down_phy(tp);
1631 tg3_frob_aux_power(tp);
1633 /* Workaround for unstable PLL clock */
1634 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1635 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1636 u32 val = tr32(0x7d00);
1638 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1640 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1643 err = tg3_nvram_lock(tp);
1644 tg3_halt_cpu(tp, RX_CPU_BASE);
1646 tg3_nvram_unlock(tp);
1650 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1652 /* Finally, set the new power state. */
1653 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1654 udelay(100); /* Delay after power state change */
1659 static void tg3_link_report(struct tg3 *tp)
1661 if (!netif_carrier_ok(tp->dev)) {
1662 if (netif_msg_link(tp))
1663 printk(KERN_INFO PFX "%s: Link is down.\n",
1665 } else if (netif_msg_link(tp)) {
1666 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1668 (tp->link_config.active_speed == SPEED_1000 ?
1670 (tp->link_config.active_speed == SPEED_100 ?
1672 (tp->link_config.active_duplex == DUPLEX_FULL ?
1675 printk(KERN_INFO PFX
1676 "%s: Flow control is %s for TX and %s for RX.\n",
1678 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1680 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1685 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1689 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1690 miireg = ADVERTISE_PAUSE_CAP;
1691 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1692 miireg = ADVERTISE_PAUSE_ASYM;
1693 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1694 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1701 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1705 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1706 miireg = ADVERTISE_1000XPAUSE;
1707 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1708 miireg = ADVERTISE_1000XPSE_ASYM;
1709 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1710 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1717 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1721 if (lcladv & ADVERTISE_PAUSE_CAP) {
1722 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1723 if (rmtadv & LPA_PAUSE_CAP)
1724 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1725 else if (rmtadv & LPA_PAUSE_ASYM)
1726 cap = TG3_FLOW_CTRL_RX;
1728 if (rmtadv & LPA_PAUSE_CAP)
1729 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1731 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1732 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1733 cap = TG3_FLOW_CTRL_TX;
1739 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1743 if (lcladv & ADVERTISE_1000XPAUSE) {
1744 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1745 if (rmtadv & LPA_1000XPAUSE)
1746 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1747 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1748 cap = TG3_FLOW_CTRL_RX;
1750 if (rmtadv & LPA_1000XPAUSE)
1751 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1753 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1754 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1755 cap = TG3_FLOW_CTRL_TX;
1761 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1763 u8 new_tg3_flags = 0;
1764 u32 old_rx_mode = tp->rx_mode;
1765 u32 old_tx_mode = tp->tx_mode;
1767 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1768 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1769 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1770 new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv,
1773 new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv,
1776 new_tg3_flags = tp->link_config.flowctrl;
1779 tp->link_config.active_flowctrl = new_tg3_flags;
1781 if (new_tg3_flags & TG3_FLOW_CTRL_RX)
1782 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1784 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1786 if (old_rx_mode != tp->rx_mode) {
1787 tw32_f(MAC_RX_MODE, tp->rx_mode);
1790 if (new_tg3_flags & TG3_FLOW_CTRL_TX)
1791 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1793 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1795 if (old_tx_mode != tp->tx_mode) {
1796 tw32_f(MAC_TX_MODE, tp->tx_mode);
1800 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1802 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1803 case MII_TG3_AUX_STAT_10HALF:
1805 *duplex = DUPLEX_HALF;
1808 case MII_TG3_AUX_STAT_10FULL:
1810 *duplex = DUPLEX_FULL;
1813 case MII_TG3_AUX_STAT_100HALF:
1815 *duplex = DUPLEX_HALF;
1818 case MII_TG3_AUX_STAT_100FULL:
1820 *duplex = DUPLEX_FULL;
1823 case MII_TG3_AUX_STAT_1000HALF:
1824 *speed = SPEED_1000;
1825 *duplex = DUPLEX_HALF;
1828 case MII_TG3_AUX_STAT_1000FULL:
1829 *speed = SPEED_1000;
1830 *duplex = DUPLEX_FULL;
1834 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1835 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1837 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1841 *speed = SPEED_INVALID;
1842 *duplex = DUPLEX_INVALID;
1847 static void tg3_phy_copper_begin(struct tg3 *tp)
1852 if (tp->link_config.phy_is_low_power) {
1853 /* Entering low power mode. Disable gigabit and
1854 * 100baseT advertisements.
1856 tg3_writephy(tp, MII_TG3_CTRL, 0);
1858 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1859 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1860 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1861 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1863 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1864 } else if (tp->link_config.speed == SPEED_INVALID) {
1865 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1866 tp->link_config.advertising &=
1867 ~(ADVERTISED_1000baseT_Half |
1868 ADVERTISED_1000baseT_Full);
1870 new_adv = ADVERTISE_CSMA;
1871 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1872 new_adv |= ADVERTISE_10HALF;
1873 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1874 new_adv |= ADVERTISE_10FULL;
1875 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1876 new_adv |= ADVERTISE_100HALF;
1877 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1878 new_adv |= ADVERTISE_100FULL;
1880 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1882 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1884 if (tp->link_config.advertising &
1885 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1887 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1888 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1889 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1890 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1891 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1892 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1893 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1894 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1895 MII_TG3_CTRL_ENABLE_AS_MASTER);
1896 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1898 tg3_writephy(tp, MII_TG3_CTRL, 0);
1901 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1902 new_adv |= ADVERTISE_CSMA;
1904 /* Asking for a specific link mode. */
1905 if (tp->link_config.speed == SPEED_1000) {
1906 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1908 if (tp->link_config.duplex == DUPLEX_FULL)
1909 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1911 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1912 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1913 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1914 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1915 MII_TG3_CTRL_ENABLE_AS_MASTER);
1917 if (tp->link_config.speed == SPEED_100) {
1918 if (tp->link_config.duplex == DUPLEX_FULL)
1919 new_adv |= ADVERTISE_100FULL;
1921 new_adv |= ADVERTISE_100HALF;
1923 if (tp->link_config.duplex == DUPLEX_FULL)
1924 new_adv |= ADVERTISE_10FULL;
1926 new_adv |= ADVERTISE_10HALF;
1928 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1933 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1936 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1937 tp->link_config.speed != SPEED_INVALID) {
1938 u32 bmcr, orig_bmcr;
1940 tp->link_config.active_speed = tp->link_config.speed;
1941 tp->link_config.active_duplex = tp->link_config.duplex;
1944 switch (tp->link_config.speed) {
1950 bmcr |= BMCR_SPEED100;
1954 bmcr |= TG3_BMCR_SPEED1000;
1958 if (tp->link_config.duplex == DUPLEX_FULL)
1959 bmcr |= BMCR_FULLDPLX;
1961 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1962 (bmcr != orig_bmcr)) {
1963 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1964 for (i = 0; i < 1500; i++) {
1968 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1969 tg3_readphy(tp, MII_BMSR, &tmp))
1971 if (!(tmp & BMSR_LSTATUS)) {
1976 tg3_writephy(tp, MII_BMCR, bmcr);
1980 tg3_writephy(tp, MII_BMCR,
1981 BMCR_ANENABLE | BMCR_ANRESTART);
1985 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1989 /* Turn off tap power management. */
1990 /* Set Extended packet length bit */
1991 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1993 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1994 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1996 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1997 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1999 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2000 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2002 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2003 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2005 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2006 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2013 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2015 u32 adv_reg, all_mask = 0;
2017 if (mask & ADVERTISED_10baseT_Half)
2018 all_mask |= ADVERTISE_10HALF;
2019 if (mask & ADVERTISED_10baseT_Full)
2020 all_mask |= ADVERTISE_10FULL;
2021 if (mask & ADVERTISED_100baseT_Half)
2022 all_mask |= ADVERTISE_100HALF;
2023 if (mask & ADVERTISED_100baseT_Full)
2024 all_mask |= ADVERTISE_100FULL;
2026 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2029 if ((adv_reg & all_mask) != all_mask)
2031 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2035 if (mask & ADVERTISED_1000baseT_Half)
2036 all_mask |= ADVERTISE_1000HALF;
2037 if (mask & ADVERTISED_1000baseT_Full)
2038 all_mask |= ADVERTISE_1000FULL;
2040 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2043 if ((tg3_ctrl & all_mask) != all_mask)
2049 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2053 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2056 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2057 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2059 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2060 if (curadv != reqadv)
2063 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2064 tg3_readphy(tp, MII_LPA, rmtadv);
2066 /* Reprogram the advertisement register, even if it
2067 * does not affect the current link. If the link
2068 * gets renegotiated in the future, we can save an
2069 * additional renegotiation cycle by advertising
2070 * it correctly in the first place.
2072 if (curadv != reqadv) {
2073 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2074 ADVERTISE_PAUSE_ASYM);
2075 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2082 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2084 int current_link_up;
2086 u32 lcl_adv, rmt_adv;
2094 (MAC_STATUS_SYNC_CHANGED |
2095 MAC_STATUS_CFG_CHANGED |
2096 MAC_STATUS_MI_COMPLETION |
2097 MAC_STATUS_LNKSTATE_CHANGED));
2100 tp->mi_mode = MAC_MI_MODE_BASE;
2101 tw32_f(MAC_MI_MODE, tp->mi_mode);
2104 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2106 /* Some third-party PHYs need to be reset on link going
2109 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2110 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2111 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2112 netif_carrier_ok(tp->dev)) {
2113 tg3_readphy(tp, MII_BMSR, &bmsr);
2114 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2115 !(bmsr & BMSR_LSTATUS))
2121 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2122 tg3_readphy(tp, MII_BMSR, &bmsr);
2123 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2124 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2127 if (!(bmsr & BMSR_LSTATUS)) {
2128 err = tg3_init_5401phy_dsp(tp);
2132 tg3_readphy(tp, MII_BMSR, &bmsr);
2133 for (i = 0; i < 1000; i++) {
2135 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2136 (bmsr & BMSR_LSTATUS)) {
2142 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2143 !(bmsr & BMSR_LSTATUS) &&
2144 tp->link_config.active_speed == SPEED_1000) {
2145 err = tg3_phy_reset(tp);
2147 err = tg3_init_5401phy_dsp(tp);
2152 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2153 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2154 /* 5701 {A0,B0} CRC bug workaround */
2155 tg3_writephy(tp, 0x15, 0x0a75);
2156 tg3_writephy(tp, 0x1c, 0x8c68);
2157 tg3_writephy(tp, 0x1c, 0x8d68);
2158 tg3_writephy(tp, 0x1c, 0x8c68);
2161 /* Clear pending interrupts... */
2162 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2163 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2165 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2166 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2167 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2168 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2170 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2172 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2173 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2174 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2176 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2179 current_link_up = 0;
2180 current_speed = SPEED_INVALID;
2181 current_duplex = DUPLEX_INVALID;
2183 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2186 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2187 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2188 if (!(val & (1 << 10))) {
2190 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2196 for (i = 0; i < 100; i++) {
2197 tg3_readphy(tp, MII_BMSR, &bmsr);
2198 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2199 (bmsr & BMSR_LSTATUS))
2204 if (bmsr & BMSR_LSTATUS) {
2207 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2208 for (i = 0; i < 2000; i++) {
2210 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2215 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2220 for (i = 0; i < 200; i++) {
2221 tg3_readphy(tp, MII_BMCR, &bmcr);
2222 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2224 if (bmcr && bmcr != 0x7fff)
2232 tp->link_config.active_speed = current_speed;
2233 tp->link_config.active_duplex = current_duplex;
2235 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2236 if ((bmcr & BMCR_ANENABLE) &&
2237 tg3_copper_is_advertising_all(tp,
2238 tp->link_config.advertising)) {
2239 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2241 current_link_up = 1;
2244 if (!(bmcr & BMCR_ANENABLE) &&
2245 tp->link_config.speed == current_speed &&
2246 tp->link_config.duplex == current_duplex &&
2247 tp->link_config.flowctrl ==
2248 tp->link_config.active_flowctrl) {
2249 current_link_up = 1;
2253 if (current_link_up == 1 &&
2254 tp->link_config.active_duplex == DUPLEX_FULL)
2255 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2259 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2262 tg3_phy_copper_begin(tp);
2264 tg3_readphy(tp, MII_BMSR, &tmp);
2265 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2266 (tmp & BMSR_LSTATUS))
2267 current_link_up = 1;
2270 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2271 if (current_link_up == 1) {
2272 if (tp->link_config.active_speed == SPEED_100 ||
2273 tp->link_config.active_speed == SPEED_10)
2274 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2276 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2278 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2280 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2281 if (tp->link_config.active_duplex == DUPLEX_HALF)
2282 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2284 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2285 if (current_link_up == 1 &&
2286 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2287 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2289 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2292 /* ??? Without this setting Netgear GA302T PHY does not
2293 * ??? send/receive packets...
2295 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2296 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2297 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2298 tw32_f(MAC_MI_MODE, tp->mi_mode);
2302 tw32_f(MAC_MODE, tp->mac_mode);
2305 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2306 /* Polled via timer. */
2307 tw32_f(MAC_EVENT, 0);
2309 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2313 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2314 current_link_up == 1 &&
2315 tp->link_config.active_speed == SPEED_1000 &&
2316 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2317 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2320 (MAC_STATUS_SYNC_CHANGED |
2321 MAC_STATUS_CFG_CHANGED));
2324 NIC_SRAM_FIRMWARE_MBOX,
2325 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2328 if (current_link_up != netif_carrier_ok(tp->dev)) {
2329 if (current_link_up)
2330 netif_carrier_on(tp->dev);
2332 netif_carrier_off(tp->dev);
2333 tg3_link_report(tp);
2339 struct tg3_fiber_aneginfo {
2341 #define ANEG_STATE_UNKNOWN 0
2342 #define ANEG_STATE_AN_ENABLE 1
2343 #define ANEG_STATE_RESTART_INIT 2
2344 #define ANEG_STATE_RESTART 3
2345 #define ANEG_STATE_DISABLE_LINK_OK 4
2346 #define ANEG_STATE_ABILITY_DETECT_INIT 5
2347 #define ANEG_STATE_ABILITY_DETECT 6
2348 #define ANEG_STATE_ACK_DETECT_INIT 7
2349 #define ANEG_STATE_ACK_DETECT 8
2350 #define ANEG_STATE_COMPLETE_ACK_INIT 9
2351 #define ANEG_STATE_COMPLETE_ACK 10
2352 #define ANEG_STATE_IDLE_DETECT_INIT 11
2353 #define ANEG_STATE_IDLE_DETECT 12
2354 #define ANEG_STATE_LINK_OK 13
2355 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2356 #define ANEG_STATE_NEXT_PAGE_WAIT 15
2359 #define MR_AN_ENABLE 0x00000001
2360 #define MR_RESTART_AN 0x00000002
2361 #define MR_AN_COMPLETE 0x00000004
2362 #define MR_PAGE_RX 0x00000008
2363 #define MR_NP_LOADED 0x00000010
2364 #define MR_TOGGLE_TX 0x00000020
2365 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
2366 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
2367 #define MR_LP_ADV_SYM_PAUSE 0x00000100
2368 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
2369 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2370 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2371 #define MR_LP_ADV_NEXT_PAGE 0x00001000
2372 #define MR_TOGGLE_RX 0x00002000
2373 #define MR_NP_RX 0x00004000
2375 #define MR_LINK_OK 0x80000000
2377 unsigned long link_time, cur_time;
2379 u32 ability_match_cfg;
2380 int ability_match_count;
2382 char ability_match, idle_match, ack_match;
2384 u32 txconfig, rxconfig;
2385 #define ANEG_CFG_NP 0x00000080
2386 #define ANEG_CFG_ACK 0x00000040
2387 #define ANEG_CFG_RF2 0x00000020
2388 #define ANEG_CFG_RF1 0x00000010
2389 #define ANEG_CFG_PS2 0x00000001
2390 #define ANEG_CFG_PS1 0x00008000
2391 #define ANEG_CFG_HD 0x00004000
2392 #define ANEG_CFG_FD 0x00002000
2393 #define ANEG_CFG_INVAL 0x00001f06
2398 #define ANEG_TIMER_ENAB 2
2399 #define ANEG_FAILED -1
2401 #define ANEG_STATE_SETTLE_TIME 10000
2403 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2404 struct tg3_fiber_aneginfo *ap)
2407 unsigned long delta;
2411 if (ap->state == ANEG_STATE_UNKNOWN) {
2415 ap->ability_match_cfg = 0;
2416 ap->ability_match_count = 0;
2417 ap->ability_match = 0;
2423 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2424 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2426 if (rx_cfg_reg != ap->ability_match_cfg) {
2427 ap->ability_match_cfg = rx_cfg_reg;
2428 ap->ability_match = 0;
2429 ap->ability_match_count = 0;
2431 if (++ap->ability_match_count > 1) {
2432 ap->ability_match = 1;
2433 ap->ability_match_cfg = rx_cfg_reg;
2436 if (rx_cfg_reg & ANEG_CFG_ACK)
2444 ap->ability_match_cfg = 0;
2445 ap->ability_match_count = 0;
2446 ap->ability_match = 0;
2452 ap->rxconfig = rx_cfg_reg;
2456 case ANEG_STATE_UNKNOWN:
2457 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2458 ap->state = ANEG_STATE_AN_ENABLE;
2461 case ANEG_STATE_AN_ENABLE:
2462 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2463 if (ap->flags & MR_AN_ENABLE) {
2466 ap->ability_match_cfg = 0;
2467 ap->ability_match_count = 0;
2468 ap->ability_match = 0;
2472 ap->state = ANEG_STATE_RESTART_INIT;
2474 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2478 case ANEG_STATE_RESTART_INIT:
2479 ap->link_time = ap->cur_time;
2480 ap->flags &= ~(MR_NP_LOADED);
2482 tw32(MAC_TX_AUTO_NEG, 0);
2483 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2484 tw32_f(MAC_MODE, tp->mac_mode);
2487 ret = ANEG_TIMER_ENAB;
2488 ap->state = ANEG_STATE_RESTART;
2491 case ANEG_STATE_RESTART:
2492 delta = ap->cur_time - ap->link_time;
2493 if (delta > ANEG_STATE_SETTLE_TIME) {
2494 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2496 ret = ANEG_TIMER_ENAB;
2500 case ANEG_STATE_DISABLE_LINK_OK:
2504 case ANEG_STATE_ABILITY_DETECT_INIT:
2505 ap->flags &= ~(MR_TOGGLE_TX);
2506 ap->txconfig = ANEG_CFG_FD;
2507 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2508 if (flowctrl & ADVERTISE_1000XPAUSE)
2509 ap->txconfig |= ANEG_CFG_PS1;
2510 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2511 ap->txconfig |= ANEG_CFG_PS2;
2512 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2513 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2514 tw32_f(MAC_MODE, tp->mac_mode);
2517 ap->state = ANEG_STATE_ABILITY_DETECT;
2520 case ANEG_STATE_ABILITY_DETECT:
2521 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2522 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2526 case ANEG_STATE_ACK_DETECT_INIT:
2527 ap->txconfig |= ANEG_CFG_ACK;
2528 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2529 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2530 tw32_f(MAC_MODE, tp->mac_mode);
2533 ap->state = ANEG_STATE_ACK_DETECT;
2536 case ANEG_STATE_ACK_DETECT:
2537 if (ap->ack_match != 0) {
2538 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2539 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2540 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2542 ap->state = ANEG_STATE_AN_ENABLE;
2544 } else if (ap->ability_match != 0 &&
2545 ap->rxconfig == 0) {
2546 ap->state = ANEG_STATE_AN_ENABLE;
2550 case ANEG_STATE_COMPLETE_ACK_INIT:
2551 if (ap->rxconfig & ANEG_CFG_INVAL) {
2555 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2556 MR_LP_ADV_HALF_DUPLEX |
2557 MR_LP_ADV_SYM_PAUSE |
2558 MR_LP_ADV_ASYM_PAUSE |
2559 MR_LP_ADV_REMOTE_FAULT1 |
2560 MR_LP_ADV_REMOTE_FAULT2 |
2561 MR_LP_ADV_NEXT_PAGE |
2564 if (ap->rxconfig & ANEG_CFG_FD)
2565 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2566 if (ap->rxconfig & ANEG_CFG_HD)
2567 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2568 if (ap->rxconfig & ANEG_CFG_PS1)
2569 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2570 if (ap->rxconfig & ANEG_CFG_PS2)
2571 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2572 if (ap->rxconfig & ANEG_CFG_RF1)
2573 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2574 if (ap->rxconfig & ANEG_CFG_RF2)
2575 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2576 if (ap->rxconfig & ANEG_CFG_NP)
2577 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2579 ap->link_time = ap->cur_time;
2581 ap->flags ^= (MR_TOGGLE_TX);
2582 if (ap->rxconfig & 0x0008)
2583 ap->flags |= MR_TOGGLE_RX;
2584 if (ap->rxconfig & ANEG_CFG_NP)
2585 ap->flags |= MR_NP_RX;
2586 ap->flags |= MR_PAGE_RX;
2588 ap->state = ANEG_STATE_COMPLETE_ACK;
2589 ret = ANEG_TIMER_ENAB;
2592 case ANEG_STATE_COMPLETE_ACK:
2593 if (ap->ability_match != 0 &&
2594 ap->rxconfig == 0) {
2595 ap->state = ANEG_STATE_AN_ENABLE;
2598 delta = ap->cur_time - ap->link_time;
2599 if (delta > ANEG_STATE_SETTLE_TIME) {
2600 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2601 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2603 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2604 !(ap->flags & MR_NP_RX)) {
2605 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2613 case ANEG_STATE_IDLE_DETECT_INIT:
2614 ap->link_time = ap->cur_time;
2615 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2616 tw32_f(MAC_MODE, tp->mac_mode);
2619 ap->state = ANEG_STATE_IDLE_DETECT;
2620 ret = ANEG_TIMER_ENAB;
2623 case ANEG_STATE_IDLE_DETECT:
2624 if (ap->ability_match != 0 &&
2625 ap->rxconfig == 0) {
2626 ap->state = ANEG_STATE_AN_ENABLE;
2629 delta = ap->cur_time - ap->link_time;
2630 if (delta > ANEG_STATE_SETTLE_TIME) {
2631 /* XXX another gem from the Broadcom driver :( */
2632 ap->state = ANEG_STATE_LINK_OK;
2636 case ANEG_STATE_LINK_OK:
2637 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2641 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2642 /* ??? unimplemented */
2645 case ANEG_STATE_NEXT_PAGE_WAIT:
2646 /* ??? unimplemented */
2657 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
2660 struct tg3_fiber_aneginfo aninfo;
2661 int status = ANEG_FAILED;
2665 tw32_f(MAC_TX_AUTO_NEG, 0);
2667 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2668 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2671 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2674 memset(&aninfo, 0, sizeof(aninfo));
2675 aninfo.flags |= MR_AN_ENABLE;
2676 aninfo.state = ANEG_STATE_UNKNOWN;
2677 aninfo.cur_time = 0;
2679 while (++tick < 195000) {
2680 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2681 if (status == ANEG_DONE || status == ANEG_FAILED)
2687 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2688 tw32_f(MAC_MODE, tp->mac_mode);
2691 *txflags = aninfo.txconfig;
2692 *rxflags = aninfo.flags;
2694 if (status == ANEG_DONE &&
2695 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2696 MR_LP_ADV_FULL_DUPLEX)))
2702 static void tg3_init_bcm8002(struct tg3 *tp)
2704 u32 mac_status = tr32(MAC_STATUS);
2707 /* Reset when initting first time or we have a link. */
2708 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2709 !(mac_status & MAC_STATUS_PCS_SYNCED))
2712 /* Set PLL lock range. */
2713 tg3_writephy(tp, 0x16, 0x8007);
2716 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2718 /* Wait for reset to complete. */
2719 /* XXX schedule_timeout() ... */
2720 for (i = 0; i < 500; i++)
2723 /* Config mode; select PMA/Ch 1 regs. */
2724 tg3_writephy(tp, 0x10, 0x8411);
2726 /* Enable auto-lock and comdet, select txclk for tx. */
2727 tg3_writephy(tp, 0x11, 0x0a10);
2729 tg3_writephy(tp, 0x18, 0x00a0);
2730 tg3_writephy(tp, 0x16, 0x41ff);
2732 /* Assert and deassert POR. */
2733 tg3_writephy(tp, 0x13, 0x0400);
2735 tg3_writephy(tp, 0x13, 0x0000);
2737 tg3_writephy(tp, 0x11, 0x0a50);
2739 tg3_writephy(tp, 0x11, 0x0a10);
2741 /* Wait for signal to stabilize */
2742 /* XXX schedule_timeout() ... */
2743 for (i = 0; i < 15000; i++)
2746 /* Deselect the channel register so we can read the PHYID
2749 tg3_writephy(tp, 0x10, 0x8011);
2752 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2755 u32 sg_dig_ctrl, sg_dig_status;
2756 u32 serdes_cfg, expected_sg_dig_ctrl;
2757 int workaround, port_a;
2758 int current_link_up;
2761 expected_sg_dig_ctrl = 0;
2764 current_link_up = 0;
2766 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2767 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2769 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2772 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2773 /* preserve bits 20-23 for voltage regulator */
2774 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2777 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2779 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2780 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
2782 u32 val = serdes_cfg;
2788 tw32_f(MAC_SERDES_CFG, val);
2791 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
2793 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2794 tg3_setup_flow_control(tp, 0, 0);
2795 current_link_up = 1;
2800 /* Want auto-negotiation. */
2801 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
2803 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2804 if (flowctrl & ADVERTISE_1000XPAUSE)
2805 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
2806 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2807 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
2809 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2810 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2811 tp->serdes_counter &&
2812 ((mac_status & (MAC_STATUS_PCS_SYNCED |
2813 MAC_STATUS_RCVD_CFG)) ==
2814 MAC_STATUS_PCS_SYNCED)) {
2815 tp->serdes_counter--;
2816 current_link_up = 1;
2821 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2822 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
2824 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2826 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2827 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2828 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2829 MAC_STATUS_SIGNAL_DET)) {
2830 sg_dig_status = tr32(SG_DIG_STATUS);
2831 mac_status = tr32(MAC_STATUS);
2833 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
2834 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2835 u32 local_adv = 0, remote_adv = 0;
2837 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
2838 local_adv |= ADVERTISE_1000XPAUSE;
2839 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
2840 local_adv |= ADVERTISE_1000XPSE_ASYM;
2842 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
2843 remote_adv |= LPA_1000XPAUSE;
2844 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
2845 remote_adv |= LPA_1000XPAUSE_ASYM;
2847 tg3_setup_flow_control(tp, local_adv, remote_adv);
2848 current_link_up = 1;
2849 tp->serdes_counter = 0;
2850 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2851 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
2852 if (tp->serdes_counter)
2853 tp->serdes_counter--;
2856 u32 val = serdes_cfg;
2863 tw32_f(MAC_SERDES_CFG, val);
2866 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
2869 /* Link parallel detection - link is up */
2870 /* only if we have PCS_SYNC and not */
2871 /* receiving config code words */
2872 mac_status = tr32(MAC_STATUS);
2873 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2874 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2875 tg3_setup_flow_control(tp, 0, 0);
2876 current_link_up = 1;
2878 TG3_FLG2_PARALLEL_DETECT;
2879 tp->serdes_counter =
2880 SERDES_PARALLEL_DET_TIMEOUT;
2882 goto restart_autoneg;
2886 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2887 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2891 return current_link_up;
2894 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2896 int current_link_up = 0;
2898 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2901 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2902 u32 txflags, rxflags;
2905 if (fiber_autoneg(tp, &txflags, &rxflags)) {
2906 u32 local_adv = 0, remote_adv = 0;
2908 if (txflags & ANEG_CFG_PS1)
2909 local_adv |= ADVERTISE_1000XPAUSE;
2910 if (txflags & ANEG_CFG_PS2)
2911 local_adv |= ADVERTISE_1000XPSE_ASYM;
2913 if (rxflags & MR_LP_ADV_SYM_PAUSE)
2914 remote_adv |= LPA_1000XPAUSE;
2915 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
2916 remote_adv |= LPA_1000XPAUSE_ASYM;
2918 tg3_setup_flow_control(tp, local_adv, remote_adv);
2920 current_link_up = 1;
2922 for (i = 0; i < 30; i++) {
2925 (MAC_STATUS_SYNC_CHANGED |
2926 MAC_STATUS_CFG_CHANGED));
2928 if ((tr32(MAC_STATUS) &
2929 (MAC_STATUS_SYNC_CHANGED |
2930 MAC_STATUS_CFG_CHANGED)) == 0)
2934 mac_status = tr32(MAC_STATUS);
2935 if (current_link_up == 0 &&
2936 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2937 !(mac_status & MAC_STATUS_RCVD_CFG))
2938 current_link_up = 1;
2940 tg3_setup_flow_control(tp, 0, 0);
2942 /* Forcing 1000FD link up. */
2943 current_link_up = 1;
2945 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2948 tw32_f(MAC_MODE, tp->mac_mode);
2953 return current_link_up;
2956 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2959 u16 orig_active_speed;
2960 u8 orig_active_duplex;
2962 int current_link_up;
2965 orig_pause_cfg = tp->link_config.active_flowctrl;
2966 orig_active_speed = tp->link_config.active_speed;
2967 orig_active_duplex = tp->link_config.active_duplex;
2969 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2970 netif_carrier_ok(tp->dev) &&
2971 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2972 mac_status = tr32(MAC_STATUS);
2973 mac_status &= (MAC_STATUS_PCS_SYNCED |
2974 MAC_STATUS_SIGNAL_DET |
2975 MAC_STATUS_CFG_CHANGED |
2976 MAC_STATUS_RCVD_CFG);
2977 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2978 MAC_STATUS_SIGNAL_DET)) {
2979 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2980 MAC_STATUS_CFG_CHANGED));
2985 tw32_f(MAC_TX_AUTO_NEG, 0);
2987 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2988 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2989 tw32_f(MAC_MODE, tp->mac_mode);
2992 if (tp->phy_id == PHY_ID_BCM8002)
2993 tg3_init_bcm8002(tp);
2995 /* Enable link change event even when serdes polling. */
2996 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2999 current_link_up = 0;
3000 mac_status = tr32(MAC_STATUS);
3002 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3003 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3005 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3007 tp->hw_status->status =
3008 (SD_STATUS_UPDATED |
3009 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3011 for (i = 0; i < 100; i++) {
3012 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3013 MAC_STATUS_CFG_CHANGED));
3015 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3016 MAC_STATUS_CFG_CHANGED |
3017 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3021 mac_status = tr32(MAC_STATUS);
3022 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3023 current_link_up = 0;
3024 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3025 tp->serdes_counter == 0) {
3026 tw32_f(MAC_MODE, (tp->mac_mode |
3027 MAC_MODE_SEND_CONFIGS));
3029 tw32_f(MAC_MODE, tp->mac_mode);
3033 if (current_link_up == 1) {
3034 tp->link_config.active_speed = SPEED_1000;
3035 tp->link_config.active_duplex = DUPLEX_FULL;
3036 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3037 LED_CTRL_LNKLED_OVERRIDE |
3038 LED_CTRL_1000MBPS_ON));
3040 tp->link_config.active_speed = SPEED_INVALID;
3041 tp->link_config.active_duplex = DUPLEX_INVALID;
3042 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3043 LED_CTRL_LNKLED_OVERRIDE |
3044 LED_CTRL_TRAFFIC_OVERRIDE));
3047 if (current_link_up != netif_carrier_ok(tp->dev)) {
3048 if (current_link_up)
3049 netif_carrier_on(tp->dev);
3051 netif_carrier_off(tp->dev);
3052 tg3_link_report(tp);
3054 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3055 if (orig_pause_cfg != now_pause_cfg ||
3056 orig_active_speed != tp->link_config.active_speed ||
3057 orig_active_duplex != tp->link_config.active_duplex)
3058 tg3_link_report(tp);
3064 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3066 int current_link_up, err = 0;
3070 u32 local_adv, remote_adv;
3072 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3073 tw32_f(MAC_MODE, tp->mac_mode);
3079 (MAC_STATUS_SYNC_CHANGED |
3080 MAC_STATUS_CFG_CHANGED |
3081 MAC_STATUS_MI_COMPLETION |
3082 MAC_STATUS_LNKSTATE_CHANGED));
3088 current_link_up = 0;
3089 current_speed = SPEED_INVALID;
3090 current_duplex = DUPLEX_INVALID;
3092 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3093 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3094 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3095 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3096 bmsr |= BMSR_LSTATUS;
3098 bmsr &= ~BMSR_LSTATUS;
3101 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3103 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3104 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3105 tp->link_config.flowctrl == tp->link_config.active_flowctrl) {
3106 /* do nothing, just check for link up at the end */
3107 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3110 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3111 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3112 ADVERTISE_1000XPAUSE |
3113 ADVERTISE_1000XPSE_ASYM |
3116 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3118 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3119 new_adv |= ADVERTISE_1000XHALF;
3120 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3121 new_adv |= ADVERTISE_1000XFULL;
3123 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3124 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3125 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3126 tg3_writephy(tp, MII_BMCR, bmcr);
3128 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3129 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3130 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3137 bmcr &= ~BMCR_SPEED1000;
3138 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3140 if (tp->link_config.duplex == DUPLEX_FULL)
3141 new_bmcr |= BMCR_FULLDPLX;
3143 if (new_bmcr != bmcr) {
3144 /* BMCR_SPEED1000 is a reserved bit that needs
3145 * to be set on write.
3147 new_bmcr |= BMCR_SPEED1000;
3149 /* Force a linkdown */
3150 if (netif_carrier_ok(tp->dev)) {
3153 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3154 adv &= ~(ADVERTISE_1000XFULL |
3155 ADVERTISE_1000XHALF |
3157 tg3_writephy(tp, MII_ADVERTISE, adv);
3158 tg3_writephy(tp, MII_BMCR, bmcr |
3162 netif_carrier_off(tp->dev);
3164 tg3_writephy(tp, MII_BMCR, new_bmcr);
3166 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3167 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3168 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3170 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3171 bmsr |= BMSR_LSTATUS;
3173 bmsr &= ~BMSR_LSTATUS;
3175 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3179 if (bmsr & BMSR_LSTATUS) {
3180 current_speed = SPEED_1000;
3181 current_link_up = 1;
3182 if (bmcr & BMCR_FULLDPLX)
3183 current_duplex = DUPLEX_FULL;
3185 current_duplex = DUPLEX_HALF;
3190 if (bmcr & BMCR_ANENABLE) {
3193 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3194 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3195 common = local_adv & remote_adv;
3196 if (common & (ADVERTISE_1000XHALF |
3197 ADVERTISE_1000XFULL)) {
3198 if (common & ADVERTISE_1000XFULL)
3199 current_duplex = DUPLEX_FULL;
3201 current_duplex = DUPLEX_HALF;
3204 current_link_up = 0;
3208 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3209 tg3_setup_flow_control(tp, local_adv, remote_adv);
3211 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3212 if (tp->link_config.active_duplex == DUPLEX_HALF)
3213 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3215 tw32_f(MAC_MODE, tp->mac_mode);
3218 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3220 tp->link_config.active_speed = current_speed;
3221 tp->link_config.active_duplex = current_duplex;
3223 if (current_link_up != netif_carrier_ok(tp->dev)) {
3224 if (current_link_up)
3225 netif_carrier_on(tp->dev);
3227 netif_carrier_off(tp->dev);
3228 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3230 tg3_link_report(tp);
3235 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3237 if (tp->serdes_counter) {
3238 /* Give autoneg time to complete. */
3239 tp->serdes_counter--;
3242 if (!netif_carrier_ok(tp->dev) &&
3243 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3246 tg3_readphy(tp, MII_BMCR, &bmcr);
3247 if (bmcr & BMCR_ANENABLE) {
3250 /* Select shadow register 0x1f */
3251 tg3_writephy(tp, 0x1c, 0x7c00);
3252 tg3_readphy(tp, 0x1c, &phy1);
3254 /* Select expansion interrupt status register */
3255 tg3_writephy(tp, 0x17, 0x0f01);
3256 tg3_readphy(tp, 0x15, &phy2);
3257 tg3_readphy(tp, 0x15, &phy2);
3259 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3260 /* We have signal detect and not receiving
3261 * config code words, link is up by parallel
3265 bmcr &= ~BMCR_ANENABLE;
3266 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3267 tg3_writephy(tp, MII_BMCR, bmcr);
3268 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3272 else if (netif_carrier_ok(tp->dev) &&
3273 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3274 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3277 /* Select expansion interrupt status register */
3278 tg3_writephy(tp, 0x17, 0x0f01);
3279 tg3_readphy(tp, 0x15, &phy2);
3283 /* Config code words received, turn on autoneg. */
3284 tg3_readphy(tp, MII_BMCR, &bmcr);
3285 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3287 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3293 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3297 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3298 err = tg3_setup_fiber_phy(tp, force_reset);
3299 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3300 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3302 err = tg3_setup_copper_phy(tp, force_reset);
3305 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3306 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3309 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3310 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3312 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3317 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3318 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3319 tw32(GRC_MISC_CFG, val);
3322 if (tp->link_config.active_speed == SPEED_1000 &&
3323 tp->link_config.active_duplex == DUPLEX_HALF)
3324 tw32(MAC_TX_LENGTHS,
3325 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3326 (6 << TX_LENGTHS_IPG_SHIFT) |
3327 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3329 tw32(MAC_TX_LENGTHS,
3330 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3331 (6 << TX_LENGTHS_IPG_SHIFT) |
3332 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3334 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3335 if (netif_carrier_ok(tp->dev)) {
3336 tw32(HOSTCC_STAT_COAL_TICKS,
3337 tp->coal.stats_block_coalesce_usecs);
3339 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3343 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3344 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3345 if (!netif_carrier_ok(tp->dev))
3346 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3349 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3350 tw32(PCIE_PWR_MGMT_THRESH, val);
3356 /* This is called whenever we suspect that the system chipset is re-
3357 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3358 * is bogus tx completions. We try to recover by setting the
3359 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3362 static void tg3_tx_recover(struct tg3 *tp)
3364 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3365 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3367 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3368 "mapped I/O cycles to the network device, attempting to "
3369 "recover. Please report the problem to the driver maintainer "
3370 "and include system chipset information.\n", tp->dev->name);
3372 spin_lock(&tp->lock);
3373 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3374 spin_unlock(&tp->lock);
3377 static inline u32 tg3_tx_avail(struct tg3 *tp)
3380 return (tp->tx_pending -
3381 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3384 /* Tigon3 never reports partial packet sends. So we do not
3385 * need special logic to handle SKBs that have not had all
3386 * of their frags sent yet, like SunGEM does.
3388 static void tg3_tx(struct tg3 *tp)
3390 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3391 u32 sw_idx = tp->tx_cons;
3393 while (sw_idx != hw_idx) {
3394 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3395 struct sk_buff *skb = ri->skb;
3398 if (unlikely(skb == NULL)) {
3403 pci_unmap_single(tp->pdev,
3404 pci_unmap_addr(ri, mapping),
3410 sw_idx = NEXT_TX(sw_idx);
3412 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3413 ri = &tp->tx_buffers[sw_idx];
3414 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3417 pci_unmap_page(tp->pdev,
3418 pci_unmap_addr(ri, mapping),
3419 skb_shinfo(skb)->frags[i].size,
3422 sw_idx = NEXT_TX(sw_idx);
3427 if (unlikely(tx_bug)) {
3433 tp->tx_cons = sw_idx;
3435 /* Need to make the tx_cons update visible to tg3_start_xmit()
3436 * before checking for netif_queue_stopped(). Without the
3437 * memory barrier, there is a small possibility that tg3_start_xmit()
3438 * will miss it and cause the queue to be stopped forever.
3442 if (unlikely(netif_queue_stopped(tp->dev) &&
3443 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3444 netif_tx_lock(tp->dev);
3445 if (netif_queue_stopped(tp->dev) &&
3446 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3447 netif_wake_queue(tp->dev);
3448 netif_tx_unlock(tp->dev);
3452 /* Returns size of skb allocated or < 0 on error.
3454 * We only need to fill in the address because the other members
3455 * of the RX descriptor are invariant, see tg3_init_rings.
3457 * Note the purposeful assymetry of cpu vs. chip accesses. For
3458 * posting buffers we only dirty the first cache line of the RX
3459 * descriptor (containing the address). Whereas for the RX status
3460 * buffers the cpu only reads the last cacheline of the RX descriptor
3461 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3463 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3464 int src_idx, u32 dest_idx_unmasked)
3466 struct tg3_rx_buffer_desc *desc;
3467 struct ring_info *map, *src_map;
3468 struct sk_buff *skb;
3470 int skb_size, dest_idx;
3473 switch (opaque_key) {
3474 case RXD_OPAQUE_RING_STD:
3475 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3476 desc = &tp->rx_std[dest_idx];
3477 map = &tp->rx_std_buffers[dest_idx];
3479 src_map = &tp->rx_std_buffers[src_idx];
3480 skb_size = tp->rx_pkt_buf_sz;
3483 case RXD_OPAQUE_RING_JUMBO:
3484 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3485 desc = &tp->rx_jumbo[dest_idx];
3486 map = &tp->rx_jumbo_buffers[dest_idx];
3488 src_map = &tp->rx_jumbo_buffers[src_idx];
3489 skb_size = RX_JUMBO_PKT_BUF_SZ;
3496 /* Do not overwrite any of the map or rp information
3497 * until we are sure we can commit to a new buffer.
3499 * Callers depend upon this behavior and assume that
3500 * we leave everything unchanged if we fail.
3502 skb = netdev_alloc_skb(tp->dev, skb_size);
3506 skb_reserve(skb, tp->rx_offset);
3508 mapping = pci_map_single(tp->pdev, skb->data,
3509 skb_size - tp->rx_offset,
3510 PCI_DMA_FROMDEVICE);
3513 pci_unmap_addr_set(map, mapping, mapping);
3515 if (src_map != NULL)
3516 src_map->skb = NULL;
3518 desc->addr_hi = ((u64)mapping >> 32);
3519 desc->addr_lo = ((u64)mapping & 0xffffffff);
3524 /* We only need to move over in the address because the other
3525 * members of the RX descriptor are invariant. See notes above
3526 * tg3_alloc_rx_skb for full details.
3528 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3529 int src_idx, u32 dest_idx_unmasked)
3531 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3532 struct ring_info *src_map, *dest_map;
3535 switch (opaque_key) {
3536 case RXD_OPAQUE_RING_STD:
3537 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3538 dest_desc = &tp->rx_std[dest_idx];
3539 dest_map = &tp->rx_std_buffers[dest_idx];
3540 src_desc = &tp->rx_std[src_idx];
3541 src_map = &tp->rx_std_buffers[src_idx];
3544 case RXD_OPAQUE_RING_JUMBO:
3545 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3546 dest_desc = &tp->rx_jumbo[dest_idx];
3547 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3548 src_desc = &tp->rx_jumbo[src_idx];
3549 src_map = &tp->rx_jumbo_buffers[src_idx];
3556 dest_map->skb = src_map->skb;
3557 pci_unmap_addr_set(dest_map, mapping,
3558 pci_unmap_addr(src_map, mapping));
3559 dest_desc->addr_hi = src_desc->addr_hi;
3560 dest_desc->addr_lo = src_desc->addr_lo;
3562 src_map->skb = NULL;
3565 #if TG3_VLAN_TAG_USED
3566 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3568 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3572 /* The RX ring scheme is composed of multiple rings which post fresh
3573 * buffers to the chip, and one special ring the chip uses to report
3574 * status back to the host.
3576 * The special ring reports the status of received packets to the
3577 * host. The chip does not write into the original descriptor the
3578 * RX buffer was obtained from. The chip simply takes the original
3579 * descriptor as provided by the host, updates the status and length
3580 * field, then writes this into the next status ring entry.
3582 * Each ring the host uses to post buffers to the chip is described
3583 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3584 * it is first placed into the on-chip ram. When the packet's length
3585 * is known, it walks down the TG3_BDINFO entries to select the ring.
3586 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3587 * which is within the range of the new packet's length is chosen.
3589 * The "separate ring for rx status" scheme may sound queer, but it makes
3590 * sense from a cache coherency perspective. If only the host writes
3591 * to the buffer post rings, and only the chip writes to the rx status
3592 * rings, then cache lines never move beyond shared-modified state.
3593 * If both the host and chip were to write into the same ring, cache line
3594 * eviction could occur since both entities want it in an exclusive state.
3596 static int tg3_rx(struct tg3 *tp, int budget)
3598 u32 work_mask, rx_std_posted = 0;
3599 u32 sw_idx = tp->rx_rcb_ptr;
3603 hw_idx = tp->hw_status->idx[0].rx_producer;
3605 * We need to order the read of hw_idx and the read of
3606 * the opaque cookie.
3611 while (sw_idx != hw_idx && budget > 0) {
3612 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3614 struct sk_buff *skb;
3615 dma_addr_t dma_addr;
3616 u32 opaque_key, desc_idx, *post_ptr;
3618 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3619 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3620 if (opaque_key == RXD_OPAQUE_RING_STD) {
3621 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3623 skb = tp->rx_std_buffers[desc_idx].skb;
3624 post_ptr = &tp->rx_std_ptr;
3626 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3627 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3629 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3630 post_ptr = &tp->rx_jumbo_ptr;
3633 goto next_pkt_nopost;
3636 work_mask |= opaque_key;
3638 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3639 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3641 tg3_recycle_rx(tp, opaque_key,
3642 desc_idx, *post_ptr);
3644 /* Other statistics kept track of by card. */
3645 tp->net_stats.rx_dropped++;
3649 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3651 if (len > RX_COPY_THRESHOLD
3652 && tp->rx_offset == 2
3653 /* rx_offset != 2 iff this is a 5701 card running
3654 * in PCI-X mode [see tg3_get_invariants()] */
3658 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3659 desc_idx, *post_ptr);
3663 pci_unmap_single(tp->pdev, dma_addr,
3664 skb_size - tp->rx_offset,
3665 PCI_DMA_FROMDEVICE);
3669 struct sk_buff *copy_skb;
3671 tg3_recycle_rx(tp, opaque_key,
3672 desc_idx, *post_ptr);
3674 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3675 if (copy_skb == NULL)
3676 goto drop_it_no_recycle;
3678 skb_reserve(copy_skb, 2);
3679 skb_put(copy_skb, len);
3680 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3681 skb_copy_from_linear_data(skb, copy_skb->data, len);
3682 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3684 /* We'll reuse the original ring buffer. */
3688 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3689 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3690 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3691 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3692 skb->ip_summed = CHECKSUM_UNNECESSARY;
3694 skb->ip_summed = CHECKSUM_NONE;
3696 skb->protocol = eth_type_trans(skb, tp->dev);
3697 #if TG3_VLAN_TAG_USED
3698 if (tp->vlgrp != NULL &&
3699 desc->type_flags & RXD_FLAG_VLAN) {
3700 tg3_vlan_rx(tp, skb,
3701 desc->err_vlan & RXD_VLAN_MASK);
3704 netif_receive_skb(skb);
3706 tp->dev->last_rx = jiffies;
3713 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3714 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3716 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3717 TG3_64BIT_REG_LOW, idx);
3718 work_mask &= ~RXD_OPAQUE_RING_STD;
3723 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
3725 /* Refresh hw_idx to see if there is new work */
3726 if (sw_idx == hw_idx) {
3727 hw_idx = tp->hw_status->idx[0].rx_producer;
3732 /* ACK the status ring. */
3733 tp->rx_rcb_ptr = sw_idx;
3734 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3736 /* Refill RX ring(s). */
3737 if (work_mask & RXD_OPAQUE_RING_STD) {
3738 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3739 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3742 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3743 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3744 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3752 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
3754 struct tg3_hw_status *sblk = tp->hw_status;
3756 /* handle link change and other phy events */
3757 if (!(tp->tg3_flags &
3758 (TG3_FLAG_USE_LINKCHG_REG |
3759 TG3_FLAG_POLL_SERDES))) {
3760 if (sblk->status & SD_STATUS_LINK_CHG) {
3761 sblk->status = SD_STATUS_UPDATED |
3762 (sblk->status & ~SD_STATUS_LINK_CHG);
3763 spin_lock(&tp->lock);
3764 tg3_setup_phy(tp, 0);
3765 spin_unlock(&tp->lock);
3769 /* run TX completion thread */
3770 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3772 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3776 /* run RX thread, within the bounds set by NAPI.
3777 * All RX "locking" is done by ensuring outside
3778 * code synchronizes with tg3->napi.poll()
3780 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
3781 work_done += tg3_rx(tp, budget - work_done);
3786 static int tg3_poll(struct napi_struct *napi, int budget)
3788 struct tg3 *tp = container_of(napi, struct tg3, napi);
3790 struct tg3_hw_status *sblk = tp->hw_status;
3793 work_done = tg3_poll_work(tp, work_done, budget);
3795 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3798 if (unlikely(work_done >= budget))
3801 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3802 /* tp->last_tag is used in tg3_restart_ints() below
3803 * to tell the hw how much work has been processed,
3804 * so we must read it before checking for more work.
3806 tp->last_tag = sblk->status_tag;
3809 sblk->status &= ~SD_STATUS_UPDATED;
3811 if (likely(!tg3_has_work(tp))) {
3812 netif_rx_complete(tp->dev, napi);
3813 tg3_restart_ints(tp);
3821 /* work_done is guaranteed to be less than budget. */
3822 netif_rx_complete(tp->dev, napi);
3823 schedule_work(&tp->reset_task);
3827 static void tg3_irq_quiesce(struct tg3 *tp)
3829 BUG_ON(tp->irq_sync);
3834 synchronize_irq(tp->pdev->irq);
3837 static inline int tg3_irq_sync(struct tg3 *tp)
3839 return tp->irq_sync;
3842 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3843 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3844 * with as well. Most of the time, this is not necessary except when
3845 * shutting down the device.
3847 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3849 spin_lock_bh(&tp->lock);
3851 tg3_irq_quiesce(tp);
3854 static inline void tg3_full_unlock(struct tg3 *tp)
3856 spin_unlock_bh(&tp->lock);
3859 /* One-shot MSI handler - Chip automatically disables interrupt
3860 * after sending MSI so driver doesn't have to do it.
3862 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3864 struct net_device *dev = dev_id;
3865 struct tg3 *tp = netdev_priv(dev);
3867 prefetch(tp->hw_status);
3868 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3870 if (likely(!tg3_irq_sync(tp)))
3871 netif_rx_schedule(dev, &tp->napi);
3876 /* MSI ISR - No need to check for interrupt sharing and no need to
3877 * flush status block and interrupt mailbox. PCI ordering rules
3878 * guarantee that MSI will arrive after the status block.
3880 static irqreturn_t tg3_msi(int irq, void *dev_id)
3882 struct net_device *dev = dev_id;
3883 struct tg3 *tp = netdev_priv(dev);
3885 prefetch(tp->hw_status);
3886 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3888 * Writing any value to intr-mbox-0 clears PCI INTA# and
3889 * chip-internal interrupt pending events.
3890 * Writing non-zero to intr-mbox-0 additional tells the
3891 * NIC to stop sending us irqs, engaging "in-intr-handler"
3894 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3895 if (likely(!tg3_irq_sync(tp)))
3896 netif_rx_schedule(dev, &tp->napi);
3898 return IRQ_RETVAL(1);
3901 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
3903 struct net_device *dev = dev_id;
3904 struct tg3 *tp = netdev_priv(dev);
3905 struct tg3_hw_status *sblk = tp->hw_status;
3906 unsigned int handled = 1;
3908 /* In INTx mode, it is possible for the interrupt to arrive at
3909 * the CPU before the status block posted prior to the interrupt.
3910 * Reading the PCI State register will confirm whether the
3911 * interrupt is ours and will flush the status block.
3913 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
3914 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3915 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3922 * Writing any value to intr-mbox-0 clears PCI INTA# and
3923 * chip-internal interrupt pending events.
3924 * Writing non-zero to intr-mbox-0 additional tells the
3925 * NIC to stop sending us irqs, engaging "in-intr-handler"
3928 * Flush the mailbox to de-assert the IRQ immediately to prevent
3929 * spurious interrupts. The flush impacts performance but
3930 * excessive spurious interrupts can be worse in some cases.
3932 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3933 if (tg3_irq_sync(tp))
3935 sblk->status &= ~SD_STATUS_UPDATED;
3936 if (likely(tg3_has_work(tp))) {
3937 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3938 netif_rx_schedule(dev, &tp->napi);
3940 /* No work, shared interrupt perhaps? re-enable
3941 * interrupts, and flush that PCI write
3943 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3947 return IRQ_RETVAL(handled);
3950 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
3952 struct net_device *dev = dev_id;
3953 struct tg3 *tp = netdev_priv(dev);
3954 struct tg3_hw_status *sblk = tp->hw_status;
3955 unsigned int handled = 1;
3957 /* In INTx mode, it is possible for the interrupt to arrive at
3958 * the CPU before the status block posted prior to the interrupt.
3959 * Reading the PCI State register will confirm whether the
3960 * interrupt is ours and will flush the status block.
3962 if (unlikely(sblk->status_tag == tp->last_tag)) {
3963 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3964 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3971 * writing any value to intr-mbox-0 clears PCI INTA# and
3972 * chip-internal interrupt pending events.
3973 * writing non-zero to intr-mbox-0 additional tells the
3974 * NIC to stop sending us irqs, engaging "in-intr-handler"
3977 * Flush the mailbox to de-assert the IRQ immediately to prevent
3978 * spurious interrupts. The flush impacts performance but
3979 * excessive spurious interrupts can be worse in some cases.
3981 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3982 if (tg3_irq_sync(tp))
3984 if (netif_rx_schedule_prep(dev, &tp->napi)) {
3985 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3986 /* Update last_tag to mark that this status has been
3987 * seen. Because interrupt may be shared, we may be
3988 * racing with tg3_poll(), so only update last_tag
3989 * if tg3_poll() is not scheduled.
3991 tp->last_tag = sblk->status_tag;
3992 __netif_rx_schedule(dev, &tp->napi);
3995 return IRQ_RETVAL(handled);
3998 /* ISR for interrupt test */
3999 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4001 struct net_device *dev = dev_id;
4002 struct tg3 *tp = netdev_priv(dev);
4003 struct tg3_hw_status *sblk = tp->hw_status;
4005 if ((sblk->status & SD_STATUS_UPDATED) ||
4006 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4007 tg3_disable_ints(tp);
4008 return IRQ_RETVAL(1);
4010 return IRQ_RETVAL(0);
4013 static int tg3_init_hw(struct tg3 *, int);
4014 static int tg3_halt(struct tg3 *, int, int);
4016 /* Restart hardware after configuration changes, self-test, etc.
4017 * Invoked with tp->lock held.
4019 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4020 __releases(tp->lock)
4021 __acquires(tp->lock)
4025 err = tg3_init_hw(tp, reset_phy);
4027 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4028 "aborting.\n", tp->dev->name);
4029 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4030 tg3_full_unlock(tp);
4031 del_timer_sync(&tp->timer);
4033 napi_enable(&tp->napi);
4035 tg3_full_lock(tp, 0);
4040 #ifdef CONFIG_NET_POLL_CONTROLLER
4041 static void tg3_poll_controller(struct net_device *dev)
4043 struct tg3 *tp = netdev_priv(dev);
4045 tg3_interrupt(tp->pdev->irq, dev);
4049 static void tg3_reset_task(struct work_struct *work)
4051 struct tg3 *tp = container_of(work, struct tg3, reset_task);
4052 unsigned int restart_timer;
4054 tg3_full_lock(tp, 0);
4056 if (!netif_running(tp->dev)) {
4057 tg3_full_unlock(tp);
4061 tg3_full_unlock(tp);
4065 tg3_full_lock(tp, 1);
4067 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4068 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4070 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4071 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4072 tp->write32_rx_mbox = tg3_write_flush_reg32;
4073 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4074 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4077 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4078 if (tg3_init_hw(tp, 1))
4081 tg3_netif_start(tp);
4084 mod_timer(&tp->timer, jiffies + 1);
4087 tg3_full_unlock(tp);
4090 static void tg3_dump_short_state(struct tg3 *tp)
4092 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4093 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4094 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4095 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4098 static void tg3_tx_timeout(struct net_device *dev)
4100 struct tg3 *tp = netdev_priv(dev);
4102 if (netif_msg_tx_err(tp)) {
4103 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4105 tg3_dump_short_state(tp);
4108 schedule_work(&tp->reset_task);
4111 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4112 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4114 u32 base = (u32) mapping & 0xffffffff;
4116 return ((base > 0xffffdcc0) &&
4117 (base + len + 8 < base));
4120 /* Test for DMA addresses > 40-bit */
4121 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4124 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4125 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4126 return (((u64) mapping + len) > DMA_40BIT_MASK);
4133 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4135 /* Workaround 4GB and 40-bit hardware DMA bugs. */
4136 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4137 u32 last_plus_one, u32 *start,
4138 u32 base_flags, u32 mss)
4140 struct sk_buff *new_skb;
4141 dma_addr_t new_addr = 0;
4145 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4146 new_skb = skb_copy(skb, GFP_ATOMIC);
4148 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4150 new_skb = skb_copy_expand(skb,
4151 skb_headroom(skb) + more_headroom,
4152 skb_tailroom(skb), GFP_ATOMIC);
4158 /* New SKB is guaranteed to be linear. */
4160 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
4162 /* Make sure new skb does not cross any 4G boundaries.
4163 * Drop the packet if it does.
4165 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
4167 dev_kfree_skb(new_skb);
4170 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4171 base_flags, 1 | (mss << 1));
4172 *start = NEXT_TX(entry);
4176 /* Now clean up the sw ring entries. */
4178 while (entry != last_plus_one) {
4182 len = skb_headlen(skb);
4184 len = skb_shinfo(skb)->frags[i-1].size;
4185 pci_unmap_single(tp->pdev,
4186 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4187 len, PCI_DMA_TODEVICE);
4189 tp->tx_buffers[entry].skb = new_skb;
4190 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4192 tp->tx_buffers[entry].skb = NULL;
4194 entry = NEXT_TX(entry);
4203 static void tg3_set_txd(struct tg3 *tp, int entry,
4204 dma_addr_t mapping, int len, u32 flags,
4207 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4208 int is_end = (mss_and_is_end & 0x1);
4209 u32 mss = (mss_and_is_end >> 1);
4213 flags |= TXD_FLAG_END;
4214 if (flags & TXD_FLAG_VLAN) {
4215 vlan_tag = flags >> 16;
4218 vlan_tag |= (mss << TXD_MSS_SHIFT);
4220 txd->addr_hi = ((u64) mapping >> 32);
4221 txd->addr_lo = ((u64) mapping & 0xffffffff);
4222 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4223 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4226 /* hard_start_xmit for devices that don't have any bugs and
4227 * support TG3_FLG2_HW_TSO_2 only.
4229 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4231 struct tg3 *tp = netdev_priv(dev);
4233 u32 len, entry, base_flags, mss;
4235 len = skb_headlen(skb);
4237 /* We are running in BH disabled context with netif_tx_lock
4238 * and TX reclaim runs via tp->napi.poll inside of a software
4239 * interrupt. Furthermore, IRQ processing runs lockless so we have
4240 * no IRQ context deadlocks to worry about either. Rejoice!
4242 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4243 if (!netif_queue_stopped(dev)) {
4244 netif_stop_queue(dev);
4246 /* This is a hard error, log it. */
4247 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4248 "queue awake!\n", dev->name);
4250 return NETDEV_TX_BUSY;
4253 entry = tp->tx_prod;
4256 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4257 int tcp_opt_len, ip_tcp_len;
4259 if (skb_header_cloned(skb) &&
4260 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4265 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4266 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4268 struct iphdr *iph = ip_hdr(skb);
4270 tcp_opt_len = tcp_optlen(skb);
4271 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4274 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4275 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4278 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4279 TXD_FLAG_CPU_POST_DMA);
4281 tcp_hdr(skb)->check = 0;
4284 else if (skb->ip_summed == CHECKSUM_PARTIAL)
4285 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4286 #if TG3_VLAN_TAG_USED
4287 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4288 base_flags |= (TXD_FLAG_VLAN |
4289 (vlan_tx_tag_get(skb) << 16));
4292 /* Queue skb data, a.k.a. the main skb fragment. */
4293 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4295 tp->tx_buffers[entry].skb = skb;
4296 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4298 tg3_set_txd(tp, entry, mapping, len, base_flags,
4299 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4301 entry = NEXT_TX(entry);
4303 /* Now loop through additional data fragments, and queue them. */
4304 if (skb_shinfo(skb)->nr_frags > 0) {
4305 unsigned int i, last;
4307 last = skb_shinfo(skb)->nr_frags - 1;
4308 for (i = 0; i <= last; i++) {
4309 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4312 mapping = pci_map_page(tp->pdev,
4315 len, PCI_DMA_TODEVICE);
4317 tp->tx_buffers[entry].skb = NULL;
4318 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4320 tg3_set_txd(tp, entry, mapping, len,
4321 base_flags, (i == last) | (mss << 1));
4323 entry = NEXT_TX(entry);
4327 /* Packets are ready, update Tx producer idx local and on card. */
4328 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4330 tp->tx_prod = entry;
4331 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4332 netif_stop_queue(dev);
4333 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4334 netif_wake_queue(tp->dev);
4340 dev->trans_start = jiffies;
4342 return NETDEV_TX_OK;
4345 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4347 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4348 * TSO header is greater than 80 bytes.
4350 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4352 struct sk_buff *segs, *nskb;
4354 /* Estimate the number of fragments in the worst case */
4355 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4356 netif_stop_queue(tp->dev);
4357 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4358 return NETDEV_TX_BUSY;
4360 netif_wake_queue(tp->dev);
4363 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4364 if (unlikely(IS_ERR(segs)))
4365 goto tg3_tso_bug_end;
4371 tg3_start_xmit_dma_bug(nskb, tp->dev);
4377 return NETDEV_TX_OK;
4380 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4381 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4383 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4385 struct tg3 *tp = netdev_priv(dev);
4387 u32 len, entry, base_flags, mss;
4388 int would_hit_hwbug;
4390 len = skb_headlen(skb);
4392 /* We are running in BH disabled context with netif_tx_lock
4393 * and TX reclaim runs via tp->napi.poll inside of a software
4394 * interrupt. Furthermore, IRQ processing runs lockless so we have
4395 * no IRQ context deadlocks to worry about either. Rejoice!
4397 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4398 if (!netif_queue_stopped(dev)) {
4399 netif_stop_queue(dev);
4401 /* This is a hard error, log it. */
4402 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4403 "queue awake!\n", dev->name);
4405 return NETDEV_TX_BUSY;
4408 entry = tp->tx_prod;
4410 if (skb->ip_summed == CHECKSUM_PARTIAL)
4411 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4413 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4415 int tcp_opt_len, ip_tcp_len, hdr_len;
4417 if (skb_header_cloned(skb) &&
4418 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4423 tcp_opt_len = tcp_optlen(skb);
4424 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4426 hdr_len = ip_tcp_len + tcp_opt_len;
4427 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4428 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4429 return (tg3_tso_bug(tp, skb));
4431 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4432 TXD_FLAG_CPU_POST_DMA);
4436 iph->tot_len = htons(mss + hdr_len);
4437 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4438 tcp_hdr(skb)->check = 0;
4439 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4441 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4446 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4447 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4448 if (tcp_opt_len || iph->ihl > 5) {
4451 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4452 mss |= (tsflags << 11);
4455 if (tcp_opt_len || iph->ihl > 5) {
4458 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4459 base_flags |= tsflags << 12;
4463 #if TG3_VLAN_TAG_USED
4464 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4465 base_flags |= (TXD_FLAG_VLAN |
4466 (vlan_tx_tag_get(skb) << 16));
4469 /* Queue skb data, a.k.a. the main skb fragment. */
4470 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4472 tp->tx_buffers[entry].skb = skb;
4473 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4475 would_hit_hwbug = 0;
4477 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4478 would_hit_hwbug = 1;
4479 else if (tg3_4g_overflow_test(mapping, len))
4480 would_hit_hwbug = 1;
4482 tg3_set_txd(tp, entry, mapping, len, base_flags,
4483 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4485 entry = NEXT_TX(entry);
4487 /* Now loop through additional data fragments, and queue them. */
4488 if (skb_shinfo(skb)->nr_frags > 0) {
4489 unsigned int i, last;
4491 last = skb_shinfo(skb)->nr_frags - 1;
4492 for (i = 0; i <= last; i++) {
4493 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4496 mapping = pci_map_page(tp->pdev,
4499 len, PCI_DMA_TODEVICE);
4501 tp->tx_buffers[entry].skb = NULL;
4502 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4504 if (tg3_4g_overflow_test(mapping, len))
4505 would_hit_hwbug = 1;
4507 if (tg3_40bit_overflow_test(tp, mapping, len))
4508 would_hit_hwbug = 1;
4510 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4511 tg3_set_txd(tp, entry, mapping, len,
4512 base_flags, (i == last)|(mss << 1));
4514 tg3_set_txd(tp, entry, mapping, len,
4515 base_flags, (i == last));
4517 entry = NEXT_TX(entry);
4521 if (would_hit_hwbug) {
4522 u32 last_plus_one = entry;
4525 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4526 start &= (TG3_TX_RING_SIZE - 1);
4528 /* If the workaround fails due to memory/mapping
4529 * failure, silently drop this packet.
4531 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4532 &start, base_flags, mss))
4538 /* Packets are ready, update Tx producer idx local and on card. */
4539 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4541 tp->tx_prod = entry;
4542 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4543 netif_stop_queue(dev);
4544 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4545 netif_wake_queue(tp->dev);
4551 dev->trans_start = jiffies;
4553 return NETDEV_TX_OK;
4556 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4561 if (new_mtu > ETH_DATA_LEN) {
4562 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4563 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4564 ethtool_op_set_tso(dev, 0);
4567 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4569 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4570 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4571 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4575 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4577 struct tg3 *tp = netdev_priv(dev);
4580 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4583 if (!netif_running(dev)) {
4584 /* We'll just catch it later when the
4587 tg3_set_mtu(dev, tp, new_mtu);
4593 tg3_full_lock(tp, 1);
4595 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4597 tg3_set_mtu(dev, tp, new_mtu);
4599 err = tg3_restart_hw(tp, 0);
4602 tg3_netif_start(tp);
4604 tg3_full_unlock(tp);
4609 /* Free up pending packets in all rx/tx rings.
4611 * The chip has been shut down and the driver detached from
4612 * the networking, so no interrupts or new tx packets will
4613 * end up in the driver. tp->{tx,}lock is not held and we are not
4614 * in an interrupt context and thus may sleep.
4616 static void tg3_free_rings(struct tg3 *tp)
4618 struct ring_info *rxp;
4621 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4622 rxp = &tp->rx_std_buffers[i];
4624 if (rxp->skb == NULL)
4626 pci_unmap_single(tp->pdev,
4627 pci_unmap_addr(rxp, mapping),
4628 tp->rx_pkt_buf_sz - tp->rx_offset,
4629 PCI_DMA_FROMDEVICE);
4630 dev_kfree_skb_any(rxp->skb);
4634 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4635 rxp = &tp->rx_jumbo_buffers[i];
4637 if (rxp->skb == NULL)
4639 pci_unmap_single(tp->pdev,
4640 pci_unmap_addr(rxp, mapping),
4641 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4642 PCI_DMA_FROMDEVICE);
4643 dev_kfree_skb_any(rxp->skb);
4647 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4648 struct tx_ring_info *txp;
4649 struct sk_buff *skb;
4652 txp = &tp->tx_buffers[i];
4660 pci_unmap_single(tp->pdev,
4661 pci_unmap_addr(txp, mapping),
4668 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4669 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4670 pci_unmap_page(tp->pdev,
4671 pci_unmap_addr(txp, mapping),
4672 skb_shinfo(skb)->frags[j].size,
4677 dev_kfree_skb_any(skb);
4681 /* Initialize tx/rx rings for packet processing.
4683 * The chip has been shut down and the driver detached from
4684 * the networking, so no interrupts or new tx packets will
4685 * end up in the driver. tp->{tx,}lock are held and thus
4688 static int tg3_init_rings(struct tg3 *tp)
4692 /* Free up all the SKBs. */
4695 /* Zero out all descriptors. */
4696 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4697 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4698 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4699 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4701 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4702 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4703 (tp->dev->mtu > ETH_DATA_LEN))
4704 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4706 /* Initialize invariants of the rings, we only set this
4707 * stuff once. This works because the card does not
4708 * write into the rx buffer posting rings.
4710 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4711 struct tg3_rx_buffer_desc *rxd;
4713 rxd = &tp->rx_std[i];
4714 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4716 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4717 rxd->opaque = (RXD_OPAQUE_RING_STD |
4718 (i << RXD_OPAQUE_INDEX_SHIFT));
4721 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4722 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4723 struct tg3_rx_buffer_desc *rxd;
4725 rxd = &tp->rx_jumbo[i];
4726 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4728 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4730 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4731 (i << RXD_OPAQUE_INDEX_SHIFT));
4735 /* Now allocate fresh SKBs for each rx ring. */
4736 for (i = 0; i < tp->rx_pending; i++) {
4737 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4738 printk(KERN_WARNING PFX
4739 "%s: Using a smaller RX standard ring, "
4740 "only %d out of %d buffers were allocated "
4742 tp->dev->name, i, tp->rx_pending);
4750 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4751 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4752 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4754 printk(KERN_WARNING PFX
4755 "%s: Using a smaller RX jumbo ring, "
4756 "only %d out of %d buffers were "
4757 "allocated successfully.\n",
4758 tp->dev->name, i, tp->rx_jumbo_pending);
4763 tp->rx_jumbo_pending = i;
4772 * Must not be invoked with interrupt sources disabled and
4773 * the hardware shutdown down.
4775 static void tg3_free_consistent(struct tg3 *tp)
4777 kfree(tp->rx_std_buffers);
4778 tp->rx_std_buffers = NULL;
4780 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4781 tp->rx_std, tp->rx_std_mapping);
4785 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4786 tp->rx_jumbo, tp->rx_jumbo_mapping);
4787 tp->rx_jumbo = NULL;
4790 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4791 tp->rx_rcb, tp->rx_rcb_mapping);
4795 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4796 tp->tx_ring, tp->tx_desc_mapping);
4799 if (tp->hw_status) {
4800 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4801 tp->hw_status, tp->status_mapping);
4802 tp->hw_status = NULL;
4805 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4806 tp->hw_stats, tp->stats_mapping);
4807 tp->hw_stats = NULL;
4812 * Must not be invoked with interrupt sources disabled and
4813 * the hardware shutdown down. Can sleep.
4815 static int tg3_alloc_consistent(struct tg3 *tp)
4817 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4819 TG3_RX_JUMBO_RING_SIZE)) +
4820 (sizeof(struct tx_ring_info) *
4823 if (!tp->rx_std_buffers)
4826 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4827 tp->tx_buffers = (struct tx_ring_info *)
4828 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4830 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4831 &tp->rx_std_mapping);
4835 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4836 &tp->rx_jumbo_mapping);
4841 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4842 &tp->rx_rcb_mapping);
4846 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4847 &tp->tx_desc_mapping);
4851 tp->hw_status = pci_alloc_consistent(tp->pdev,
4853 &tp->status_mapping);
4857 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4858 sizeof(struct tg3_hw_stats),
4859 &tp->stats_mapping);
4863 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4864 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4869 tg3_free_consistent(tp);
4873 #define MAX_WAIT_CNT 1000
4875 /* To stop a block, clear the enable bit and poll till it
4876 * clears. tp->lock is held.
4878 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4883 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4890 /* We can't enable/disable these bits of the
4891 * 5705/5750, just say success.
4904 for (i = 0; i < MAX_WAIT_CNT; i++) {
4907 if ((val & enable_bit) == 0)
4911 if (i == MAX_WAIT_CNT && !silent) {
4912 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4913 "ofs=%lx enable_bit=%x\n",
4921 /* tp->lock is held. */
4922 static int tg3_abort_hw(struct tg3 *tp, int silent)
4926 tg3_disable_ints(tp);
4928 tp->rx_mode &= ~RX_MODE_ENABLE;
4929 tw32_f(MAC_RX_MODE, tp->rx_mode);
4932 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4933 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4934 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4935 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4936 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4937 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4939 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4940 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4941 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4942 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4943 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4944 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4945 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4947 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4948 tw32_f(MAC_MODE, tp->mac_mode);
4951 tp->tx_mode &= ~TX_MODE_ENABLE;
4952 tw32_f(MAC_TX_MODE, tp->tx_mode);
4954 for (i = 0; i < MAX_WAIT_CNT; i++) {
4956 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4959 if (i >= MAX_WAIT_CNT) {
4960 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4961 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4962 tp->dev->name, tr32(MAC_TX_MODE));
4966 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4967 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4968 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4970 tw32(FTQ_RESET, 0xffffffff);
4971 tw32(FTQ_RESET, 0x00000000);
4973 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4974 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4977 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4979 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4984 /* tp->lock is held. */
4985 static int tg3_nvram_lock(struct tg3 *tp)
4987 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4990 if (tp->nvram_lock_cnt == 0) {
4991 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4992 for (i = 0; i < 8000; i++) {
4993 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4998 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5002 tp->nvram_lock_cnt++;
5007 /* tp->lock is held. */
5008 static void tg3_nvram_unlock(struct tg3 *tp)
5010 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5011 if (tp->nvram_lock_cnt > 0)
5012 tp->nvram_lock_cnt--;
5013 if (tp->nvram_lock_cnt == 0)
5014 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5018 /* tp->lock is held. */
5019 static void tg3_enable_nvram_access(struct tg3 *tp)
5021 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5022 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5023 u32 nvaccess = tr32(NVRAM_ACCESS);
5025 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5029 /* tp->lock is held. */
5030 static void tg3_disable_nvram_access(struct tg3 *tp)
5032 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5033 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5034 u32 nvaccess = tr32(NVRAM_ACCESS);
5036 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5040 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5045 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5046 if (apedata != APE_SEG_SIG_MAGIC)
5049 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5050 if (apedata != APE_FW_STATUS_READY)
5053 /* Wait for up to 1 millisecond for APE to service previous event. */
5054 for (i = 0; i < 10; i++) {
5055 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5058 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5060 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5061 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5062 event | APE_EVENT_STATUS_EVENT_PENDING);
5064 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5066 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5072 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5073 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5076 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5081 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5085 case RESET_KIND_INIT:
5086 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5087 APE_HOST_SEG_SIG_MAGIC);
5088 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5089 APE_HOST_SEG_LEN_MAGIC);
5090 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5091 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5092 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5093 APE_HOST_DRIVER_ID_MAGIC);
5094 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5095 APE_HOST_BEHAV_NO_PHYLOCK);
5097 event = APE_EVENT_STATUS_STATE_START;
5099 case RESET_KIND_SHUTDOWN:
5100 event = APE_EVENT_STATUS_STATE_UNLOAD;
5102 case RESET_KIND_SUSPEND:
5103 event = APE_EVENT_STATUS_STATE_SUSPEND;
5109 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5111 tg3_ape_send_event(tp, event);
5114 /* tp->lock is held. */
5115 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5117 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5118 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5120 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5122 case RESET_KIND_INIT:
5123 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5127 case RESET_KIND_SHUTDOWN:
5128 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5132 case RESET_KIND_SUSPEND:
5133 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5142 if (kind == RESET_KIND_INIT ||
5143 kind == RESET_KIND_SUSPEND)
5144 tg3_ape_driver_state_change(tp, kind);
5147 /* tp->lock is held. */
5148 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5150 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5152 case RESET_KIND_INIT:
5153 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5154 DRV_STATE_START_DONE);
5157 case RESET_KIND_SHUTDOWN:
5158 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5159 DRV_STATE_UNLOAD_DONE);
5167 if (kind == RESET_KIND_SHUTDOWN)
5168 tg3_ape_driver_state_change(tp, kind);
5171 /* tp->lock is held. */
5172 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5174 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5176 case RESET_KIND_INIT:
5177 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5181 case RESET_KIND_SHUTDOWN:
5182 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5186 case RESET_KIND_SUSPEND:
5187 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5197 static int tg3_poll_fw(struct tg3 *tp)
5202 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5203 /* Wait up to 20ms for init done. */
5204 for (i = 0; i < 200; i++) {
5205 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5212 /* Wait for firmware initialization to complete. */
5213 for (i = 0; i < 100000; i++) {
5214 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5215 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5220 /* Chip might not be fitted with firmware. Some Sun onboard
5221 * parts are configured like that. So don't signal the timeout
5222 * of the above loop as an error, but do report the lack of
5223 * running firmware once.
5226 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5227 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5229 printk(KERN_INFO PFX "%s: No firmware running.\n",
5236 /* Save PCI command register before chip reset */
5237 static void tg3_save_pci_state(struct tg3 *tp)
5239 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5242 /* Restore PCI state after chip reset */
5243 static void tg3_restore_pci_state(struct tg3 *tp)
5247 /* Re-enable indirect register accesses. */
5248 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5249 tp->misc_host_ctrl);
5251 /* Set MAX PCI retry to zero. */
5252 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5253 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5254 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5255 val |= PCISTATE_RETRY_SAME_DMA;
5256 /* Allow reads and writes to the APE register and memory space. */
5257 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5258 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5259 PCISTATE_ALLOW_APE_SHMEM_WR;
5260 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5262 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5264 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5265 pcie_set_readrq(tp->pdev, 4096);
5267 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5268 tp->pci_cacheline_sz);
5269 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5273 /* Make sure PCI-X relaxed ordering bit is clear. */
5277 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5279 pcix_cmd &= ~PCI_X_CMD_ERO;
5280 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5284 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5286 /* Chip reset on 5780 will reset MSI enable bit,
5287 * so need to restore it.
5289 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5292 pci_read_config_word(tp->pdev,
5293 tp->msi_cap + PCI_MSI_FLAGS,
5295 pci_write_config_word(tp->pdev,
5296 tp->msi_cap + PCI_MSI_FLAGS,
5297 ctrl | PCI_MSI_FLAGS_ENABLE);
5298 val = tr32(MSGINT_MODE);
5299 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5304 static void tg3_stop_fw(struct tg3 *);
5306 /* tp->lock is held. */
5307 static int tg3_chip_reset(struct tg3 *tp)
5310 void (*write_op)(struct tg3 *, u32, u32);
5315 /* No matching tg3_nvram_unlock() after this because
5316 * chip reset below will undo the nvram lock.
5318 tp->nvram_lock_cnt = 0;
5320 /* GRC_MISC_CFG core clock reset will clear the memory
5321 * enable bit in PCI register 4 and the MSI enable bit
5322 * on some chips, so we save relevant registers here.
5324 tg3_save_pci_state(tp);
5326 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5327 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5328 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5329 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5330 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
5331 tw32(GRC_FASTBOOT_PC, 0);
5334 * We must avoid the readl() that normally takes place.
5335 * It locks machines, causes machine checks, and other
5336 * fun things. So, temporarily disable the 5701
5337 * hardware workaround, while we do the reset.
5339 write_op = tp->write32;
5340 if (write_op == tg3_write_flush_reg32)
5341 tp->write32 = tg3_write32;
5343 /* Prevent the irq handler from reading or writing PCI registers
5344 * during chip reset when the memory enable bit in the PCI command
5345 * register may be cleared. The chip does not generate interrupt
5346 * at this time, but the irq handler may still be called due to irq
5347 * sharing or irqpoll.
5349 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5350 if (tp->hw_status) {
5351 tp->hw_status->status = 0;
5352 tp->hw_status->status_tag = 0;
5356 synchronize_irq(tp->pdev->irq);
5359 val = GRC_MISC_CFG_CORECLK_RESET;
5361 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5362 if (tr32(0x7e2c) == 0x60) {
5365 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5366 tw32(GRC_MISC_CFG, (1 << 29));
5371 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5372 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5373 tw32(GRC_VCPU_EXT_CTRL,
5374 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5377 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5378 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5379 tw32(GRC_MISC_CFG, val);
5381 /* restore 5701 hardware bug workaround write method */
5382 tp->write32 = write_op;
5384 /* Unfortunately, we have to delay before the PCI read back.
5385 * Some 575X chips even will not respond to a PCI cfg access
5386 * when the reset command is given to the chip.
5388 * How do these hardware designers expect things to work
5389 * properly if the PCI write is posted for a long period
5390 * of time? It is always necessary to have some method by
5391 * which a register read back can occur to push the write
5392 * out which does the reset.
5394 * For most tg3 variants the trick below was working.
5399 /* Flush PCI posted writes. The normal MMIO registers
5400 * are inaccessible at this time so this is the only
5401 * way to make this reliably (actually, this is no longer
5402 * the case, see above). I tried to use indirect
5403 * register read/write but this upset some 5701 variants.
5405 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5409 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5410 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5414 /* Wait for link training to complete. */
5415 for (i = 0; i < 5000; i++)
5418 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5419 pci_write_config_dword(tp->pdev, 0xc4,
5420 cfg_val | (1 << 15));
5422 /* Set PCIE max payload size and clear error status. */
5423 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5426 tg3_restore_pci_state(tp);
5428 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5431 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5432 val = tr32(MEMARB_MODE);
5433 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5435 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5437 tw32(0x5000, 0x400);
5440 tw32(GRC_MODE, tp->grc_mode);
5442 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5445 tw32(0xc4, val | (1 << 15));
5448 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5449 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5450 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5451 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5452 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5453 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5456 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5457 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5458 tw32_f(MAC_MODE, tp->mac_mode);
5459 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5460 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5461 tw32_f(MAC_MODE, tp->mac_mode);
5463 tw32_f(MAC_MODE, 0);
5466 err = tg3_poll_fw(tp);
5470 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5471 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5474 tw32(0x7c00, val | (1 << 25));
5477 /* Reprobe ASF enable state. */
5478 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5479 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5480 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5481 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5484 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5485 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5486 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5487 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5488 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5495 /* tp->lock is held. */
5496 static void tg3_stop_fw(struct tg3 *tp)
5498 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5499 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5503 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5504 val = tr32(GRC_RX_CPU_EVENT);
5506 tw32(GRC_RX_CPU_EVENT, val);
5508 /* Wait for RX cpu to ACK the event. */
5509 for (i = 0; i < 100; i++) {
5510 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5517 /* tp->lock is held. */
5518 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5524 tg3_write_sig_pre_reset(tp, kind);
5526 tg3_abort_hw(tp, silent);
5527 err = tg3_chip_reset(tp);
5529 tg3_write_sig_legacy(tp, kind);
5530 tg3_write_sig_post_reset(tp, kind);
5538 #define TG3_FW_RELEASE_MAJOR 0x0
5539 #define TG3_FW_RELASE_MINOR 0x0
5540 #define TG3_FW_RELEASE_FIX 0x0
5541 #define TG3_FW_START_ADDR 0x08000000
5542 #define TG3_FW_TEXT_ADDR 0x08000000
5543 #define TG3_FW_TEXT_LEN 0x9c0
5544 #define TG3_FW_RODATA_ADDR 0x080009c0
5545 #define TG3_FW_RODATA_LEN 0x60
5546 #define TG3_FW_DATA_ADDR 0x08000a40
5547 #define TG3_FW_DATA_LEN 0x20
5548 #define TG3_FW_SBSS_ADDR 0x08000a60
5549 #define TG3_FW_SBSS_LEN 0xc
5550 #define TG3_FW_BSS_ADDR 0x08000a70
5551 #define TG3_FW_BSS_LEN 0x10
5553 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5554 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5555 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5556 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5557 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5558 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5559 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5560 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5561 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5562 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5563 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5564 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5565 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5566 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5567 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5568 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5569 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5570 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5571 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5572 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5573 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5574 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5575 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5576 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5577 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5578 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5580 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5581 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5582 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5583 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5584 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5585 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5586 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5587 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5588 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5589 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5590 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5591 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5592 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5593 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5594 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5595 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5596 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5597 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5598 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5599 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5600 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5601 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5602 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5603 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5604 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5605 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5606 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5607 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5608 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5609 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5610 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5611 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5612 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5613 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5614 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5615 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5616 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5617 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5618 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5619 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5620 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5621 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5622 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5623 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5624 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5625 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5626 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5627 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5628 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5629 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5630 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5631 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5632 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5633 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5634 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5635 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5636 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5637 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5638 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5639 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5640 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5641 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5642 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5643 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5644 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5647 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5648 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5649 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5650 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5651 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5655 #if 0 /* All zeros, don't eat up space with it. */
5656 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5657 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5658 0x00000000, 0x00000000, 0x00000000, 0x00000000
5662 #define RX_CPU_SCRATCH_BASE 0x30000
5663 #define RX_CPU_SCRATCH_SIZE 0x04000
5664 #define TX_CPU_SCRATCH_BASE 0x34000
5665 #define TX_CPU_SCRATCH_SIZE 0x04000
5667 /* tp->lock is held. */
5668 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5672 BUG_ON(offset == TX_CPU_BASE &&
5673 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5675 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5676 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5678 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5681 if (offset == RX_CPU_BASE) {
5682 for (i = 0; i < 10000; i++) {
5683 tw32(offset + CPU_STATE, 0xffffffff);
5684 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5685 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5689 tw32(offset + CPU_STATE, 0xffffffff);
5690 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
5693 for (i = 0; i < 10000; i++) {
5694 tw32(offset + CPU_STATE, 0xffffffff);
5695 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5696 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5702 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5705 (offset == RX_CPU_BASE ? "RX" : "TX"));
5709 /* Clear firmware's nvram arbitration. */
5710 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5711 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5716 unsigned int text_base;
5717 unsigned int text_len;
5718 const u32 *text_data;
5719 unsigned int rodata_base;
5720 unsigned int rodata_len;
5721 const u32 *rodata_data;
5722 unsigned int data_base;
5723 unsigned int data_len;
5724 const u32 *data_data;
5727 /* tp->lock is held. */
5728 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5729 int cpu_scratch_size, struct fw_info *info)
5731 int err, lock_err, i;
5732 void (*write_op)(struct tg3 *, u32, u32);
5734 if (cpu_base == TX_CPU_BASE &&
5735 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5736 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5737 "TX cpu firmware on %s which is 5705.\n",
5742 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5743 write_op = tg3_write_mem;
5745 write_op = tg3_write_indirect_reg32;
5747 /* It is possible that bootcode is still loading at this point.
5748 * Get the nvram lock first before halting the cpu.
5750 lock_err = tg3_nvram_lock(tp);
5751 err = tg3_halt_cpu(tp, cpu_base);
5753 tg3_nvram_unlock(tp);
5757 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5758 write_op(tp, cpu_scratch_base + i, 0);
5759 tw32(cpu_base + CPU_STATE, 0xffffffff);
5760 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5761 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5762 write_op(tp, (cpu_scratch_base +
5763 (info->text_base & 0xffff) +
5766 info->text_data[i] : 0));
5767 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5768 write_op(tp, (cpu_scratch_base +
5769 (info->rodata_base & 0xffff) +
5771 (info->rodata_data ?
5772 info->rodata_data[i] : 0));
5773 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5774 write_op(tp, (cpu_scratch_base +
5775 (info->data_base & 0xffff) +
5778 info->data_data[i] : 0));
5786 /* tp->lock is held. */
5787 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5789 struct fw_info info;
5792 info.text_base = TG3_FW_TEXT_ADDR;
5793 info.text_len = TG3_FW_TEXT_LEN;
5794 info.text_data = &tg3FwText[0];
5795 info.rodata_base = TG3_FW_RODATA_ADDR;
5796 info.rodata_len = TG3_FW_RODATA_LEN;
5797 info.rodata_data = &tg3FwRodata[0];
5798 info.data_base = TG3_FW_DATA_ADDR;
5799 info.data_len = TG3_FW_DATA_LEN;
5800 info.data_data = NULL;
5802 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5803 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5808 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5809 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5814 /* Now startup only the RX cpu. */
5815 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5816 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5818 for (i = 0; i < 5; i++) {
5819 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5821 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5822 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5823 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5827 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5828 "to set RX CPU PC, is %08x should be %08x\n",
5829 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5833 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5834 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
5840 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
5841 #define TG3_TSO_FW_RELASE_MINOR 0x6
5842 #define TG3_TSO_FW_RELEASE_FIX 0x0
5843 #define TG3_TSO_FW_START_ADDR 0x08000000
5844 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
5845 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
5846 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5847 #define TG3_TSO_FW_RODATA_LEN 0x60
5848 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
5849 #define TG3_TSO_FW_DATA_LEN 0x30
5850 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5851 #define TG3_TSO_FW_SBSS_LEN 0x2c
5852 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
5853 #define TG3_TSO_FW_BSS_LEN 0x894
5855 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5856 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5857 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5858 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5859 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5860 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5861 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5862 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5863 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5864 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5865 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5866 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5867 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5868 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5869 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5870 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5871 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5872 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5873 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5874 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5875 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5876 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5877 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5878 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5879 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5880 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5881 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5882 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5883 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5884 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5885 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5886 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5887 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5888 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5889 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5890 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5891 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5892 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5893 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5894 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5895 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5896 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5897 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5898 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5899 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5900 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5901 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5902 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5903 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5904 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5905 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5906 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5907 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5908 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5909 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5910 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5911 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5912 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5913 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5914 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5915 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5916 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5917 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5918 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5919 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5920 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5921 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5922 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5923 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5924 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5925 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5926 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5927 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5928 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5929 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5930 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5931 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5932 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5933 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5934 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5935 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5936 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5937 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5938 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5939 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5940 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5941 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5942 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5943 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5944 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5945 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5946 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5947 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5948 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5949 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5950 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5951 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5952 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5953 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5954 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5955 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5956 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5957 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5958 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5959 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5960 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5961 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5962 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5963 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5964 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5965 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5966 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5967 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5968 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5969 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5970 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5971 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5972 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5973 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5974 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5975 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5976 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5977 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5978 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5979 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5980 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5981 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5982 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5983 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5984 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5985 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5986 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5987 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5988 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5989 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5990 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5991 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5992 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5993 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5994 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5995 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5996 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5997 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5998 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5999 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6000 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6001 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6002 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6003 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6004 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6005 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6006 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6007 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6008 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6009 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6010 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6011 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6012 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6013 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6014 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6015 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6016 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6017 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6018 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6019 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6020 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6021 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6022 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6023 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6024 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6025 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6026 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6027 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6028 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6029 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6030 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6031 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6032 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6033 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6034 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6035 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6036 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6037 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6038 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6039 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6040 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6041 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6042 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6043 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6044 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6045 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6046 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6047 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6048 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6049 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6050 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6051 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6052 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6053 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6054 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6055 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6056 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6057 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6058 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6059 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6060 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6061 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6062 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6063 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6064 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6065 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6066 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6067 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6068 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6069 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6070 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6071 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6072 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6073 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6074 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6075 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6076 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6077 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6078 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6079 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6080 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6081 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6082 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6083 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6084 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6085 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6086 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6087 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6088 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6089 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6090 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6091 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6092 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6093 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6094 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6095 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6096 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6097 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6098 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6099 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6100 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6101 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6102 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6103 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6104 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6105 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6106 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6107 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6108 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6109 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6110 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6111 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6112 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6113 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6114 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6115 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6116 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6117 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6118 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6119 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6120 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6121 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6122 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6123 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6124 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6125 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6126 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6127 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6128 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6129 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6130 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6131 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6132 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6133 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6134 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6135 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6136 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6137 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6138 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6139 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6142 static const u32 tg3TsoFwRodata[] = {
6143 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6144 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6145 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6146 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6150 static const u32 tg3TsoFwData[] = {
6151 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6152 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6156 /* 5705 needs a special version of the TSO firmware. */
6157 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
6158 #define TG3_TSO5_FW_RELASE_MINOR 0x2
6159 #define TG3_TSO5_FW_RELEASE_FIX 0x0
6160 #define TG3_TSO5_FW_START_ADDR 0x00010000
6161 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6162 #define TG3_TSO5_FW_TEXT_LEN 0xe90
6163 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6164 #define TG3_TSO5_FW_RODATA_LEN 0x50
6165 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6166 #define TG3_TSO5_FW_DATA_LEN 0x20
6167 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6168 #define TG3_TSO5_FW_SBSS_LEN 0x28
6169 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6170 #define TG3_TSO5_FW_BSS_LEN 0x88
6172 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6173 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6174 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6175 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6176 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6177 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6178 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6179 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6180 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6181 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6182 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6183 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6184 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6185 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6186 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6187 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6188 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6189 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6190 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6191 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6192 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6193 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6194 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6195 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6196 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6197 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6198 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6199 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6200 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6201 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6202 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6203 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6204 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6205 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6206 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6207 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6208 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6209 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6210 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6211 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6212 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6213 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6214 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6215 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6216 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6217 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6218 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6219 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6220 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6221 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6222 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6223 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6224 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6225 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6226 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6227 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6228 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6229 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6230 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6231 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6232 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6233 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6234 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6235 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6236 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6237 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6238 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6239 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6240 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6241 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6242 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6243 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6244 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6245 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6246 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6247 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6248 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6249 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6250 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6251 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6252 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6253 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6254 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6255 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6256 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6257 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6258 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6259 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6260 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6261 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6262 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6263 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6264 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6265 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6266 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6267 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6268 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6269 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6270 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6271 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6272 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6273 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6274 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6275 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6276 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6277 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6278 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6279 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6280 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6281 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6282 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6283 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6284 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6285 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6286 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6287 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6288 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6289 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6290 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6291 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6292 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6293 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6294 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6295 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6296 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6297 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6298 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6299 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6300 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6301 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6302 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6303 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6304 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6305 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6306 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6307 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6308 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6309 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6310 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6311 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6312 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6313 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6314 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6315 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6316 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6317 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6318 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6319 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6320 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6321 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6322 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6323 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6324 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6325 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6326 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6327 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6328 0x00000000, 0x00000000, 0x00000000,
6331 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6332 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6333 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6334 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6335 0x00000000, 0x00000000, 0x00000000,
6338 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6339 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6340 0x00000000, 0x00000000, 0x00000000,
6343 /* tp->lock is held. */
6344 static int tg3_load_tso_firmware(struct tg3 *tp)
6346 struct fw_info info;
6347 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6350 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6353 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6354 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6355 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6356 info.text_data = &tg3Tso5FwText[0];
6357 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6358 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6359 info.rodata_data = &tg3Tso5FwRodata[0];
6360 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6361 info.data_len = TG3_TSO5_FW_DATA_LEN;
6362 info.data_data = &tg3Tso5FwData[0];
6363 cpu_base = RX_CPU_BASE;
6364 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6365 cpu_scratch_size = (info.text_len +
6368 TG3_TSO5_FW_SBSS_LEN +
6369 TG3_TSO5_FW_BSS_LEN);
6371 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6372 info.text_len = TG3_TSO_FW_TEXT_LEN;
6373 info.text_data = &tg3TsoFwText[0];
6374 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6375 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6376 info.rodata_data = &tg3TsoFwRodata[0];
6377 info.data_base = TG3_TSO_FW_DATA_ADDR;
6378 info.data_len = TG3_TSO_FW_DATA_LEN;
6379 info.data_data = &tg3TsoFwData[0];
6380 cpu_base = TX_CPU_BASE;
6381 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6382 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6385 err = tg3_load_firmware_cpu(tp, cpu_base,
6386 cpu_scratch_base, cpu_scratch_size,
6391 /* Now startup the cpu. */
6392 tw32(cpu_base + CPU_STATE, 0xffffffff);
6393 tw32_f(cpu_base + CPU_PC, info.text_base);
6395 for (i = 0; i < 5; i++) {
6396 if (tr32(cpu_base + CPU_PC) == info.text_base)
6398 tw32(cpu_base + CPU_STATE, 0xffffffff);
6399 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6400 tw32_f(cpu_base + CPU_PC, info.text_base);
6404 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6405 "to set CPU PC, is %08x should be %08x\n",
6406 tp->dev->name, tr32(cpu_base + CPU_PC),
6410 tw32(cpu_base + CPU_STATE, 0xffffffff);
6411 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6416 /* tp->lock is held. */
6417 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6419 u32 addr_high, addr_low;
6422 addr_high = ((tp->dev->dev_addr[0] << 8) |
6423 tp->dev->dev_addr[1]);
6424 addr_low = ((tp->dev->dev_addr[2] << 24) |
6425 (tp->dev->dev_addr[3] << 16) |
6426 (tp->dev->dev_addr[4] << 8) |
6427 (tp->dev->dev_addr[5] << 0));
6428 for (i = 0; i < 4; i++) {
6429 if (i == 1 && skip_mac_1)
6431 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6432 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6435 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6436 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6437 for (i = 0; i < 12; i++) {
6438 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6439 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6443 addr_high = (tp->dev->dev_addr[0] +
6444 tp->dev->dev_addr[1] +
6445 tp->dev->dev_addr[2] +
6446 tp->dev->dev_addr[3] +
6447 tp->dev->dev_addr[4] +
6448 tp->dev->dev_addr[5]) &
6449 TX_BACKOFF_SEED_MASK;
6450 tw32(MAC_TX_BACKOFF_SEED, addr_high);
6453 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6455 struct tg3 *tp = netdev_priv(dev);
6456 struct sockaddr *addr = p;
6457 int err = 0, skip_mac_1 = 0;
6459 if (!is_valid_ether_addr(addr->sa_data))
6462 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6464 if (!netif_running(dev))
6467 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6468 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6470 addr0_high = tr32(MAC_ADDR_0_HIGH);
6471 addr0_low = tr32(MAC_ADDR_0_LOW);
6472 addr1_high = tr32(MAC_ADDR_1_HIGH);
6473 addr1_low = tr32(MAC_ADDR_1_LOW);
6475 /* Skip MAC addr 1 if ASF is using it. */
6476 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6477 !(addr1_high == 0 && addr1_low == 0))
6480 spin_lock_bh(&tp->lock);
6481 __tg3_set_mac_addr(tp, skip_mac_1);
6482 spin_unlock_bh(&tp->lock);
6487 /* tp->lock is held. */
6488 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6489 dma_addr_t mapping, u32 maxlen_flags,
6493 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6494 ((u64) mapping >> 32));
6496 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6497 ((u64) mapping & 0xffffffff));
6499 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6502 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6504 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6508 static void __tg3_set_rx_mode(struct net_device *);
6509 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6511 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6512 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6513 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6514 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6515 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6516 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6517 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6519 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6520 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6521 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6522 u32 val = ec->stats_block_coalesce_usecs;
6524 if (!netif_carrier_ok(tp->dev))
6527 tw32(HOSTCC_STAT_COAL_TICKS, val);
6531 /* tp->lock is held. */
6532 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6534 u32 val, rdmac_mode;
6537 tg3_disable_ints(tp);
6541 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6543 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6544 tg3_abort_hw(tp, 1);
6550 err = tg3_chip_reset(tp);
6554 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6556 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6557 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
6558 val = tr32(TG3_CPMU_CTRL);
6559 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6560 tw32(TG3_CPMU_CTRL, val);
6562 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6563 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6564 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6565 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6567 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6568 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6569 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6570 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6572 val = tr32(TG3_CPMU_HST_ACC);
6573 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6574 val |= CPMU_HST_ACC_MACCLK_6_25;
6575 tw32(TG3_CPMU_HST_ACC, val);
6578 /* This works around an issue with Athlon chipsets on
6579 * B3 tigon3 silicon. This bit has no effect on any
6580 * other revision. But do not set this on PCI Express
6581 * chips and don't even touch the clocks if the CPMU is present.
6583 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6584 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6585 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6586 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6589 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6590 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6591 val = tr32(TG3PCI_PCISTATE);
6592 val |= PCISTATE_RETRY_SAME_DMA;
6593 tw32(TG3PCI_PCISTATE, val);
6596 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6597 /* Allow reads and writes to the
6598 * APE register and memory space.
6600 val = tr32(TG3PCI_PCISTATE);
6601 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6602 PCISTATE_ALLOW_APE_SHMEM_WR;
6603 tw32(TG3PCI_PCISTATE, val);
6606 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6607 /* Enable some hw fixes. */
6608 val = tr32(TG3PCI_MSI_DATA);
6609 val |= (1 << 26) | (1 << 28) | (1 << 29);
6610 tw32(TG3PCI_MSI_DATA, val);
6613 /* Descriptor ring init may make accesses to the
6614 * NIC SRAM area to setup the TX descriptors, so we
6615 * can only do this after the hardware has been
6616 * successfully reset.
6618 err = tg3_init_rings(tp);
6622 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6623 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6624 /* This value is determined during the probe time DMA
6625 * engine test, tg3_test_dma.
6627 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6630 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6631 GRC_MODE_4X_NIC_SEND_RINGS |
6632 GRC_MODE_NO_TX_PHDR_CSUM |
6633 GRC_MODE_NO_RX_PHDR_CSUM);
6634 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6636 /* Pseudo-header checksum is done by hardware logic and not
6637 * the offload processers, so make the chip do the pseudo-
6638 * header checksums on receive. For transmit it is more
6639 * convenient to do the pseudo-header checksum in software
6640 * as Linux does that on transmit for us in all cases.
6642 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6646 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6648 /* Setup the timer prescalar register. Clock is always 66Mhz. */
6649 val = tr32(GRC_MISC_CFG);
6651 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6652 tw32(GRC_MISC_CFG, val);
6654 /* Initialize MBUF/DESC pool. */
6655 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6657 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6658 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6659 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6660 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6662 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6663 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6664 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6666 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6669 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6670 TG3_TSO5_FW_RODATA_LEN +
6671 TG3_TSO5_FW_DATA_LEN +
6672 TG3_TSO5_FW_SBSS_LEN +
6673 TG3_TSO5_FW_BSS_LEN);
6674 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6675 tw32(BUFMGR_MB_POOL_ADDR,
6676 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6677 tw32(BUFMGR_MB_POOL_SIZE,
6678 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6681 if (tp->dev->mtu <= ETH_DATA_LEN) {
6682 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6683 tp->bufmgr_config.mbuf_read_dma_low_water);
6684 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6685 tp->bufmgr_config.mbuf_mac_rx_low_water);
6686 tw32(BUFMGR_MB_HIGH_WATER,
6687 tp->bufmgr_config.mbuf_high_water);
6689 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6690 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6691 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6692 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6693 tw32(BUFMGR_MB_HIGH_WATER,
6694 tp->bufmgr_config.mbuf_high_water_jumbo);
6696 tw32(BUFMGR_DMA_LOW_WATER,
6697 tp->bufmgr_config.dma_low_water);
6698 tw32(BUFMGR_DMA_HIGH_WATER,
6699 tp->bufmgr_config.dma_high_water);
6701 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6702 for (i = 0; i < 2000; i++) {
6703 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6708 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6713 /* Setup replenish threshold. */
6714 val = tp->rx_pending / 8;
6717 else if (val > tp->rx_std_max_post)
6718 val = tp->rx_std_max_post;
6719 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6720 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6721 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6723 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6724 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6727 tw32(RCVBDI_STD_THRESH, val);
6729 /* Initialize TG3_BDINFO's at:
6730 * RCVDBDI_STD_BD: standard eth size rx ring
6731 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6732 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6735 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6736 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6737 * ring attribute flags
6738 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6740 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6741 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6743 * The size of each ring is fixed in the firmware, but the location is
6746 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6747 ((u64) tp->rx_std_mapping >> 32));
6748 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6749 ((u64) tp->rx_std_mapping & 0xffffffff));
6750 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6751 NIC_SRAM_RX_BUFFER_DESC);
6753 /* Don't even try to program the JUMBO/MINI buffer descriptor
6756 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6757 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6758 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6760 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6761 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6763 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6764 BDINFO_FLAGS_DISABLED);
6766 /* Setup replenish threshold. */
6767 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6769 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6770 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6771 ((u64) tp->rx_jumbo_mapping >> 32));
6772 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6773 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6774 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6775 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6776 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6777 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6779 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6780 BDINFO_FLAGS_DISABLED);
6785 /* There is only one send ring on 5705/5750, no need to explicitly
6786 * disable the others.
6788 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6789 /* Clear out send RCB ring in SRAM. */
6790 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6791 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6792 BDINFO_FLAGS_DISABLED);
6797 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6798 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6800 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6801 tp->tx_desc_mapping,
6802 (TG3_TX_RING_SIZE <<
6803 BDINFO_FLAGS_MAXLEN_SHIFT),
6804 NIC_SRAM_TX_BUFFER_DESC);
6806 /* There is only one receive return ring on 5705/5750, no need
6807 * to explicitly disable the others.
6809 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6810 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6811 i += TG3_BDINFO_SIZE) {
6812 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6813 BDINFO_FLAGS_DISABLED);
6818 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6820 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6822 (TG3_RX_RCB_RING_SIZE(tp) <<
6823 BDINFO_FLAGS_MAXLEN_SHIFT),
6826 tp->rx_std_ptr = tp->rx_pending;
6827 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6830 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6831 tp->rx_jumbo_pending : 0;
6832 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6835 /* Initialize MAC address and backoff seed. */
6836 __tg3_set_mac_addr(tp, 0);
6838 /* MTU + ethernet header + FCS + optional VLAN tag */
6839 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6841 /* The slot time is changed by tg3_setup_phy if we
6842 * run at gigabit with half duplex.
6844 tw32(MAC_TX_LENGTHS,
6845 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6846 (6 << TX_LENGTHS_IPG_SHIFT) |
6847 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6849 /* Receive rules. */
6850 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6851 tw32(RCVLPC_CONFIG, 0x0181);
6853 /* Calculate RDMAC_MODE setting early, we need it to determine
6854 * the RCVLPC_STATE_ENABLE mask.
6856 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6857 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6858 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6859 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6860 RDMAC_MODE_LNGREAD_ENAB);
6862 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6863 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6864 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6865 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6867 /* If statement applies to 5705 and 5750 PCI devices only */
6868 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6869 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6870 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6871 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6872 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6873 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6874 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6875 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6876 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6880 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6881 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6883 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6884 rdmac_mode |= (1 << 27);
6886 /* Receive/send statistics. */
6887 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6888 val = tr32(RCVLPC_STATS_ENABLE);
6889 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6890 tw32(RCVLPC_STATS_ENABLE, val);
6891 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6892 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6893 val = tr32(RCVLPC_STATS_ENABLE);
6894 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6895 tw32(RCVLPC_STATS_ENABLE, val);
6897 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6899 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6900 tw32(SNDDATAI_STATSENAB, 0xffffff);
6901 tw32(SNDDATAI_STATSCTRL,
6902 (SNDDATAI_SCTRL_ENABLE |
6903 SNDDATAI_SCTRL_FASTUPD));
6905 /* Setup host coalescing engine. */
6906 tw32(HOSTCC_MODE, 0);
6907 for (i = 0; i < 2000; i++) {
6908 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6913 __tg3_set_coalesce(tp, &tp->coal);
6915 /* set status block DMA address */
6916 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6917 ((u64) tp->status_mapping >> 32));
6918 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6919 ((u64) tp->status_mapping & 0xffffffff));
6921 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6922 /* Status/statistics block address. See tg3_timer,
6923 * the tg3_periodic_fetch_stats call there, and
6924 * tg3_get_stats to see how this works for 5705/5750 chips.
6926 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6927 ((u64) tp->stats_mapping >> 32));
6928 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6929 ((u64) tp->stats_mapping & 0xffffffff));
6930 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6931 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6934 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6936 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6937 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6938 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6939 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6941 /* Clear statistics/status block in chip, and status block in ram. */
6942 for (i = NIC_SRAM_STATS_BLK;
6943 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6945 tg3_write_mem(tp, i, 0);
6948 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6950 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6951 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6952 /* reset to prevent losing 1st rx packet intermittently */
6953 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6957 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6958 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6959 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6960 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6961 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6962 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6963 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6966 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6967 * If TG3_FLG2_IS_NIC is zero, we should read the
6968 * register to preserve the GPIO settings for LOMs. The GPIOs,
6969 * whether used as inputs or outputs, are set by boot code after
6972 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6975 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6976 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6977 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6980 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6981 GRC_LCLCTRL_GPIO_OUTPUT3;
6983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6984 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6986 tp->grc_local_ctrl &= ~gpio_mask;
6987 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6989 /* GPIO1 must be driven high for eeprom write protect */
6990 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6991 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6992 GRC_LCLCTRL_GPIO_OUTPUT1);
6994 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6997 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7000 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7001 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7005 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7006 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7007 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7008 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7009 WDMAC_MODE_LNGREAD_ENAB);
7011 /* If statement applies to 5705 and 5750 PCI devices only */
7012 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7013 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7014 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7015 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7016 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7017 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7019 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7020 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7021 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7022 val |= WDMAC_MODE_RX_ACCEL;
7026 /* Enable host coalescing bug fix */
7027 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7028 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7029 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7030 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
7033 tw32_f(WDMAC_MODE, val);
7036 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7039 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7041 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7042 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7043 pcix_cmd |= PCI_X_CMD_READ_2K;
7044 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7045 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7046 pcix_cmd |= PCI_X_CMD_READ_2K;
7048 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7052 tw32_f(RDMAC_MODE, rdmac_mode);
7055 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7056 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7057 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7059 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7061 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7063 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7065 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7066 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7067 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7068 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7069 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7070 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7071 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7072 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7074 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7075 err = tg3_load_5701_a0_firmware_fix(tp);
7080 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7081 err = tg3_load_tso_firmware(tp);
7086 tp->tx_mode = TX_MODE_ENABLE;
7087 tw32_f(MAC_TX_MODE, tp->tx_mode);
7090 tp->rx_mode = RX_MODE_ENABLE;
7091 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7092 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7093 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7095 tw32_f(MAC_RX_MODE, tp->rx_mode);
7098 if (tp->link_config.phy_is_low_power) {
7099 tp->link_config.phy_is_low_power = 0;
7100 tp->link_config.speed = tp->link_config.orig_speed;
7101 tp->link_config.duplex = tp->link_config.orig_duplex;
7102 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7105 tp->mi_mode = MAC_MI_MODE_BASE;
7106 tw32_f(MAC_MI_MODE, tp->mi_mode);
7109 tw32(MAC_LED_CTRL, tp->led_ctrl);
7111 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7112 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7113 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7116 tw32_f(MAC_RX_MODE, tp->rx_mode);
7119 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7120 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7121 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7122 /* Set drive transmission level to 1.2V */
7123 /* only if the signal pre-emphasis bit is not set */
7124 val = tr32(MAC_SERDES_CFG);
7127 tw32(MAC_SERDES_CFG, val);
7129 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7130 tw32(MAC_SERDES_CFG, 0x616000);
7133 /* Prevent chip from dropping frames when flow control
7136 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7138 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7139 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7140 /* Use hardware link auto-negotiation */
7141 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7144 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7145 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7148 tmp = tr32(SERDES_RX_CTRL);
7149 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7150 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7151 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7152 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7155 err = tg3_setup_phy(tp, 0);
7159 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7160 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7163 /* Clear CRC stats. */
7164 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7165 tg3_writephy(tp, MII_TG3_TEST1,
7166 tmp | MII_TG3_TEST1_CRC_EN);
7167 tg3_readphy(tp, 0x14, &tmp);
7171 __tg3_set_rx_mode(tp->dev);
7173 /* Initialize receive rules. */
7174 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7175 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7176 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7177 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7179 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7180 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7184 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7188 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7190 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7192 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7194 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7196 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7198 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7200 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7202 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7204 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7206 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7208 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7210 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7212 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7214 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7222 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7223 /* Write our heartbeat update interval to APE. */
7224 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7225 APE_HOST_HEARTBEAT_INT_DISABLE);
7227 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7232 /* Called at device open time to get the chip ready for
7233 * packet processing. Invoked with tp->lock held.
7235 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7239 /* Force the chip into D0. */
7240 err = tg3_set_power_state(tp, PCI_D0);
7244 tg3_switch_clocks(tp);
7246 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7248 err = tg3_reset_hw(tp, reset_phy);
7254 #define TG3_STAT_ADD32(PSTAT, REG) \
7255 do { u32 __val = tr32(REG); \
7256 (PSTAT)->low += __val; \
7257 if ((PSTAT)->low < __val) \
7258 (PSTAT)->high += 1; \
7261 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7263 struct tg3_hw_stats *sp = tp->hw_stats;
7265 if (!netif_carrier_ok(tp->dev))
7268 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7269 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7270 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7271 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7272 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7273 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7274 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7275 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7276 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7277 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7278 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7279 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7280 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7282 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7283 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7284 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7285 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7286 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7287 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7288 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7289 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7290 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7291 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7292 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7293 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7294 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7295 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7297 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7298 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7299 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7302 static void tg3_timer(unsigned long __opaque)
7304 struct tg3 *tp = (struct tg3 *) __opaque;
7309 spin_lock(&tp->lock);
7311 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7312 /* All of this garbage is because when using non-tagged
7313 * IRQ status the mailbox/status_block protocol the chip
7314 * uses with the cpu is race prone.
7316 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7317 tw32(GRC_LOCAL_CTRL,
7318 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7320 tw32(HOSTCC_MODE, tp->coalesce_mode |
7321 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7324 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7325 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7326 spin_unlock(&tp->lock);
7327 schedule_work(&tp->reset_task);
7332 /* This part only runs once per second. */
7333 if (!--tp->timer_counter) {
7334 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7335 tg3_periodic_fetch_stats(tp);
7337 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7341 mac_stat = tr32(MAC_STATUS);
7344 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7345 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7347 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7351 tg3_setup_phy(tp, 0);
7352 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7353 u32 mac_stat = tr32(MAC_STATUS);
7356 if (netif_carrier_ok(tp->dev) &&
7357 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7360 if (! netif_carrier_ok(tp->dev) &&
7361 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7362 MAC_STATUS_SIGNAL_DET))) {
7366 if (!tp->serdes_counter) {
7369 ~MAC_MODE_PORT_MODE_MASK));
7371 tw32_f(MAC_MODE, tp->mac_mode);
7374 tg3_setup_phy(tp, 0);
7376 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7377 tg3_serdes_parallel_detect(tp);
7379 tp->timer_counter = tp->timer_multiplier;
7382 /* Heartbeat is only sent once every 2 seconds.
7384 * The heartbeat is to tell the ASF firmware that the host
7385 * driver is still alive. In the event that the OS crashes,
7386 * ASF needs to reset the hardware to free up the FIFO space
7387 * that may be filled with rx packets destined for the host.
7388 * If the FIFO is full, ASF will no longer function properly.
7390 * Unintended resets have been reported on real time kernels
7391 * where the timer doesn't run on time. Netpoll will also have
7394 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7395 * to check the ring condition when the heartbeat is expiring
7396 * before doing the reset. This will prevent most unintended
7399 if (!--tp->asf_counter) {
7400 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7403 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7404 FWCMD_NICDRV_ALIVE3);
7405 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7406 /* 5 seconds timeout */
7407 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7408 val = tr32(GRC_RX_CPU_EVENT);
7410 tw32(GRC_RX_CPU_EVENT, val);
7412 tp->asf_counter = tp->asf_multiplier;
7415 spin_unlock(&tp->lock);
7418 tp->timer.expires = jiffies + tp->timer_offset;
7419 add_timer(&tp->timer);
7422 static int tg3_request_irq(struct tg3 *tp)
7425 unsigned long flags;
7426 struct net_device *dev = tp->dev;
7428 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7430 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7432 flags = IRQF_SAMPLE_RANDOM;
7435 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7436 fn = tg3_interrupt_tagged;
7437 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7439 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7442 static int tg3_test_interrupt(struct tg3 *tp)
7444 struct net_device *dev = tp->dev;
7445 int err, i, intr_ok = 0;
7447 if (!netif_running(dev))
7450 tg3_disable_ints(tp);
7452 free_irq(tp->pdev->irq, dev);
7454 err = request_irq(tp->pdev->irq, tg3_test_isr,
7455 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7459 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7460 tg3_enable_ints(tp);
7462 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7465 for (i = 0; i < 5; i++) {
7466 u32 int_mbox, misc_host_ctrl;
7468 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7470 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7472 if ((int_mbox != 0) ||
7473 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7481 tg3_disable_ints(tp);
7483 free_irq(tp->pdev->irq, dev);
7485 err = tg3_request_irq(tp);
7496 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7497 * successfully restored
7499 static int tg3_test_msi(struct tg3 *tp)
7501 struct net_device *dev = tp->dev;
7505 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7508 /* Turn off SERR reporting in case MSI terminates with Master
7511 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7512 pci_write_config_word(tp->pdev, PCI_COMMAND,
7513 pci_cmd & ~PCI_COMMAND_SERR);
7515 err = tg3_test_interrupt(tp);
7517 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7522 /* other failures */
7526 /* MSI test failed, go back to INTx mode */
7527 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7528 "switching to INTx mode. Please report this failure to "
7529 "the PCI maintainer and include system chipset information.\n",
7532 free_irq(tp->pdev->irq, dev);
7533 pci_disable_msi(tp->pdev);
7535 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7537 err = tg3_request_irq(tp);
7541 /* Need to reset the chip because the MSI cycle may have terminated
7542 * with Master Abort.
7544 tg3_full_lock(tp, 1);
7546 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7547 err = tg3_init_hw(tp, 1);
7549 tg3_full_unlock(tp);
7552 free_irq(tp->pdev->irq, dev);
7557 static int tg3_open(struct net_device *dev)
7559 struct tg3 *tp = netdev_priv(dev);
7562 netif_carrier_off(tp->dev);
7564 tg3_full_lock(tp, 0);
7566 err = tg3_set_power_state(tp, PCI_D0);
7568 tg3_full_unlock(tp);
7572 tg3_disable_ints(tp);
7573 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7575 tg3_full_unlock(tp);
7577 /* The placement of this call is tied
7578 * to the setup and use of Host TX descriptors.
7580 err = tg3_alloc_consistent(tp);
7584 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
7585 /* All MSI supporting chips should support tagged
7586 * status. Assert that this is the case.
7588 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7589 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7590 "Not using MSI.\n", tp->dev->name);
7591 } else if (pci_enable_msi(tp->pdev) == 0) {
7594 msi_mode = tr32(MSGINT_MODE);
7595 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7596 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7599 err = tg3_request_irq(tp);
7602 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7603 pci_disable_msi(tp->pdev);
7604 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7606 tg3_free_consistent(tp);
7610 napi_enable(&tp->napi);
7612 tg3_full_lock(tp, 0);
7614 err = tg3_init_hw(tp, 1);
7616 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7619 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7620 tp->timer_offset = HZ;
7622 tp->timer_offset = HZ / 10;
7624 BUG_ON(tp->timer_offset > HZ);
7625 tp->timer_counter = tp->timer_multiplier =
7626 (HZ / tp->timer_offset);
7627 tp->asf_counter = tp->asf_multiplier =
7628 ((HZ / tp->timer_offset) * 2);
7630 init_timer(&tp->timer);
7631 tp->timer.expires = jiffies + tp->timer_offset;
7632 tp->timer.data = (unsigned long) tp;
7633 tp->timer.function = tg3_timer;
7636 tg3_full_unlock(tp);
7639 napi_disable(&tp->napi);
7640 free_irq(tp->pdev->irq, dev);
7641 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7642 pci_disable_msi(tp->pdev);
7643 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7645 tg3_free_consistent(tp);
7649 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7650 err = tg3_test_msi(tp);
7653 tg3_full_lock(tp, 0);
7655 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7656 pci_disable_msi(tp->pdev);
7657 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7659 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7661 tg3_free_consistent(tp);
7663 tg3_full_unlock(tp);
7665 napi_disable(&tp->napi);
7670 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7671 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
7672 u32 val = tr32(PCIE_TRANSACTION_CFG);
7674 tw32(PCIE_TRANSACTION_CFG,
7675 val | PCIE_TRANS_CFG_1SHOT_MSI);
7680 tg3_full_lock(tp, 0);
7682 add_timer(&tp->timer);
7683 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7684 tg3_enable_ints(tp);
7686 tg3_full_unlock(tp);
7688 netif_start_queue(dev);
7694 /*static*/ void tg3_dump_state(struct tg3 *tp)
7696 u32 val32, val32_2, val32_3, val32_4, val32_5;
7700 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7701 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7702 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7706 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7707 tr32(MAC_MODE), tr32(MAC_STATUS));
7708 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7709 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7710 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7711 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7712 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7713 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7715 /* Send data initiator control block */
7716 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7717 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7718 printk(" SNDDATAI_STATSCTRL[%08x]\n",
7719 tr32(SNDDATAI_STATSCTRL));
7721 /* Send data completion control block */
7722 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7724 /* Send BD ring selector block */
7725 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7726 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7728 /* Send BD initiator control block */
7729 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7730 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7732 /* Send BD completion control block */
7733 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7735 /* Receive list placement control block */
7736 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7737 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7738 printk(" RCVLPC_STATSCTRL[%08x]\n",
7739 tr32(RCVLPC_STATSCTRL));
7741 /* Receive data and receive BD initiator control block */
7742 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7743 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7745 /* Receive data completion control block */
7746 printk("DEBUG: RCVDCC_MODE[%08x]\n",
7749 /* Receive BD initiator control block */
7750 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7751 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7753 /* Receive BD completion control block */
7754 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7755 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7757 /* Receive list selector control block */
7758 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7759 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7761 /* Mbuf cluster free block */
7762 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7763 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7765 /* Host coalescing control block */
7766 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7767 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7768 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7769 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7770 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7771 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7772 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7773 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7774 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7775 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7776 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7777 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7779 /* Memory arbiter control block */
7780 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7781 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7783 /* Buffer manager control block */
7784 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7785 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7786 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7787 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7788 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7789 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7790 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7791 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7793 /* Read DMA control block */
7794 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7795 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7797 /* Write DMA control block */
7798 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7799 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7801 /* DMA completion block */
7802 printk("DEBUG: DMAC_MODE[%08x]\n",
7806 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7807 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7808 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7809 tr32(GRC_LOCAL_CTRL));
7812 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7813 tr32(RCVDBDI_JUMBO_BD + 0x0),
7814 tr32(RCVDBDI_JUMBO_BD + 0x4),
7815 tr32(RCVDBDI_JUMBO_BD + 0x8),
7816 tr32(RCVDBDI_JUMBO_BD + 0xc));
7817 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7818 tr32(RCVDBDI_STD_BD + 0x0),
7819 tr32(RCVDBDI_STD_BD + 0x4),
7820 tr32(RCVDBDI_STD_BD + 0x8),
7821 tr32(RCVDBDI_STD_BD + 0xc));
7822 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7823 tr32(RCVDBDI_MINI_BD + 0x0),
7824 tr32(RCVDBDI_MINI_BD + 0x4),
7825 tr32(RCVDBDI_MINI_BD + 0x8),
7826 tr32(RCVDBDI_MINI_BD + 0xc));
7828 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7829 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7830 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7831 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7832 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7833 val32, val32_2, val32_3, val32_4);
7835 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7836 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7837 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7838 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7839 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7840 val32, val32_2, val32_3, val32_4);
7842 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7843 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7844 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7845 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7846 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7847 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7848 val32, val32_2, val32_3, val32_4, val32_5);
7850 /* SW status block */
7851 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7852 tp->hw_status->status,
7853 tp->hw_status->status_tag,
7854 tp->hw_status->rx_jumbo_consumer,
7855 tp->hw_status->rx_consumer,
7856 tp->hw_status->rx_mini_consumer,
7857 tp->hw_status->idx[0].rx_producer,
7858 tp->hw_status->idx[0].tx_consumer);
7860 /* SW statistics block */
7861 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7862 ((u32 *)tp->hw_stats)[0],
7863 ((u32 *)tp->hw_stats)[1],
7864 ((u32 *)tp->hw_stats)[2],
7865 ((u32 *)tp->hw_stats)[3]);
7868 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7869 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7870 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7871 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7872 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7874 /* NIC side send descriptors. */
7875 for (i = 0; i < 6; i++) {
7878 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7879 + (i * sizeof(struct tg3_tx_buffer_desc));
7880 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7882 readl(txd + 0x0), readl(txd + 0x4),
7883 readl(txd + 0x8), readl(txd + 0xc));
7886 /* NIC side RX descriptors. */
7887 for (i = 0; i < 6; i++) {
7890 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7891 + (i * sizeof(struct tg3_rx_buffer_desc));
7892 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7894 readl(rxd + 0x0), readl(rxd + 0x4),
7895 readl(rxd + 0x8), readl(rxd + 0xc));
7896 rxd += (4 * sizeof(u32));
7897 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7899 readl(rxd + 0x0), readl(rxd + 0x4),
7900 readl(rxd + 0x8), readl(rxd + 0xc));
7903 for (i = 0; i < 6; i++) {
7906 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7907 + (i * sizeof(struct tg3_rx_buffer_desc));
7908 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7910 readl(rxd + 0x0), readl(rxd + 0x4),
7911 readl(rxd + 0x8), readl(rxd + 0xc));
7912 rxd += (4 * sizeof(u32));
7913 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7915 readl(rxd + 0x0), readl(rxd + 0x4),
7916 readl(rxd + 0x8), readl(rxd + 0xc));
7921 static struct net_device_stats *tg3_get_stats(struct net_device *);
7922 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7924 static int tg3_close(struct net_device *dev)
7926 struct tg3 *tp = netdev_priv(dev);
7928 napi_disable(&tp->napi);
7929 cancel_work_sync(&tp->reset_task);
7931 netif_stop_queue(dev);
7933 del_timer_sync(&tp->timer);
7935 tg3_full_lock(tp, 1);
7940 tg3_disable_ints(tp);
7942 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7944 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7946 tg3_full_unlock(tp);
7948 free_irq(tp->pdev->irq, dev);
7949 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7950 pci_disable_msi(tp->pdev);
7951 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7954 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7955 sizeof(tp->net_stats_prev));
7956 memcpy(&tp->estats_prev, tg3_get_estats(tp),
7957 sizeof(tp->estats_prev));
7959 tg3_free_consistent(tp);
7961 tg3_set_power_state(tp, PCI_D3hot);
7963 netif_carrier_off(tp->dev);
7968 static inline unsigned long get_stat64(tg3_stat64_t *val)
7972 #if (BITS_PER_LONG == 32)
7975 ret = ((u64)val->high << 32) | ((u64)val->low);
7980 static unsigned long calc_crc_errors(struct tg3 *tp)
7982 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7984 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7985 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7986 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7989 spin_lock_bh(&tp->lock);
7990 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
7991 tg3_writephy(tp, MII_TG3_TEST1,
7992 val | MII_TG3_TEST1_CRC_EN);
7993 tg3_readphy(tp, 0x14, &val);
7996 spin_unlock_bh(&tp->lock);
7998 tp->phy_crc_errors += val;
8000 return tp->phy_crc_errors;
8003 return get_stat64(&hw_stats->rx_fcs_errors);
8006 #define ESTAT_ADD(member) \
8007 estats->member = old_estats->member + \
8008 get_stat64(&hw_stats->member)
8010 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8012 struct tg3_ethtool_stats *estats = &tp->estats;
8013 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8014 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8019 ESTAT_ADD(rx_octets);
8020 ESTAT_ADD(rx_fragments);
8021 ESTAT_ADD(rx_ucast_packets);
8022 ESTAT_ADD(rx_mcast_packets);
8023 ESTAT_ADD(rx_bcast_packets);
8024 ESTAT_ADD(rx_fcs_errors);
8025 ESTAT_ADD(rx_align_errors);
8026 ESTAT_ADD(rx_xon_pause_rcvd);
8027 ESTAT_ADD(rx_xoff_pause_rcvd);
8028 ESTAT_ADD(rx_mac_ctrl_rcvd);
8029 ESTAT_ADD(rx_xoff_entered);
8030 ESTAT_ADD(rx_frame_too_long_errors);
8031 ESTAT_ADD(rx_jabbers);
8032 ESTAT_ADD(rx_undersize_packets);
8033 ESTAT_ADD(rx_in_length_errors);
8034 ESTAT_ADD(rx_out_length_errors);
8035 ESTAT_ADD(rx_64_or_less_octet_packets);
8036 ESTAT_ADD(rx_65_to_127_octet_packets);
8037 ESTAT_ADD(rx_128_to_255_octet_packets);
8038 ESTAT_ADD(rx_256_to_511_octet_packets);
8039 ESTAT_ADD(rx_512_to_1023_octet_packets);
8040 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8041 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8042 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8043 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8044 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8046 ESTAT_ADD(tx_octets);
8047 ESTAT_ADD(tx_collisions);
8048 ESTAT_ADD(tx_xon_sent);
8049 ESTAT_ADD(tx_xoff_sent);
8050 ESTAT_ADD(tx_flow_control);
8051 ESTAT_ADD(tx_mac_errors);
8052 ESTAT_ADD(tx_single_collisions);
8053 ESTAT_ADD(tx_mult_collisions);
8054 ESTAT_ADD(tx_deferred);
8055 ESTAT_ADD(tx_excessive_collisions);
8056 ESTAT_ADD(tx_late_collisions);
8057 ESTAT_ADD(tx_collide_2times);
8058 ESTAT_ADD(tx_collide_3times);
8059 ESTAT_ADD(tx_collide_4times);
8060 ESTAT_ADD(tx_collide_5times);
8061 ESTAT_ADD(tx_collide_6times);
8062 ESTAT_ADD(tx_collide_7times);
8063 ESTAT_ADD(tx_collide_8times);
8064 ESTAT_ADD(tx_collide_9times);
8065 ESTAT_ADD(tx_collide_10times);
8066 ESTAT_ADD(tx_collide_11times);
8067 ESTAT_ADD(tx_collide_12times);
8068 ESTAT_ADD(tx_collide_13times);
8069 ESTAT_ADD(tx_collide_14times);
8070 ESTAT_ADD(tx_collide_15times);
8071 ESTAT_ADD(tx_ucast_packets);
8072 ESTAT_ADD(tx_mcast_packets);
8073 ESTAT_ADD(tx_bcast_packets);
8074 ESTAT_ADD(tx_carrier_sense_errors);
8075 ESTAT_ADD(tx_discards);
8076 ESTAT_ADD(tx_errors);
8078 ESTAT_ADD(dma_writeq_full);
8079 ESTAT_ADD(dma_write_prioq_full);
8080 ESTAT_ADD(rxbds_empty);
8081 ESTAT_ADD(rx_discards);
8082 ESTAT_ADD(rx_errors);
8083 ESTAT_ADD(rx_threshold_hit);
8085 ESTAT_ADD(dma_readq_full);
8086 ESTAT_ADD(dma_read_prioq_full);
8087 ESTAT_ADD(tx_comp_queue_full);
8089 ESTAT_ADD(ring_set_send_prod_index);
8090 ESTAT_ADD(ring_status_update);
8091 ESTAT_ADD(nic_irqs);
8092 ESTAT_ADD(nic_avoided_irqs);
8093 ESTAT_ADD(nic_tx_threshold_hit);
8098 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8100 struct tg3 *tp = netdev_priv(dev);
8101 struct net_device_stats *stats = &tp->net_stats;
8102 struct net_device_stats *old_stats = &tp->net_stats_prev;
8103 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8108 stats->rx_packets = old_stats->rx_packets +
8109 get_stat64(&hw_stats->rx_ucast_packets) +
8110 get_stat64(&hw_stats->rx_mcast_packets) +
8111 get_stat64(&hw_stats->rx_bcast_packets);
8113 stats->tx_packets = old_stats->tx_packets +
8114 get_stat64(&hw_stats->tx_ucast_packets) +
8115 get_stat64(&hw_stats->tx_mcast_packets) +
8116 get_stat64(&hw_stats->tx_bcast_packets);
8118 stats->rx_bytes = old_stats->rx_bytes +
8119 get_stat64(&hw_stats->rx_octets);
8120 stats->tx_bytes = old_stats->tx_bytes +
8121 get_stat64(&hw_stats->tx_octets);
8123 stats->rx_errors = old_stats->rx_errors +
8124 get_stat64(&hw_stats->rx_errors);
8125 stats->tx_errors = old_stats->tx_errors +
8126 get_stat64(&hw_stats->tx_errors) +
8127 get_stat64(&hw_stats->tx_mac_errors) +
8128 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8129 get_stat64(&hw_stats->tx_discards);
8131 stats->multicast = old_stats->multicast +
8132 get_stat64(&hw_stats->rx_mcast_packets);
8133 stats->collisions = old_stats->collisions +
8134 get_stat64(&hw_stats->tx_collisions);
8136 stats->rx_length_errors = old_stats->rx_length_errors +
8137 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8138 get_stat64(&hw_stats->rx_undersize_packets);
8140 stats->rx_over_errors = old_stats->rx_over_errors +
8141 get_stat64(&hw_stats->rxbds_empty);
8142 stats->rx_frame_errors = old_stats->rx_frame_errors +
8143 get_stat64(&hw_stats->rx_align_errors);
8144 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8145 get_stat64(&hw_stats->tx_discards);
8146 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8147 get_stat64(&hw_stats->tx_carrier_sense_errors);
8149 stats->rx_crc_errors = old_stats->rx_crc_errors +
8150 calc_crc_errors(tp);
8152 stats->rx_missed_errors = old_stats->rx_missed_errors +
8153 get_stat64(&hw_stats->rx_discards);
8158 static inline u32 calc_crc(unsigned char *buf, int len)
8166 for (j = 0; j < len; j++) {
8169 for (k = 0; k < 8; k++) {
8183 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8185 /* accept or reject all multicast frames */
8186 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8187 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8188 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8189 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8192 static void __tg3_set_rx_mode(struct net_device *dev)
8194 struct tg3 *tp = netdev_priv(dev);
8197 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8198 RX_MODE_KEEP_VLAN_TAG);
8200 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8203 #if TG3_VLAN_TAG_USED
8205 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8206 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8208 /* By definition, VLAN is disabled always in this
8211 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8212 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8215 if (dev->flags & IFF_PROMISC) {
8216 /* Promiscuous mode. */
8217 rx_mode |= RX_MODE_PROMISC;
8218 } else if (dev->flags & IFF_ALLMULTI) {
8219 /* Accept all multicast. */
8220 tg3_set_multi (tp, 1);
8221 } else if (dev->mc_count < 1) {
8222 /* Reject all multicast. */
8223 tg3_set_multi (tp, 0);
8225 /* Accept one or more multicast(s). */
8226 struct dev_mc_list *mclist;
8228 u32 mc_filter[4] = { 0, };
8233 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8234 i++, mclist = mclist->next) {
8236 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8238 regidx = (bit & 0x60) >> 5;
8240 mc_filter[regidx] |= (1 << bit);
8243 tw32(MAC_HASH_REG_0, mc_filter[0]);
8244 tw32(MAC_HASH_REG_1, mc_filter[1]);
8245 tw32(MAC_HASH_REG_2, mc_filter[2]);
8246 tw32(MAC_HASH_REG_3, mc_filter[3]);
8249 if (rx_mode != tp->rx_mode) {
8250 tp->rx_mode = rx_mode;
8251 tw32_f(MAC_RX_MODE, rx_mode);
8256 static void tg3_set_rx_mode(struct net_device *dev)
8258 struct tg3 *tp = netdev_priv(dev);
8260 if (!netif_running(dev))
8263 tg3_full_lock(tp, 0);
8264 __tg3_set_rx_mode(dev);
8265 tg3_full_unlock(tp);
8268 #define TG3_REGDUMP_LEN (32 * 1024)
8270 static int tg3_get_regs_len(struct net_device *dev)
8272 return TG3_REGDUMP_LEN;
8275 static void tg3_get_regs(struct net_device *dev,
8276 struct ethtool_regs *regs, void *_p)
8279 struct tg3 *tp = netdev_priv(dev);
8285 memset(p, 0, TG3_REGDUMP_LEN);
8287 if (tp->link_config.phy_is_low_power)
8290 tg3_full_lock(tp, 0);
8292 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
8293 #define GET_REG32_LOOP(base,len) \
8294 do { p = (u32 *)(orig_p + (base)); \
8295 for (i = 0; i < len; i += 4) \
8296 __GET_REG32((base) + i); \
8298 #define GET_REG32_1(reg) \
8299 do { p = (u32 *)(orig_p + (reg)); \
8300 __GET_REG32((reg)); \
8303 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8304 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8305 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8306 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8307 GET_REG32_1(SNDDATAC_MODE);
8308 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8309 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8310 GET_REG32_1(SNDBDC_MODE);
8311 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8312 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8313 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8314 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8315 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8316 GET_REG32_1(RCVDCC_MODE);
8317 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8318 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8319 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8320 GET_REG32_1(MBFREE_MODE);
8321 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8322 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8323 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8324 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8325 GET_REG32_LOOP(WDMAC_MODE, 0x08);
8326 GET_REG32_1(RX_CPU_MODE);
8327 GET_REG32_1(RX_CPU_STATE);
8328 GET_REG32_1(RX_CPU_PGMCTR);
8329 GET_REG32_1(RX_CPU_HWBKPT);
8330 GET_REG32_1(TX_CPU_MODE);
8331 GET_REG32_1(TX_CPU_STATE);
8332 GET_REG32_1(TX_CPU_PGMCTR);
8333 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8334 GET_REG32_LOOP(FTQ_RESET, 0x120);
8335 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8336 GET_REG32_1(DMAC_MODE);
8337 GET_REG32_LOOP(GRC_MODE, 0x4c);
8338 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8339 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8342 #undef GET_REG32_LOOP
8345 tg3_full_unlock(tp);
8348 static int tg3_get_eeprom_len(struct net_device *dev)
8350 struct tg3 *tp = netdev_priv(dev);
8352 return tp->nvram_size;
8355 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8356 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8357 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8359 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8361 struct tg3 *tp = netdev_priv(dev);
8364 u32 i, offset, len, b_offset, b_count;
8367 if (tp->link_config.phy_is_low_power)
8370 offset = eeprom->offset;
8374 eeprom->magic = TG3_EEPROM_MAGIC;
8377 /* adjustments to start on required 4 byte boundary */
8378 b_offset = offset & 3;
8379 b_count = 4 - b_offset;
8380 if (b_count > len) {
8381 /* i.e. offset=1 len=2 */
8384 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
8387 memcpy(data, ((char*)&val) + b_offset, b_count);
8390 eeprom->len += b_count;
8393 /* read bytes upto the last 4 byte boundary */
8394 pd = &data[eeprom->len];
8395 for (i = 0; i < (len - (len & 3)); i += 4) {
8396 ret = tg3_nvram_read_le(tp, offset + i, &val);
8401 memcpy(pd + i, &val, 4);
8406 /* read last bytes not ending on 4 byte boundary */
8407 pd = &data[eeprom->len];
8409 b_offset = offset + len - b_count;
8410 ret = tg3_nvram_read_le(tp, b_offset, &val);
8413 memcpy(pd, &val, b_count);
8414 eeprom->len += b_count;
8419 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8421 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8423 struct tg3 *tp = netdev_priv(dev);
8425 u32 offset, len, b_offset, odd_len;
8429 if (tp->link_config.phy_is_low_power)
8432 if (eeprom->magic != TG3_EEPROM_MAGIC)
8435 offset = eeprom->offset;
8438 if ((b_offset = (offset & 3))) {
8439 /* adjustments to start on required 4 byte boundary */
8440 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
8451 /* adjustments to end on required 4 byte boundary */
8453 len = (len + 3) & ~3;
8454 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
8460 if (b_offset || odd_len) {
8461 buf = kmalloc(len, GFP_KERNEL);
8465 memcpy(buf, &start, 4);
8467 memcpy(buf+len-4, &end, 4);
8468 memcpy(buf + b_offset, data, eeprom->len);
8471 ret = tg3_nvram_write_block(tp, offset, len, buf);
8479 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8481 struct tg3 *tp = netdev_priv(dev);
8483 cmd->supported = (SUPPORTED_Autoneg);
8485 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8486 cmd->supported |= (SUPPORTED_1000baseT_Half |
8487 SUPPORTED_1000baseT_Full);
8489 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8490 cmd->supported |= (SUPPORTED_100baseT_Half |
8491 SUPPORTED_100baseT_Full |
8492 SUPPORTED_10baseT_Half |
8493 SUPPORTED_10baseT_Full |
8495 cmd->port = PORT_TP;
8497 cmd->supported |= SUPPORTED_FIBRE;
8498 cmd->port = PORT_FIBRE;
8501 cmd->advertising = tp->link_config.advertising;
8502 if (netif_running(dev)) {
8503 cmd->speed = tp->link_config.active_speed;
8504 cmd->duplex = tp->link_config.active_duplex;
8506 cmd->phy_address = PHY_ADDR;
8507 cmd->transceiver = 0;
8508 cmd->autoneg = tp->link_config.autoneg;
8514 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8516 struct tg3 *tp = netdev_priv(dev);
8518 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8519 /* These are the only valid advertisement bits allowed. */
8520 if (cmd->autoneg == AUTONEG_ENABLE &&
8521 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8522 ADVERTISED_1000baseT_Full |
8523 ADVERTISED_Autoneg |
8526 /* Fiber can only do SPEED_1000. */
8527 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8528 (cmd->speed != SPEED_1000))
8530 /* Copper cannot force SPEED_1000. */
8531 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8532 (cmd->speed == SPEED_1000))
8534 else if ((cmd->speed == SPEED_1000) &&
8535 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8538 tg3_full_lock(tp, 0);
8540 tp->link_config.autoneg = cmd->autoneg;
8541 if (cmd->autoneg == AUTONEG_ENABLE) {
8542 tp->link_config.advertising = (cmd->advertising |
8543 ADVERTISED_Autoneg);
8544 tp->link_config.speed = SPEED_INVALID;
8545 tp->link_config.duplex = DUPLEX_INVALID;
8547 tp->link_config.advertising = 0;
8548 tp->link_config.speed = cmd->speed;
8549 tp->link_config.duplex = cmd->duplex;
8552 tp->link_config.orig_speed = tp->link_config.speed;
8553 tp->link_config.orig_duplex = tp->link_config.duplex;
8554 tp->link_config.orig_autoneg = tp->link_config.autoneg;
8556 if (netif_running(dev))
8557 tg3_setup_phy(tp, 1);
8559 tg3_full_unlock(tp);
8564 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8566 struct tg3 *tp = netdev_priv(dev);
8568 strcpy(info->driver, DRV_MODULE_NAME);
8569 strcpy(info->version, DRV_MODULE_VERSION);
8570 strcpy(info->fw_version, tp->fw_ver);
8571 strcpy(info->bus_info, pci_name(tp->pdev));
8574 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8576 struct tg3 *tp = netdev_priv(dev);
8578 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8579 wol->supported = WAKE_MAGIC;
8583 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8584 wol->wolopts = WAKE_MAGIC;
8585 memset(&wol->sopass, 0, sizeof(wol->sopass));
8588 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8590 struct tg3 *tp = netdev_priv(dev);
8592 if (wol->wolopts & ~WAKE_MAGIC)
8594 if ((wol->wolopts & WAKE_MAGIC) &&
8595 !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8598 spin_lock_bh(&tp->lock);
8599 if (wol->wolopts & WAKE_MAGIC)
8600 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8602 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8603 spin_unlock_bh(&tp->lock);
8608 static u32 tg3_get_msglevel(struct net_device *dev)
8610 struct tg3 *tp = netdev_priv(dev);
8611 return tp->msg_enable;
8614 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8616 struct tg3 *tp = netdev_priv(dev);
8617 tp->msg_enable = value;
8620 static int tg3_set_tso(struct net_device *dev, u32 value)
8622 struct tg3 *tp = netdev_priv(dev);
8624 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8629 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8630 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8632 dev->features |= NETIF_F_TSO6;
8633 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8634 dev->features |= NETIF_F_TSO_ECN;
8636 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
8638 return ethtool_op_set_tso(dev, value);
8641 static int tg3_nway_reset(struct net_device *dev)
8643 struct tg3 *tp = netdev_priv(dev);
8647 if (!netif_running(dev))
8650 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8653 spin_lock_bh(&tp->lock);
8655 tg3_readphy(tp, MII_BMCR, &bmcr);
8656 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8657 ((bmcr & BMCR_ANENABLE) ||
8658 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8659 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8663 spin_unlock_bh(&tp->lock);
8668 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8670 struct tg3 *tp = netdev_priv(dev);
8672 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8673 ering->rx_mini_max_pending = 0;
8674 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8675 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8677 ering->rx_jumbo_max_pending = 0;
8679 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8681 ering->rx_pending = tp->rx_pending;
8682 ering->rx_mini_pending = 0;
8683 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8684 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8686 ering->rx_jumbo_pending = 0;
8688 ering->tx_pending = tp->tx_pending;
8691 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8693 struct tg3 *tp = netdev_priv(dev);
8694 int irq_sync = 0, err = 0;
8696 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8697 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8698 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8699 (ering->tx_pending <= MAX_SKB_FRAGS) ||
8700 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
8701 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
8704 if (netif_running(dev)) {
8709 tg3_full_lock(tp, irq_sync);
8711 tp->rx_pending = ering->rx_pending;
8713 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8714 tp->rx_pending > 63)
8715 tp->rx_pending = 63;
8716 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8717 tp->tx_pending = ering->tx_pending;
8719 if (netif_running(dev)) {
8720 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8721 err = tg3_restart_hw(tp, 1);
8723 tg3_netif_start(tp);
8726 tg3_full_unlock(tp);
8731 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8733 struct tg3 *tp = netdev_priv(dev);
8735 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8737 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8738 epause->rx_pause = 1;
8740 epause->rx_pause = 0;
8742 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8743 epause->tx_pause = 1;
8745 epause->tx_pause = 0;
8748 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8750 struct tg3 *tp = netdev_priv(dev);
8751 int irq_sync = 0, err = 0;
8753 if (netif_running(dev)) {
8758 tg3_full_lock(tp, irq_sync);
8760 if (epause->autoneg)
8761 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8763 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8764 if (epause->rx_pause)
8765 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
8767 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
8768 if (epause->tx_pause)
8769 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
8771 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
8773 if (netif_running(dev)) {
8774 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8775 err = tg3_restart_hw(tp, 1);
8777 tg3_netif_start(tp);
8780 tg3_full_unlock(tp);
8785 static u32 tg3_get_rx_csum(struct net_device *dev)
8787 struct tg3 *tp = netdev_priv(dev);
8788 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8791 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8793 struct tg3 *tp = netdev_priv(dev);
8795 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8801 spin_lock_bh(&tp->lock);
8803 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8805 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8806 spin_unlock_bh(&tp->lock);
8811 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8813 struct tg3 *tp = netdev_priv(dev);
8815 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8821 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8822 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8823 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8824 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8825 ethtool_op_set_tx_ipv6_csum(dev, data);
8827 ethtool_op_set_tx_csum(dev, data);
8832 static int tg3_get_sset_count (struct net_device *dev, int sset)
8836 return TG3_NUM_TEST;
8838 return TG3_NUM_STATS;
8844 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8846 switch (stringset) {
8848 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
8851 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
8854 WARN_ON(1); /* we need a WARN() */
8859 static int tg3_phys_id(struct net_device *dev, u32 data)
8861 struct tg3 *tp = netdev_priv(dev);
8864 if (!netif_running(tp->dev))
8868 data = UINT_MAX / 2;
8870 for (i = 0; i < (data * 2); i++) {
8872 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8873 LED_CTRL_1000MBPS_ON |
8874 LED_CTRL_100MBPS_ON |
8875 LED_CTRL_10MBPS_ON |
8876 LED_CTRL_TRAFFIC_OVERRIDE |
8877 LED_CTRL_TRAFFIC_BLINK |
8878 LED_CTRL_TRAFFIC_LED);
8881 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8882 LED_CTRL_TRAFFIC_OVERRIDE);
8884 if (msleep_interruptible(500))
8887 tw32(MAC_LED_CTRL, tp->led_ctrl);
8891 static void tg3_get_ethtool_stats (struct net_device *dev,
8892 struct ethtool_stats *estats, u64 *tmp_stats)
8894 struct tg3 *tp = netdev_priv(dev);
8895 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8898 #define NVRAM_TEST_SIZE 0x100
8899 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
8900 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
8901 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
8902 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8903 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8905 static int tg3_test_nvram(struct tg3 *tp)
8909 int i, j, k, err = 0, size;
8911 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8914 if (magic == TG3_EEPROM_MAGIC)
8915 size = NVRAM_TEST_SIZE;
8916 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
8917 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
8918 TG3_EEPROM_SB_FORMAT_1) {
8919 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
8920 case TG3_EEPROM_SB_REVISION_0:
8921 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
8923 case TG3_EEPROM_SB_REVISION_2:
8924 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
8926 case TG3_EEPROM_SB_REVISION_3:
8927 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
8934 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
8935 size = NVRAM_SELFBOOT_HW_SIZE;
8939 buf = kmalloc(size, GFP_KERNEL);
8944 for (i = 0, j = 0; i < size; i += 4, j++) {
8945 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
8951 /* Selfboot format */
8952 magic = swab32(le32_to_cpu(buf[0]));
8953 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
8954 TG3_EEPROM_MAGIC_FW) {
8955 u8 *buf8 = (u8 *) buf, csum8 = 0;
8957 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
8958 TG3_EEPROM_SB_REVISION_2) {
8959 /* For rev 2, the csum doesn't include the MBA. */
8960 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
8962 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
8965 for (i = 0; i < size; i++)
8978 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
8979 TG3_EEPROM_MAGIC_HW) {
8980 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
8981 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
8982 u8 *buf8 = (u8 *) buf;
8984 /* Separate the parity bits and the data bytes. */
8985 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
8986 if ((i == 0) || (i == 8)) {
8990 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
8991 parity[k++] = buf8[i] & msk;
8998 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
8999 parity[k++] = buf8[i] & msk;
9002 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9003 parity[k++] = buf8[i] & msk;
9006 data[j++] = buf8[i];
9010 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9011 u8 hw8 = hweight8(data[i]);
9013 if ((hw8 & 0x1) && parity[i])
9015 else if (!(hw8 & 0x1) && !parity[i])
9022 /* Bootstrap checksum at offset 0x10 */
9023 csum = calc_crc((unsigned char *) buf, 0x10);
9024 if(csum != le32_to_cpu(buf[0x10/4]))
9027 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9028 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9029 if (csum != le32_to_cpu(buf[0xfc/4]))
9039 #define TG3_SERDES_TIMEOUT_SEC 2
9040 #define TG3_COPPER_TIMEOUT_SEC 6
9042 static int tg3_test_link(struct tg3 *tp)
9046 if (!netif_running(tp->dev))
9049 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9050 max = TG3_SERDES_TIMEOUT_SEC;
9052 max = TG3_COPPER_TIMEOUT_SEC;
9054 for (i = 0; i < max; i++) {
9055 if (netif_carrier_ok(tp->dev))
9058 if (msleep_interruptible(1000))
9065 /* Only test the commonly used registers */
9066 static int tg3_test_registers(struct tg3 *tp)
9068 int i, is_5705, is_5750;
9069 u32 offset, read_mask, write_mask, val, save_val, read_val;
9073 #define TG3_FL_5705 0x1
9074 #define TG3_FL_NOT_5705 0x2
9075 #define TG3_FL_NOT_5788 0x4
9076 #define TG3_FL_NOT_5750 0x8
9080 /* MAC Control Registers */
9081 { MAC_MODE, TG3_FL_NOT_5705,
9082 0x00000000, 0x00ef6f8c },
9083 { MAC_MODE, TG3_FL_5705,
9084 0x00000000, 0x01ef6b8c },
9085 { MAC_STATUS, TG3_FL_NOT_5705,
9086 0x03800107, 0x00000000 },
9087 { MAC_STATUS, TG3_FL_5705,
9088 0x03800100, 0x00000000 },
9089 { MAC_ADDR_0_HIGH, 0x0000,
9090 0x00000000, 0x0000ffff },
9091 { MAC_ADDR_0_LOW, 0x0000,
9092 0x00000000, 0xffffffff },
9093 { MAC_RX_MTU_SIZE, 0x0000,
9094 0x00000000, 0x0000ffff },
9095 { MAC_TX_MODE, 0x0000,
9096 0x00000000, 0x00000070 },
9097 { MAC_TX_LENGTHS, 0x0000,
9098 0x00000000, 0x00003fff },
9099 { MAC_RX_MODE, TG3_FL_NOT_5705,
9100 0x00000000, 0x000007fc },
9101 { MAC_RX_MODE, TG3_FL_5705,
9102 0x00000000, 0x000007dc },
9103 { MAC_HASH_REG_0, 0x0000,
9104 0x00000000, 0xffffffff },
9105 { MAC_HASH_REG_1, 0x0000,
9106 0x00000000, 0xffffffff },
9107 { MAC_HASH_REG_2, 0x0000,
9108 0x00000000, 0xffffffff },
9109 { MAC_HASH_REG_3, 0x0000,
9110 0x00000000, 0xffffffff },
9112 /* Receive Data and Receive BD Initiator Control Registers. */
9113 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9114 0x00000000, 0xffffffff },
9115 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9116 0x00000000, 0xffffffff },
9117 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9118 0x00000000, 0x00000003 },
9119 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9120 0x00000000, 0xffffffff },
9121 { RCVDBDI_STD_BD+0, 0x0000,
9122 0x00000000, 0xffffffff },
9123 { RCVDBDI_STD_BD+4, 0x0000,
9124 0x00000000, 0xffffffff },
9125 { RCVDBDI_STD_BD+8, 0x0000,
9126 0x00000000, 0xffff0002 },
9127 { RCVDBDI_STD_BD+0xc, 0x0000,
9128 0x00000000, 0xffffffff },
9130 /* Receive BD Initiator Control Registers. */
9131 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9132 0x00000000, 0xffffffff },
9133 { RCVBDI_STD_THRESH, TG3_FL_5705,
9134 0x00000000, 0x000003ff },
9135 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9136 0x00000000, 0xffffffff },
9138 /* Host Coalescing Control Registers. */
9139 { HOSTCC_MODE, TG3_FL_NOT_5705,
9140 0x00000000, 0x00000004 },
9141 { HOSTCC_MODE, TG3_FL_5705,
9142 0x00000000, 0x000000f6 },
9143 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9144 0x00000000, 0xffffffff },
9145 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9146 0x00000000, 0x000003ff },
9147 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9148 0x00000000, 0xffffffff },
9149 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9150 0x00000000, 0x000003ff },
9151 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9152 0x00000000, 0xffffffff },
9153 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9154 0x00000000, 0x000000ff },
9155 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9156 0x00000000, 0xffffffff },
9157 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9158 0x00000000, 0x000000ff },
9159 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9160 0x00000000, 0xffffffff },
9161 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9162 0x00000000, 0xffffffff },
9163 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9164 0x00000000, 0xffffffff },
9165 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9166 0x00000000, 0x000000ff },
9167 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9168 0x00000000, 0xffffffff },
9169 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9170 0x00000000, 0x000000ff },
9171 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9172 0x00000000, 0xffffffff },
9173 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9174 0x00000000, 0xffffffff },
9175 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9176 0x00000000, 0xffffffff },
9177 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9178 0x00000000, 0xffffffff },
9179 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9180 0x00000000, 0xffffffff },
9181 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9182 0xffffffff, 0x00000000 },
9183 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9184 0xffffffff, 0x00000000 },
9186 /* Buffer Manager Control Registers. */
9187 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9188 0x00000000, 0x007fff80 },
9189 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9190 0x00000000, 0x007fffff },
9191 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9192 0x00000000, 0x0000003f },
9193 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9194 0x00000000, 0x000001ff },
9195 { BUFMGR_MB_HIGH_WATER, 0x0000,
9196 0x00000000, 0x000001ff },
9197 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9198 0xffffffff, 0x00000000 },
9199 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9200 0xffffffff, 0x00000000 },
9202 /* Mailbox Registers */
9203 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9204 0x00000000, 0x000001ff },
9205 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9206 0x00000000, 0x000001ff },
9207 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9208 0x00000000, 0x000007ff },
9209 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9210 0x00000000, 0x000001ff },
9212 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9215 is_5705 = is_5750 = 0;
9216 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9218 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9222 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9223 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9226 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9229 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9230 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9233 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9236 offset = (u32) reg_tbl[i].offset;
9237 read_mask = reg_tbl[i].read_mask;
9238 write_mask = reg_tbl[i].write_mask;
9240 /* Save the original register content */
9241 save_val = tr32(offset);
9243 /* Determine the read-only value. */
9244 read_val = save_val & read_mask;
9246 /* Write zero to the register, then make sure the read-only bits
9247 * are not changed and the read/write bits are all zeros.
9253 /* Test the read-only and read/write bits. */
9254 if (((val & read_mask) != read_val) || (val & write_mask))
9257 /* Write ones to all the bits defined by RdMask and WrMask, then
9258 * make sure the read-only bits are not changed and the
9259 * read/write bits are all ones.
9261 tw32(offset, read_mask | write_mask);
9265 /* Test the read-only bits. */
9266 if ((val & read_mask) != read_val)
9269 /* Test the read/write bits. */
9270 if ((val & write_mask) != write_mask)
9273 tw32(offset, save_val);
9279 if (netif_msg_hw(tp))
9280 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9282 tw32(offset, save_val);
9286 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9288 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9292 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9293 for (j = 0; j < len; j += 4) {
9296 tg3_write_mem(tp, offset + j, test_pattern[i]);
9297 tg3_read_mem(tp, offset + j, &val);
9298 if (val != test_pattern[i])
9305 static int tg3_test_memory(struct tg3 *tp)
9307 static struct mem_entry {
9310 } mem_tbl_570x[] = {
9311 { 0x00000000, 0x00b50},
9312 { 0x00002000, 0x1c000},
9313 { 0xffffffff, 0x00000}
9314 }, mem_tbl_5705[] = {
9315 { 0x00000100, 0x0000c},
9316 { 0x00000200, 0x00008},
9317 { 0x00004000, 0x00800},
9318 { 0x00006000, 0x01000},
9319 { 0x00008000, 0x02000},
9320 { 0x00010000, 0x0e000},
9321 { 0xffffffff, 0x00000}
9322 }, mem_tbl_5755[] = {
9323 { 0x00000200, 0x00008},
9324 { 0x00004000, 0x00800},
9325 { 0x00006000, 0x00800},
9326 { 0x00008000, 0x02000},
9327 { 0x00010000, 0x0c000},
9328 { 0xffffffff, 0x00000}
9329 }, mem_tbl_5906[] = {
9330 { 0x00000200, 0x00008},
9331 { 0x00004000, 0x00400},
9332 { 0x00006000, 0x00400},
9333 { 0x00008000, 0x01000},
9334 { 0x00010000, 0x01000},
9335 { 0xffffffff, 0x00000}
9337 struct mem_entry *mem_tbl;
9341 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9342 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9343 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9344 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9345 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9346 mem_tbl = mem_tbl_5755;
9347 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9348 mem_tbl = mem_tbl_5906;
9350 mem_tbl = mem_tbl_5705;
9352 mem_tbl = mem_tbl_570x;
9354 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9355 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9356 mem_tbl[i].len)) != 0)
9363 #define TG3_MAC_LOOPBACK 0
9364 #define TG3_PHY_LOOPBACK 1
9366 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9368 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9370 struct sk_buff *skb, *rx_skb;
9373 int num_pkts, tx_len, rx_len, i, err;
9374 struct tg3_rx_buffer_desc *desc;
9376 if (loopback_mode == TG3_MAC_LOOPBACK) {
9377 /* HW errata - mac loopback fails in some cases on 5780.
9378 * Normal traffic and PHY loopback are not affected by
9381 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9384 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9385 MAC_MODE_PORT_INT_LPBACK;
9386 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9387 mac_mode |= MAC_MODE_LINK_POLARITY;
9388 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9389 mac_mode |= MAC_MODE_PORT_MODE_MII;
9391 mac_mode |= MAC_MODE_PORT_MODE_GMII;
9392 tw32(MAC_MODE, mac_mode);
9393 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
9396 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9399 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9402 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9403 phytest | MII_TG3_EPHY_SHADOW_EN);
9404 if (!tg3_readphy(tp, 0x1b, &phy))
9405 tg3_writephy(tp, 0x1b, phy & ~0x20);
9406 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9408 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9410 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
9412 tg3_phy_toggle_automdix(tp, 0);
9414 tg3_writephy(tp, MII_BMCR, val);
9417 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9418 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9419 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
9420 mac_mode |= MAC_MODE_PORT_MODE_MII;
9422 mac_mode |= MAC_MODE_PORT_MODE_GMII;
9424 /* reset to prevent losing 1st rx packet intermittently */
9425 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9426 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9428 tw32_f(MAC_RX_MODE, tp->rx_mode);
9430 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9431 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9432 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9433 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9434 mac_mode |= MAC_MODE_LINK_POLARITY;
9435 tg3_writephy(tp, MII_TG3_EXT_CTRL,
9436 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
9438 tw32(MAC_MODE, mac_mode);
9446 skb = netdev_alloc_skb(tp->dev, tx_len);
9450 tx_data = skb_put(skb, tx_len);
9451 memcpy(tx_data, tp->dev->dev_addr, 6);
9452 memset(tx_data + 6, 0x0, 8);
9454 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
9456 for (i = 14; i < tx_len; i++)
9457 tx_data[i] = (u8) (i & 0xff);
9459 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
9461 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9466 rx_start_idx = tp->hw_status->idx[0].rx_producer;
9470 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
9475 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
9477 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
9481 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
9482 for (i = 0; i < 25; i++) {
9483 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9488 tx_idx = tp->hw_status->idx[0].tx_consumer;
9489 rx_idx = tp->hw_status->idx[0].rx_producer;
9490 if ((tx_idx == tp->tx_prod) &&
9491 (rx_idx == (rx_start_idx + num_pkts)))
9495 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
9498 if (tx_idx != tp->tx_prod)
9501 if (rx_idx != rx_start_idx + num_pkts)
9504 desc = &tp->rx_rcb[rx_start_idx];
9505 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
9506 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
9507 if (opaque_key != RXD_OPAQUE_RING_STD)
9510 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
9511 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
9514 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
9515 if (rx_len != tx_len)
9518 rx_skb = tp->rx_std_buffers[desc_idx].skb;
9520 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
9521 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
9523 for (i = 14; i < tx_len; i++) {
9524 if (*(rx_skb->data + i) != (u8) (i & 0xff))
9529 /* tg3_free_rings will unmap and free the rx_skb */
9534 #define TG3_MAC_LOOPBACK_FAILED 1
9535 #define TG3_PHY_LOOPBACK_FAILED 2
9536 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
9537 TG3_PHY_LOOPBACK_FAILED)
9539 static int tg3_test_loopback(struct tg3 *tp)
9544 if (!netif_running(tp->dev))
9545 return TG3_LOOPBACK_FAILED;
9547 err = tg3_reset_hw(tp, 1);
9549 return TG3_LOOPBACK_FAILED;
9551 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9552 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
9556 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
9558 /* Wait for up to 40 microseconds to acquire lock. */
9559 for (i = 0; i < 4; i++) {
9560 status = tr32(TG3_CPMU_MUTEX_GNT);
9561 if (status == CPMU_MUTEX_GNT_DRIVER)
9566 if (status != CPMU_MUTEX_GNT_DRIVER)
9567 return TG3_LOOPBACK_FAILED;
9569 /* Turn off link-based power management. */
9570 cpmuctrl = tr32(TG3_CPMU_CTRL);
9571 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9572 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX)
9574 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
9575 CPMU_CTRL_LINK_AWARE_MODE));
9578 cpmuctrl & ~CPMU_CTRL_LINK_AWARE_MODE);
9581 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9582 err |= TG3_MAC_LOOPBACK_FAILED;
9584 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9585 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
9586 tw32(TG3_CPMU_CTRL, cpmuctrl);
9588 /* Release the mutex */
9589 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
9592 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9593 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9594 err |= TG3_PHY_LOOPBACK_FAILED;
9600 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9603 struct tg3 *tp = netdev_priv(dev);
9605 if (tp->link_config.phy_is_low_power)
9606 tg3_set_power_state(tp, PCI_D0);
9608 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
9610 if (tg3_test_nvram(tp) != 0) {
9611 etest->flags |= ETH_TEST_FL_FAILED;
9614 if (tg3_test_link(tp) != 0) {
9615 etest->flags |= ETH_TEST_FL_FAILED;
9618 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9619 int err, irq_sync = 0;
9621 if (netif_running(dev)) {
9626 tg3_full_lock(tp, irq_sync);
9628 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
9629 err = tg3_nvram_lock(tp);
9630 tg3_halt_cpu(tp, RX_CPU_BASE);
9631 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9632 tg3_halt_cpu(tp, TX_CPU_BASE);
9634 tg3_nvram_unlock(tp);
9636 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
9639 if (tg3_test_registers(tp) != 0) {
9640 etest->flags |= ETH_TEST_FL_FAILED;
9643 if (tg3_test_memory(tp) != 0) {
9644 etest->flags |= ETH_TEST_FL_FAILED;
9647 if ((data[4] = tg3_test_loopback(tp)) != 0)
9648 etest->flags |= ETH_TEST_FL_FAILED;
9650 tg3_full_unlock(tp);
9652 if (tg3_test_interrupt(tp) != 0) {
9653 etest->flags |= ETH_TEST_FL_FAILED;
9657 tg3_full_lock(tp, 0);
9659 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9660 if (netif_running(dev)) {
9661 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9662 if (!tg3_restart_hw(tp, 1))
9663 tg3_netif_start(tp);
9666 tg3_full_unlock(tp);
9668 if (tp->link_config.phy_is_low_power)
9669 tg3_set_power_state(tp, PCI_D3hot);
9673 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9675 struct mii_ioctl_data *data = if_mii(ifr);
9676 struct tg3 *tp = netdev_priv(dev);
9681 data->phy_id = PHY_ADDR;
9687 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9688 break; /* We have no PHY */
9690 if (tp->link_config.phy_is_low_power)
9693 spin_lock_bh(&tp->lock);
9694 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9695 spin_unlock_bh(&tp->lock);
9697 data->val_out = mii_regval;
9703 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9704 break; /* We have no PHY */
9706 if (!capable(CAP_NET_ADMIN))
9709 if (tp->link_config.phy_is_low_power)
9712 spin_lock_bh(&tp->lock);
9713 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9714 spin_unlock_bh(&tp->lock);
9725 #if TG3_VLAN_TAG_USED
9726 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9728 struct tg3 *tp = netdev_priv(dev);
9730 if (netif_running(dev))
9733 tg3_full_lock(tp, 0);
9737 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9738 __tg3_set_rx_mode(dev);
9740 if (netif_running(dev))
9741 tg3_netif_start(tp);
9743 tg3_full_unlock(tp);
9747 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9749 struct tg3 *tp = netdev_priv(dev);
9751 memcpy(ec, &tp->coal, sizeof(*ec));
9755 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9757 struct tg3 *tp = netdev_priv(dev);
9758 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9759 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9761 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9762 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9763 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9764 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9765 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9768 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9769 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9770 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9771 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9772 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9773 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9774 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9775 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9776 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9777 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9780 /* No rx interrupts will be generated if both are zero */
9781 if ((ec->rx_coalesce_usecs == 0) &&
9782 (ec->rx_max_coalesced_frames == 0))
9785 /* No tx interrupts will be generated if both are zero */
9786 if ((ec->tx_coalesce_usecs == 0) &&
9787 (ec->tx_max_coalesced_frames == 0))
9790 /* Only copy relevant parameters, ignore all others. */
9791 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9792 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9793 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9794 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9795 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9796 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9797 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9798 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9799 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9801 if (netif_running(dev)) {
9802 tg3_full_lock(tp, 0);
9803 __tg3_set_coalesce(tp, &tp->coal);
9804 tg3_full_unlock(tp);
9809 static const struct ethtool_ops tg3_ethtool_ops = {
9810 .get_settings = tg3_get_settings,
9811 .set_settings = tg3_set_settings,
9812 .get_drvinfo = tg3_get_drvinfo,
9813 .get_regs_len = tg3_get_regs_len,
9814 .get_regs = tg3_get_regs,
9815 .get_wol = tg3_get_wol,
9816 .set_wol = tg3_set_wol,
9817 .get_msglevel = tg3_get_msglevel,
9818 .set_msglevel = tg3_set_msglevel,
9819 .nway_reset = tg3_nway_reset,
9820 .get_link = ethtool_op_get_link,
9821 .get_eeprom_len = tg3_get_eeprom_len,
9822 .get_eeprom = tg3_get_eeprom,
9823 .set_eeprom = tg3_set_eeprom,
9824 .get_ringparam = tg3_get_ringparam,
9825 .set_ringparam = tg3_set_ringparam,
9826 .get_pauseparam = tg3_get_pauseparam,
9827 .set_pauseparam = tg3_set_pauseparam,
9828 .get_rx_csum = tg3_get_rx_csum,
9829 .set_rx_csum = tg3_set_rx_csum,
9830 .set_tx_csum = tg3_set_tx_csum,
9831 .set_sg = ethtool_op_set_sg,
9832 .set_tso = tg3_set_tso,
9833 .self_test = tg3_self_test,
9834 .get_strings = tg3_get_strings,
9835 .phys_id = tg3_phys_id,
9836 .get_ethtool_stats = tg3_get_ethtool_stats,
9837 .get_coalesce = tg3_get_coalesce,
9838 .set_coalesce = tg3_set_coalesce,
9839 .get_sset_count = tg3_get_sset_count,
9842 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9844 u32 cursize, val, magic;
9846 tp->nvram_size = EEPROM_CHIP_SIZE;
9848 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9851 if ((magic != TG3_EEPROM_MAGIC) &&
9852 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9853 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9857 * Size the chip by reading offsets at increasing powers of two.
9858 * When we encounter our validation signature, we know the addressing
9859 * has wrapped around, and thus have our chip size.
9863 while (cursize < tp->nvram_size) {
9864 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9873 tp->nvram_size = cursize;
9876 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9880 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9883 /* Selfboot format */
9884 if (val != TG3_EEPROM_MAGIC) {
9885 tg3_get_eeprom_size(tp);
9889 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9891 tp->nvram_size = (val >> 16) * 1024;
9895 tp->nvram_size = 0x80000;
9898 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9902 nvcfg1 = tr32(NVRAM_CFG1);
9903 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9904 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9907 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9908 tw32(NVRAM_CFG1, nvcfg1);
9911 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9912 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9913 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9914 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9915 tp->nvram_jedecnum = JEDEC_ATMEL;
9916 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9917 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9919 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9920 tp->nvram_jedecnum = JEDEC_ATMEL;
9921 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9923 case FLASH_VENDOR_ATMEL_EEPROM:
9924 tp->nvram_jedecnum = JEDEC_ATMEL;
9925 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9926 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9928 case FLASH_VENDOR_ST:
9929 tp->nvram_jedecnum = JEDEC_ST;
9930 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9931 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9933 case FLASH_VENDOR_SAIFUN:
9934 tp->nvram_jedecnum = JEDEC_SAIFUN;
9935 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9937 case FLASH_VENDOR_SST_SMALL:
9938 case FLASH_VENDOR_SST_LARGE:
9939 tp->nvram_jedecnum = JEDEC_SST;
9940 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9945 tp->nvram_jedecnum = JEDEC_ATMEL;
9946 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9947 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9951 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9955 nvcfg1 = tr32(NVRAM_CFG1);
9957 /* NVRAM protection for TPM */
9958 if (nvcfg1 & (1 << 27))
9959 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9961 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9962 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9963 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9964 tp->nvram_jedecnum = JEDEC_ATMEL;
9965 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9967 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9968 tp->nvram_jedecnum = JEDEC_ATMEL;
9969 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9970 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9972 case FLASH_5752VENDOR_ST_M45PE10:
9973 case FLASH_5752VENDOR_ST_M45PE20:
9974 case FLASH_5752VENDOR_ST_M45PE40:
9975 tp->nvram_jedecnum = JEDEC_ST;
9976 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9977 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9981 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9982 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9983 case FLASH_5752PAGE_SIZE_256:
9984 tp->nvram_pagesize = 256;
9986 case FLASH_5752PAGE_SIZE_512:
9987 tp->nvram_pagesize = 512;
9989 case FLASH_5752PAGE_SIZE_1K:
9990 tp->nvram_pagesize = 1024;
9992 case FLASH_5752PAGE_SIZE_2K:
9993 tp->nvram_pagesize = 2048;
9995 case FLASH_5752PAGE_SIZE_4K:
9996 tp->nvram_pagesize = 4096;
9998 case FLASH_5752PAGE_SIZE_264:
9999 tp->nvram_pagesize = 264;
10004 /* For eeprom, set pagesize to maximum eeprom size */
10005 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10007 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10008 tw32(NVRAM_CFG1, nvcfg1);
10012 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10014 u32 nvcfg1, protect = 0;
10016 nvcfg1 = tr32(NVRAM_CFG1);
10018 /* NVRAM protection for TPM */
10019 if (nvcfg1 & (1 << 27)) {
10020 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10024 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10026 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10027 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10028 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10029 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10030 tp->nvram_jedecnum = JEDEC_ATMEL;
10031 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10032 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10033 tp->nvram_pagesize = 264;
10034 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10035 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10036 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
10037 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10038 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
10040 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
10042 case FLASH_5752VENDOR_ST_M45PE10:
10043 case FLASH_5752VENDOR_ST_M45PE20:
10044 case FLASH_5752VENDOR_ST_M45PE40:
10045 tp->nvram_jedecnum = JEDEC_ST;
10046 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10047 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10048 tp->nvram_pagesize = 256;
10049 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10050 tp->nvram_size = (protect ? 0x10000 : 0x20000);
10051 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10052 tp->nvram_size = (protect ? 0x10000 : 0x40000);
10054 tp->nvram_size = (protect ? 0x20000 : 0x80000);
10059 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10063 nvcfg1 = tr32(NVRAM_CFG1);
10065 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10066 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10067 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10068 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10069 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10070 tp->nvram_jedecnum = JEDEC_ATMEL;
10071 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10072 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10074 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10075 tw32(NVRAM_CFG1, nvcfg1);
10077 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10078 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10079 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10080 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10081 tp->nvram_jedecnum = JEDEC_ATMEL;
10082 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10083 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10084 tp->nvram_pagesize = 264;
10086 case FLASH_5752VENDOR_ST_M45PE10:
10087 case FLASH_5752VENDOR_ST_M45PE20:
10088 case FLASH_5752VENDOR_ST_M45PE40:
10089 tp->nvram_jedecnum = JEDEC_ST;
10090 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10091 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10092 tp->nvram_pagesize = 256;
10097 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10099 u32 nvcfg1, protect = 0;
10101 nvcfg1 = tr32(NVRAM_CFG1);
10103 /* NVRAM protection for TPM */
10104 if (nvcfg1 & (1 << 27)) {
10105 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10109 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10111 case FLASH_5761VENDOR_ATMEL_ADB021D:
10112 case FLASH_5761VENDOR_ATMEL_ADB041D:
10113 case FLASH_5761VENDOR_ATMEL_ADB081D:
10114 case FLASH_5761VENDOR_ATMEL_ADB161D:
10115 case FLASH_5761VENDOR_ATMEL_MDB021D:
10116 case FLASH_5761VENDOR_ATMEL_MDB041D:
10117 case FLASH_5761VENDOR_ATMEL_MDB081D:
10118 case FLASH_5761VENDOR_ATMEL_MDB161D:
10119 tp->nvram_jedecnum = JEDEC_ATMEL;
10120 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10121 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10122 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10123 tp->nvram_pagesize = 256;
10125 case FLASH_5761VENDOR_ST_A_M45PE20:
10126 case FLASH_5761VENDOR_ST_A_M45PE40:
10127 case FLASH_5761VENDOR_ST_A_M45PE80:
10128 case FLASH_5761VENDOR_ST_A_M45PE16:
10129 case FLASH_5761VENDOR_ST_M_M45PE20:
10130 case FLASH_5761VENDOR_ST_M_M45PE40:
10131 case FLASH_5761VENDOR_ST_M_M45PE80:
10132 case FLASH_5761VENDOR_ST_M_M45PE16:
10133 tp->nvram_jedecnum = JEDEC_ST;
10134 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10135 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10136 tp->nvram_pagesize = 256;
10141 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10144 case FLASH_5761VENDOR_ATMEL_ADB161D:
10145 case FLASH_5761VENDOR_ATMEL_MDB161D:
10146 case FLASH_5761VENDOR_ST_A_M45PE16:
10147 case FLASH_5761VENDOR_ST_M_M45PE16:
10148 tp->nvram_size = 0x100000;
10150 case FLASH_5761VENDOR_ATMEL_ADB081D:
10151 case FLASH_5761VENDOR_ATMEL_MDB081D:
10152 case FLASH_5761VENDOR_ST_A_M45PE80:
10153 case FLASH_5761VENDOR_ST_M_M45PE80:
10154 tp->nvram_size = 0x80000;
10156 case FLASH_5761VENDOR_ATMEL_ADB041D:
10157 case FLASH_5761VENDOR_ATMEL_MDB041D:
10158 case FLASH_5761VENDOR_ST_A_M45PE40:
10159 case FLASH_5761VENDOR_ST_M_M45PE40:
10160 tp->nvram_size = 0x40000;
10162 case FLASH_5761VENDOR_ATMEL_ADB021D:
10163 case FLASH_5761VENDOR_ATMEL_MDB021D:
10164 case FLASH_5761VENDOR_ST_A_M45PE20:
10165 case FLASH_5761VENDOR_ST_M_M45PE20:
10166 tp->nvram_size = 0x20000;
10172 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10174 tp->nvram_jedecnum = JEDEC_ATMEL;
10175 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10176 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10179 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10180 static void __devinit tg3_nvram_init(struct tg3 *tp)
10182 tw32_f(GRC_EEPROM_ADDR,
10183 (EEPROM_ADDR_FSM_RESET |
10184 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10185 EEPROM_ADDR_CLKPERD_SHIFT)));
10189 /* Enable seeprom accesses. */
10190 tw32_f(GRC_LOCAL_CTRL,
10191 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10194 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10195 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10196 tp->tg3_flags |= TG3_FLAG_NVRAM;
10198 if (tg3_nvram_lock(tp)) {
10199 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10200 "tg3_nvram_init failed.\n", tp->dev->name);
10203 tg3_enable_nvram_access(tp);
10205 tp->nvram_size = 0;
10207 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10208 tg3_get_5752_nvram_info(tp);
10209 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10210 tg3_get_5755_nvram_info(tp);
10211 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10212 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10213 tg3_get_5787_nvram_info(tp);
10214 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10215 tg3_get_5761_nvram_info(tp);
10216 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10217 tg3_get_5906_nvram_info(tp);
10219 tg3_get_nvram_info(tp);
10221 if (tp->nvram_size == 0)
10222 tg3_get_nvram_size(tp);
10224 tg3_disable_nvram_access(tp);
10225 tg3_nvram_unlock(tp);
10228 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10230 tg3_get_eeprom_size(tp);
10234 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10235 u32 offset, u32 *val)
10240 if (offset > EEPROM_ADDR_ADDR_MASK ||
10244 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10245 EEPROM_ADDR_DEVID_MASK |
10247 tw32(GRC_EEPROM_ADDR,
10249 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10250 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10251 EEPROM_ADDR_ADDR_MASK) |
10252 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10254 for (i = 0; i < 1000; i++) {
10255 tmp = tr32(GRC_EEPROM_ADDR);
10257 if (tmp & EEPROM_ADDR_COMPLETE)
10261 if (!(tmp & EEPROM_ADDR_COMPLETE))
10264 *val = tr32(GRC_EEPROM_DATA);
10268 #define NVRAM_CMD_TIMEOUT 10000
10270 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10274 tw32(NVRAM_CMD, nvram_cmd);
10275 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10277 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10282 if (i == NVRAM_CMD_TIMEOUT) {
10288 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10290 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10291 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10292 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10293 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10294 (tp->nvram_jedecnum == JEDEC_ATMEL))
10296 addr = ((addr / tp->nvram_pagesize) <<
10297 ATMEL_AT45DB0X1B_PAGE_POS) +
10298 (addr % tp->nvram_pagesize);
10303 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10305 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10306 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10307 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10308 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10309 (tp->nvram_jedecnum == JEDEC_ATMEL))
10311 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10312 tp->nvram_pagesize) +
10313 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10318 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10322 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10323 return tg3_nvram_read_using_eeprom(tp, offset, val);
10325 offset = tg3_nvram_phys_addr(tp, offset);
10327 if (offset > NVRAM_ADDR_MSK)
10330 ret = tg3_nvram_lock(tp);
10334 tg3_enable_nvram_access(tp);
10336 tw32(NVRAM_ADDR, offset);
10337 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10338 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10341 *val = swab32(tr32(NVRAM_RDDATA));
10343 tg3_disable_nvram_access(tp);
10345 tg3_nvram_unlock(tp);
10350 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10353 int res = tg3_nvram_read(tp, offset, &v);
10355 *val = cpu_to_le32(v);
10359 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10364 err = tg3_nvram_read(tp, offset, &tmp);
10365 *val = swab32(tmp);
10369 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10370 u32 offset, u32 len, u8 *buf)
10375 for (i = 0; i < len; i += 4) {
10381 memcpy(&data, buf + i, 4);
10383 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
10385 val = tr32(GRC_EEPROM_ADDR);
10386 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10388 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10390 tw32(GRC_EEPROM_ADDR, val |
10391 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10392 (addr & EEPROM_ADDR_ADDR_MASK) |
10393 EEPROM_ADDR_START |
10394 EEPROM_ADDR_WRITE);
10396 for (j = 0; j < 1000; j++) {
10397 val = tr32(GRC_EEPROM_ADDR);
10399 if (val & EEPROM_ADDR_COMPLETE)
10403 if (!(val & EEPROM_ADDR_COMPLETE)) {
10412 /* offset and length are dword aligned */
10413 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10417 u32 pagesize = tp->nvram_pagesize;
10418 u32 pagemask = pagesize - 1;
10422 tmp = kmalloc(pagesize, GFP_KERNEL);
10428 u32 phy_addr, page_off, size;
10430 phy_addr = offset & ~pagemask;
10432 for (j = 0; j < pagesize; j += 4) {
10433 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
10434 (__le32 *) (tmp + j))))
10440 page_off = offset & pagemask;
10447 memcpy(tmp + page_off, buf, size);
10449 offset = offset + (pagesize - page_off);
10451 tg3_enable_nvram_access(tp);
10454 * Before we can erase the flash page, we need
10455 * to issue a special "write enable" command.
10457 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10459 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10462 /* Erase the target page */
10463 tw32(NVRAM_ADDR, phy_addr);
10465 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10466 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10468 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10471 /* Issue another write enable to start the write. */
10472 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10474 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10477 for (j = 0; j < pagesize; j += 4) {
10480 data = *((__be32 *) (tmp + j));
10481 /* swab32(le32_to_cpu(data)), actually */
10482 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10484 tw32(NVRAM_ADDR, phy_addr + j);
10486 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10490 nvram_cmd |= NVRAM_CMD_FIRST;
10491 else if (j == (pagesize - 4))
10492 nvram_cmd |= NVRAM_CMD_LAST;
10494 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10501 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10502 tg3_nvram_exec_cmd(tp, nvram_cmd);
10509 /* offset and length are dword aligned */
10510 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10515 for (i = 0; i < len; i += 4, offset += 4) {
10516 u32 page_off, phy_addr, nvram_cmd;
10519 memcpy(&data, buf + i, 4);
10520 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10522 page_off = offset % tp->nvram_pagesize;
10524 phy_addr = tg3_nvram_phys_addr(tp, offset);
10526 tw32(NVRAM_ADDR, phy_addr);
10528 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10530 if ((page_off == 0) || (i == 0))
10531 nvram_cmd |= NVRAM_CMD_FIRST;
10532 if (page_off == (tp->nvram_pagesize - 4))
10533 nvram_cmd |= NVRAM_CMD_LAST;
10535 if (i == (len - 4))
10536 nvram_cmd |= NVRAM_CMD_LAST;
10538 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
10539 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10540 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
10541 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
10542 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
10543 (tp->nvram_jedecnum == JEDEC_ST) &&
10544 (nvram_cmd & NVRAM_CMD_FIRST)) {
10546 if ((ret = tg3_nvram_exec_cmd(tp,
10547 NVRAM_CMD_WREN | NVRAM_CMD_GO |
10552 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10553 /* We always do complete word writes to eeprom. */
10554 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10557 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10563 /* offset and length are dword aligned */
10564 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10568 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10569 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10570 ~GRC_LCLCTRL_GPIO_OUTPUT1);
10574 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10575 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10580 ret = tg3_nvram_lock(tp);
10584 tg3_enable_nvram_access(tp);
10585 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10586 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10587 tw32(NVRAM_WRITE1, 0x406);
10589 grc_mode = tr32(GRC_MODE);
10590 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10592 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10593 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10595 ret = tg3_nvram_write_block_buffered(tp, offset, len,
10599 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10603 grc_mode = tr32(GRC_MODE);
10604 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10606 tg3_disable_nvram_access(tp);
10607 tg3_nvram_unlock(tp);
10610 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10611 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10618 struct subsys_tbl_ent {
10619 u16 subsys_vendor, subsys_devid;
10623 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
10624 /* Broadcom boards. */
10625 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
10626 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
10627 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
10628 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
10629 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
10630 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
10631 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
10632 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
10633 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
10634 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
10635 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
10638 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
10639 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
10640 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
10641 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
10642 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
10645 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
10646 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
10647 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
10648 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
10650 /* Compaq boards. */
10651 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
10652 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
10653 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
10654 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
10655 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
10658 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
10661 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10665 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10666 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10667 tp->pdev->subsystem_vendor) &&
10668 (subsys_id_to_phy_id[i].subsys_devid ==
10669 tp->pdev->subsystem_device))
10670 return &subsys_id_to_phy_id[i];
10675 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10680 /* On some early chips the SRAM cannot be accessed in D3hot state,
10681 * so need make sure we're in D0.
10683 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10684 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10685 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10688 /* Make sure register accesses (indirect or otherwise)
10689 * will function correctly.
10691 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10692 tp->misc_host_ctrl);
10694 /* The memory arbiter has to be enabled in order for SRAM accesses
10695 * to succeed. Normally on powerup the tg3 chip firmware will make
10696 * sure it is enabled, but other entities such as system netboot
10697 * code might disable it.
10699 val = tr32(MEMARB_MODE);
10700 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10702 tp->phy_id = PHY_ID_INVALID;
10703 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10705 /* Assume an onboard device and WOL capable by default. */
10706 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10708 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10709 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10710 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10711 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10713 val = tr32(VCPU_CFGSHDW);
10714 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10715 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10716 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10717 (val & VCPU_CFGSHDW_WOL_MAGPKT))
10718 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10722 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10723 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10724 u32 nic_cfg, led_cfg;
10725 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10726 int eeprom_phy_serdes = 0;
10728 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10729 tp->nic_sram_data_cfg = nic_cfg;
10731 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10732 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10733 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10734 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10735 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10736 (ver > 0) && (ver < 0x100))
10737 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10739 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10740 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10741 eeprom_phy_serdes = 1;
10743 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10744 if (nic_phy_id != 0) {
10745 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10746 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10748 eeprom_phy_id = (id1 >> 16) << 10;
10749 eeprom_phy_id |= (id2 & 0xfc00) << 16;
10750 eeprom_phy_id |= (id2 & 0x03ff) << 0;
10754 tp->phy_id = eeprom_phy_id;
10755 if (eeprom_phy_serdes) {
10756 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10757 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10759 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10762 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10763 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10764 SHASTA_EXT_LED_MODE_MASK);
10766 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10770 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10771 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10774 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10775 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10778 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10779 tp->led_ctrl = LED_CTRL_MODE_MAC;
10781 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10782 * read on some older 5700/5701 bootcode.
10784 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10786 GET_ASIC_REV(tp->pci_chip_rev_id) ==
10788 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10792 case SHASTA_EXT_LED_SHARED:
10793 tp->led_ctrl = LED_CTRL_MODE_SHARED;
10794 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10795 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10796 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10797 LED_CTRL_MODE_PHY_2);
10800 case SHASTA_EXT_LED_MAC:
10801 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10804 case SHASTA_EXT_LED_COMBO:
10805 tp->led_ctrl = LED_CTRL_MODE_COMBO;
10806 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10807 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10808 LED_CTRL_MODE_PHY_2);
10813 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10814 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10815 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10816 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10818 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
10819 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10821 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10822 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10823 if ((tp->pdev->subsystem_vendor ==
10824 PCI_VENDOR_ID_ARIMA) &&
10825 (tp->pdev->subsystem_device == 0x205a ||
10826 tp->pdev->subsystem_device == 0x2063))
10827 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10829 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10830 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10833 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10834 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10835 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10836 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10838 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10839 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
10840 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10841 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10842 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10844 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10845 nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10846 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10848 if (cfg2 & (1 << 17))
10849 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10851 /* serdes signal pre-emphasis in register 0x590 set by */
10852 /* bootcode if bit 18 is set */
10853 if (cfg2 & (1 << 18))
10854 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10856 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10859 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10860 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10861 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10866 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
10871 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
10872 tw32(OTP_CTRL, cmd);
10874 /* Wait for up to 1 ms for command to execute. */
10875 for (i = 0; i < 100; i++) {
10876 val = tr32(OTP_STATUS);
10877 if (val & OTP_STATUS_CMD_DONE)
10882 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
10885 /* Read the gphy configuration from the OTP region of the chip. The gphy
10886 * configuration is a 32-bit value that straddles the alignment boundary.
10887 * We do two 32-bit reads and then shift and merge the results.
10889 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
10891 u32 bhalf_otp, thalf_otp;
10893 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
10895 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
10898 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
10900 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
10903 thalf_otp = tr32(OTP_READ_DATA);
10905 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
10907 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
10910 bhalf_otp = tr32(OTP_READ_DATA);
10912 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
10915 static int __devinit tg3_phy_probe(struct tg3 *tp)
10917 u32 hw_phy_id_1, hw_phy_id_2;
10918 u32 hw_phy_id, hw_phy_id_masked;
10921 /* Reading the PHY ID register can conflict with ASF
10922 * firwmare access to the PHY hardware.
10925 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10926 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
10927 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
10929 /* Now read the physical PHY_ID from the chip and verify
10930 * that it is sane. If it doesn't look good, we fall back
10931 * to either the hard-coded table based PHY_ID and failing
10932 * that the value found in the eeprom area.
10934 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
10935 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
10937 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
10938 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
10939 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
10941 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
10944 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
10945 tp->phy_id = hw_phy_id;
10946 if (hw_phy_id_masked == PHY_ID_BCM8002)
10947 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10949 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
10951 if (tp->phy_id != PHY_ID_INVALID) {
10952 /* Do nothing, phy ID already set up in
10953 * tg3_get_eeprom_hw_cfg().
10956 struct subsys_tbl_ent *p;
10958 /* No eeprom signature? Try the hardcoded
10959 * subsys device table.
10961 p = lookup_by_subsys(tp);
10965 tp->phy_id = p->phy_id;
10967 tp->phy_id == PHY_ID_BCM8002)
10968 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10972 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
10973 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
10974 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
10975 u32 bmsr, adv_reg, tg3_ctrl, mask;
10977 tg3_readphy(tp, MII_BMSR, &bmsr);
10978 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
10979 (bmsr & BMSR_LSTATUS))
10980 goto skip_phy_reset;
10982 err = tg3_phy_reset(tp);
10986 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
10987 ADVERTISE_100HALF | ADVERTISE_100FULL |
10988 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
10990 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
10991 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
10992 MII_TG3_CTRL_ADV_1000_FULL);
10993 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10994 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
10995 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
10996 MII_TG3_CTRL_ENABLE_AS_MASTER);
10999 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11000 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11001 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11002 if (!tg3_copper_is_advertising_all(tp, mask)) {
11003 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11005 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11006 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11008 tg3_writephy(tp, MII_BMCR,
11009 BMCR_ANENABLE | BMCR_ANRESTART);
11011 tg3_phy_set_wirespeed(tp);
11013 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11014 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11015 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11019 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11020 err = tg3_init_5401phy_dsp(tp);
11025 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11026 err = tg3_init_5401phy_dsp(tp);
11029 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11030 tp->link_config.advertising =
11031 (ADVERTISED_1000baseT_Half |
11032 ADVERTISED_1000baseT_Full |
11033 ADVERTISED_Autoneg |
11035 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11036 tp->link_config.advertising &=
11037 ~(ADVERTISED_1000baseT_Half |
11038 ADVERTISED_1000baseT_Full);
11043 static void __devinit tg3_read_partno(struct tg3 *tp)
11045 unsigned char vpd_data[256];
11049 if (tg3_nvram_read_swab(tp, 0x0, &magic))
11050 goto out_not_found;
11052 if (magic == TG3_EEPROM_MAGIC) {
11053 for (i = 0; i < 256; i += 4) {
11056 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11057 goto out_not_found;
11059 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11060 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11061 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11062 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11067 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11068 for (i = 0; i < 256; i += 4) {
11073 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11075 while (j++ < 100) {
11076 pci_read_config_word(tp->pdev, vpd_cap +
11077 PCI_VPD_ADDR, &tmp16);
11078 if (tmp16 & 0x8000)
11082 if (!(tmp16 & 0x8000))
11083 goto out_not_found;
11085 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11087 v = cpu_to_le32(tmp);
11088 memcpy(&vpd_data[i], &v, 4);
11092 /* Now parse and find the part number. */
11093 for (i = 0; i < 254; ) {
11094 unsigned char val = vpd_data[i];
11095 unsigned int block_end;
11097 if (val == 0x82 || val == 0x91) {
11100 (vpd_data[i + 2] << 8)));
11105 goto out_not_found;
11107 block_end = (i + 3 +
11109 (vpd_data[i + 2] << 8)));
11112 if (block_end > 256)
11113 goto out_not_found;
11115 while (i < (block_end - 2)) {
11116 if (vpd_data[i + 0] == 'P' &&
11117 vpd_data[i + 1] == 'N') {
11118 int partno_len = vpd_data[i + 2];
11121 if (partno_len > 24 || (partno_len + i) > 256)
11122 goto out_not_found;
11124 memcpy(tp->board_part_number,
11125 &vpd_data[i], partno_len);
11130 i += 3 + vpd_data[i + 2];
11133 /* Part number not found. */
11134 goto out_not_found;
11138 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11139 strcpy(tp->board_part_number, "BCM95906");
11141 strcpy(tp->board_part_number, "none");
11144 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11148 if (tg3_nvram_read_swab(tp, offset, &val) ||
11149 (val & 0xfc000000) != 0x0c000000 ||
11150 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11157 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11159 u32 val, offset, start;
11163 if (tg3_nvram_read_swab(tp, 0, &val))
11166 if (val != TG3_EEPROM_MAGIC)
11169 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11170 tg3_nvram_read_swab(tp, 0x4, &start))
11173 offset = tg3_nvram_logical_addr(tp, offset);
11175 if (!tg3_fw_img_is_valid(tp, offset) ||
11176 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
11179 offset = offset + ver_offset - start;
11180 for (i = 0; i < 16; i += 4) {
11182 if (tg3_nvram_read_le(tp, offset + i, &v))
11185 memcpy(tp->fw_ver + i, &v, 4);
11188 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11189 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
11192 for (offset = TG3_NVM_DIR_START;
11193 offset < TG3_NVM_DIR_END;
11194 offset += TG3_NVM_DIRENT_SIZE) {
11195 if (tg3_nvram_read_swab(tp, offset, &val))
11198 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11202 if (offset == TG3_NVM_DIR_END)
11205 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11206 start = 0x08000000;
11207 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11210 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11211 !tg3_fw_img_is_valid(tp, offset) ||
11212 tg3_nvram_read_swab(tp, offset + 8, &val))
11215 offset += val - start;
11217 bcnt = strlen(tp->fw_ver);
11219 tp->fw_ver[bcnt++] = ',';
11220 tp->fw_ver[bcnt++] = ' ';
11222 for (i = 0; i < 4; i++) {
11224 if (tg3_nvram_read_le(tp, offset, &v))
11227 offset += sizeof(v);
11229 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11230 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
11234 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11238 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11241 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11243 static int __devinit tg3_get_invariants(struct tg3 *tp)
11245 static struct pci_device_id write_reorder_chipsets[] = {
11246 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11247 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11248 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11249 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11250 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11251 PCI_DEVICE_ID_VIA_8385_0) },
11255 u32 cacheline_sz_reg;
11256 u32 pci_state_reg, grc_misc_cfg;
11261 /* Force memory write invalidate off. If we leave it on,
11262 * then on 5700_BX chips we have to enable a workaround.
11263 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11264 * to match the cacheline size. The Broadcom driver have this
11265 * workaround but turns MWI off all the times so never uses
11266 * it. This seems to suggest that the workaround is insufficient.
11268 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11269 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11270 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11272 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11273 * has the register indirect write enable bit set before
11274 * we try to access any of the MMIO registers. It is also
11275 * critical that the PCI-X hw workaround situation is decided
11276 * before that as well.
11278 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11281 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11282 MISC_HOST_CTRL_CHIPREV_SHIFT);
11283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11284 u32 prod_id_asic_rev;
11286 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11287 &prod_id_asic_rev);
11288 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11291 /* Wrong chip ID in 5752 A0. This code can be removed later
11292 * as A0 is not in production.
11294 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11295 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11297 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11298 * we need to disable memory and use config. cycles
11299 * only to access all registers. The 5702/03 chips
11300 * can mistakenly decode the special cycles from the
11301 * ICH chipsets as memory write cycles, causing corruption
11302 * of register and memory space. Only certain ICH bridges
11303 * will drive special cycles with non-zero data during the
11304 * address phase which can fall within the 5703's address
11305 * range. This is not an ICH bug as the PCI spec allows
11306 * non-zero address during special cycles. However, only
11307 * these ICH bridges are known to drive non-zero addresses
11308 * during special cycles.
11310 * Since special cycles do not cross PCI bridges, we only
11311 * enable this workaround if the 5703 is on the secondary
11312 * bus of these ICH bridges.
11314 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11315 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11316 static struct tg3_dev_id {
11320 } ich_chipsets[] = {
11321 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11323 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11325 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11327 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11331 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11332 struct pci_dev *bridge = NULL;
11334 while (pci_id->vendor != 0) {
11335 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11341 if (pci_id->rev != PCI_ANY_ID) {
11342 if (bridge->revision > pci_id->rev)
11345 if (bridge->subordinate &&
11346 (bridge->subordinate->number ==
11347 tp->pdev->bus->number)) {
11349 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11350 pci_dev_put(bridge);
11356 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11357 static struct tg3_dev_id {
11360 } bridge_chipsets[] = {
11361 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11362 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11365 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11366 struct pci_dev *bridge = NULL;
11368 while (pci_id->vendor != 0) {
11369 bridge = pci_get_device(pci_id->vendor,
11376 if (bridge->subordinate &&
11377 (bridge->subordinate->number <=
11378 tp->pdev->bus->number) &&
11379 (bridge->subordinate->subordinate >=
11380 tp->pdev->bus->number)) {
11381 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11382 pci_dev_put(bridge);
11388 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11389 * DMA addresses > 40-bit. This bridge may have other additional
11390 * 57xx devices behind it in some 4-port NIC designs for example.
11391 * Any tg3 device found behind the bridge will also need the 40-bit
11394 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11395 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11396 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11397 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11398 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11401 struct pci_dev *bridge = NULL;
11404 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11405 PCI_DEVICE_ID_SERVERWORKS_EPB,
11407 if (bridge && bridge->subordinate &&
11408 (bridge->subordinate->number <=
11409 tp->pdev->bus->number) &&
11410 (bridge->subordinate->subordinate >=
11411 tp->pdev->bus->number)) {
11412 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11413 pci_dev_put(bridge);
11419 /* Initialize misc host control in PCI block. */
11420 tp->misc_host_ctrl |= (misc_ctrl_reg &
11421 MISC_HOST_CTRL_CHIPREV);
11422 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11423 tp->misc_host_ctrl);
11425 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11426 &cacheline_sz_reg);
11428 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
11429 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
11430 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
11431 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
11433 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11434 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11435 tp->pdev_peer = tg3_find_peer(tp);
11437 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11438 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11439 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11440 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11442 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11443 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11444 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11445 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11447 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11448 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11449 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11451 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11452 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11453 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11454 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11455 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11456 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11457 tp->pdev_peer == tp->pdev))
11458 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11460 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11461 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11462 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11463 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11464 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11465 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11466 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11468 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11469 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11471 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11472 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11476 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11477 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11478 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11479 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11480 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11481 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11482 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11483 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11484 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11486 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11487 if (pcie_cap != 0) {
11488 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11490 pcie_set_readrq(tp->pdev, 4096);
11492 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11495 pci_read_config_word(tp->pdev,
11496 pcie_cap + PCI_EXP_LNKCTL,
11498 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11499 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11503 /* If we have an AMD 762 or VIA K8T800 chipset, write
11504 * reordering to the mailbox registers done by the host
11505 * controller can cause major troubles. We read back from
11506 * every mailbox register write to force the writes to be
11507 * posted to the chip in order.
11509 if (pci_dev_present(write_reorder_chipsets) &&
11510 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11511 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11513 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11514 tp->pci_lat_timer < 64) {
11515 tp->pci_lat_timer = 64;
11517 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
11518 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
11519 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
11520 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
11522 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11526 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11527 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11528 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11529 if (!tp->pcix_cap) {
11530 printk(KERN_ERR PFX "Cannot find PCI-X "
11531 "capability, aborting.\n");
11536 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11539 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11540 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11542 /* If this is a 5700 BX chipset, and we are in PCI-X
11543 * mode, enable register write workaround.
11545 * The workaround is to use indirect register accesses
11546 * for all chip writes not to mailbox registers.
11548 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11551 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11553 /* The chip can have it's power management PCI config
11554 * space registers clobbered due to this bug.
11555 * So explicitly force the chip into D0 here.
11557 pci_read_config_dword(tp->pdev,
11558 tp->pm_cap + PCI_PM_CTRL,
11560 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11561 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11562 pci_write_config_dword(tp->pdev,
11563 tp->pm_cap + PCI_PM_CTRL,
11566 /* Also, force SERR#/PERR# in PCI command. */
11567 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11568 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11569 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11573 /* 5700 BX chips need to have their TX producer index mailboxes
11574 * written twice to workaround a bug.
11576 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11577 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11579 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11580 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11581 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11582 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11584 /* Chip-specific fixup from Broadcom driver */
11585 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11586 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11587 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11588 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11591 /* Default fast path register access methods */
11592 tp->read32 = tg3_read32;
11593 tp->write32 = tg3_write32;
11594 tp->read32_mbox = tg3_read32;
11595 tp->write32_mbox = tg3_write32;
11596 tp->write32_tx_mbox = tg3_write32;
11597 tp->write32_rx_mbox = tg3_write32;
11599 /* Various workaround register access methods */
11600 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11601 tp->write32 = tg3_write_indirect_reg32;
11602 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11603 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11604 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11606 * Back to back register writes can cause problems on these
11607 * chips, the workaround is to read back all reg writes
11608 * except those to mailbox regs.
11610 * See tg3_write_indirect_reg32().
11612 tp->write32 = tg3_write_flush_reg32;
11616 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11617 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11618 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11619 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11620 tp->write32_rx_mbox = tg3_write_flush_reg32;
11623 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11624 tp->read32 = tg3_read_indirect_reg32;
11625 tp->write32 = tg3_write_indirect_reg32;
11626 tp->read32_mbox = tg3_read_indirect_mbox;
11627 tp->write32_mbox = tg3_write_indirect_mbox;
11628 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11629 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11634 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11635 pci_cmd &= ~PCI_COMMAND_MEMORY;
11636 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11638 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11639 tp->read32_mbox = tg3_read32_mbox_5906;
11640 tp->write32_mbox = tg3_write32_mbox_5906;
11641 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11642 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11645 if (tp->write32 == tg3_write_indirect_reg32 ||
11646 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11647 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11648 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11649 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11651 /* Get eeprom hw config before calling tg3_set_power_state().
11652 * In particular, the TG3_FLG2_IS_NIC flag must be
11653 * determined before calling tg3_set_power_state() so that
11654 * we know whether or not to switch out of Vaux power.
11655 * When the flag is set, it means that GPIO1 is used for eeprom
11656 * write protect and also implies that it is a LOM where GPIOs
11657 * are not used to switch power.
11659 tg3_get_eeprom_hw_cfg(tp);
11661 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11662 /* Allow reads and writes to the
11663 * APE register and memory space.
11665 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11666 PCISTATE_ALLOW_APE_SHMEM_WR;
11667 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11671 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11672 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11673 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11675 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
11676 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
11677 tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
11678 tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
11679 tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
11682 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11683 * GPIO1 driven high will bring 5700's external PHY out of reset.
11684 * It is also used as eeprom write protect on LOMs.
11686 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11687 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11688 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11689 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11690 GRC_LCLCTRL_GPIO_OUTPUT1);
11691 /* Unused GPIO3 must be driven as output on 5752 because there
11692 * are no pull-up resistors on unused GPIO pins.
11694 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11695 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11697 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11698 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11700 /* Force the chip into D0. */
11701 err = tg3_set_power_state(tp, PCI_D0);
11703 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11704 pci_name(tp->pdev));
11708 /* 5700 B0 chips do not support checksumming correctly due
11709 * to hardware bugs.
11711 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11712 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11714 /* Derive initial jumbo mode from MTU assigned in
11715 * ether_setup() via the alloc_etherdev() call
11717 if (tp->dev->mtu > ETH_DATA_LEN &&
11718 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11719 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11721 /* Determine WakeOnLan speed to use. */
11722 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11723 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11724 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11725 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11726 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11728 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11731 /* A few boards don't want Ethernet@WireSpeed phy feature */
11732 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11733 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11734 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11735 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11736 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11737 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11738 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11740 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11741 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11742 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11743 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11744 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11746 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11747 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11748 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11749 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11750 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11751 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11752 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11753 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11754 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11755 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11756 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11757 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11760 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11761 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
11762 tp->phy_otp = tg3_read_otp_phycfg(tp);
11763 if (tp->phy_otp == 0)
11764 tp->phy_otp = TG3_OTP_DEFAULT;
11767 tp->coalesce_mode = 0;
11768 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11769 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11770 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11772 /* Initialize MAC MI mode, polling disabled. */
11773 tw32_f(MAC_MI_MODE, tp->mi_mode);
11776 /* Initialize data/descriptor byte/word swapping. */
11777 val = tr32(GRC_MODE);
11778 val &= GRC_MODE_HOST_STACKUP;
11779 tw32(GRC_MODE, val | tp->grc_mode);
11781 tg3_switch_clocks(tp);
11783 /* Clear this out for sanity. */
11784 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11786 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11788 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11789 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11790 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11792 if (chiprevid == CHIPREV_ID_5701_A0 ||
11793 chiprevid == CHIPREV_ID_5701_B0 ||
11794 chiprevid == CHIPREV_ID_5701_B2 ||
11795 chiprevid == CHIPREV_ID_5701_B5) {
11796 void __iomem *sram_base;
11798 /* Write some dummy words into the SRAM status block
11799 * area, see if it reads back correctly. If the return
11800 * value is bad, force enable the PCIX workaround.
11802 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11804 writel(0x00000000, sram_base);
11805 writel(0x00000000, sram_base + 4);
11806 writel(0xffffffff, sram_base + 4);
11807 if (readl(sram_base) != 0x00000000)
11808 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11813 tg3_nvram_init(tp);
11815 grc_misc_cfg = tr32(GRC_MISC_CFG);
11816 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11818 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11819 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11820 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11821 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11823 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11824 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11825 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11826 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11827 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11828 HOSTCC_MODE_CLRTICK_TXBD);
11830 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11831 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11832 tp->misc_host_ctrl);
11835 /* these are limited to 10/100 only */
11836 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11837 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11838 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11839 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11840 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11841 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11842 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11843 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11844 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11845 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11846 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11847 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11848 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11850 err = tg3_phy_probe(tp);
11852 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11853 pci_name(tp->pdev), err);
11854 /* ... but do not return immediately ... */
11857 tg3_read_partno(tp);
11858 tg3_read_fw_ver(tp);
11860 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11861 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11863 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11864 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11866 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11869 /* 5700 {AX,BX} chips have a broken status block link
11870 * change bit implementation, so we must use the
11871 * status register in those cases.
11873 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11874 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11876 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11878 /* The led_ctrl is set during tg3_phy_probe, here we might
11879 * have to force the link status polling mechanism based
11880 * upon subsystem IDs.
11882 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11883 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11884 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11885 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11886 TG3_FLAG_USE_LINKCHG_REG);
11889 /* For all SERDES we poll the MAC status register. */
11890 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11891 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11893 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11895 /* All chips before 5787 can get confused if TX buffers
11896 * straddle the 4GB address boundary in some cases.
11898 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11899 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11900 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11901 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11902 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11903 tp->dev->hard_start_xmit = tg3_start_xmit;
11905 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11908 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11909 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11912 tp->rx_std_max_post = TG3_RX_RING_SIZE;
11914 /* Increment the rx prod index on the rx std ring by at most
11915 * 8 for these chips to workaround hw errata.
11917 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11918 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11919 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11920 tp->rx_std_max_post = 8;
11922 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11923 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11924 PCIE_PWR_MGMT_L1_THRESH_MSK;
11929 #ifdef CONFIG_SPARC
11930 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11932 struct net_device *dev = tp->dev;
11933 struct pci_dev *pdev = tp->pdev;
11934 struct device_node *dp = pci_device_to_OF_node(pdev);
11935 const unsigned char *addr;
11938 addr = of_get_property(dp, "local-mac-address", &len);
11939 if (addr && len == 6) {
11940 memcpy(dev->dev_addr, addr, 6);
11941 memcpy(dev->perm_addr, dev->dev_addr, 6);
11947 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11949 struct net_device *dev = tp->dev;
11951 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11952 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11957 static int __devinit tg3_get_device_address(struct tg3 *tp)
11959 struct net_device *dev = tp->dev;
11960 u32 hi, lo, mac_offset;
11963 #ifdef CONFIG_SPARC
11964 if (!tg3_get_macaddr_sparc(tp))
11969 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11970 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11971 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11973 if (tg3_nvram_lock(tp))
11974 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11976 tg3_nvram_unlock(tp);
11978 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11981 /* First try to get it from MAC address mailbox. */
11982 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11983 if ((hi >> 16) == 0x484b) {
11984 dev->dev_addr[0] = (hi >> 8) & 0xff;
11985 dev->dev_addr[1] = (hi >> 0) & 0xff;
11987 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11988 dev->dev_addr[2] = (lo >> 24) & 0xff;
11989 dev->dev_addr[3] = (lo >> 16) & 0xff;
11990 dev->dev_addr[4] = (lo >> 8) & 0xff;
11991 dev->dev_addr[5] = (lo >> 0) & 0xff;
11993 /* Some old bootcode may report a 0 MAC address in SRAM */
11994 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11997 /* Next, try NVRAM. */
11998 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11999 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12000 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12001 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12002 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12003 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12004 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12005 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12007 /* Finally just fetch it out of the MAC control regs. */
12009 hi = tr32(MAC_ADDR_0_HIGH);
12010 lo = tr32(MAC_ADDR_0_LOW);
12012 dev->dev_addr[5] = lo & 0xff;
12013 dev->dev_addr[4] = (lo >> 8) & 0xff;
12014 dev->dev_addr[3] = (lo >> 16) & 0xff;
12015 dev->dev_addr[2] = (lo >> 24) & 0xff;
12016 dev->dev_addr[1] = hi & 0xff;
12017 dev->dev_addr[0] = (hi >> 8) & 0xff;
12021 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12022 #ifdef CONFIG_SPARC
12023 if (!tg3_get_default_macaddr_sparc(tp))
12028 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12032 #define BOUNDARY_SINGLE_CACHELINE 1
12033 #define BOUNDARY_MULTI_CACHELINE 2
12035 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12037 int cacheline_size;
12041 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12043 cacheline_size = 1024;
12045 cacheline_size = (int) byte * 4;
12047 /* On 5703 and later chips, the boundary bits have no
12050 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12051 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12052 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12055 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12056 goal = BOUNDARY_MULTI_CACHELINE;
12058 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12059 goal = BOUNDARY_SINGLE_CACHELINE;
12068 /* PCI controllers on most RISC systems tend to disconnect
12069 * when a device tries to burst across a cache-line boundary.
12070 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12072 * Unfortunately, for PCI-E there are only limited
12073 * write-side controls for this, and thus for reads
12074 * we will still get the disconnects. We'll also waste
12075 * these PCI cycles for both read and write for chips
12076 * other than 5700 and 5701 which do not implement the
12079 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12080 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12081 switch (cacheline_size) {
12086 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12087 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12088 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12090 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12091 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12096 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12097 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12101 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12102 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12105 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12106 switch (cacheline_size) {
12110 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12111 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12112 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12118 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12119 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12123 switch (cacheline_size) {
12125 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12126 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12127 DMA_RWCTRL_WRITE_BNDRY_16);
12132 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12133 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12134 DMA_RWCTRL_WRITE_BNDRY_32);
12139 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12140 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12141 DMA_RWCTRL_WRITE_BNDRY_64);
12146 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12147 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12148 DMA_RWCTRL_WRITE_BNDRY_128);
12153 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12154 DMA_RWCTRL_WRITE_BNDRY_256);
12157 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12158 DMA_RWCTRL_WRITE_BNDRY_512);
12162 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12163 DMA_RWCTRL_WRITE_BNDRY_1024);
12172 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12174 struct tg3_internal_buffer_desc test_desc;
12175 u32 sram_dma_descs;
12178 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12180 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12181 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12182 tw32(RDMAC_STATUS, 0);
12183 tw32(WDMAC_STATUS, 0);
12185 tw32(BUFMGR_MODE, 0);
12186 tw32(FTQ_RESET, 0);
12188 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12189 test_desc.addr_lo = buf_dma & 0xffffffff;
12190 test_desc.nic_mbuf = 0x00002100;
12191 test_desc.len = size;
12194 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
12195 * the *second* time the tg3 driver was getting loaded after an
12198 * Broadcom tells me:
12199 * ...the DMA engine is connected to the GRC block and a DMA
12200 * reset may affect the GRC block in some unpredictable way...
12201 * The behavior of resets to individual blocks has not been tested.
12203 * Broadcom noted the GRC reset will also reset all sub-components.
12206 test_desc.cqid_sqid = (13 << 8) | 2;
12208 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12211 test_desc.cqid_sqid = (16 << 8) | 7;
12213 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12216 test_desc.flags = 0x00000005;
12218 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12221 val = *(((u32 *)&test_desc) + i);
12222 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12223 sram_dma_descs + (i * sizeof(u32)));
12224 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12226 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12229 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12231 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12235 for (i = 0; i < 40; i++) {
12239 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12241 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12242 if ((val & 0xffff) == sram_dma_descs) {
12253 #define TEST_BUFFER_SIZE 0x2000
12255 static int __devinit tg3_test_dma(struct tg3 *tp)
12257 dma_addr_t buf_dma;
12258 u32 *buf, saved_dma_rwctrl;
12261 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12267 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12268 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12270 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12272 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12273 /* DMA read watermark not used on PCIE */
12274 tp->dma_rwctrl |= 0x00180000;
12275 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12277 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12278 tp->dma_rwctrl |= 0x003f0000;
12280 tp->dma_rwctrl |= 0x003f000f;
12282 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12283 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12284 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12285 u32 read_water = 0x7;
12287 /* If the 5704 is behind the EPB bridge, we can
12288 * do the less restrictive ONE_DMA workaround for
12289 * better performance.
12291 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12292 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12293 tp->dma_rwctrl |= 0x8000;
12294 else if (ccval == 0x6 || ccval == 0x7)
12295 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12297 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12299 /* Set bit 23 to enable PCIX hw bug fix */
12301 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12302 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12304 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12305 /* 5780 always in PCIX mode */
12306 tp->dma_rwctrl |= 0x00144000;
12307 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12308 /* 5714 always in PCIX mode */
12309 tp->dma_rwctrl |= 0x00148000;
12311 tp->dma_rwctrl |= 0x001b000f;
12315 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12316 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12317 tp->dma_rwctrl &= 0xfffffff0;
12319 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12320 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12321 /* Remove this if it causes problems for some boards. */
12322 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12324 /* On 5700/5701 chips, we need to set this bit.
12325 * Otherwise the chip will issue cacheline transactions
12326 * to streamable DMA memory with not all the byte
12327 * enables turned on. This is an error on several
12328 * RISC PCI controllers, in particular sparc64.
12330 * On 5703/5704 chips, this bit has been reassigned
12331 * a different meaning. In particular, it is used
12332 * on those chips to enable a PCI-X workaround.
12334 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12337 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12340 /* Unneeded, already done by tg3_get_invariants. */
12341 tg3_switch_clocks(tp);
12345 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12346 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12349 /* It is best to perform DMA test with maximum write burst size
12350 * to expose the 5700/5701 write DMA bug.
12352 saved_dma_rwctrl = tp->dma_rwctrl;
12353 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12354 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12359 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12362 /* Send the buffer to the chip. */
12363 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12365 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12370 /* validate data reached card RAM correctly. */
12371 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12373 tg3_read_mem(tp, 0x2100 + (i*4), &val);
12374 if (le32_to_cpu(val) != p[i]) {
12375 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
12376 /* ret = -ENODEV here? */
12381 /* Now read it back. */
12382 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12384 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
12390 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12394 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12395 DMA_RWCTRL_WRITE_BNDRY_16) {
12396 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12397 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12398 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12401 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
12407 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12413 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12414 DMA_RWCTRL_WRITE_BNDRY_16) {
12415 static struct pci_device_id dma_wait_state_chipsets[] = {
12416 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12417 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12421 /* DMA test passed without adjusting DMA boundary,
12422 * now look for chipsets that are known to expose the
12423 * DMA bug without failing the test.
12425 if (pci_dev_present(dma_wait_state_chipsets)) {
12426 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12427 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12430 /* Safe to use the calculated DMA boundary. */
12431 tp->dma_rwctrl = saved_dma_rwctrl;
12433 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12437 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12442 static void __devinit tg3_init_link_config(struct tg3 *tp)
12444 tp->link_config.advertising =
12445 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12446 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12447 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12448 ADVERTISED_Autoneg | ADVERTISED_MII);
12449 tp->link_config.speed = SPEED_INVALID;
12450 tp->link_config.duplex = DUPLEX_INVALID;
12451 tp->link_config.autoneg = AUTONEG_ENABLE;
12452 tp->link_config.active_speed = SPEED_INVALID;
12453 tp->link_config.active_duplex = DUPLEX_INVALID;
12454 tp->link_config.phy_is_low_power = 0;
12455 tp->link_config.orig_speed = SPEED_INVALID;
12456 tp->link_config.orig_duplex = DUPLEX_INVALID;
12457 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12460 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12462 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12463 tp->bufmgr_config.mbuf_read_dma_low_water =
12464 DEFAULT_MB_RDMA_LOW_WATER_5705;
12465 tp->bufmgr_config.mbuf_mac_rx_low_water =
12466 DEFAULT_MB_MACRX_LOW_WATER_5705;
12467 tp->bufmgr_config.mbuf_high_water =
12468 DEFAULT_MB_HIGH_WATER_5705;
12469 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12470 tp->bufmgr_config.mbuf_mac_rx_low_water =
12471 DEFAULT_MB_MACRX_LOW_WATER_5906;
12472 tp->bufmgr_config.mbuf_high_water =
12473 DEFAULT_MB_HIGH_WATER_5906;
12476 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12477 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12478 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12479 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12480 tp->bufmgr_config.mbuf_high_water_jumbo =
12481 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12483 tp->bufmgr_config.mbuf_read_dma_low_water =
12484 DEFAULT_MB_RDMA_LOW_WATER;
12485 tp->bufmgr_config.mbuf_mac_rx_low_water =
12486 DEFAULT_MB_MACRX_LOW_WATER;
12487 tp->bufmgr_config.mbuf_high_water =
12488 DEFAULT_MB_HIGH_WATER;
12490 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12491 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12492 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12493 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12494 tp->bufmgr_config.mbuf_high_water_jumbo =
12495 DEFAULT_MB_HIGH_WATER_JUMBO;
12498 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12499 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12502 static char * __devinit tg3_phy_string(struct tg3 *tp)
12504 switch (tp->phy_id & PHY_ID_MASK) {
12505 case PHY_ID_BCM5400: return "5400";
12506 case PHY_ID_BCM5401: return "5401";
12507 case PHY_ID_BCM5411: return "5411";
12508 case PHY_ID_BCM5701: return "5701";
12509 case PHY_ID_BCM5703: return "5703";
12510 case PHY_ID_BCM5704: return "5704";
12511 case PHY_ID_BCM5705: return "5705";
12512 case PHY_ID_BCM5750: return "5750";
12513 case PHY_ID_BCM5752: return "5752";
12514 case PHY_ID_BCM5714: return "5714";
12515 case PHY_ID_BCM5780: return "5780";
12516 case PHY_ID_BCM5755: return "5755";
12517 case PHY_ID_BCM5787: return "5787";
12518 case PHY_ID_BCM5784: return "5784";
12519 case PHY_ID_BCM5756: return "5722/5756";
12520 case PHY_ID_BCM5906: return "5906";
12521 case PHY_ID_BCM5761: return "5761";
12522 case PHY_ID_BCM8002: return "8002/serdes";
12523 case 0: return "serdes";
12524 default: return "unknown";
12528 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12530 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12531 strcpy(str, "PCI Express");
12533 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12534 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12536 strcpy(str, "PCIX:");
12538 if ((clock_ctrl == 7) ||
12539 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12540 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12541 strcat(str, "133MHz");
12542 else if (clock_ctrl == 0)
12543 strcat(str, "33MHz");
12544 else if (clock_ctrl == 2)
12545 strcat(str, "50MHz");
12546 else if (clock_ctrl == 4)
12547 strcat(str, "66MHz");
12548 else if (clock_ctrl == 6)
12549 strcat(str, "100MHz");
12551 strcpy(str, "PCI:");
12552 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12553 strcat(str, "66MHz");
12555 strcat(str, "33MHz");
12557 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12558 strcat(str, ":32-bit");
12560 strcat(str, ":64-bit");
12564 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12566 struct pci_dev *peer;
12567 unsigned int func, devnr = tp->pdev->devfn & ~7;
12569 for (func = 0; func < 8; func++) {
12570 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12571 if (peer && peer != tp->pdev)
12575 /* 5704 can be configured in single-port mode, set peer to
12576 * tp->pdev in that case.
12584 * We don't need to keep the refcount elevated; there's no way
12585 * to remove one half of this device without removing the other
12592 static void __devinit tg3_init_coal(struct tg3 *tp)
12594 struct ethtool_coalesce *ec = &tp->coal;
12596 memset(ec, 0, sizeof(*ec));
12597 ec->cmd = ETHTOOL_GCOALESCE;
12598 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12599 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12600 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12601 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12602 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12603 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12604 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12605 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12606 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12608 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12609 HOSTCC_MODE_CLRTICK_TXBD)) {
12610 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12611 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12612 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12613 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12616 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12617 ec->rx_coalesce_usecs_irq = 0;
12618 ec->tx_coalesce_usecs_irq = 0;
12619 ec->stats_block_coalesce_usecs = 0;
12623 static int __devinit tg3_init_one(struct pci_dev *pdev,
12624 const struct pci_device_id *ent)
12626 static int tg3_version_printed = 0;
12627 resource_size_t tg3reg_base;
12628 unsigned long tg3reg_len;
12629 struct net_device *dev;
12633 u64 dma_mask, persist_dma_mask;
12634 DECLARE_MAC_BUF(mac);
12636 if (tg3_version_printed++ == 0)
12637 printk(KERN_INFO "%s", version);
12639 err = pci_enable_device(pdev);
12641 printk(KERN_ERR PFX "Cannot enable PCI device, "
12646 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12647 printk(KERN_ERR PFX "Cannot find proper PCI device "
12648 "base address, aborting.\n");
12650 goto err_out_disable_pdev;
12653 err = pci_request_regions(pdev, DRV_MODULE_NAME);
12655 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12657 goto err_out_disable_pdev;
12660 pci_set_master(pdev);
12662 /* Find power-management capability. */
12663 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12665 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12668 goto err_out_free_res;
12671 tg3reg_base = pci_resource_start(pdev, 0);
12672 tg3reg_len = pci_resource_len(pdev, 0);
12674 dev = alloc_etherdev(sizeof(*tp));
12676 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12678 goto err_out_free_res;
12681 SET_NETDEV_DEV(dev, &pdev->dev);
12683 #if TG3_VLAN_TAG_USED
12684 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12685 dev->vlan_rx_register = tg3_vlan_rx_register;
12688 tp = netdev_priv(dev);
12691 tp->pm_cap = pm_cap;
12692 tp->mac_mode = TG3_DEF_MAC_MODE;
12693 tp->rx_mode = TG3_DEF_RX_MODE;
12694 tp->tx_mode = TG3_DEF_TX_MODE;
12695 tp->mi_mode = MAC_MI_MODE_BASE;
12697 tp->msg_enable = tg3_debug;
12699 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12701 /* The word/byte swap controls here control register access byte
12702 * swapping. DMA data byte swapping is controlled in the GRC_MODE
12705 tp->misc_host_ctrl =
12706 MISC_HOST_CTRL_MASK_PCI_INT |
12707 MISC_HOST_CTRL_WORD_SWAP |
12708 MISC_HOST_CTRL_INDIR_ACCESS |
12709 MISC_HOST_CTRL_PCISTATE_RW;
12711 /* The NONFRM (non-frame) byte/word swap controls take effect
12712 * on descriptor entries, anything which isn't packet data.
12714 * The StrongARM chips on the board (one for tx, one for rx)
12715 * are running in big-endian mode.
12717 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12718 GRC_MODE_WSWAP_NONFRM_DATA);
12719 #ifdef __BIG_ENDIAN
12720 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12722 spin_lock_init(&tp->lock);
12723 spin_lock_init(&tp->indirect_lock);
12724 INIT_WORK(&tp->reset_task, tg3_reset_task);
12726 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
12728 printk(KERN_ERR PFX "Cannot map device registers, "
12731 goto err_out_free_dev;
12734 tg3_init_link_config(tp);
12736 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12737 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12738 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12740 dev->open = tg3_open;
12741 dev->stop = tg3_close;
12742 dev->get_stats = tg3_get_stats;
12743 dev->set_multicast_list = tg3_set_rx_mode;
12744 dev->set_mac_address = tg3_set_mac_addr;
12745 dev->do_ioctl = tg3_ioctl;
12746 dev->tx_timeout = tg3_tx_timeout;
12747 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
12748 dev->ethtool_ops = &tg3_ethtool_ops;
12749 dev->watchdog_timeo = TG3_TX_TIMEOUT;
12750 dev->change_mtu = tg3_change_mtu;
12751 dev->irq = pdev->irq;
12752 #ifdef CONFIG_NET_POLL_CONTROLLER
12753 dev->poll_controller = tg3_poll_controller;
12756 err = tg3_get_invariants(tp);
12758 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12760 goto err_out_iounmap;
12763 /* The EPB bridge inside 5714, 5715, and 5780 and any
12764 * device behind the EPB cannot support DMA addresses > 40-bit.
12765 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12766 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12767 * do DMA address check in tg3_start_xmit().
12769 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12770 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12771 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
12772 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12773 #ifdef CONFIG_HIGHMEM
12774 dma_mask = DMA_64BIT_MASK;
12777 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12779 /* Configure DMA attributes. */
12780 if (dma_mask > DMA_32BIT_MASK) {
12781 err = pci_set_dma_mask(pdev, dma_mask);
12783 dev->features |= NETIF_F_HIGHDMA;
12784 err = pci_set_consistent_dma_mask(pdev,
12787 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12788 "DMA for consistent allocations\n");
12789 goto err_out_iounmap;
12793 if (err || dma_mask == DMA_32BIT_MASK) {
12794 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12796 printk(KERN_ERR PFX "No usable DMA configuration, "
12798 goto err_out_iounmap;
12802 tg3_init_bufmgr_config(tp);
12804 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12805 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12807 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12808 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12809 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
12810 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12811 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12812 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12814 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
12817 /* TSO is on by default on chips that support hardware TSO.
12818 * Firmware TSO on older chips gives lower performance, so it
12819 * is off by default, but can be enabled using ethtool.
12821 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12822 dev->features |= NETIF_F_TSO;
12823 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12824 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
12825 dev->features |= NETIF_F_TSO6;
12826 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12827 dev->features |= NETIF_F_TSO_ECN;
12831 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12832 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12833 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12834 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12835 tp->rx_pending = 63;
12838 err = tg3_get_device_address(tp);
12840 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12842 goto err_out_iounmap;
12845 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12846 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12847 printk(KERN_ERR PFX "Cannot find proper PCI device "
12848 "base address for APE, aborting.\n");
12850 goto err_out_iounmap;
12853 tg3reg_base = pci_resource_start(pdev, 2);
12854 tg3reg_len = pci_resource_len(pdev, 2);
12856 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12857 if (!tp->aperegs) {
12858 printk(KERN_ERR PFX "Cannot map APE registers, "
12861 goto err_out_iounmap;
12864 tg3_ape_lock_init(tp);
12868 * Reset chip in case UNDI or EFI driver did not shutdown
12869 * DMA self test will enable WDMAC and we'll see (spurious)
12870 * pending DMA on the PCI bus at that point.
12872 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12873 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
12874 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
12875 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12878 err = tg3_test_dma(tp);
12880 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12881 goto err_out_apeunmap;
12884 /* Tigon3 can do ipv4 only... and some chips have buggy
12887 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
12888 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12889 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12890 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12891 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12892 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12893 dev->features |= NETIF_F_IPV6_CSUM;
12895 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12897 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12899 /* flow control autonegotiation is default behavior */
12900 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12901 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
12905 pci_set_drvdata(pdev, dev);
12907 err = register_netdev(dev);
12909 printk(KERN_ERR PFX "Cannot register net device, "
12911 goto err_out_apeunmap;
12914 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
12915 "(%s) %s Ethernet %s\n",
12917 tp->board_part_number,
12918 tp->pci_chip_rev_id,
12919 tg3_phy_string(tp),
12920 tg3_bus_string(tp, str),
12921 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12922 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12923 "10/100/1000Base-T")),
12924 print_mac(mac, dev->dev_addr));
12926 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
12927 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
12929 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12930 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12931 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12932 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
12933 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12934 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
12935 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12936 dev->name, tp->dma_rwctrl,
12937 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12938 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
12944 iounmap(tp->aperegs);
12945 tp->aperegs = NULL;
12958 pci_release_regions(pdev);
12960 err_out_disable_pdev:
12961 pci_disable_device(pdev);
12962 pci_set_drvdata(pdev, NULL);
12966 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12968 struct net_device *dev = pci_get_drvdata(pdev);
12971 struct tg3 *tp = netdev_priv(dev);
12973 flush_scheduled_work();
12974 unregister_netdev(dev);
12976 iounmap(tp->aperegs);
12977 tp->aperegs = NULL;
12984 pci_release_regions(pdev);
12985 pci_disable_device(pdev);
12986 pci_set_drvdata(pdev, NULL);
12990 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
12992 struct net_device *dev = pci_get_drvdata(pdev);
12993 struct tg3 *tp = netdev_priv(dev);
12996 /* PCI register 4 needs to be saved whether netif_running() or not.
12997 * MSI address and data need to be saved if using MSI and
13000 pci_save_state(pdev);
13002 if (!netif_running(dev))
13005 flush_scheduled_work();
13006 tg3_netif_stop(tp);
13008 del_timer_sync(&tp->timer);
13010 tg3_full_lock(tp, 1);
13011 tg3_disable_ints(tp);
13012 tg3_full_unlock(tp);
13014 netif_device_detach(dev);
13016 tg3_full_lock(tp, 0);
13017 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13018 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13019 tg3_full_unlock(tp);
13021 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
13023 tg3_full_lock(tp, 0);
13025 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13026 if (tg3_restart_hw(tp, 1))
13029 tp->timer.expires = jiffies + tp->timer_offset;
13030 add_timer(&tp->timer);
13032 netif_device_attach(dev);
13033 tg3_netif_start(tp);
13036 tg3_full_unlock(tp);
13042 static int tg3_resume(struct pci_dev *pdev)
13044 struct net_device *dev = pci_get_drvdata(pdev);
13045 struct tg3 *tp = netdev_priv(dev);
13048 pci_restore_state(tp->pdev);
13050 if (!netif_running(dev))
13053 err = tg3_set_power_state(tp, PCI_D0);
13057 netif_device_attach(dev);
13059 tg3_full_lock(tp, 0);
13061 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13062 err = tg3_restart_hw(tp, 1);
13066 tp->timer.expires = jiffies + tp->timer_offset;
13067 add_timer(&tp->timer);
13069 tg3_netif_start(tp);
13072 tg3_full_unlock(tp);
13077 static struct pci_driver tg3_driver = {
13078 .name = DRV_MODULE_NAME,
13079 .id_table = tg3_pci_tbl,
13080 .probe = tg3_init_one,
13081 .remove = __devexit_p(tg3_remove_one),
13082 .suspend = tg3_suspend,
13083 .resume = tg3_resume
13086 static int __init tg3_init(void)
13088 return pci_register_driver(&tg3_driver);
13091 static void __exit tg3_cleanup(void)
13093 pci_unregister_driver(&tg3_driver);
13096 module_init(tg3_init);
13097 module_exit(tg3_cleanup);